text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyBiopython(PythonPackage):
    """A distributed collaborative effort to develop Python libraries and
    applications which address the needs of current and future work in
    bioinformatics.
    """

    # Upstream project page and canonical release tarball location.
    homepage = "http://biopython.org/wiki/Main_Page"
    url = "http://biopython.org/DIST/biopython-1.65.tar.gz"

    # Known releases, identified by version string and tarball MD5 checksum.
    version('1.70', 'feff7a3e2777e43f9b13039b344e06ff')
    version('1.65', '143e7861ade85c0a8b5e2bbdd1da1f67')

    # NumPy is needed both at build time and at run time.
    depends_on('py-numpy', type=('build', 'run'))
|
krafczyk/spack
|
var/spack/repos/builtin/packages/py-biopython/package.py
|
Python
|
lgpl-2.1
| 1,753
|
[
"Biopython"
] |
c6265185d86834774ff93fa7c542780ff3298e7ff9ce17f54dd0467b9405c08a
|
"""
Provides rolling statistical moments and related descriptive
statistics implemented in Cython
"""
from __future__ import division
import warnings
import numpy as np
from pandas.core.dtypes.common import is_scalar
from pandas.core.api import DataFrame, Series
from pandas.util._decorators import Substitution, Appender
# Public names exported by this deprecated module; each is a thin wrapper
# dispatching to the .rolling()/.expanding()/.ewm() method API.
__all__ = ['rolling_count', 'rolling_max', 'rolling_min',
           'rolling_sum', 'rolling_mean', 'rolling_std', 'rolling_cov',
           'rolling_corr', 'rolling_var', 'rolling_skew', 'rolling_kurt',
           'rolling_quantile', 'rolling_median', 'rolling_apply',
           'rolling_window',
           'ewma', 'ewmvar', 'ewmstd', 'ewmvol', 'ewmcorr', 'ewmcov',
           'expanding_count', 'expanding_max', 'expanding_min',
           'expanding_sum', 'expanding_mean', 'expanding_std',
           'expanding_cov', 'expanding_corr', 'expanding_var',
           'expanding_skew', 'expanding_kurt', 'expanding_quantile',
           'expanding_median', 'expanding_apply']
# -----------------------------------------------------------------------------
# Docs
# The order of arguments for the _doc_template is:
# (header, args, kwargs, returns, notes)
# Skeleton docstring filled in per-function by the Substitution/Appender
# decorators below.
_doc_template = """
%s
Parameters
----------
%s%s
Returns
-------
%s
%s
"""
# Shared parameter docs for the rolling_* wrappers; the %s placeholder is
# the per-function default of 'how'.
_roll_kw = """window : int
Size of the moving window. This is the number of observations used for
calculating the statistic.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
center : boolean, default False
Set the labels at the center of the window.
how : string, default '%s'
Method for down- or re-sampling
"""
_roll_notes = r"""
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
# Shared parameter docs for the exponentially-weighted (ewm*) wrappers.
_ewm_kw = r"""com : float, optional
Specify decay in terms of center of mass,
:math:`\alpha = 1 / (1 + com),\text{ for } com \geq 0`
span : float, optional
Specify decay in terms of span,
:math:`\alpha = 2 / (span + 1),\text{ for } span \geq 1`
halflife : float, optional
Specify decay in terms of half-life,
:math:`\alpha = 1 - exp(log(0.5) / halflife),\text{ for } halflife > 0`
alpha : float, optional
Specify smoothing factor :math:`\alpha` directly,
:math:`0 < \alpha \leq 1`
.. versionadded:: 0.18.0
min_periods : int, default 0
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : None or string alias / date offset object, default=None
Frequency to conform to before computing statistic
adjust : boolean, default True
Divide by decaying adjustment factor in beginning periods to account for
imbalance in relative weightings (viewing EWMA as a moving average)
how : string, default 'mean'
Method for down- or re-sampling
ignore_na : boolean, default False
Ignore missing values when calculating weights;
specify True to reproduce pre-0.15.0 behavior
"""
_ewm_notes = r"""
Notes
-----
Exactly one of center of mass, span, half-life, and alpha must be provided.
Allowed values and relationship between the parameters are specified in the
parameter descriptions above; see the link at the end of this section for
a detailed explanation.
When adjust is True (default), weighted averages are calculated using weights
(1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1.
When adjust is False, weighted averages are calculated recursively as:
weighted_average[0] = arg[0];
weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i].
When ignore_na is False (default), weights are based on absolute positions.
For example, the weights of x and y used in calculating the final weighted
average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and
(1-alpha)**2 and alpha (if adjust is False).
When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based on
relative positions. For example, the weights of x and y used in calculating
the final weighted average of [x, None, y] are 1-alpha and 1 (if adjust is
True), and 1-alpha and alpha (if adjust is False).
More details can be found at
http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-windows
"""
# Shared parameter docs for the expanding_* wrappers.
_expanding_kw = """min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
"""
# Return-value description fragments used in the generated docstrings.
_type_of_input_retval = "y : type of input argument"
_flex_retval = """y : type depends on inputs
DataFrame / DataFrame -> DataFrame (matches on columns) or Panel (pairwise)
DataFrame / Series -> Computes result for each column
Series / Series -> Series"""
_pairwise_retval = "y : Panel whose items are df1.index values"
# Argument description fragments (unary / flexible-binary / binary / pairwise).
_unary_arg = "arg : Series, DataFrame\n"
_binary_arg_flex = """arg1 : Series, DataFrame, or ndarray
arg2 : Series, DataFrame, or ndarray, optional
if not supplied then will default to arg1 and produce pairwise output
"""
_binary_arg = """arg1 : Series, DataFrame, or ndarray
arg2 : Series, DataFrame, or ndarray
"""
_pairwise_arg = """df1 : DataFrame
df2 : DataFrame
"""
_pairwise_kw = """pairwise : bool, default False
If False then only matching columns between arg1 and arg2 will be used and
the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the output
will be a Panel in the case of DataFrame inputs. In the case of missing
elements, only complete pairwise observations will be used.
"""
_ddof_kw = """ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
"""
_bias_kw = r"""bias : boolean, default False
Use a standard estimation bias correction
"""
def ensure_compat(dispatch, name, arg, func_kw=None, *args, **kwargs):
    """Dispatch a deprecated top-level moment call to the method API.

    Effectively calls ``arg.<dispatch>(**kwargs).<name>(*args, **kwds)``
    where ``dispatch`` is 'rolling', 'expanding' or 'ewm'.  Plain ndarrays
    are wrapped in a Series/DataFrame first (and the result is unwrapped
    again) for backwards compatibility; a FutureWarning is emitted either
    way, showing a copy-pastable replacement for pandas objects.

    Parameters
    ----------
    dispatch : str
        Accessor name on the pandas object ('rolling', 'expanding', 'ewm').
    name : str
        Method to call on the dispatched object ('mean', 'cov', ...).
    arg : Series, DataFrame or ndarray
        The data to operate on.
    func_kw : list of str, optional
        Keyword names that belong to the *method* call rather than to the
        dispatch constructor; they are popped out of ``kwargs``.
    """
    is_ndarray = isinstance(arg, np.ndarray)
    if is_ndarray:
        # Wrap bare ndarrays so the method-based API can be used.
        if arg.ndim == 1:
            arg = Series(arg)
        elif arg.ndim == 2:
            arg = DataFrame(arg)
        else:
            raise AssertionError("cannot support ndim > 2 for ndarray compat")
        warnings.warn("pd.{dispatch}_{name} is deprecated for ndarrays and "
                      "will be removed "
                      "in a future version"
                      .format(dispatch=dispatch, name=name),
                      FutureWarning, stacklevel=3)
    # get the functional keywords here
    if func_kw is None:
        func_kw = []
    kwds = {}
    # NOTE: this mutates ``kwargs`` — every name listed in func_kw is
    # removed before the remaining kwargs go to the dispatch constructor.
    for k in func_kw:
        value = kwargs.pop(k, None)
        if value is not None:
            kwds[k] = value
    # how is a keyword that if not-None should be in kwds
    how = kwargs.pop('how', None)
    if how is not None:
        kwds['how'] = how
    r = getattr(arg, dispatch)(**kwargs)
    if not is_ndarray:
        # give a helpful deprecation message
        # with copy-pastable arguments
        pargs = ','.join(["{a}={b}".format(a=a, b=b)
                          for a, b in kwargs.items() if b is not None])
        aargs = ','.join(args)
        if len(aargs):
            aargs += ','

        def f(a, b):
            # Scalars are rendered literally; other values only by type name.
            if is_scalar(b):
                return "{a}={b}".format(a=a, b=b)
            return "{a}=<{b}>".format(a=a, b=type(b).__name__)
        aargs = ','.join([f(a, b) for a, b in kwds.items() if b is not None])
        warnings.warn("pd.{dispatch}_{name} is deprecated for {klass} "
                      "and will be removed in a future version, replace with "
                      "\n\t{klass}.{dispatch}({pargs}).{name}({aargs})"
                      .format(klass=type(arg).__name__, pargs=pargs,
                              aargs=aargs, dispatch=dispatch, name=name),
                      FutureWarning, stacklevel=3)
    result = getattr(r, name)(*args, **kwds)
    if is_ndarray:
        # Unwrap back to a plain ndarray for the legacy return type.
        result = result.values
    return result
def rolling_count(arg, window, **kwargs):
    """Rolling count of non-NaN observations inside the provided window.

    Parameters
    ----------
    arg : DataFrame or numpy ndarray-like
    window : int
        Number of observations used for calculating the statistic.
    freq : string or DateOffset object, optional (default None)
        Frequency to conform the data to before computing the statistic.
    center : boolean, default False
        Whether the label should correspond with center of window.
    how : string, default 'mean'
        Method for down- or re-sampling.

    Returns
    -------
    rolling_count : type of caller

    Notes
    -----
    The `freq` keyword resamples the data with the default parameters of
    :meth:`~pandas.Series.resample` (i.e. using the `mean`).
    """
    opts = dict(window=window)
    opts.update(kwargs)
    return ensure_compat('rolling', 'count', arg, **opts)
@Substitution("Unbiased moving covariance.", _binary_arg_flex,
              _roll_kw % 'None' + _pairwise_kw + _ddof_kw, _flex_retval,
              _roll_notes)
@Appender(_doc_template)
def rolling_cov(arg1, arg2=None, window=None, pairwise=None, **kwargs):
    # Legacy calling conventions: a numeric second positional argument is
    # really the window (pairwise self-covariance); a missing arg2 also
    # means pairwise self-covariance.  pairwise defaults to True only in
    # those cases.
    if window is None and isinstance(arg2, (int, float)):
        arg2, window = arg1, arg2
        if pairwise is None:
            pairwise = True
    elif arg2 is None:
        arg2 = arg1
        if pairwise is None:
            pairwise = True
    return ensure_compat('rolling', 'cov', arg1,
                         other=arg2, window=window, pairwise=pairwise,
                         func_kw=['other', 'pairwise', 'ddof'], **kwargs)
@Substitution("Moving sample correlation.", _binary_arg_flex,
              _roll_kw % 'None' + _pairwise_kw, _flex_retval, _roll_notes)
@Appender(_doc_template)
def rolling_corr(arg1, arg2=None, window=None, pairwise=None, **kwargs):
    # Legacy calling conventions mirroring rolling_cov: a numeric second
    # positional argument is the window, and a missing arg2 requests
    # pairwise self-correlation.
    if window is None and isinstance(arg2, (int, float)):
        arg2, window = arg1, arg2
        if pairwise is None:
            pairwise = True
    elif arg2 is None:
        arg2 = arg1
        if pairwise is None:
            pairwise = True
    return ensure_compat('rolling', 'corr', arg1,
                         other=arg2, window=window, pairwise=pairwise,
                         func_kw=['other', 'pairwise'], **kwargs)
# -----------------------------------------------------------------------------
# Exponential moving moments
@Substitution("Exponentially-weighted moving average", _unary_arg, _ewm_kw,
              _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewma(arg, com=None, span=None, halflife=None, alpha=None, min_periods=0,
         freq=None, adjust=True, how=None, ignore_na=False):
    # Collect the decay/adjustment options and hand off to the .ewm()
    # accessor's mean() via the compatibility shim.
    decay_opts = dict(com=com, span=span, halflife=halflife, alpha=alpha,
                      min_periods=min_periods, freq=freq, adjust=adjust,
                      how=how, ignore_na=ignore_na)
    return ensure_compat('ewm', 'mean', arg, **decay_opts)
@Substitution("Exponentially-weighted moving variance", _unary_arg,
              _ewm_kw + _bias_kw, _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewmvar(arg, com=None, span=None, halflife=None, alpha=None, min_periods=0,
           bias=False, freq=None, how=None, ignore_na=False, adjust=True):
    # 'bias' belongs to the var() method call itself, hence func_kw.
    decay_opts = dict(com=com, span=span, halflife=halflife, alpha=alpha,
                      min_periods=min_periods, freq=freq, adjust=adjust,
                      how=how, ignore_na=ignore_na, bias=bias)
    return ensure_compat('ewm', 'var', arg, func_kw=['bias'], **decay_opts)
@Substitution("Exponentially-weighted moving std", _unary_arg,
              _ewm_kw + _bias_kw, _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewmstd(arg, com=None, span=None, halflife=None, alpha=None, min_periods=0,
           bias=False, freq=None, how=None, ignore_na=False, adjust=True):
    # 'bias' belongs to the std() method call itself, hence func_kw.
    decay_opts = dict(com=com, span=span, halflife=halflife, alpha=alpha,
                      min_periods=min_periods, freq=freq, adjust=adjust,
                      how=how, ignore_na=ignore_na, bias=bias)
    return ensure_compat('ewm', 'std', arg, func_kw=['bias'], **decay_opts)


# Historical alias: "volatility" is the exponentially-weighted moving std.
ewmvol = ewmstd
@Substitution("Exponentially-weighted moving covariance", _binary_arg_flex,
              _ewm_kw + _pairwise_kw, _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewmcov(arg1, arg2=None, com=None, span=None, halflife=None, alpha=None,
           min_periods=0, bias=False, freq=None, pairwise=None, how=None,
           ignore_na=False, adjust=True):
    # Legacy conventions: a missing arg2 means pairwise self-covariance; a
    # numeric arg2 (when no com was given) is interpreted as com.
    if arg2 is None:
        arg2 = arg1
        if pairwise is None:
            pairwise = True
    elif isinstance(arg2, (int, float)) and com is None:
        com = arg2
        arg2 = arg1
        if pairwise is None:
            pairwise = True
    opts = dict(other=arg2, com=com, span=span, halflife=halflife,
                alpha=alpha, min_periods=min_periods, bias=bias, freq=freq,
                how=how, ignore_na=ignore_na, adjust=adjust,
                pairwise=pairwise)
    return ensure_compat('ewm', 'cov', arg1,
                         func_kw=['other', 'pairwise', 'bias'], **opts)
@Substitution("Exponentially-weighted moving correlation", _binary_arg_flex,
              _ewm_kw + _pairwise_kw, _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewmcorr(arg1, arg2=None, com=None, span=None, halflife=None, alpha=None,
            min_periods=0, freq=None, pairwise=None, how=None, ignore_na=False,
            adjust=True):
    # Legacy conventions mirroring ewmcov: a missing arg2 means pairwise
    # self-correlation; a numeric arg2 (when no com was given) is com.
    if arg2 is None:
        arg2 = arg1
        if pairwise is None:
            pairwise = True
    elif isinstance(arg2, (int, float)) and com is None:
        com = arg2
        arg2 = arg1
        if pairwise is None:
            pairwise = True
    opts = dict(other=arg2, com=com, span=span, halflife=halflife,
                alpha=alpha, min_periods=min_periods, freq=freq, how=how,
                ignore_na=ignore_na, adjust=adjust, pairwise=pairwise)
    return ensure_compat('ewm', 'corr', arg1,
                         func_kw=['other', 'pairwise'], **opts)
# ---------------------------------------------------------------------
# Python interface to Cython functions
def _rolling_func(name, desc, how=None, func_kw=None, additional_kw=''):
    """Build a deprecated top-level ``rolling_<name>`` wrapper.

    Parameters
    ----------
    name : str
        Method name on the ``.rolling()`` object to dispatch to.
    desc : str
        One-line description used as the generated docstring header.
    how : str, optional
        Default down/re-sampling method shown in the generated docstring.
    func_kw : list of str, optional
        Keyword names forwarded to the method call (see ensure_compat).
    additional_kw : str
        Extra parameter documentation appended to the rolling kwargs block.
    """
    if how is None:
        how_arg_str = 'None'
    else:
        # BUG FIX: the closing quote was missing ("'%s" % how), which
        # rendered docstrings like "default 'max" instead of "default 'max'".
        how_arg_str = "'%s'" % how

    @Substitution(desc, _unary_arg, _roll_kw % how_arg_str + additional_kw,
                  _type_of_input_retval, _roll_notes)
    @Appender(_doc_template)
    def f(arg, window, min_periods=None, freq=None, center=False,
          **kwargs):
        return ensure_compat('rolling',
                             name,
                             arg,
                             window=window,
                             min_periods=min_periods,
                             freq=freq,
                             center=center,
                             func_kw=func_kw,
                             **kwargs)
    return f
# Concrete deprecated wrappers generated by the factory above; `how` is the
# resampling default shown in the generated docstring.
rolling_max = _rolling_func('max', 'Moving maximum.', how='max')
rolling_min = _rolling_func('min', 'Moving minimum.', how='min')
rolling_sum = _rolling_func('sum', 'Moving sum.')
rolling_mean = _rolling_func('mean', 'Moving mean.')
rolling_median = _rolling_func('median', 'Moving median.', how='median')
# std/var additionally accept a ``ddof`` keyword (delta degrees of freedom),
# forwarded to the method call via func_kw.
rolling_std = _rolling_func('std', 'Moving standard deviation.',
                            func_kw=['ddof'],
                            additional_kw=_ddof_kw)
rolling_var = _rolling_func('var', 'Moving variance.',
                            func_kw=['ddof'],
                            additional_kw=_ddof_kw)
rolling_skew = _rolling_func('skew', 'Unbiased moving skewness.')
rolling_kurt = _rolling_func('kurt', 'Unbiased moving kurtosis.')
def rolling_quantile(arg, window, quantile, min_periods=None, freq=None,
                     center=False):
    """Moving quantile.

    Parameters
    ----------
    arg : Series, DataFrame
    window : int
        Number of observations used for calculating the statistic.
    quantile : float
        0 <= quantile <= 1
    min_periods : int, default None
        Minimum number of observations in window required to have a value.
    freq : string or DateOffset object, optional (default None)
        Frequency to conform the data to before computing the statistic.
    center : boolean, default False
        Whether the label should correspond with center of window.

    Returns
    -------
    y : type of input argument

    Notes
    -----
    By default the result is set to the right edge of the window; pass
    ``center=True`` to centre it.  The `freq` keyword resamples the data
    with the default parameters of :meth:`~pandas.Series.resample`
    (i.e. using the `mean`).
    """
    window_opts = dict(window=window, freq=freq, center=center,
                       min_periods=min_periods)
    return ensure_compat('rolling', 'quantile', arg,
                         func_kw=['quantile'], quantile=quantile,
                         **window_opts)
def rolling_apply(arg, window, func, min_periods=None, freq=None,
                  center=False, args=(), kwargs=None):
    """Generic moving function application.

    Parameters
    ----------
    arg : Series, DataFrame
    window : int
        Number of observations used for calculating the statistic.
    func : function
        Must produce a single value from an ndarray input.
    min_periods : int, default None
        Minimum number of observations in window required to have a value.
    freq : string or DateOffset object, optional (default None)
        Frequency to conform the data to before computing the statistic.
    center : boolean, default False
        Whether the label should correspond with center of window.
    args : tuple
        Passed on to func.
    kwargs : dict, optional
        Passed on to func.

    Returns
    -------
    y : type of input argument

    Notes
    -----
    By default the result is set to the right edge of the window; pass
    ``center=True`` to centre it.  The `freq` keyword resamples the data
    with the default parameters of :meth:`~pandas.Series.resample`.
    """
    # BUG FIX: the default was a mutable ``{}`` shared between all calls;
    # use None and build a fresh dict per call (behaviour is unchanged for
    # every caller, including those passing kwargs explicitly).
    if kwargs is None:
        kwargs = {}
    return ensure_compat('rolling',
                         'apply',
                         arg,
                         window=window,
                         freq=freq,
                         center=center,
                         min_periods=min_periods,
                         func_kw=['func', 'args', 'kwargs'],
                         func=func,
                         args=args,
                         kwargs=kwargs)
def rolling_window(arg, window=None, win_type=None, min_periods=None,
                   freq=None, center=False, mean=True,
                   axis=0, how=None, **kwargs):
    """Apply a weighted moving window of type ``win_type`` and size
    ``window`` to the data.

    Parameters
    ----------
    arg : Series, DataFrame
    window : int or ndarray
        Weighting window specification. An integer is treated as the
        window length and then ``win_type`` is required.
    win_type : str, default None
        Window type: boxcar, triang, blackman, hamming, bartlett, parzen,
        bohman, blackmanharris, nuttall, barthann, kaiser (needs beta),
        gaussian (needs std), general_gaussian (needs power, width),
        slepian (needs width).
    min_periods : int, default None
        Minimum number of observations in window required to have a value.
    freq : string or DateOffset object, optional (default None)
        Frequency to conform the data to before computing the statistic.
    center : boolean, default False
        Whether the label should correspond with center of window.
    mean : boolean, default True
        If True computes weighted mean, else weighted sum.
    axis : {0, 1}, default 0
    how : string, default 'mean'
        Method for down- or re-sampling.

    Returns
    -------
    y : type of input argument

    Notes
    -----
    By default the result is set to the right edge of the window; pass
    ``center=True`` to centre it.  The `freq` keyword resamples the data
    with the default parameters of :meth:`~pandas.Series.resample`.
    """
    method = 'mean' if mean else 'sum'
    # Any extra keyword (beta, std, width, ...) belongs to the window
    # function itself, so every remaining kwarg is forwarded via func_kw.
    return ensure_compat('rolling',
                         method,
                         arg,
                         window=window,
                         win_type=win_type,
                         freq=freq,
                         center=center,
                         min_periods=min_periods,
                         axis=axis,
                         func_kw=list(kwargs.keys()),
                         **kwargs)
def _expanding_func(name, desc, func_kw=None, additional_kw=''):
    """Factory producing a deprecated top-level ``expanding_<name>``
    wrapper that dispatches to the .expanding() accessor."""
    @Substitution(desc, _unary_arg, _expanding_kw + additional_kw,
                  _type_of_input_retval, "")
    @Appender(_doc_template)
    def f(arg, min_periods=1, freq=None, **kwargs):
        opts = dict(min_periods=min_periods, freq=freq, func_kw=func_kw)
        opts.update(kwargs)
        return ensure_compat('expanding', name, arg, **opts)
    return f
# Concrete deprecated expanding wrappers generated by the factory above.
expanding_max = _expanding_func('max', 'Expanding maximum.')
expanding_min = _expanding_func('min', 'Expanding minimum.')
expanding_sum = _expanding_func('sum', 'Expanding sum.')
expanding_mean = _expanding_func('mean', 'Expanding mean.')
expanding_median = _expanding_func('median', 'Expanding median.')
# std/var additionally accept a ``ddof`` keyword (delta degrees of freedom),
# forwarded to the method call via func_kw.
expanding_std = _expanding_func('std', 'Expanding standard deviation.',
                                func_kw=['ddof'],
                                additional_kw=_ddof_kw)
expanding_var = _expanding_func('var', 'Expanding variance.',
                                func_kw=['ddof'],
                                additional_kw=_ddof_kw)
expanding_skew = _expanding_func('skew', 'Unbiased expanding skewness.')
expanding_kurt = _expanding_func('kurt', 'Unbiased expanding kurtosis.')
def expanding_count(arg, freq=None):
    """Expanding count of non-NaN observations.

    Parameters
    ----------
    arg : DataFrame or numpy ndarray-like
    freq : string or DateOffset object, optional (default None)
        Frequency to conform the data to before computing the statistic.

    Returns
    -------
    expanding_count : type of caller

    Notes
    -----
    The `freq` keyword resamples the data with the default parameters of
    :meth:`~pandas.Series.resample` (i.e. using the `mean`).
    """
    return ensure_compat('expanding', 'count', arg, freq=freq)
def expanding_quantile(arg, quantile, min_periods=1, freq=None):
    """Expanding quantile.

    Parameters
    ----------
    arg : Series, DataFrame
    quantile : float
        0 <= quantile <= 1
    min_periods : int, default 1
        Minimum number of observations required to have a value.
    freq : string or DateOffset object, optional (default None)
        Frequency to conform the data to before computing the statistic.

    Returns
    -------
    y : type of input argument

    Notes
    -----
    The `freq` keyword resamples the data with the default parameters of
    :meth:`~pandas.Series.resample` (i.e. using the `mean`).
    """
    window_opts = dict(freq=freq, min_periods=min_periods)
    return ensure_compat('expanding', 'quantile', arg,
                         func_kw=['quantile'], quantile=quantile,
                         **window_opts)
@Substitution("Unbiased expanding covariance.", _binary_arg_flex,
              _expanding_kw + _pairwise_kw + _ddof_kw, _flex_retval, "")
@Appender(_doc_template)
def expanding_cov(arg1, arg2=None, min_periods=1, freq=None,
                  pairwise=None, ddof=1):
    # Legacy conventions: a missing arg2 means pairwise self-covariance;
    # a numeric arg2 is treated as min_periods.
    # NOTE(review): the numeric-arg2 branch only triggers when the caller
    # also passes min_periods=None explicitly, since the default is 1 —
    # confirm whether that is intended.
    if arg2 is None:
        arg2 = arg1
        if pairwise is None:
            pairwise = True
    elif isinstance(arg2, (int, float)) and min_periods is None:
        min_periods = arg2
        arg2 = arg1
        if pairwise is None:
            pairwise = True
    opts = dict(other=arg2, min_periods=min_periods, pairwise=pairwise,
                freq=freq, ddof=ddof)
    return ensure_compat('expanding', 'cov', arg1,
                         func_kw=['other', 'pairwise', 'ddof'], **opts)
@Substitution("Expanding sample correlation.", _binary_arg_flex,
              _expanding_kw + _pairwise_kw, _flex_retval, "")
@Appender(_doc_template)
def expanding_corr(arg1, arg2=None, min_periods=1, freq=None, pairwise=None):
    # Legacy conventions: a missing arg2 requests pairwise self-correlation;
    # a numeric arg2 is treated as min_periods (only reachable when the
    # caller explicitly passes min_periods=None, since the default is 1).
    if arg2 is None:
        arg2 = arg1
        pairwise = True if pairwise is None else pairwise
    elif isinstance(arg2, (int, float)) and min_periods is None:
        min_periods = arg2
        arg2 = arg1
        pairwise = True if pairwise is None else pairwise
    # CONSISTENCY FIX: 'ddof' was listed in func_kw although this function
    # accepts no ddof argument (correlation takes none), unlike
    # expanding_cov; rolling_corr uses ['other', 'pairwise'].  'ddof' was
    # never present in the kwargs, so removing it changes no behaviour.
    return ensure_compat('expanding',
                         'corr',
                         arg1,
                         other=arg2,
                         min_periods=min_periods,
                         pairwise=pairwise,
                         freq=freq,
                         func_kw=['other', 'pairwise'])
def expanding_apply(arg, func, min_periods=1, freq=None,
                    args=(), kwargs=None):
    """Generic expanding function application.

    Parameters
    ----------
    arg : Series, DataFrame
    func : function
        Must produce a single value from an ndarray input.
    min_periods : int, default 1
        Minimum number of observations required to have a value.
    freq : string or DateOffset object, optional (default None)
        Frequency to conform the data to before computing the statistic.
    args : tuple
        Passed on to func.
    kwargs : dict, optional
        Passed on to func.

    Returns
    -------
    y : type of input argument

    Notes
    -----
    The `freq` keyword resamples the data with the default parameters of
    :meth:`~pandas.Series.resample` (i.e. using the `mean`).
    """
    # BUG FIX: the default was a mutable ``{}`` shared between all calls;
    # use None and build a fresh dict per call (behaviour unchanged).
    if kwargs is None:
        kwargs = {}
    return ensure_compat('expanding',
                         'apply',
                         arg,
                         freq=freq,
                         min_periods=min_periods,
                         func_kw=['func', 'args', 'kwargs'],
                         func=func,
                         args=args,
                         kwargs=kwargs)
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/pandas/stats/moments.py
|
Python
|
mit
| 31,620
|
[
"Gaussian"
] |
1306abd7585ff069b7720910bf076ff560653f2e200af472d1e204f39806fd07
|
from octopus.core import app, initialise, add_configuration
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--debug", action="store_true", help="pycharm debug support enable")
    parser.add_argument("-c", "--config", help="additional configuration to load (e.g. for testing)")
    args = parser.parse_args()

    # Layer any extra configuration file over the app defaults first.
    if args.config:
        add_configuration(app, args.config)

    # Remote (PyCharm) debugging is enabled either by config or by -d.
    pycharm_debug = app.config.get('DEBUG_PYCHARM', False)
    if args.debug:
        pycharm_debug = True

    if pycharm_debug:
        # Flask's own debugger must be off so pydevd can attach instead.
        app.config['DEBUG'] = False
        import pydevd
        pydevd.settrace(app.config.get('DEBUG_SERVER_HOST', 'localhost'), port=app.config.get('DEBUG_SERVER_PORT', 51234), stdoutToServer=True, stderrToServer=True)
        # BUG FIX: use the print() function — the bare print statement is a
        # SyntaxError under Python 3; the parenthesized form also works on
        # Python 2 for a single argument.
        print("STARTED IN REMOTE DEBUG MODE")
initialise()
# most of the imports should be done here, after initialise()
from flask import render_template
from octopus.lib.webapp import custom_static
from service import models # needed even if unused below, to let Alembic pick up DB schema changes
@app.route("/")
def index():
    # Landing page.
    return render_template("index.html")
@app.route("/projects")
def projects():
    from service.models import Project
    # Hard-coded placeholder data for now; real construction would go
    # through Project.new(url=..., name=...).
    demo_projects = [Project(url=u, name=n)
                     for u, n in (("url1", "Project1"),
                                  ("url2", "Project2"),
                                  ("url3", "Project3"))]
    return render_template("projects.html", projects=demo_projects)
# this allows us to override the standard static file handling with our own
# dynamic version
@app.route("/static/<path:filename>")
def static(filename):
    # Delegate resolution/serving of the file to octopus's custom handler.
    return custom_static(filename)
# this allows us to serve our standard javascript config
# (the blueprint exposes the client-side config endpoint)
from octopus.modules.clientjs.configjs import blueprint as configjs
app.register_blueprint(configjs)
@app.errorhandler(404)
def page_not_found(e):
    # Render the custom 404 page while preserving the 404 status code.
    return render_template('errors/404.html'), 404
# Script entry point: serve on all interfaces with config-driven debug/port.
if __name__ == "__main__":
    app.run(host='0.0.0.0', debug=app.config['DEBUG'], port=app.config['PORT'], threaded=False)
|
CottageLabs/finance
|
service/web.py
|
Python
|
apache-2.0
| 2,056
|
[
"Octopus"
] |
074982963fc68af6b36fd2a8100cfdd8069e636c807f0faaceca84361925113a
|
# Package Imports
from ..workspace import Block, Disconnected, Cancelled
from .machines import machine_declaration
# Twisted Imports
from twisted.internet import reactor, defer, task
# Octopus Imports
from octopus import data
from octopus.data.errors import Immutable
from octopus.data.data import BaseVariable
from octopus.constants import State
import octopus.transport.basic
from octopus.image.data import Image
from octopus.image import functions as image_functions
# Python Imports
from time import time as now
from typing import Tuple
import os
# Numpy
import numpy
# Abstract helper blocks that must not be exposed to the block registry.
__exclude_blocks__ = [
    "_image_block",
]
class _image_block (Block):
    """Abstract base for unary image blocks.

    Subclasses override ``_calculate`` with the actual image operation.
    ``eval`` fetches the INPUT connection and applies it, passing a
    ``None`` input through untouched.
    """

    def _calculate (self, result):
        # Identity by default; subclasses override.
        return result

    def eval (self):
        def _apply(value):
            return None if value is None else self._calculate(value)

        self._complete = self.getInputValue('INPUT', None)
        self._complete.addCallback(_apply)
        return self._complete
class image_findcolour (_image_block):
    """Isolate a colour by channel subtraction.

    The OP field selects the difference: RED -> r-g, GREEN -> g-r,
    BLUE -> b-r.
    """

    _map = {
        "RED": lambda r, g, b: image_functions.__sub__(r, g),
        "GREEN": lambda r, g, b: image_functions.__sub__(g, r),
        "BLUE": lambda r, g, b: image_functions.__sub__(b, r),
    }

    def _calculate (self, result: Image) -> Image:
        if result is None:
            return None
        channels = image_functions.splitChannels(result)
        operation = self._map[self.fields['OP']]
        return operation(*channels)

# Emit a warning if bad op given
class image_threshold (_image_block):
    """Threshold the input image at the level given by the THRESHOLD field."""

    def _calculate (self, result: Image) -> Image:
        level = int(self.fields['THRESHOLD'])
        return image_functions.threshold(result, level)
class image_erode (_image_block):
    """Apply a morphological erosion to the input image."""

    def _calculate (self, result: Image) -> Image:
        eroded = image_functions.erode(result)
        return eroded
class image_invert (_image_block):
    """Invert the input image."""

    def _calculate (self, result: Image) -> Image:
        inverted = image_functions.invert(result)
        return inverted
class image_colourdistance (Block):
    """Per-pixel distance of the INPUT image from a reference COLOUR.

    Both inputs are evaluated; if either yields None the block yields None.
    """

    def _calculate (self, input: Image, colour: Tuple[int, int, int]) -> Image:
        return image_functions.colorDistance(input, color = colour)

    def eval (self):
        def _combine(values):
            image, colour = values
            if image is None or colour is None:
                return None
            return self._calculate(image, colour)

        pending = [
            self.getInputValue('INPUT', None),
            self.getInputValue('COLOUR', (0, 0, 0)),
        ]
        self._complete = defer.gatherResults(pending).addCallback(_combine)
        return self._complete
class image_huedistance (image_colourdistance):
    """As image_colourdistance, but measured in hue space."""

    def _calculate (self, input, colour):
        distance = image_functions.hueDistance(input, colour)
        return distance
class image_crop (_image_block):
    """Crop the input image to the rectangle given by the X/Y/W/H fields."""

    def _calculate (self, result):
        # Field values are read (and converted to int) before the None
        # check, matching the original evaluation order.
        left = int(self.fields['X'])
        top = int(self.fields['Y'])
        width = int(self.fields['W'])
        height = int(self.fields['H'])

        if result is None:
            return None
        return image_functions.crop(result, left, top, width, height)
class image_intensityfn (_image_block):
    """Reduce the greyscale intensity of the image to a single number.

    The OP field selects the reduction: MAX, MIN, MEAN or MEDIAN.
    """

    # NOTE(review): declared output type is float, but the result below is
    # truncated to int — confirm which is intended.
    outputType = float

    _map = {
        "MAX": numpy.max,
        "MIN": numpy.min,
        "MEAN": numpy.mean,
        "MEDIAN": numpy.median
    }

    def _calculate (self, result):
        if result is None:
            return
        reduce_fn = self._map[self.fields['OP']]
        grey = image_functions.getGrayNumpy(result)
        return int(reduce_fn(grey))

# Emit a warning if bad op given
class image_tonumber (_image_block):
    outputType = int
    # Blob properties selectable via the OP field.
    _map = {
        "CENTROIDX": lambda blob: blob.centroid()[0],
        "CENTROIDY": lambda blob: blob.centroid()[1],
        "SIZEX": lambda blob: blob.minRectWidth(),
        "SIZEY": lambda blob: blob.minRectHeight(),
    }

    def _calculate (self, result):
        """Extract a numeric property of the largest blob in the image."""
        try:
            found = result.findBlobs(100) # min_size
            largest = found.sortArea()[-1]
        except AttributeError:
            # No input image, or no blobs were found.
            return None
        extract = self._map[self.fields['OP']]
        return extract(largest)
    # Emit a warning if bad op given
class machine_imageprovider (machine_declaration):
    def getMachineClass (self):
        """Return the ImageProvider machine class (imported lazily)."""
        from octopus.image.provider import ImageProvider
        return ImageProvider
class machine_singletracker (machine_declaration):
    def getMachineClass (self):
        """Return the single-blob tracker machine class (imported lazily)."""
        from octopus.image import tracker
        return tracker.SingleBlobTracker
class machine_multitracker (machine_declaration):
    def getMachineClass (self):
        """Return the multi-blob tracker machine class (imported lazily)."""
        from octopus.image import tracker
        return tracker.MultiBlobTracker

    def getMachineParams (self):
        """Read the tracked-blob count from the block's mutation JSON.

        Falls back to an empty dict when the mutation is missing,
        malformed, or lacks a 'count' key.
        """
        import json
        try:
            mutation = json.loads(self.mutation)
            return { "count": mutation['count'] }
        except (ValueError, KeyError):
            return {}
class connection_cvcamera (Block):
    def eval (self):
        """Create a webcam source for the camera id in the ID field."""
        if os.name == 'nt':
            # On Windows, use the non-threaded webcam implementation.
            from octopus.image.source import webcam_nothread as cv_webcam
        else:
            from octopus.image.source import cv_webcam
        camera_id = int(self.fields['ID'])
        return defer.succeed(cv_webcam(camera_id))
class connection_camera_proxy (Block):
    def eval (self):
        """Create a proxy to a remote camera identified by HOST/PORT/ID."""
        from octopus.image.source import camera_proxy
        host = str(self.fields['HOST'])
        port = int(self.fields['PORT'])
        camera_id = str(self.fields['ID'])
        return defer.succeed(camera_proxy(host, port, camera_id))
|
richardingham/octopus
|
octopus/blocktopus/blocks/images.py
|
Python
|
mit
| 4,698
|
[
"Octopus"
] |
5b439205d38dab6e382ad9ff531b26c02098f72bb9b2dc49836d07947ccea65c
|
from __future__ import annotations
import inspect
from xia2.Modules.DoseAccumulate import accumulate_dose
class XSample:
    """An object representation of a sample.

    A sample belongs to an XCrystal and owns a list of sweeps. It can be
    round-tripped through a plain dict (to_dict/from_dict) for
    serialization.
    """

    def __init__(self, name, crystal):
        """Create a new sample named name, belonging to XCrystal object crystal."""
        # set up this object
        self._name = name
        self._crystal = crystal

        # then create space to store things which are contained
        # in here - the sweeps
        self._sweeps = []

        # Shared indexer/refiner used when sweeps are processed together.
        self.multi_indexer = None
        self.multi_refiner = None

    def get_epoch_to_dose(self):
        """Return a mapping from image epoch to accumulated dose over all sweeps."""
        epoch_to_dose = accumulate_dose(
            [sweep.get_imageset() for sweep in self._sweeps]
        )
        return epoch_to_dose

    # serialization functions

    def to_dict(self):
        """Serialize this sample to a plain dict.

        Sweeps are serialized recursively; the parent crystal reference
        is deliberately omitted (the parent holds the reference to us).
        """
        obj = {}
        obj["__id__"] = "XSample"
        attributes = inspect.getmembers(self, lambda m: not (inspect.isroutine(m)))
        for a in attributes:
            if a[0] == "_sweeps":
                sweeps = []
                for sweep in a[1]:
                    sweeps.append(sweep.to_dict())
                obj[a[0]] = sweeps
            elif a[0] == "_crystal":
                # don't serialize this since the parent xsample *should* contain
                # the reference to the child xsweep
                continue
            elif a[0] in ["multi_indexer", "multi_refiner"] and a[1] is not None:
                obj[a[0]] = a[1].to_dict()
            elif a[0].startswith("__"):
                continue
            else:
                obj[a[0]] = a[1]
        return obj

    @classmethod
    def from_dict(cls, obj):
        """Reconstruct an XSample from a dict produced by to_dict().

        Note: _sweeps is restored as a list of sweep *names* only — the
        parent object is expected to re-link the actual sweep objects.
        """
        assert obj["__id__"] == "XSample"
        return_obj = cls(name=None, crystal=None)
        for k, v in obj.items():
            if k == "_sweeps":
                v = [s_dict["_name"] for s_dict in v]
            elif k in ["multi_indexer", "multi_refiner"] and v is not None:
                from libtbx.utils import import_python_object

                # Use a distinct local name: previously this rebound the
                # classmethod's own ``cls`` argument on each iteration.
                serializable_cls = import_python_object(
                    import_path=".".join((v["__module__"], v["__name__"])),
                    error_prefix="",
                    target_must_be="",
                    where_str="",
                ).object
                v = serializable_cls.from_dict(v)
            setattr(return_obj, k, v)
        return return_obj

    def get_output(self):
        """Return a short human-readable summary (no trailing newline)."""
        result = "Sample name: %s\n" % self._name
        result += "Sweeps:\n"
        return result[:-1]

    def get_crystal(self):
        return self._crystal

    def get_name(self):
        return self._name

    def add_sweep(self, sweep):
        self._sweeps.append(sweep)

    def get_sweeps(self):
        return self._sweeps

    def remove_sweep(self, sweep):
        """Remove a sweep object from this wavelength."""
        try:
            self._sweeps.remove(sweep)
        except ValueError:
            # Sweep was not registered with this sample: best-effort no-op.
            pass
|
xia2/xia2
|
src/xia2/Schema/XSample.py
|
Python
|
bsd-3-clause
| 3,220
|
[
"CRYSTAL"
] |
ffbac8d0795bb8ea85b5be626d54544759eee61e33586b9683a9913d45d7b2cb
|
#!/usr/bin/env python
# Run
#
# python setup.py build
#
# to build this extension in place.
# To use it, the extension should be copied to the appropriate AlGDock
# subdirectory for extensions.
package_name = "MMTK"

# Legacy Python 2 distutils build script for the PoseFF MMTK extension.
from distutils.core import setup, Command, Extension
from distutils.command.build import build
from distutils.command.sdist import sdist
from distutils.command.install_data import install_data
from distutils import dir_util
from distutils.filelist import FileList, translate_pattern
import distutils.sysconfig
sysconfig = distutils.sysconfig.get_config_vars()
import os, sys, types
import ctypes, ctypes.util
from glob import glob

# Locate the MMTK source tree: honour $MMTKHOME if set, otherwise fall
# back to a hard-coded developer path.
if os.environ.get('MMTKHOME'):
    mmtk_home = os.environ['MMTKHOME']
else:
    mmtk_home = '/home/lspirido/Installers/0Work/0David/PoseFF/MMTK-2.7.9/'
    # NOTE(review): this assignment unconditionally clobbers the one
    # above — looks like a per-developer-machine toggle; confirm which
    # path is intended before releasing.
    mmtk_home = '/Users/dminh/Installers/MMTK-2.7.9/'
# The PoseFF extension sources live in the current working directory.
poseff_src = os.getcwd() + '/'
#poseff_src = '/home/lspirido/tmp/PoseFF/'
#sys.path.insert(1, mmtk_home)
class Dummy:
    # Bare namespace object: execfile() below populates its __dict__
    # with the MMTK package metadata (e.g. __version__).
    pass
pkginfo = Dummy()
execfile(mmtk_home + 'MMTK/__pkginfo__.py', pkginfo.__dict__)  # Python 2 only
# Check for Cython and use it if the environment variable
# MMTK_USE_CYTHON is set to a non-zero value.
use_cython = int(os.environ.get('MMTK_USE_CYTHON', '0')) != 0
if use_cython:
    try:
        from Cython.Distutils import build_ext
        use_cython = True
    except ImportError:
        use_cython = False
if not use_cython:
    from distutils.command.build_ext import build_ext
# Cython compiles .pyx sources; otherwise pre-generated .c is used.
src_ext = 'pyx' if use_cython else 'c'

# Check that we have Scientific 2.6 or higher
try:
    from Scientific import __version__ as scientific_version
    # Strip the 'hg' (development) suffix before parsing the version.
    if scientific_version[-2:] == 'hg':
        scientific_version = scientific_version[:-2]
    scientific_version = scientific_version.split('.')
    scientific_ok = int(scientific_version[0]) >= 2 and \
                    int(scientific_version[1]) >= 6
except ImportError:
    scientific_ok = False
if not scientific_ok:
    print "MMTK needs ScientificPython 2.6 or higher"
    raise SystemExit

compile_args = []
include_dirs = [mmtk_home + 'Include']

# ScientificPython >= 2.7.8 ships its own netcdf.h; prefer it if found.
if (int(scientific_version[1]) >= 8 or \
    (int(scientific_version[1]) == 7 and int(scientific_version[2]) >= 8)):
    netcdf_h = os.path.join(sys.prefix, 'include',
                            'python%d.%d' % sys.version_info[:2],
                            'Scientific', 'netcdf.h')
    print "netcdf.h path", netcdf_h
    if os.path.exists(netcdf_h):
        compile_args.append("-DUSE_NETCDF_H_FROM_SCIENTIFIC=1")
        include_dirs.append(os.path.join(sys.prefix, 'include', 'python%d.%d' % sys.version_info[:2])) # EU
    else:
        # Take care of the common problem that netcdf is in /usr/local but
        # /usr/local/include is not on $CPATH.
        if os.path.exists('/usr/local/include/netcdf.h'):
            include_dirs.append('/usr/local/include')

# Detect whether Scientific is built against NumPy or Numeric.
from Scientific import N
try:
    num_package = N.package
except AttributeError:
    num_package = "Numeric"
if num_package == "NumPy":
    compile_args.append("-DNUMPY=1")
    import numpy.distutils.misc_util
    include_dirs.extend(numpy.distutils.misc_util.get_numpy_include_dirs())
# Public C headers installed with the package.
headers = glob(os.path.join ("Include", "MMTK", "*.h"))

# MMTK data directories whose non-Python files are shipped verbatim.
paths = [os.path.join(mmtk_home + 'MMTK', 'ForceFields'),
         os.path.join(mmtk_home + 'MMTK', 'ForceFields', 'Amber'),
         os.path.join(mmtk_home + 'MMTK', 'Database', 'Atoms'),
         os.path.join(mmtk_home + 'MMTK', 'Database', 'Groups'),
         os.path.join(mmtk_home + 'MMTK', 'Database', 'Molecules'),
         os.path.join(mmtk_home + 'MMTK', 'Database', 'Complexes'),
         os.path.join(mmtk_home + 'MMTK', 'Database', 'Proteins'),
         os.path.join(mmtk_home + 'MMTK', 'Database', 'PDB'),
         os.path.join(mmtk_home + 'MMTK', 'Tools', 'TrajectoryViewer')]
data_files = []
for dir in paths:
    files = []
    for f in glob(os.path.join(dir, '*')):
        # Skip .py/.pyc/.pyo sources; everything else is package data.
        if f[-3:] != '.py' and f[-4:-1] != '.py' and os.path.isfile(f):
            files.append(f)
    data_files.append((dir, files))
class ModifiedFileList(FileList):
    # FileList variant whose findall() walks the MMTK source tree
    # (instead of the current directory) and records directories as
    # well as regular files.
    #def findall(self, dir=os.curdir):
    def findall(self, dir=mmtk_home):
        from stat import ST_MODE, S_ISREG, S_ISDIR, S_ISLNK
        list = []
        stack = [dir]
        pop = stack.pop
        push = stack.append
        # Iterative depth-first walk of the directory tree.
        while stack:
            dir = pop()
            names = os.listdir(dir)
            for name in names:
                if dir != os.curdir:
                    fullname = os.path.join(dir, name)
                else:
                    fullname = name
                stat = os.stat(fullname)
                mode = stat[ST_MODE]
                if S_ISREG(mode):
                    list.append(fullname)
                elif S_ISDIR(mode) and not S_ISLNK(mode):
                    # Record the directory itself, then descend into it.
                    list.append(fullname)
                    push(fullname)
        self.allfiles = list
class modified_build(build):
    # Build command extended to also build the Sphinx docs when both
    # Sphinx and a Doc/ directory are available.
    def has_sphinx(self):
        # 'sphinx' is the module (or None) resolved near the bottom of
        # this script.
        if sphinx is None:
            return False
        setup_dir = os.path.dirname(os.path.abspath(__file__))
        return os.path.isdir(os.path.join(setup_dir, 'Doc'))
    sub_commands = build.sub_commands + [('build_sphinx', has_sphinx)]
class modified_sdist(sdist):
    # sdist command using ModifiedFileList so the manifest is built from
    # the MMTK source tree.
    def run (self):
        self.filelist = ModifiedFileList()
        self.check_metadata()
        self.get_file_list()
        if self.manifest_only:
            return
        self.make_distribution()
    def make_release_tree (self, base_dir, files):
        """Populate base_dir with the distribution files.

        Uses hard links when the OS supports them (cheap), otherwise
        copies; directories listed in the manifest are created empty.
        """
        self.mkpath(base_dir)
        dir_util.create_tree(base_dir, files,
                             verbose=self.verbose, dry_run=self.dry_run)
        if hasattr(os, 'link'):        # can make hard links on this system
            link = 'hard'
            msg = "making hard links in %s..." % base_dir
        else:                           # nope, have to copy
            link = None
            msg = "copying files to %s..." % base_dir
        if not files:
            self.warn("no files to distribute -- empty manifest?")
        else:
            self.announce(msg)
        for file in files:
            if os.path.isfile(file):
                dest = os.path.join(base_dir, file)
                self.copy_file(file, dest, link=link)
            elif os.path.isdir(file):
                dir_util.mkpath(os.path.join(base_dir, file))
            else:
                self.warn("'%s' not a regular file or directory -- skipping"
                          % file)
class modified_install_data(install_data):
    # Install the data files under the library directory (next to the
    # Python modules) instead of the default data prefix.
    def run(self):
        install_cmd = self.get_finalized_command('install')
        self.install_dir = getattr(install_cmd, 'install_lib')
        return install_data.run(self)
class test(Command):
    # 'python setup.py test': build in place, copy the force-field data
    # files next to the built modules, then run the test suite against
    # the build directory.
    user_options = []
    def initialize_options(self):
        self.build_lib = None
    def finalize_options(self):
        # Inherit build_lib from the 'build' command.
        self.set_undefined_options('build',
                                   ('build_lib', 'build_lib'))
    def run(self):
        import sys, subprocess
        self.run_command('build_py')
        self.run_command('build_ext')
        # The ForceFields data files are needed at runtime by the tests.
        ff = sum((fns for dir, fns in data_files if 'ForceFields' in dir), [])
        for fn in ff:
            self.copy_file(fn,
                           os.path.join(self.build_lib, fn),
                           preserve_mode=False)
        subprocess.call([sys.executable, 'Tests/all_tests.py'],
                        env={'PYTHONPATH': self.build_lib,
                             'MMTKDATABASE': 'MMTK/Database'})
# Map distutils command names to the customised implementations above.
cmdclass = {
    'build' : modified_build,
    'sdist': modified_sdist,
    'install_data': modified_install_data,
    'build_ext': build_ext,
    'test': test
}
# Build the sphinx documentation if Sphinx is available
try:
    import sphinx
except ImportError:
    sphinx = None
if sphinx:
    from sphinx.setup_command import BuildDoc as _BuildDoc
    class BuildDoc(_BuildDoc):
        def run(self):
            # make sure the python path is pointing to the newly built
            # code so that the documentation is built on this and not a
            # previously installed version
            build = self.get_finalized_command('build')
            sys.path.insert(0, os.path.abspath(build.build_lib))
            # The ForceFields data files must be present alongside the
            # built modules for the import-time doc build to work.
            ff = sum((fns for dir, fns in data_files if 'ForceFields' in dir),
                     [])
            for fn in ff:
                self.copy_file(fn,
                               os.path.join(build.build_lib, fn),
                               preserve_mode=False)
            try:
                sphinx.setup_command.BuildDoc.run(self)
            except UnicodeDecodeError:
                # Python 2 print-to-stderr syntax.
                print >>sys.stderr, "ERROR: unable to build documentation because Sphinx do not handle source path with non-ASCII characters. Please try to move the source package to another location (path with *only* ASCII characters)."
            sys.path.pop(0)
    cmdclass['build_sphinx'] = BuildDoc
#################################################################
# Check various compiler/library properties
libraries = []
# Link libm explicitly where the platform requires it.
if sysconfig['LIBM'] != '':
    libraries.append('m')
macros = []
# Enable MPI support only when Scientific.MPI provides a real
# (compiled) world communicator, not a pure-Python placeholder.
try:
    from Scientific.MPI import world
except ImportError:
    world = None
if world is not None:
    if type(world) == types.InstanceType:  # Python 2 old-style instance
        world = None
if world is not None:
    macros.append(('WITH_MPI', None))
# Use the C library's erfc() if libm provides one.
if hasattr(ctypes.CDLL(ctypes.util.find_library('m')), 'erfc'):
    macros.append(('LIBM_HAS_ERFC', None))
if sys.platform != 'win32':
    # LP64 platforms: C 'long' is 64 bits.
    if ctypes.sizeof(ctypes.c_long) == 8:
        macros.append(('_LONG64_', None))
if sys.version_info[0] == 2 and sys.version_info[1] >= 2:
    macros.append(('EXTENDED_TYPES', None))
#################################################################
# System-specific optimization options
low_opt = []
if sys.platform != 'win32' and 'gcc' in sysconfig['CC']:
    low_opt = ['-O0']
low_opt.append('-g')
high_opt = []
if sys.platform[:5] == 'linux' and 'gcc' in sysconfig['CC']:
    high_opt = ['-O3', '-ffast-math', '-fomit-frame-pointer',
                '-fkeep-inline-functions']
if sys.platform == 'darwin' and 'gcc' in sysconfig['CC']:
    high_opt = ['-O3', '-ffast-math', '-fomit-frame-pointer',
                '-fkeep-inline-functions', '-falign-loops=16']
if sys.platform == 'aix4':
    high_opt = ['-O4']
if sys.platform == 'odf1V4':
    high_opt = ['-O2', '-fp_reorder', '-ansi_alias', '-ansi_args']
high_opt.append('-g')
#################################################################
# Declare the package and the MMTK_pose C extension (PoseFF sources
# plus the MMTK bonded/nonbonded/ewald/sparsefc kernels).
setup (name = package_name,
       version = pkginfo.__version__,
       description = "Molecular Modelling Toolkit",
       long_description=
"""
The Molecular Modelling Toolkit (MMTK) is an Open Source program
library for molecular simulation applications. It provides the most
common methods in molecular simulations (molecular dynamics, energy
minimization, normal mode analysis) and several force fields used for
biomolecules (Amber 94, Amber 99, several elastic network
models). MMTK also serves as a code basis that can be easily extended
and modified to deal with non-standard situations in molecular
simulations.
""",
       author = "Konrad Hinsen",
       author_email = "hinsen@cnrs-orleans.fr",
       url = "http://dirac.cnrs-orleans.fr/MMTK/",
       license = "CeCILL-C",

       package_dir = {'' : mmtk_home},
       #packages = ['MMTK', 'MMTK.ForceFields', 'MMTK.ForceFields.Amber',
       #            'MMTK.NormalModes', 'MMTK.Tk', 'MMTK.Tools',
       #            'MMTK.Tools.TrajectoryViewer'],
       headers = headers,
       # Extensions are installed under a per-platform subpackage.
       ext_package = 'MMTK.'+sys.platform,
       ext_modules = [Extension('MMTK_pose',
                                [poseff_src + 'MMTK_pose.c', poseff_src + 'pose.c',
                                 mmtk_home + 'Src/bonded.c', mmtk_home + 'Src/nonbonded.c',
                                 mmtk_home + 'Src/ewald.c', mmtk_home + 'Src/sparsefc.c'],
                                extra_compile_args = compile_args + high_opt,
                                include_dirs=include_dirs + ['Src'],
                                define_macros = [('SERIAL', None),
                                                 ('VIRIAL', None),
                                                 ('MACROSCOPIC', None)]
                                                + macros,
                                libraries=libraries),
                      ],

       data_files = data_files,
       #scripts = ['tviewer'],

       cmdclass = cmdclass,

       #command_options = {
       #    'build_sphinx': {
       #        'source_dir' : ('setup.py', 'Doc')}
       #    },
       )
|
CCBatIIT/AlGDock
|
AlGDock/ForceFields/Pose/setup.py
|
Python
|
mit
| 12,538
|
[
"Amber",
"DIRAC",
"NetCDF"
] |
339dbb3d248a91c35f2339819a725da7fd2e82fbcc410cd7325d93f30972b421
|
dice = {
11111: "a" ,
11112: "a's" ,
11113: "a-1" ,
11114: "a-z" ,
11115: "aa" ,
11116: "aaa" ,
11121: "aaaa" ,
11122: "aaron" ,
11123: "ab" ,
11124: "aback" ,
11125: "abacus" ,
11126: "abase" ,
11131: "abash" ,
11132: "abate" ,
11133: "abbey" ,
11134: "abbot" ,
11135: "abbr" ,
11136: "abby" ,
11141: "abc" ,
11142: "abc's" ,
11143: "abcd" ,
11144: "abduct" ,
11145: "abdul" ,
11146: "abe" ,
11151: "abed" ,
11152: "abel" ,
11153: "abet" ,
11154: "abhor" ,
11155: "abide" ,
11156: "ablaze" ,
11161: "able" ,
11162: "abm" ,
11163: "abner" ,
11164: "aboard" ,
11165: "abode" ,
11166: "abort" ,
11211: "about" ,
11212: "above" ,
11213: "abram" ,
11214: "absent" ,
11215: "absorb" ,
11216: "abuse" ,
11221: "abut" ,
11222: "abyss" ,
11223: "ac" ,
11224: "ac/dc" ,
11225: "accept" ,
11226: "accuse" ,
11231: "ace" ,
11232: "aces" ,
11233: "ache" ,
11234: "ached" ,
11235: "aches" ,
11236: "achoo" ,
11241: "achy" ,
11242: "acid" ,
11243: "acidic" ,
11244: "acids" ,
11245: "acme" ,
11246: "acne" ,
11251: "acorn" ,
11252: "acquit" ,
11253: "acre" ,
11254: "acres" ,
11255: "acrid" ,
11256: "act" ,
11261: "acted" ,
11262: "actor" ,
11263: "acts" ,
11264: "acute" ,
11265: "ad" ,
11266: "ada" ,
11311: "adage" ,
11312: "adagio" ,
11313: "adair" ,
11314: "adam" ,
11315: "adams" ,
11316: "adapt" ,
11321: "add" ,
11322: "added" ,
11323: "adder" ,
11324: "addict" ,
11325: "addle" ,
11326: "adds" ,
11331: "adele" ,
11332: "adept" ,
11333: "adieu" ,
11334: "adios" ,
11335: "adjust" ,
11336: "adler" ,
11341: "admit" ,
11342: "ado" ,
11343: "adobe" ,
11344: "adolf" ,
11345: "adonis" ,
11346: "adopt" ,
11351: "adore" ,
11352: "adorn" ,
11353: "ads" ,
11354: "adult" ,
11355: "advent" ,
11356: "adverb" ,
11361: "advise" ,
11362: "ae" ,
11363: "aeiou" ,
11364: "aerial" ,
11365: "aesop" ,
11366: "af" ,
11411: "afar" ,
11412: "affair" ,
11413: "afghan" ,
11414: "afire" ,
11415: "afoot" ,
11416: "afraid" ,
11421: "africa" ,
11422: "afro" ,
11423: "aft" ,
11424: "after" ,
11425: "ag" ,
11426: "again" ,
11431: "agate" ,
11432: "age" ,
11433: "aged" ,
11434: "agenda" ,
11435: "agent" ,
11436: "ages" ,
11441: "agile" ,
11442: "aging" ,
11443: "aglow" ,
11444: "agnes" ,
11445: "agnew" ,
11446: "ago" ,
11451: "agony" ,
11452: "agree" ,
11453: "ah" ,
11454: "aha" ,
11455: "ahab" ,
11456: "ahead" ,
11461: "ahem" ,
11462: "ahmed" ,
11463: "ahoy" ,
11464: "ai" ,
11465: "aid" ,
11466: "aide" ,
11511: "aided" ,
11512: "ail" ,
11513: "aim" ,
11514: "aimed" ,
11515: "aims" ,
11516: "ain't" ,
11521: "air" ,
11522: "airman" ,
11523: "airway" ,
11524: "airy" ,
11525: "aisle" ,
11526: "aj" ,
11531: "ajar" ,
11532: "ajax" ,
11533: "ak" ,
11534: "aka" ,
11535: "akers" ,
11536: "akin" ,
11541: "akqj" ,
11542: "akron" ,
11543: "al" ,
11544: "alan" ,
11545: "alarm" ,
11546: "alas" ,
11551: "alaska" ,
11552: "album" ,
11553: "alden" ,
11554: "ale" ,
11555: "alec" ,
11556: "aleck" ,
11561: "alert" ,
11562: "alex" ,
11563: "alexa" ,
11564: "alexei" ,
11565: "algae" ,
11566: "alger" ,
11611: "ali" ,
11612: "alias" ,
11613: "alibi" ,
11614: "alice" ,
11615: "alien" ,
11616: "alight" ,
11621: "align" ,
11622: "alike" ,
11623: "alive" ,
11624: "alkali" ,
11625: "all" ,
11626: "allah" ,
11631: "allan" ,
11632: "allen" ,
11633: "alley" ,
11634: "allied" ,
11635: "allot" ,
11636: "allow" ,
11641: "alloy" ,
11642: "allure" ,
11643: "ally" ,
11644: "alma" ,
11645: "almost" ,
11646: "alms" ,
11651: "aloft" ,
11652: "aloha" ,
11653: "alone" ,
11654: "along" ,
11655: "aloof" ,
11656: "aloud" ,
11661: "alp" ,
11662: "alpha" ,
11663: "alps" ,
11664: "also" ,
11665: "alsop" ,
11666: "altar" ,
12111: "alter" ,
12112: "altho" ,
12113: "alto" ,
12114: "alum" ,
12115: "alumni" ,
12116: "alvin" ,
12121: "alyx" ,
12122: "am" ,
12123: "am/fm" ,
12124: "amass" ,
12125: "amaze" ,
12126: "amber" ,
12131: "amble" ,
12132: "ambush" ,
12133: "amen" ,
12134: "amend" ,
12135: "ames" ,
12136: "amid" ,
12141: "amigo" ,
12142: "amino" ,
12143: "amish" ,
12144: "amiss" ,
12145: "amity" ,
12146: "ammo" ,
12151: "amok" ,
12152: "among" ,
12153: "amos" ,
12154: "amour" ,
12155: "amp" ,
12156: "ampere" ,
12161: "ample" ,
12162: "amply" ,
12163: "amps" ,
12164: "amulet" ,
12165: "amuse" ,
12166: "amy" ,
12211: "an" ,
12212: "anal" ,
12213: "anchor" ,
12214: "and" ,
12215: "andes" ,
12216: "andre" ,
12221: "andrew" ,
12222: "andy" ,
12223: "anew" ,
12224: "angel" ,
12225: "angelo" ,
12226: "anger" ,
12231: "angie" ,
12232: "angle" ,
12233: "angles" ,
12234: "anglo" ,
12235: "angry" ,
12236: "angst" ,
12241: "angus" ,
12242: "anita" ,
12243: "ankle" ,
12244: "ann" ,
12245: "anna" ,
12246: "anne" ,
12251: "annex" ,
12252: "annie" ,
12253: "annoy" ,
12254: "annul" ,
12255: "anon" ,
12256: "answer" ,
12261: "ant" ,
12262: "ante" ,
12263: "anti" ,
12264: "antic" ,
12265: "anton" ,
12266: "ants" ,
12311: "anus" ,
12312: "anvil" ,
12313: "any" ,
12314: "anyhow" ,
12315: "anyway" ,
12316: "ao" ,
12321: "aok" ,
12322: "aorta" ,
12323: "ap" ,
12324: "apart" ,
12325: "apathy" ,
12326: "ape" ,
12331: "apes" ,
12332: "apex" ,
12333: "aphid" ,
12334: "aplomb" ,
12335: "appeal" ,
12336: "appear" ,
12341: "append" ,
12342: "apple" ,
12343: "apply" ,
12344: "apr" ,
12345: "april" ,
12346: "apron" ,
12351: "apt" ,
12352: "aq" ,
12353: "aqua" ,
12354: "ar" ,
12355: "arab" ,
12356: "arabs" ,
12361: "araby" ,
12362: "arbor" ,
12363: "arc" ,
12364: "arcade" ,
12365: "arch" ,
12366: "archer" ,
12411: "arcs" ,
12412: "ardent" ,
12413: "are" ,
12414: "area" ,
12415: "areas" ,
12416: "arena" ,
12421: "argon" ,
12422: "argue" ,
12423: "aria" ,
12424: "arid" ,
12425: "arise" ,
12426: "ark" ,
12431: "arlene" ,
12432: "arm" ,
12433: "armed" ,
12434: "armor" ,
12435: "arms" ,
12436: "army" ,
12441: "arnold" ,
12442: "aroma" ,
12443: "arose" ,
12444: "array" ,
12445: "arrive" ,
12446: "arrow" ,
12451: "arson" ,
12452: "art" ,
12453: "artery" ,
12454: "arthur" ,
12455: "artie" ,
12456: "arts" ,
12461: "arty" ,
12462: "aryan" ,
12463: "as" ,
12464: "asap" ,
12465: "ascend" ,
12466: "ascii" ,
12511: "ash" ,
12512: "ashen" ,
12513: "ashes" ,
12514: "ashley" ,
12515: "ashy" ,
12516: "asia" ,
12521: "asian" ,
12522: "aside" ,
12523: "ask" ,
12524: "asked" ,
12525: "askew" ,
12526: "asks" ,
12531: "asleep" ,
12532: "asp" ,
12533: "aspen" ,
12534: "aspire" ,
12535: "ass" ,
12536: "asses" ,
12541: "asset" ,
12542: "assn" ,
12543: "assure" ,
12544: "asthma" ,
12545: "astor" ,
12546: "astral" ,
12551: "at" ,
12552: "at&t" ,
12553: "atari" ,
12554: "ate" ,
12555: "athens" ,
12556: "atlas" ,
12561: "atm" ,
12562: "atoll" ,
12563: "atom" ,
12564: "atomic" ,
12565: "atoms" ,
12566: "atone" ,
12611: "atop" ,
12612: "attic" ,
12613: "attire" ,
12614: "attn" ,
12615: "au" ,
12616: "audio" ,
12621: "audit" ,
12622: "audrey" ,
12623: "aug" ,
12624: "augur" ,
12625: "august" ,
12626: "auk" ,
12631: "aunt" ,
12632: "aunts" ,
12633: "aura" ,
12634: "aural" ,
12635: "austin" ,
12636: "auto" ,
12641: "autumn" ,
12642: "av" ,
12643: "avail" ,
12644: "avert" ,
12645: "avery" ,
12646: "avian" ,
12651: "aviate" ,
12652: "avid" ,
12653: "avis" ,
12654: "avoid" ,
12655: "avon" ,
12656: "avow" ,
12661: "aw" ,
12662: "await" ,
12663: "awake" ,
12664: "award" ,
12665: "aware" ,
12666: "awash" ,
13111: "away" ,
13112: "awe" ,
13113: "awed" ,
13114: "awful" ,
13115: "awl" ,
13116: "awn" ,
13121: "awoke" ,
13122: "awol" ,
13123: "awry" ,
13124: "ax" ,
13125: "axe" ,
13126: "axes" ,
13131: "axiom" ,
13132: "axis" ,
13133: "axle" ,
13134: "ay" ,
13135: "aye" ,
13136: "az" ,
13141: "aztec" ,
13142: "azure" ,
13143: "b" ,
13144: "b&w" ,
13145: "b's" ,
13146: "b-52" ,
13151: "ba" ,
13152: "baal" ,
13153: "babe" ,
13154: "babel" ,
13155: "babes" ,
13156: "baboon" ,
13161: "baby" ,
13162: "bach" ,
13163: "back" ,
13164: "backup" ,
13165: "bacon" ,
13166: "bad" ,
13211: "badge" ,
13212: "badly" ,
13213: "baffle" ,
13214: "bag" ,
13215: "bagel" ,
13216: "baggy" ,
13221: "bags" ,
13222: "bah" ,
13223: "bahama" ,
13224: "bail" ,
13225: "bait" ,
13226: "bake" ,
13231: "baker" ,
13232: "bakes" ,
13233: "bald" ,
13234: "bale" ,
13235: "bali" ,
13236: "balk" ,
13241: "balkan" ,
13242: "ball" ,
13243: "balled" ,
13244: "ballot" ,
13245: "balls" ,
13246: "balm" ,
13251: "balmy" ,
13252: "balsa" ,
13253: "bambi" ,
13254: "ban" ,
13255: "banal" ,
13256: "banana" ,
13261: "band" ,
13262: "bandit" ,
13263: "bands" ,
13264: "bandy" ,
13265: "bane" ,
13266: "bang" ,
13311: "bangs" ,
13312: "banish" ,
13313: "banjo" ,
13314: "bank" ,
13315: "banks" ,
13316: "bar" ,
13321: "barb" ,
13322: "barbs" ,
13323: "bard" ,
13324: "bare" ,
13325: "barf" ,
13326: "barge" ,
13331: "bark" ,
13332: "barks" ,
13333: "barley" ,
13334: "barn" ,
13335: "barnes" ,
13336: "baron" ,
13341: "barony" ,
13342: "barry" ,
13343: "bars" ,
13344: "bart" ,
13345: "barter" ,
13346: "barton" ,
13351: "base" ,
13352: "bash" ,
13353: "basic" ,
13354: "basil" ,
13355: "basin" ,
13356: "basis" ,
13361: "bask" ,
13362: "basket" ,
13363: "bass" ,
13364: "baste" ,
13365: "bat" ,
13366: "batch" ,
13411: "bates" ,
13412: "bath" ,
13413: "bathe" ,
13414: "baths" ,
13415: "baton" ,
13416: "bats" ,
13421: "bauble" ,
13422: "baud" ,
13423: "bawd" ,
13424: "bawdy" ,
13425: "bawl" ,
13426: "bay" ,
13431: "bayer" ,
13432: "bayou" ,
13433: "bays" ,
13434: "bazaar" ,
13435: "bb" ,
13436: "bbb" ,
13441: "bbbb" ,
13442: "bbc" ,
13443: "bbs" ,
13444: "bc" ,
13445: "bcd" ,
13446: "bd" ,
13451: "be" ,
13452: "beach" ,
13453: "beacon" ,
13454: "bead" ,
13455: "beads" ,
13456: "beady" ,
13461: "beak" ,
13462: "beam" ,
13463: "beams" ,
13464: "bean" ,
13465: "beans" ,
13466: "bear" ,
13511: "beard" ,
13512: "bears" ,
13513: "beast" ,
13514: "beat" ,
13515: "beats" ,
13516: "beau" ,
13521: "beauty" ,
13522: "beaver" ,
13523: "bebop" ,
13524: "beck" ,
13525: "becky" ,
13526: "bed" ,
13531: "beds" ,
13532: "bee" ,
13533: "beech" ,
13534: "beef" ,
13535: "beefy" ,
13536: "been" ,
13541: "beep" ,
13542: "beeps" ,
13543: "beer" ,
13544: "beers" ,
13545: "bees" ,
13546: "beet" ,
13551: "beets" ,
13552: "befall" ,
13553: "befit" ,
13554: "befog" ,
13555: "beg" ,
13556: "began" ,
13561: "beget" ,
13562: "beggar" ,
13563: "begin" ,
13564: "begs" ,
13565: "begun" ,
13566: "behind" ,
13611: "beige" ,
13612: "being" ,
13613: "beirut" ,
13614: "belch" ,
13615: "belfry" ,
13616: "belief" ,
13621: "bell" ,
13622: "bella" ,
13623: "belle" ,
13624: "bellow" ,
13625: "bells" ,
13626: "belly" ,
13631: "below" ,
13632: "belt" ,
13633: "belts" ,
13634: "bemoan" ,
13635: "ben" ,
13636: "bench" ,
13641: "bend" ,
13642: "bender" ,
13643: "bends" ,
13644: "benign" ,
13645: "benny" ,
13646: "bent" ,
13651: "benz" ,
13652: "beret" ,
13653: "berg" ,
13654: "berlin" ,
13655: "berra" ,
13656: "berry" ,
13661: "bert" ,
13662: "berth" ,
13663: "beryl" ,
13664: "beset" ,
13665: "bess" ,
13666: "best" ,
14111: "bet" ,
14112: "beta" ,
14113: "beth" ,
14114: "betray" ,
14115: "bets" ,
14116: "betsy" ,
14121: "bette" ,
14122: "betty" ,
14123: "bevy" ,
14124: "beware" ,
14125: "beyond" ,
14126: "bf" ,
14131: "bflat" ,
14132: "bg" ,
14133: "bh" ,
14134: "bi" ,
14135: "bias" ,
14136: "bib" ,
14141: "bible" ,
14142: "biceps" ,
14143: "bid" ,
14144: "bide" ,
14145: "bids" ,
14146: "bier" ,
14151: "big" ,
14152: "bigamy" ,
14153: "bigot" ,
14154: "bike" ,
14155: "biker" ,
14156: "bikini" ,
14161: "bile" ,
14162: "bilge" ,
14163: "bilk" ,
14164: "bill" ,
14165: "bills" ,
14166: "billy" ,
14211: "bimbo" ,
14212: "bin" ,
14213: "binary" ,
14214: "bind" ,
14215: "binge" ,
14216: "bingo" ,
14221: "biped" ,
14222: "birch" ,
14223: "bird" ,
14224: "birdie" ,
14225: "birds" ,
14226: "birth" ,
14231: "bison" ,
14232: "bisque" ,
14233: "bit" ,
14234: "bite" ,
14235: "bites" ,
14236: "bits" ,
14241: "bitten" ,
14242: "biz" ,
14243: "bj" ,
14244: "bk" ,
14245: "bl" ,
14246: "blab" ,
14251: "black" ,
14252: "blade" ,
14253: "blah" ,
14254: "blair" ,
14255: "blake" ,
14256: "blame" ,
14261: "bland" ,
14262: "blank" ,
14263: "blare" ,
14264: "blast" ,
14265: "blat" ,
14266: "blaze" ,
14311: "bldg" ,
14312: "bleak" ,
14313: "bleat" ,
14314: "bled" ,
14315: "bleed" ,
14316: "blend" ,
14321: "bless" ,
14322: "blew" ,
14323: "blimp" ,
14324: "blind" ,
14325: "blink" ,
14326: "blip" ,
14331: "blips" ,
14332: "bliss" ,
14333: "blithe" ,
14334: "blitz" ,
14335: "bloat" ,
14336: "blob" ,
14341: "blobs" ,
14342: "bloc" ,
14343: "block" ,
14344: "bloke" ,
14345: "blond" ,
14346: "blonde" ,
14351: "blood" ,
14352: "bloom" ,
14353: "bloop" ,
14354: "blot" ,
14355: "blotch" ,
14356: "blots" ,
14361: "blow" ,
14362: "blown" ,
14363: "blows" ,
14364: "blt" ,
14365: "blue" ,
14366: "blues" ,
14411: "bluff" ,
14412: "blunt" ,
14413: "blur" ,
14414: "blurs" ,
14415: "blurt" ,
14416: "blush" ,
14421: "blvd" ,
14422: "blythe" ,
14423: "bm" ,
14424: "bmw" ,
14425: "bn" ,
14426: "bo" ,
14431: "boa" ,
14432: "boar" ,
14433: "board" ,
14434: "boast" ,
14435: "boat" ,
14436: "boats" ,
14441: "bob" ,
14442: "bobby" ,
14443: "bobcat" ,
14444: "bobs" ,
14445: "bode" ,
14446: "body" ,
14451: "bog" ,
14452: "bogey" ,
14453: "boggy" ,
14454: "bogs" ,
14455: "bogus" ,
14456: "boil" ,
14461: "boils" ,
14462: "boise" ,
14463: "bold" ,
14464: "bolt" ,
14465: "bolts" ,
14466: "bomb" ,
14511: "bombay" ,
14512: "bombs" ,
14513: "bond" ,
14514: "bone" ,
14515: "bones" ,
14516: "bong" ,
14521: "bongo" ,
14522: "bonn" ,
14523: "bonus" ,
14524: "bony" ,
14525: "boo" ,
14526: "boob" ,
14531: "booby" ,
14532: "boogie" ,
14533: "book" ,
14534: "books" ,
14535: "boom" ,
14536: "boon" ,
14541: "boone" ,
14542: "boor" ,
14543: "boost" ,
14544: "boot" ,
14545: "booth" ,
14546: "boots" ,
14551: "booty" ,
14552: "booze" ,
14553: "bop" ,
14554: "borax" ,
14555: "border" ,
14556: "bore" ,
14561: "bored" ,
14562: "bores" ,
14563: "borg" ,
14564: "boris" ,
14565: "born" ,
14566: "borneo" ,
14611: "boron" ,
14612: "bosom" ,
14613: "boss" ,
14614: "bossy" ,
14615: "boston" ,
14616: "botch" ,
14621: "both" ,
14622: "bottle" ,
14623: "bough" ,
14624: "bouncy" ,
14625: "bound" ,
14626: "bout" ,
14631: "bovine" ,
14632: "bow" ,
14633: "bowed" ,
14634: "bowel" ,
14635: "bowie" ,
14636: "bowl" ,
14641: "bowls" ,
14642: "bows" ,
14643: "box" ,
14644: "boxed" ,
14645: "boxer" ,
14646: "boxes" ,
14651: "boxy" ,
14652: "boy" ,
14653: "boyd" ,
14654: "boyle" ,
14655: "boys" ,
14656: "bozo" ,
14661: "bp" ,
14662: "bq" ,
14663: "br" ,
14664: "bra" ,
14665: "brace" ,
14666: "brad" ,
15111: "brady" ,
15112: "brag" ,
15113: "brags" ,
15114: "braid" ,
15115: "brain" ,
15116: "brainy" ,
15121: "brake" ,
15122: "bran" ,
15123: "brand" ,
15124: "brandy" ,
15125: "brash" ,
15126: "brass" ,
15131: "brassy" ,
15132: "brat" ,
15133: "brats" ,
15134: "brave" ,
15135: "bravo" ,
15136: "brawl" ,
15141: "brawn" ,
15142: "bray" ,
15143: "brazil" ,
15144: "bread" ,
15145: "break" ,
15146: "breath" ,
15151: "bred" ,
15152: "breed" ,
15153: "breeze" ,
15154: "brew" ,
15155: "brian" ,
15156: "briar" ,
15161: "bribe" ,
15162: "brick" ,
15163: "bride" ,
15164: "bridge" ,
15165: "brief" ,
15166: "brig" ,
15211: "brim" ,
15212: "brine" ,
15213: "bring" ,
15214: "brink" ,
15215: "briny" ,
15216: "brisk" ,
15221: "broad" ,
15222: "broil" ,
15223: "broke" ,
15224: "broken" ,
15225: "bronco" ,
15226: "bronx" ,
15231: "brood" ,
15232: "brook" ,
15233: "broom" ,
15234: "broth" ,
15235: "brow" ,
15236: "brown" ,
15241: "brows" ,
15242: "browse" ,
15243: "bruce" ,
15244: "bruin" ,
15245: "brunch" ,
15246: "bruno" ,
15251: "brunt" ,
15252: "brush" ,
15253: "brutal" ,
15254: "brute" ,
15255: "bryan" ,
15256: "bs" ,
15261: "bt" ,
15262: "btu" ,
15263: "bu" ,
15264: "bub" ,
15265: "buck" ,
15266: "bucks" ,
15311: "bud" ,
15312: "buddha" ,
15313: "buddy" ,
15314: "budge" ,
15315: "buds" ,
15316: "buff" ,
15321: "bug" ,
15322: "buggy" ,
15323: "bugle" ,
15324: "bugs" ,
15325: "buick" ,
15326: "build" ,
15331: "built" ,
15332: "bulb" ,
15333: "bulbs" ,
15334: "bulge" ,
15335: "bulk" ,
15336: "bulky" ,
15341: "bull" ,
15342: "bulls" ,
15343: "bully" ,
15344: "bum" ,
15345: "bump" ,
15346: "bumps" ,
15351: "bumpy" ,
15352: "bums" ,
15353: "bun" ,
15354: "bunch" ,
15355: "bunco" ,
15356: "bundy" ,
15361: "bunk" ,
15362: "bunny" ,
15363: "buns" ,
15364: "bunt" ,
15365: "bunts" ,
15366: "buoy" ,
15411: "bureau" ,
15412: "burg" ,
15413: "burger" ,
15414: "buried" ,
15415: "burke" ,
15416: "burly" ,
15421: "burma" ,
15422: "burn" ,
15423: "burns" ,
15424: "burnt" ,
15425: "burp" ,
15426: "burps" ,
15431: "burro" ,
15432: "burst" ,
15433: "burt" ,
15434: "burton" ,
15435: "bury" ,
15436: "bus" ,
15441: "bush" ,
15442: "bushel" ,
15443: "bushy" ,
15444: "buss" ,
15445: "bust" ,
15446: "busy" ,
15451: "but" ,
15452: "butane" ,
15453: "butch" ,
15454: "butt" ,
15455: "butte" ,
15456: "buxom" ,
15461: "buy" ,
15462: "buyer" ,
15463: "buys" ,
15464: "buzz" ,
15465: "bv" ,
15466: "bvm" ,
15511: "bw" ,
15512: "bwana" ,
15513: "bx" ,
15514: "by" ,
15515: "bye" ,
15516: "bylaw" ,
15521: "byline" ,
15522: "byob" ,
15523: "bypass" ,
15524: "byrd" ,
15525: "byron" ,
15526: "byte" ,
15531: "bytes" ,
15532: "byway" ,
15533: "bz" ,
15534: "c" ,
15535: "c#" ,
15536: "c&w" ,
15541: "c's" ,
15542: "c/o" ,
15543: "ca" ,
15544: "cab" ,
15545: "cabal" ,
15546: "cabana" ,
15551: "cabin" ,
15552: "cable" ,
15553: "cabot" ,
15554: "cache" ,
15555: "cackle" ,
15556: "cacti" ,
15561: "caddy" ,
15562: "cadet" ,
15563: "caesar" ,
15564: "cafe" ,
15565: "cage" ,
15566: "caged" ,
15611: "cages" ,
15612: "cagey" ,
15613: "cain" ,
15614: "cairn" ,
15615: "cairo" ,
15616: "cajun" ,
15621: "cake" ,
15622: "cakes" ,
15623: "calf" ,
15624: "calico" ,
15625: "call" ,
15626: "calls" ,
15631: "callus" ,
15632: "calm" ,
15633: "calms" ,
15634: "calvin" ,
15635: "cam" ,
15636: "came" ,
15641: "camel" ,
15642: "cameo" ,
15643: "camera" ,
15644: "camp" ,
15645: "camps" ,
15646: "camry" ,
15651: "can" ,
15652: "can't" ,
15653: "canal" ,
15654: "canary" ,
15655: "cancer" ,
15656: "candle" ,
15661: "candy" ,
15662: "cane" ,
15663: "caned" ,
15664: "canes" ,
15665: "cannot" ,
15666: "canny" ,
16111: "canoe" ,
16112: "canon" ,
16113: "canopy" ,
16114: "cans" ,
16115: "canto" ,
16116: "canvas" ,
16121: "canyon" ,
16122: "cap" ,
16123: "cape" ,
16124: "caped" ,
16125: "caper" ,
16126: "capri" ,
16131: "car" ,
16132: "carat" ,
16133: "carbon" ,
16134: "card" ,
16135: "care" ,
16136: "cares" ,
16141: "caress" ,
16142: "caret" ,
16143: "cargo" ,
16144: "carl" ,
16145: "carla" ,
16146: "carlo" ,
16151: "carol" ,
16152: "carp" ,
16153: "carpet" ,
16154: "carrie" ,
16155: "carry" ,
16156: "cars" ,
16161: "carson" ,
16162: "cart" ,
16163: "caruso" ,
16164: "carve" ,
16165: "case" ,
16166: "cases" ,
16211: "casey" ,
16212: "cash" ,
16213: "cashew" ,
16214: "cask" ,
16215: "casket" ,
16216: "cast" ,
16221: "caste" ,
16222: "cat" ,
16223: "catch" ,
16224: "cater" ,
16225: "cathy" ,
16226: "cats" ,
16231: "catsup" ,
16232: "catty" ,
16233: "caulk" ,
16234: "cause" ,
16235: "cave" ,
16236: "cavern" ,
16241: "caves" ,
16242: "cavort" ,
16243: "cb" ,
16244: "cc" ,
16245: "ccc" ,
16246: "cccc" ,
16251: "cccp" ,
16252: "cd" ,
16253: "cde" ,
16254: "ce" ,
16255: "cease" ,
16256: "cecil" ,
16261: "cedar" ,
16262: "cede" ,
16263: "celery" ,
16264: "celia" ,
16265: "cell" ,
16266: "cello" ,
16311: "census" ,
16312: "cent" ,
16313: "cents" ,
16314: "ceo" ,
16315: "cesar" ,
16316: "cf" ,
16321: "cg" ,
16322: "ch" ,
16323: "chad" ,
16324: "chafe" ,
16325: "chaff" ,
16326: "chain" ,
16331: "chair" ,
16332: "chalk" ,
16333: "champ" ,
16334: "chance" ,
16335: "chant" ,
16336: "chaos" ,
16341: "chap" ,
16342: "chapel" ,
16343: "char" ,
16344: "charm" ,
16345: "chart" ,
16346: "chase" ,
16351: "chasm" ,
16352: "chaste" ,
16353: "chat" ,
16354: "chats" ,
16355: "cheap" ,
16356: "cheat" ,
16361: "check" ,
16362: "cheek" ,
16363: "cheeky" ,
16364: "cheer" ,
16365: "chef" ,
16366: "cherub" ,
16411: "chess" ,
16412: "chest" ,
16413: "chevy" ,
16414: "chew" ,
16415: "chews" ,
16416: "chewy" ,
16421: "chi" ,
16422: "chic" ,
16423: "chick" ,
16424: "chide" ,
16425: "chief" ,
16426: "child" ,
16431: "chile" ,
16432: "chili" ,
16433: "chill" ,
16434: "chilly" ,
16435: "chime" ,
16436: "chimp" ,
16441: "chin" ,
16442: "china" ,
16443: "chip" ,
16444: "chips" ,
16445: "chirp" ,
16446: "chisel" ,
16451: "chit" ,
16452: "chive" ,
16453: "chloe" ,
16454: "chock" ,
16455: "choir" ,
16456: "choke" ,
16461: "chomp" ,
16462: "chop" ,
16463: "chopin" ,
16464: "chops" ,
16465: "choral" ,
16466: "chord" ,
16511: "chore" ,
16512: "chose" ,
16513: "chosen" ,
16514: "chow" ,
16515: "chris" ,
16516: "chub" ,
16521: "chuck" ,
16522: "chug" ,
16523: "chum" ,
16524: "chump" ,
16525: "chunk" ,
16526: "churn" ,
16531: "chute" ,
16532: "ci" ,
16533: "cia" ,
16534: "ciao" ,
16535: "cicada" ,
16536: "cider" ,
16541: "cigar" ,
16542: "cilia" ,
16543: "cinch" ,
16544: "cindy" ,
16545: "cipher" ,
16546: "circa" ,
16551: "circe" ,
16552: "cite" ,
16553: "citrus" ,
16554: "city" ,
16555: "civet" ,
16556: "civic" ,
16561: "civil" ,
16562: "cj" ,
16563: "ck" ,
16564: "cl" ,
16565: "clad" ,
16566: "claim" ,
16611: "clam" ,
16612: "clammy" ,
16613: "clamp" ,
16614: "clan" ,
16615: "clang" ,
16616: "clank" ,
16621: "clap" ,
16622: "claps" ,
16623: "clara" ,
16624: "clark" ,
16625: "clash" ,
16626: "clasp" ,
16631: "class" ,
16632: "claus" ,
16633: "clause" ,
16634: "claw" ,
16635: "claws" ,
16636: "clay" ,
16641: "clean" ,
16642: "clear" ,
16643: "cleat" ,
16644: "clef" ,
16645: "cleft" ,
16646: "clem" ,
16651: "cleo" ,
16652: "clerk" ,
16653: "clever" ,
16654: "cliche" ,
16655: "click" ,
16656: "cliff" ,
16661: "climb" ,
16662: "cling" ,
16663: "clink" ,
16664: "clip" ,
16665: "cloak" ,
16666: "clock" ,
21111: "clod" ,
21112: "clog" ,
21113: "clone" ,
21114: "close" ,
21115: "closet" ,
21116: "clot" ,
21121: "cloth" ,
21122: "cloud" ,
21123: "clout" ,
21124: "clove" ,
21125: "clown" ,
21126: "cloy" ,
21131: "club" ,
21132: "clubs" ,
21133: "cluck" ,
21134: "clue" ,
21135: "clues" ,
21136: "clump" ,
21141: "clumsy" ,
21142: "clung" ,
21143: "clyde" ,
21144: "cm" ,
21145: "cn" ,
21146: "co" ,
21151: "co2" ,
21152: "coach" ,
21153: "coal" ,
21154: "coast" ,
21155: "coat" ,
21156: "coats" ,
21161: "coax" ,
21162: "cob" ,
21163: "cobble" ,
21164: "cobol" ,
21165: "cobra" ,
21166: "coca" ,
21211: "cock" ,
21212: "cockle" ,
21213: "cocky" ,
21214: "cocoa" ,
21215: "cod" ,
21216: "coda" ,
21221: "coddle" ,
21222: "code" ,
21223: "coded" ,
21224: "codes" ,
21225: "cody" ,
21226: "coed" ,
21231: "cog" ,
21232: "cogent" ,
21233: "cogs" ,
21234: "cohen" ,
21235: "coif" ,
21236: "coil" ,
21241: "coils" ,
21242: "coin" ,
21243: "coins" ,
21244: "coke" ,
21245: "cola" ,
21246: "colby" ,
21251: "cold" ,
21252: "cole" ,
21253: "colon" ,
21254: "colony" ,
21255: "color" ,
21256: "colt" ,
21261: "coma" ,
21262: "comb" ,
21263: "combat" ,
21264: "combo" ,
21265: "come" ,
21266: "comet" ,
21311: "comfy" ,
21312: "comic" ,
21313: "comma" ,
21314: "con" ,
21315: "conch" ,
21316: "condo" ,
21321: "cone" ,
21322: "coney" ,
21323: "congo" ,
21324: "conic" ,
21325: "convex" ,
21326: "convoy" ,
21331: "conway" ,
21332: "coo" ,
21333: "cook" ,
21334: "cooky" ,
21335: "cool" ,
21336: "coon" ,
21341: "coop" ,
21342: "cooper" ,
21343: "coors" ,
21344: "coos" ,
21345: "coot" ,
21346: "cop" ,
21351: "cope" ,
21352: "copes" ,
21353: "copper" ,
21354: "copra" ,
21355: "cops" ,
21356: "copy" ,
21361: "coral" ,
21362: "cord" ,
21363: "cords" ,
21364: "core" ,
21365: "cork" ,
21366: "corn" ,
21411: "corny" ,
21412: "corp" ,
21413: "corps" ,
21414: "cortex" ,
21415: "cost" ,
21416: "costs" ,
21421: "cot" ,
21422: "couch" ,
21423: "cough" ,
21424: "could" ,
21425: "count" ,
21426: "coup" ,
21431: "coupe" ,
21432: "court" ,
21433: "cousin" ,
21434: "cove" ,
21435: "coven" ,
21436: "cover" ,
21441: "covet" ,
21442: "cow" ,
21443: "cowboy" ,
21444: "cowl" ,
21445: "cows" ,
21446: "cox" ,
21451: "coy" ,
21452: "coyote" ,
21453: "cozy" ,
21454: "cp" ,
21455: "cpa" ,
21456: "cpr" ,
21461: "cpu" ,
21462: "cq" ,
21463: "cr" ,
21464: "crab" ,
21465: "crack" ,
21466: "craft" ,
21511: "crag" ,
21512: "craig" ,
21513: "cram" ,
21514: "cramp" ,
21515: "crane" ,
21516: "crank" ,
21521: "crap" ,
21522: "craps" ,
21523: "crash" ,
21524: "crass" ,
21525: "crate" ,
21526: "crater" ,
21531: "crave" ,
21532: "crawl" ,
21533: "craze" ,
21534: "crazy" ,
21535: "creak" ,
21536: "cream" ,
21541: "credit" ,
21542: "credo" ,
21543: "creed" ,
21544: "creek" ,
21545: "creep" ,
21546: "creole" ,
21551: "crepe" ,
21552: "crept" ,
21553: "cress" ,
21554: "crest" ,
21555: "crete" ,
21556: "crew" ,
21561: "crib" ,
21562: "cried" ,
21563: "crime" ,
21564: "crimp" ,
21565: "crisp" ,
21566: "croak" ,
21611: "crock" ,
21612: "crocus" ,
21613: "crone" ,
21614: "crony" ,
21615: "crook" ,
21616: "croon" ,
21621: "crop" ,
21622: "crops" ,
21623: "cross" ,
21624: "crow" ,
21625: "crowd" ,
21626: "crown" ,
21631: "crows" ,
21632: "crt" ,
21633: "crud" ,
21634: "crude" ,
21635: "cruel" ,
21636: "crumb" ,
21641: "crunch" ,
21642: "crush" ,
21643: "crust" ,
21644: "crux" ,
21645: "cry" ,
21646: "crypt" ,
21651: "cs" ,
21652: "ct" ,
21653: "cu" ,
21654: "cub" ,
21655: "cuba" ,
21656: "cuban" ,
21661: "cube" ,
21662: "cubic" ,
21663: "cubs" ,
21664: "cud" ,
21665: "cuddle" ,
21666: "cue" ,
22111: "cues" ,
22112: "cuff" ,
22113: "cull" ,
22114: "cult" ,
22115: "cults" ,
22116: "cup" ,
22121: "cupful" ,
22122: "cupid" ,
22123: "cups" ,
22124: "cur" ,
22125: "curb" ,
22126: "curd" ,
22131: "cure" ,
22132: "cured" ,
22133: "curfew" ,
22134: "curie" ,
22135: "curio" ,
22136: "curl" ,
22141: "curls" ,
22142: "curry" ,
22143: "curse" ,
22144: "curt" ,
22145: "curve" ,
22146: "cusp" ,
22151: "cuss" ,
22152: "cut" ,
22153: "cute" ,
22154: "cutlet" ,
22155: "cuts" ,
22156: "cv" ,
22161: "cw" ,
22162: "cx" ,
22163: "cy" ,
22164: "cycle" ,
22165: "cynic" ,
22166: "cyrus" ,
22211: "cyst" ,
22212: "cz" ,
22213: "czar" ,
22214: "czech" ,
22215: "d" ,
22216: "d&d" ,
22221: "d's" ,
22222: "d-day" ,
22223: "da" ,
22224: "dab" ,
22225: "dad" ,
22226: "daddy" ,
22231: "daffy" ,
22232: "daft" ,
22233: "dagger" ,
22234: "dahlia" ,
22235: "daily" ,
22236: "dairy" ,
22241: "dais" ,
22242: "daisy" ,
22243: "dale" ,
22244: "dally" ,
22245: "dam" ,
22246: "dame" ,
22251: "damn" ,
22252: "damon" ,
22253: "damp" ,
22254: "damsel" ,
22255: "dan" ,
22256: "dana" ,
22261: "dance" ,
22262: "dandy" ,
22263: "dane" ,
22264: "dang" ,
22265: "dank" ,
22266: "danny" ,
22311: "dante" ,
22312: "dare" ,
22313: "dared" ,
22314: "dares" ,
22315: "dark" ,
22316: "darken" ,
22321: "darn" ,
22322: "dart" ,
22323: "darts" ,
22324: "darwin" ,
22325: "daryl" ,
22326: "dash" ,
22331: "data" ,
22332: "date" ,
22333: "dates" ,
22334: "datum" ,
22335: "daub" ,
22336: "daunt" ,
22341: "dave" ,
22342: "david" ,
22343: "davis" ,
22344: "davy" ,
22345: "dawn" ,
22346: "day" ,
22351: "days" ,
22352: "daze" ,
22353: "dazed" ,
22354: "db" ,
22355: "dbms" ,
22356: "dc" ,
22361: "dd" ,
22362: "ddd" ,
22363: "dddd" ,
22364: "dds" ,
22365: "ddt" ,
22366: "de" ,
22411: "deacon" ,
22412: "dead" ,
22413: "deaf" ,
22414: "deal" ,
22415: "deals" ,
22416: "dealt" ,
22421: "dean" ,
22422: "dear" ,
22423: "death" ,
22424: "debby" ,
22425: "debit" ,
22426: "debra" ,
22431: "debris" ,
22432: "debt" ,
22433: "debts" ,
22434: "debug" ,
22435: "debut" ,
22436: "dec" ,
22441: "decal" ,
22442: "decay" ,
22443: "deck" ,
22444: "decor" ,
22445: "decoy" ,
22446: "decree" ,
22451: "decry" ,
22452: "dee" ,
22453: "deed" ,
22454: "deeds" ,
22455: "deejay" ,
22456: "deem" ,
22461: "deep" ,
22462: "deer" ,
22463: "def" ,
22464: "defect" ,
22465: "defer" ,
22466: "deform" ,
22511: "deft" ,
22512: "defy" ,
22513: "deify" ,
22514: "deity" ,
22515: "del" ,
22516: "delay" ,
22521: "delhi" ,
22522: "deli" ,
22523: "delia" ,
22524: "della" ,
22525: "delta" ,
22526: "deluxe" ,
22531: "delve" ,
22532: "demo" ,
22533: "demon" ,
22534: "demur" ,
22535: "den" ,
22536: "denial" ,
22541: "denim" ,
22542: "denny" ,
22543: "dense" ,
22544: "dent" ,
22545: "dents" ,
22546: "deny" ,
22551: "depot" ,
22552: "dept" ,
22553: "depth" ,
22554: "deputy" ,
22555: "derby" ,
22556: "derek" ,
22561: "desist" ,
22562: "desk" ,
22563: "desks" ,
22564: "detach" ,
22565: "deter" ,
22566: "detox" ,
22611: "deuce" ,
22612: "devil" ,
22613: "devoid" ,
22614: "dew" ,
22615: "dewey" ,
22616: "dewy" ,
22621: "df" ,
22622: "dg" ,
22623: "dh" ,
22624: "di" ,
22625: "dial" ,
22626: "dials" ,
22631: "diana" ,
22632: "diane" ,
22633: "diaper" ,
22634: "diary" ,
22635: "dibs" ,
22636: "dice" ,
22641: "dick" ,
22642: "did" ,
22643: "die" ,
22644: "died" ,
22645: "diego" ,
22646: "dies" ,
22651: "diesel" ,
22652: "diet" ,
22653: "diets" ,
22654: "dig" ,
22655: "digit" ,
22656: "digs" ,
22661: "dike" ,
22662: "dilate" ,
22663: "dill" ,
22664: "dim" ,
22665: "dime" ,
22666: "dimes" ,
23111: "dimly" ,
23112: "dims" ,
23113: "din" ,
23114: "dinah" ,
23115: "dine" ,
23116: "diner" ,
23121: "ding" ,
23122: "dingo" ,
23123: "dingy" ,
23124: "dint" ,
23125: "diode" ,
23126: "dip" ,
23131: "dips" ,
23132: "dire" ,
23133: "dirge" ,
23134: "dirk" ,
23135: "dirt" ,
23136: "dirty" ,
23141: "disc" ,
23142: "disco" ,
23143: "dish" ,
23144: "disk" ,
23145: "disney" ,
23146: "ditch" ,
23151: "ditto" ,
23152: "ditty" ,
23153: "diva" ,
23154: "divan" ,
23155: "dive" ,
23156: "dives" ,
23161: "divot" ,
23162: "dixie" ,
23163: "dizzy" ,
23164: "dj" ,
23165: "dk" ,
23166: "dl" ,
23211: "dm" ,
23212: "dn" ,
23213: "dna" ,
23214: "do" ,
23215: "dobro" ,
23216: "doc" ,
23221: "dock" ,
23222: "docket" ,
23223: "doctor" ,
23224: "dodge" ,
23225: "dodo" ,
23226: "doe" ,
23231: "does" ,
23232: "doff" ,
23233: "dog" ,
23234: "dogma" ,
23235: "dogs" ,
23236: "doily" ,
23241: "doing" ,
23242: "dolby" ,
23243: "dole" ,
23244: "doll" ,
23245: "dolly" ,
23246: "dolt" ,
23251: "dome" ,
23252: "domed" ,
23253: "domino" ,
23254: "don" ,
23255: "don't" ,
23256: "done" ,
23261: "donna" ,
23262: "donor" ,
23263: "donut" ,
23264: "doom" ,
23265: "door" ,
23266: "dope" ,
23311: "dopey" ,
23312: "dora" ,
23313: "doris" ,
23314: "dorm" ,
23315: "dose" ,
23316: "dot" ,
23321: "dote" ,
23322: "dots" ,
23323: "double" ,
23324: "doubt" ,
23325: "doug" ,
23326: "dough" ,
23331: "douse" ,
23332: "dove" ,
23333: "doves" ,
23334: "dowel" ,
23335: "down" ,
23336: "dowry" ,
23341: "doze" ,
23342: "dozen" ,
23343: "dp" ,
23344: "dq" ,
23345: "dr" ,
23346: "drab" ,
23351: "draft" ,
23352: "drag" ,
23353: "drain" ,
23354: "drake" ,
23355: "drama" ,
23356: "drank" ,
23361: "drape" ,
23362: "draw" ,
23363: "drawl" ,
23364: "drawn" ,
23365: "dread" ,
23366: "dream" ,
23411: "dreamy" ,
23412: "dregs" ,
23413: "dress" ,
23414: "dressy" ,
23415: "drew" ,
23416: "dried" ,
23421: "drier" ,
23422: "dries" ,
23423: "drift" ,
23424: "drill" ,
23425: "drink" ,
23426: "drip" ,
23431: "drips" ,
23432: "drive" ,
23433: "droid" ,
23434: "droll" ,
23435: "drone" ,
23436: "drool" ,
23441: "droop" ,
23442: "drop" ,
23443: "drops" ,
23444: "drove" ,
23445: "drown" ,
23446: "dru" ,
23451: "drub" ,
23452: "drug" ,
23453: "drugs" ,
23454: "druid" ,
23455: "drum" ,
23456: "drums" ,
23461: "drunk" ,
23462: "dry" ,
23463: "dryad" ,
23464: "ds" ,
23465: "dt" ,
23466: "du" ,
23511: "dual" ,
23512: "duane" ,
23513: "dub" ,
23514: "dublin" ,
23515: "duck" ,
23516: "ducks" ,
23521: "duct" ,
23522: "dud" ,
23523: "dude" ,
23524: "due" ,
23525: "duel" ,
23526: "dues" ,
23531: "duet" ,
23532: "duff" ,
23533: "dug" ,
23534: "duke" ,
23535: "dull" ,
23536: "dully" ,
23541: "duly" ,
23542: "dumb" ,
23543: "dumbo" ,
23544: "dummy" ,
23545: "dump" ,
23546: "dumps" ,
23551: "dumpy" ,
23552: "dun" ,
23553: "dunce" ,
23554: "dune" ,
23555: "dung" ,
23556: "dunk" ,
23561: "duo" ,
23562: "dupe" ,
23563: "during" ,
23564: "dusk" ,
23565: "dusky" ,
23566: "dust" ,
23611: "dusty" ,
23612: "dutch" ,
23613: "duty" ,
23614: "dv" ,
23615: "dw" ,
23616: "dwarf" ,
23621: "dwell" ,
23622: "dwelt" ,
23623: "dwight" ,
23624: "dx" ,
23625: "dy" ,
23626: "dyad" ,
23631: "dye" ,
23632: "dyed" ,
23633: "dying" ,
23634: "dylan" ,
23635: "dynamo" ,
23636: "dz" ,
23641: "e" ,
23642: "e's" ,
23643: "ea" ,
23644: "each" ,
23645: "eager" ,
23646: "eagle" ,
23651: "ear" ,
23652: "earl" ,
23653: "early" ,
23654: "earn" ,
23655: "earns" ,
23656: "ears" ,
23661: "earth" ,
23662: "ease" ,
23663: "easel" ,
23664: "east" ,
23665: "easy" ,
23666: "eat" ,
24111: "eaten" ,
24112: "eater" ,
24113: "eats" ,
24114: "eave" ,
24115: "eaves" ,
24116: "eb" ,
24121: "ebb" ,
24122: "ebony" ,
24123: "ec" ,
24124: "echo" ,
24125: "ed" ,
24126: "eddie" ,
24131: "eddy" ,
24132: "eden" ,
24133: "edgar" ,
24134: "edge" ,
24135: "edges" ,
24136: "edgy" ,
24141: "edible" ,
24142: "edict" ,
24143: "edify" ,
24144: "edit" ,
24145: "edith" ,
24146: "editor" ,
24151: "edits" ,
24152: "edna" ,
24153: "edsel" ,
24154: "edwin" ,
24155: "ee" ,
24156: "eee" ,
24161: "eeee" ,
24162: "eeg" ,
24163: "eel" ,
24164: "eerie" ,
24165: "ef" ,
24166: "efface" ,
24211: "efg" ,
24212: "eflat" ,
24213: "eft" ,
24214: "eg" ,
24215: "egg" ,
24216: "eggs" ,
24221: "ego" ,
24222: "egress" ,
24223: "egret" ,
24224: "egypt" ,
24225: "eh" ,
24226: "ei" ,
24231: "eight" ,
24232: "ej" ,
24233: "eject" ,
24234: "ek" ,
24235: "ekg" ,
24236: "el" ,
24241: "elate" ,
24242: "elbow" ,
24243: "elder" ,
24244: "elect" ,
24245: "elegy" ,
24246: "elena" ,
24251: "eleven" ,
24252: "elf" ,
24253: "elfin" ,
24254: "eli" ,
24255: "elide" ,
24256: "eliot" ,
24261: "elite" ,
24262: "eliza" ,
24263: "elk" ,
24264: "elks" ,
24265: "ella" ,
24266: "ellen" ,
24311: "elm" ,
24312: "elmer" ,
24313: "elms" ,
24314: "elope" ,
24315: "elroy" ,
24316: "else" ,
24321: "elsie" ,
24322: "elton" ,
24323: "elude" ,
24324: "elves" ,
24325: "elvis" ,
24326: "ely" ,
24331: "em" ,
24332: "email" ,
24333: "embalm" ,
24334: "embed" ,
24335: "ember" ,
24336: "emcee" ,
24341: "emery" ,
24342: "emil" ,
24343: "emile" ,
24344: "emily" ,
24345: "emit" ,
24346: "emits" ,
24351: "emma" ,
24352: "emmy" ,
24353: "emote" ,
24354: "employ" ,
24355: "empty" ,
24356: "emu" ,
24361: "en" ,
24362: "enact" ,
24363: "enamel" ,
24364: "end" ,
24365: "ended" ,
24366: "endow" ,
24411: "ends" ,
24412: "enema" ,
24413: "enemy" ,
24414: "enigma" ,
24415: "enjoy" ,
24416: "enmity" ,
24421: "ennui" ,
24422: "enoch" ,
24423: "ensue" ,
24424: "enter" ,
24425: "entrap" ,
24426: "entry" ,
24431: "envoy" ,
24432: "envy" ,
24433: "eo" ,
24434: "eon" ,
24435: "eons" ,
24436: "ep" ,
24441: "epic" ,
24442: "epics" ,
24443: "epoch" ,
24444: "epoxy" ,
24445: "epsom" ,
24446: "eq" ,
24451: "equal" ,
24452: "equip" ,
24453: "er" ,
24454: "era" ,
24455: "erase" ,
24456: "erect" ,
24461: "ergo" ,
24462: "eric" ,
24463: "erica" ,
24464: "erie" ,
24465: "erik" ,
24466: "erin" ,
24511: "ernest" ,
24512: "ernie" ,
24513: "erode" ,
24514: "eros" ,
24515: "err" ,
24516: "errand" ,
24521: "errol" ,
24522: "error" ,
24523: "erupt" ,
24524: "es" ,
24525: "esp" ,
24526: "espy" ,
24531: "esq" ,
24532: "essay" ,
24533: "ester" ,
24534: "et" ,
24535: "eta" ,
24536: "etc" ,
24541: "etch" ,
24542: "ethel" ,
24543: "ether" ,
24544: "ethic" ,
24545: "ethos" ,
24546: "ethyl" ,
24551: "etude" ,
24552: "eu" ,
24553: "eureka" ,
24554: "ev" ,
24555: "eva" ,
24556: "evade" ,
24561: "evans" ,
24562: "eve" ,
24563: "even" ,
24564: "event" ,
24565: "ever" ,
24566: "every" ,
24611: "evict" ,
24612: "evil" ,
24613: "evita" ,
24614: "evoke" ,
24615: "evolve" ,
24616: "ew" ,
24621: "ewe" ,
24622: "ex" ,
24623: "exact" ,
24624: "exalt" ,
24625: "exam" ,
24626: "exams" ,
24631: "excel" ,
24632: "excess" ,
24633: "exec" ,
24634: "exert" ,
24635: "exile" ,
24636: "exist" ,
24641: "exit" ,
24642: "exits" ,
24643: "exodus" ,
24644: "expel" ,
24645: "expo" ,
24646: "extant" ,
24651: "extent" ,
24652: "extol" ,
24653: "extra" ,
24654: "exult" ,
24655: "exxon" ,
24656: "ey" ,
24661: "eye" ,
24662: "eyed" ,
24663: "eyes" ,
24664: "ez" ,
24665: "ezra" ,
24666: "f" ,
25111: "f#" ,
25112: "f's" ,
25113: "fa" ,
25114: "fable" ,
25115: "fabric" ,
25116: "face" ,
25121: "faces" ,
25122: "facet" ,
25123: "facile" ,
25124: "fact" ,
25125: "facts" ,
25126: "fad" ,
25131: "fade" ,
25132: "fads" ,
25133: "fail" ,
25134: "faint" ,
25135: "fair" ,
25136: "fairy" ,
25141: "faith" ,
25142: "fake" ,
25143: "faker" ,
25144: "fall" ,
25145: "false" ,
25146: "fame" ,
25151: "fan" ,
25152: "fancy" ,
25153: "fang" ,
25154: "fangs" ,
25155: "fanny" ,
25156: "fans" ,
25161: "far" ,
25162: "farce" ,
25163: "fare" ,
25164: "farm" ,
25165: "farms" ,
25166: "fast" ,
25211: "fat" ,
25212: "fatal" ,
25213: "fate" ,
25214: "father" ,
25215: "fats" ,
25216: "fatty" ,
25221: "fault" ,
25222: "fauna" ,
25223: "faust" ,
25224: "faux" ,
25225: "fawn" ,
25226: "fax" ,
25231: "faze" ,
25232: "fb" ,
25233: "fbi" ,
25234: "fc" ,
25235: "fd" ,
25236: "fe" ,
25241: "fear" ,
25242: "fears" ,
25243: "feast" ,
25244: "feat" ,
25245: "feb" ,
25246: "fed" ,
25251: "fee" ,
25252: "feeble" ,
25253: "feed" ,
25254: "feeds" ,
25255: "feel" ,
25256: "feels" ,
25261: "fees" ,
25262: "feet" ,
25263: "feign" ,
25264: "feint" ,
25265: "felice" ,
25266: "felix" ,
25311: "fell" ,
25312: "felon" ,
25313: "felt" ,
25314: "femur" ,
25315: "fence" ,
25316: "fend" ,
25321: "fern" ,
25322: "ferry" ,
25323: "fetal" ,
25324: "fetch" ,
25325: "fete" ,
25326: "fetid" ,
25331: "fetus" ,
25332: "feud" ,
25333: "fever" ,
25334: "few" ,
25335: "fez" ,
25336: "ff" ,
25341: "fff" ,
25342: "ffff" ,
25343: "fg" ,
25344: "fgh" ,
25345: "fh" ,
25346: "fi" ,
25351: "fiat" ,
25352: "fib" ,
25353: "fiber" ,
25354: "fickle" ,
25355: "fido" ,
25356: "field" ,
25361: "fiend" ,
25362: "fiery" ,
25363: "fife" ,
25364: "fifth" ,
25365: "fifty" ,
25366: "fig" ,
25411: "fight" ,
25412: "figs" ,
25413: "fiji" ,
25414: "filch" ,
25415: "file" ,
25416: "filed" ,
25421: "files" ,
25422: "filet" ,
25423: "fill" ,
25424: "filler" ,
25425: "filly" ,
25426: "film" ,
25431: "films" ,
25432: "filmy" ,
25433: "filth" ,
25434: "fin" ,
25435: "final" ,
25436: "finale" ,
25441: "finch" ,
25442: "find" ,
25443: "fine" ,
25444: "fined" ,
25445: "finer" ,
25446: "finite" ,
25451: "fink" ,
25452: "finn" ,
25453: "finny" ,
25454: "fir" ,
25455: "fire" ,
25456: "firm" ,
25461: "first" ,
25462: "fish" ,
25463: "fishy" ,
25464: "fist" ,
25465: "fit" ,
25466: "fits" ,
25511: "five" ,
25512: "fix" ,
25513: "fixed" ,
25514: "fizz" ,
25515: "fj" ,
25516: "fjord" ,
25521: "fk" ,
25522: "fl" ,
25523: "flab" ,
25524: "flag" ,
25525: "flail" ,
25526: "flair" ,
25531: "flak" ,
25532: "flake" ,
25533: "flaky" ,
25534: "flame" ,
25535: "flank" ,
25536: "flap" ,
25541: "flare" ,
25542: "flash" ,
25543: "flask" ,
25544: "flat" ,
25545: "flavor" ,
25546: "flaw" ,
25551: "flax" ,
25552: "flay" ,
25553: "flea" ,
25554: "fled" ,
25555: "flee" ,
25556: "fleet" ,
25561: "flesh" ,
25562: "flew" ,
25563: "flex" ,
25564: "flick" ,
25565: "flier" ,
25566: "flies" ,
25611: "flinch" ,
25612: "fling" ,
25613: "flint" ,
25614: "flip" ,
25615: "flirt" ,
25616: "flit" ,
25621: "flo" ,
25622: "float" ,
25623: "flock" ,
25624: "flog" ,
25625: "flood" ,
25626: "floor" ,
25631: "flop" ,
25632: "floppy" ,
25633: "flora" ,
25634: "flour" ,
25635: "flow" ,
25636: "flown" ,
25641: "floyd" ,
25642: "flu" ,
25643: "flub" ,
25644: "flue" ,
25645: "fluff" ,
25646: "fluid" ,
25651: "fluke" ,
25652: "flung" ,
25653: "flush" ,
25654: "flute" ,
25655: "flux" ,
25656: "fly" ,
25661: "flyer" ,
25662: "fm" ,
25663: "fn" ,
25664: "fo" ,
25665: "foal" ,
25666: "foam" ,
26111: "foamy" ,
26112: "fob" ,
26113: "focal" ,
26114: "focus" ,
26115: "fodder" ,
26116: "foe" ,
26121: "foes" ,
26122: "fog" ,
26123: "foggy" ,
26124: "fogy" ,
26125: "foil" ,
26126: "foist" ,
26131: "fold" ,
26132: "folio" ,
26133: "folk" ,
26134: "folly" ,
26135: "fond" ,
26136: "font" ,
26141: "food" ,
26142: "fool" ,
26143: "foot" ,
26144: "fop" ,
26145: "for" ,
26146: "foray" ,
26151: "force" ,
26152: "ford" ,
26153: "fore" ,
26154: "forge" ,
26155: "forgot" ,
26156: "fork" ,
26161: "form" ,
26162: "forms" ,
26163: "fort" ,
26164: "forte" ,
26165: "forth" ,
26166: "forty" ,
26211: "forum" ,
26212: "fossil" ,
26213: "foul" ,
26214: "found" ,
26215: "fount" ,
26216: "four" ,
26221: "fowl" ,
26222: "fox" ,
26223: "foxes" ,
26224: "foxy" ,
26225: "foyer" ,
26226: "fp" ,
26231: "fq" ,
26232: "fr" ,
26233: "frail" ,
26234: "frame" ,
26235: "france" ,
26236: "frank" ,
26241: "franz" ,
26242: "frau" ,
26243: "fraud" ,
26244: "fray" ,
26245: "freak" ,
26246: "fred" ,
26251: "free" ,
26252: "freed" ,
26253: "freer" ,
26254: "frenzy" ,
26255: "freon" ,
26256: "fresh" ,
26261: "fret" ,
26262: "freud" ,
26263: "fri" ,
26264: "friar" ,
26265: "fried" ,
26266: "fries" ,
26311: "frill" ,
26312: "frilly" ,
26313: "frisky" ,
26314: "fritz" ,
26315: "frock" ,
26316: "frog" ,
26321: "frogs" ,
26322: "from" ,
26323: "frond" ,
26324: "front" ,
26325: "frost" ,
26326: "froth" ,
26331: "frown" ,
26332: "froze" ,
26333: "fruit" ,
26334: "fry" ,
26335: "fs" ,
26336: "ft" ,
26341: "fu" ,
26342: "fudge" ,
26343: "fuel" ,
26344: "fugue" ,
26345: "fuji" ,
26346: "full" ,
26351: "fully" ,
26352: "fumble" ,
26353: "fume" ,
26354: "fumes" ,
26355: "fun" ,
26356: "fund" ,
26361: "funds" ,
26362: "fungi" ,
26363: "funk" ,
26364: "funky" ,
26365: "funny" ,
26366: "fur" ,
26411: "furl" ,
26412: "furry" ,
26413: "furs" ,
26414: "fury" ,
26415: "fuse" ,
26416: "fuss" ,
26421: "fussy" ,
26422: "fuzz" ,
26423: "fuzzy" ,
26424: "fv" ,
26425: "fw" ,
26426: "fx" ,
26431: "fy" ,
26432: "fyi" ,
26433: "fz" ,
26434: "g" ,
26435: "g's" ,
26436: "ga" ,
26441: "gab" ,
26442: "gable" ,
26443: "gadget" ,
26444: "gaea" ,
26445: "gaffe" ,
26446: "gag" ,
26451: "gags" ,
26452: "gail" ,
26453: "gaily" ,
26454: "gain" ,
26455: "gait" ,
26456: "gal" ,
26461: "gala" ,
26462: "galaxy" ,
26463: "gale" ,
26464: "gall" ,
26465: "gallop" ,
26466: "gam" ,
26511: "game" ,
26512: "games" ,
26513: "gamma" ,
26514: "gamut" ,
26515: "gamy" ,
26516: "gander" ,
26521: "gang" ,
26522: "gangs" ,
26523: "gap" ,
26524: "gape" ,
26525: "gapes" ,
26526: "gaps" ,
26531: "garb" ,
26532: "gargle" ,
26533: "garish" ,
26534: "gary" ,
26535: "gas" ,
26536: "gash" ,
26541: "gasp" ,
26542: "gasps" ,
26543: "gassy" ,
26544: "gate" ,
26545: "gates" ,
26546: "gator" ,
26551: "gauche" ,
26552: "gaudy" ,
26553: "gauge" ,
26554: "gaunt" ,
26555: "gauze" ,
26556: "gave" ,
26561: "gavel" ,
26562: "gawk" ,
26563: "gawky" ,
26564: "gay" ,
26565: "gaze" ,
26566: "gazed" ,
26611: "gazes" ,
26612: "gb" ,
26613: "gc" ,
26614: "gd" ,
26615: "ge" ,
26616: "gear" ,
26621: "gears" ,
26622: "gee" ,
26623: "geese" ,
26624: "gel" ,
26625: "geld" ,
26626: "gem" ,
26631: "gems" ,
26632: "gene" ,
26633: "genes" ,
26634: "genie" ,
26635: "genre" ,
26636: "gent" ,
26641: "gentry" ,
26642: "geo" ,
26643: "gerbil" ,
26644: "germ" ,
26645: "germs" ,
26646: "get" ,
26651: "gets" ,
26652: "gf" ,
26653: "gg" ,
26654: "ggg" ,
26655: "gggg" ,
26656: "gh" ,
26661: "ghetto" ,
26662: "ghi" ,
26663: "ghost" ,
26664: "ghoul" ,
26665: "ghq" ,
26666: "gi" ,
31111: "giant" ,
31112: "giddy" ,
31113: "gift" ,
31114: "gifts" ,
31115: "gig" ,
31116: "gil" ,
31121: "gila" ,
31122: "gild" ,
31123: "gill" ,
31124: "gills" ,
31125: "gilt" ,
31126: "gimme" ,
31131: "gimpy" ,
31132: "gin" ,
31133: "gina" ,
31134: "ginger" ,
31135: "gino" ,
31136: "gird" ,
31141: "girl" ,
31142: "girls" ,
31143: "girth" ,
31144: "gist" ,
31145: "give" ,
31146: "given" ,
31151: "gives" ,
31152: "gizmo" ,
31153: "gj" ,
31154: "gk" ,
31155: "gl" ,
31156: "glad" ,
31161: "glade" ,
31162: "glamor" ,
31163: "glance" ,
31164: "gland" ,
31165: "glare" ,
31166: "glass" ,
31211: "glaze" ,
31212: "gleam" ,
31213: "glean" ,
31214: "glee" ,
31215: "glen" ,
31216: "glenn" ,
31221: "glib" ,
31222: "glide" ,
31223: "glint" ,
31224: "gloat" ,
31225: "glob" ,
31226: "globe" ,
31231: "gloom" ,
31232: "glory" ,
31233: "gloss" ,
31234: "glove" ,
31235: "glow" ,
31236: "glows" ,
31241: "glue" ,
31242: "glued" ,
31243: "gluey" ,
31244: "gluing" ,
31245: "glum" ,
31246: "glut" ,
31251: "gm" ,
31252: "gmt" ,
31253: "gn" ,
31254: "gnash" ,
31255: "gnat" ,
31256: "gnaw" ,
31261: "gnaws" ,
31262: "gnome" ,
31263: "gnp" ,
31264: "gnu" ,
31265: "go" ,
31266: "goad" ,
31311: "goal" ,
31312: "goals" ,
31313: "goat" ,
31314: "goats" ,
31315: "gob" ,
31316: "god" ,
31321: "godly" ,
31322: "gods" ,
31323: "goes" ,
31324: "goggle" ,
31325: "gogh" ,
31326: "gogo" ,
31331: "going" ,
31332: "gold" ,
31333: "golf" ,
31334: "golly" ,
31335: "gomez" ,
31336: "gone" ,
31341: "gong" ,
31342: "goo" ,
31343: "good" ,
31344: "goods" ,
31345: "goody" ,
31346: "gooey" ,
31351: "goof" ,
31352: "goofy" ,
31353: "goon" ,
31354: "goose" ,
31355: "gordon" ,
31356: "gore" ,
31361: "gorge" ,
31362: "gory" ,
31363: "gosh" ,
31364: "gospel" ,
31365: "got" ,
31366: "gouge" ,
31411: "gould" ,
31412: "gourd" ,
31413: "gout" ,
31414: "govt" ,
31415: "gown" ,
31416: "gowns" ,
31421: "gp" ,
31422: "gpa" ,
31423: "gq" ,
31424: "gr" ,
31425: "grab" ,
31426: "grabs" ,
31431: "grace" ,
31432: "grad" ,
31433: "grade" ,
31434: "grady" ,
31435: "graft" ,
31436: "grail" ,
31441: "grain" ,
31442: "gram" ,
31443: "grams" ,
31444: "grand" ,
31445: "grant" ,
31446: "grape" ,
31451: "graph" ,
31452: "grasp" ,
31453: "grass" ,
31454: "grate" ,
31455: "grave" ,
31456: "gravel" ,
31461: "gravy" ,
31462: "gray" ,
31463: "graze" ,
31464: "great" ,
31465: "greed" ,
31466: "greedy" ,
31511: "greek" ,
31512: "green" ,
31513: "greet" ,
31514: "greg" ,
31515: "greta" ,
31516: "grew" ,
31521: "grey" ,
31522: "grid" ,
31523: "grief" ,
31524: "grieve" ,
31525: "grill" ,
31526: "grim" ,
31531: "grime" ,
31532: "grimy" ,
31533: "grin" ,
31534: "grind" ,
31535: "grins" ,
31536: "grip" ,
31541: "gripe" ,
31542: "grips" ,
31543: "grist" ,
31544: "grit" ,
31545: "groan" ,
31546: "grog" ,
31551: "groin" ,
31552: "groom" ,
31553: "groove" ,
31554: "grope" ,
31555: "gross" ,
31556: "group" ,
31561: "grout" ,
31562: "grove" ,
31563: "grow" ,
31564: "growl" ,
31565: "grown" ,
31566: "grows" ,
31611: "grub" ,
31612: "grubs" ,
31613: "gruff" ,
31614: "grunt" ,
31615: "gs" ,
31616: "gt" ,
31621: "gu" ,
31622: "guam" ,
31623: "guano" ,
31624: "guard" ,
31625: "guess" ,
31626: "guest" ,
31631: "gui" ,
31632: "guide" ,
31633: "guild" ,
31634: "guile" ,
31635: "guilt" ,
31636: "guise" ,
31641: "guitar" ,
31642: "gulag" ,
31643: "gulf" ,
31644: "gull" ,
31645: "gulls" ,
31646: "gully" ,
31651: "gulp" ,
31652: "gum" ,
31653: "gumbo" ,
31654: "gummy" ,
31655: "gun" ,
31656: "gunk" ,
31661: "guns" ,
31662: "guppy" ,
31663: "gurgle" ,
31664: "guru" ,
31665: "gus" ,
31666: "gush" ,
32111: "gust" ,
32112: "gusto" ,
32113: "gusts" ,
32114: "gusty" ,
32115: "gut" ,
32116: "guts" ,
32121: "gutsy" ,
32122: "guy" ,
32123: "guys" ,
32124: "gv" ,
32125: "gw" ,
32126: "gwen" ,
32131: "gx" ,
32132: "gy" ,
32133: "gym" ,
32134: "gyp" ,
32135: "gypsum" ,
32136: "gypsy" ,
32141: "gyro" ,
32142: "gz" ,
32143: "h" ,
32144: "h's" ,
32145: "h2o" ,
32146: "ha" ,
32151: "habit" ,
32152: "hack" ,
32153: "had" ,
32154: "hag" ,
32155: "haha" ,
32156: "haiku" ,
32161: "hail" ,
32162: "hair" ,
32163: "hairdo" ,
32164: "hairs" ,
32165: "hairy" ,
32166: "haiti" ,
32211: "hal" ,
32212: "half" ,
32213: "hall" ,
32214: "halls" ,
32215: "halo" ,
32216: "halt" ,
32221: "halts" ,
32222: "halve" ,
32223: "ham" ,
32224: "hamlet" ,
32225: "hammer" ,
32226: "hams" ,
32231: "hand" ,
32232: "handle" ,
32233: "hands" ,
32234: "handy" ,
32235: "hang" ,
32236: "hank" ,
32241: "hanna" ,
32242: "hans" ,
32243: "happy" ,
32244: "hard" ,
32245: "hardy" ,
32246: "hare" ,
32251: "harem" ,
32252: "hark" ,
32253: "harley" ,
32254: "harm" ,
32255: "harms" ,
32256: "harp" ,
32261: "harps" ,
32262: "harry" ,
32263: "harsh" ,
32264: "hart" ,
32265: "harv" ,
32266: "harvey" ,
32311: "has" ,
32312: "hash" ,
32313: "hasp" ,
32314: "haste" ,
32315: "hasty" ,
32316: "hat" ,
32321: "hatch" ,
32322: "hate" ,
32323: "hates" ,
32324: "hatred" ,
32325: "hats" ,
32326: "haul" ,
32331: "hauls" ,
32332: "haunt" ,
32333: "have" ,
32334: "haven" ,
32335: "havoc" ,
32336: "hawk" ,
32341: "hawks" ,
32342: "hay" ,
32343: "haydn" ,
32344: "hayes" ,
32345: "hazard" ,
32346: "haze" ,
32351: "hazel" ,
32352: "hazy" ,
32353: "hb" ,
32354: "hc" ,
32355: "hd" ,
32356: "hdtv" ,
32361: "he" ,
32362: "he'd" ,
32363: "he'll" ,
32364: "head" ,
32365: "heads" ,
32366: "heady" ,
32411: "heal" ,
32412: "heals" ,
32413: "heap" ,
32414: "heaps" ,
32415: "hear" ,
32416: "heard" ,
32421: "hears" ,
32422: "heart" ,
32423: "heat" ,
32424: "heath" ,
32425: "heats" ,
32426: "heave" ,
32431: "heaven" ,
32432: "heavy" ,
32433: "hebrew" ,
32434: "heck" ,
32435: "heckle" ,
32436: "hectic" ,
32441: "hedge" ,
32442: "heed" ,
32443: "heel" ,
32444: "heels" ,
32445: "heft" ,
32446: "hefty" ,
32451: "height" ,
32452: "heinz" ,
32453: "heir" ,
32454: "heirs" ,
32455: "held" ,
32456: "helen" ,
32461: "helga" ,
32462: "helix" ,
32463: "hell" ,
32464: "hello" ,
32465: "helm" ,
32466: "help" ,
32511: "hem" ,
32512: "hemp" ,
32513: "hems" ,
32514: "hen" ,
32515: "hence" ,
32516: "henry" ,
32521: "hens" ,
32522: "hep" ,
32523: "her" ,
32524: "herb" ,
32525: "herbs" ,
32526: "herd" ,
32531: "here" ,
32532: "hero" ,
32533: "herod" ,
32534: "heroic" ,
32535: "heron" ,
32536: "herr" ,
32541: "hers" ,
32542: "hertz" ,
32543: "hew" ,
32544: "hex" ,
32545: "hexed" ,
32546: "hey" ,
32551: "hf" ,
32552: "hg" ,
32553: "hh" ,
32554: "hhh" ,
32555: "hhhh" ,
32556: "hi" ,
32561: "hick" ,
32562: "hid" ,
32563: "hide" ,
32564: "hides" ,
32565: "high" ,
32566: "hij" ,
32611: "hijack" ,
32612: "hike" ,
32613: "hikes" ,
32614: "hill" ,
32615: "hills" ,
32616: "hilly" ,
32621: "hilt" ,
32622: "him" ,
32623: "hind" ,
32624: "hindu" ,
32625: "hinge" ,
32626: "hint" ,
32631: "hints" ,
32632: "hip" ,
32633: "hippo" ,
32634: "hips" ,
32635: "hiram" ,
32636: "hire" ,
32641: "hired" ,
32642: "hires" ,
32643: "his" ,
32644: "hiss" ,
32645: "hit" ,
32646: "hitch" ,
32651: "hits" ,
32652: "hiv" ,
32653: "hive" ,
32654: "hives" ,
32655: "hj" ,
32656: "hk" ,
32661: "hl" ,
32662: "hm" ,
32663: "hn" ,
32664: "ho" ,
32665: "hoagy" ,
32666: "hoard" ,
33111: "hoax" ,
33112: "hobby" ,
33113: "hobo" ,
33114: "hock" ,
33115: "hockey" ,
33116: "hoe" ,
33121: "hog" ,
33122: "hogan" ,
33123: "hogs" ,
33124: "hoist" ,
33125: "hold" ,
33126: "holds" ,
33131: "holdup" ,
33132: "hole" ,
33133: "holes" ,
33134: "holly" ,
33135: "holmes" ,
33136: "holy" ,
33141: "home" ,
33142: "honda" ,
33143: "hone" ,
33144: "honey" ,
33145: "honk" ,
33146: "honor" ,
33151: "hooch" ,
33152: "hood" ,
33153: "hoof" ,
33154: "hook" ,
33155: "hooks" ,
33156: "hookup" ,
33161: "hoop" ,
33162: "hoot" ,
33163: "hop" ,
33164: "hope" ,
33165: "hopes" ,
33166: "hops" ,
33211: "horde" ,
33212: "horn" ,
33213: "horny" ,
33214: "horse" ,
33215: "hose" ,
33216: "host" ,
33221: "hot" ,
33222: "hotel" ,
33223: "hotrod" ,
33224: "hound" ,
33225: "hour" ,
33226: "house" ,
33231: "hovel" ,
33232: "hover" ,
33233: "how" ,
33234: "howdy" ,
33235: "howl" ,
33236: "howls" ,
33241: "hoyle" ,
33242: "hp" ,
33243: "hq" ,
33244: "hr" ,
33245: "hrh" ,
33246: "hs" ,
33251: "ht" ,
33252: "hu" ,
33253: "hub" ,
33254: "hubbub" ,
33255: "hubby" ,
33256: "hubs" ,
33261: "hue" ,
33262: "hues" ,
33263: "huey" ,
33264: "huff" ,
33265: "hug" ,
33266: "huge" ,
33311: "hugh" ,
33312: "hugo" ,
33313: "hugs" ,
33314: "huh" ,
33315: "hula" ,
33316: "hulk" ,
33321: "hull" ,
33322: "hum" ,
33323: "human" ,
33324: "humid" ,
33325: "humor" ,
33326: "hump" ,
33331: "humps" ,
33332: "hums" ,
33333: "humus" ,
33334: "hun" ,
33335: "hunch" ,
33336: "hung" ,
33341: "hunk" ,
33342: "hunt" ,
33343: "hunts" ,
33344: "hurl" ,
33345: "huron" ,
33346: "hurrah" ,
33351: "hurry" ,
33352: "hurt" ,
33353: "hush" ,
33354: "husk" ,
33355: "husky" ,
33356: "hut" ,
33361: "hutch" ,
33362: "hv" ,
33363: "hw" ,
33364: "hwy" ,
33365: "hx" ,
33366: "hy" ,
33411: "hyde" ,
33412: "hydra" ,
33413: "hyena" ,
33414: "hymn" ,
33415: "hymnal" ,
33416: "hype" ,
33421: "hyper" ,
33422: "hypo" ,
33423: "hz" ,
33424: "i" ,
33425: "i'd" ,
33426: "i'll" ,
33431: "i'm" ,
33432: "i's" ,
33433: "i've" ,
33434: "ia" ,
33435: "ian" ,
33436: "ib" ,
33441: "ibid" ,
33442: "ibm" ,
33443: "ibsen" ,
33444: "ic" ,
33445: "icbm" ,
33446: "ice" ,
33451: "iced" ,
33452: "icicle" ,
33453: "icing" ,
33454: "icky" ,
33455: "icon" ,
33456: "icons" ,
33461: "icy" ,
33462: "id" ,
33463: "ida" ,
33464: "idaho" ,
33465: "idea" ,
33466: "ideal" ,
33511: "ideas" ,
33512: "idiom" ,
33513: "idiot" ,
33514: "idle" ,
33515: "idly" ,
33516: "idol" ,
33521: "idols" ,
33522: "ie" ,
33523: "if" ,
33524: "iffy" ,
33525: "ig" ,
33526: "igloo" ,
33531: "ignite" ,
33532: "igor" ,
33533: "ih" ,
33534: "ii" ,
33535: "iii" ,
33536: "iiii" ,
33541: "ij" ,
33542: "ijk" ,
33543: "ik" ,
33544: "ike" ,
33545: "il" ,
33546: "iliad" ,
33551: "ill" ,
33552: "im" ,
33553: "image" ,
33554: "imbibe" ,
33555: "imf" ,
33556: "imp" ,
33561: "impel" ,
33562: "imply" ,
33563: "import" ,
33564: "imps" ,
33565: "in" ,
33566: "inane" ,
33611: "inc" ,
33612: "inca" ,
33613: "incest" ,
33614: "inch" ,
33615: "incur" ,
33616: "index" ,
33621: "india" ,
33622: "indies" ,
33623: "indy" ,
33624: "inept" ,
33625: "inert" ,
33626: "infamy" ,
33631: "infect" ,
33632: "infer" ,
33633: "info" ,
33634: "ingot" ,
33635: "inhale" ,
33636: "ink" ,
33641: "inky" ,
33642: "inlay" ,
33643: "inlet" ,
33644: "inn" ,
33645: "inner" ,
33646: "inns" ,
33651: "input" ,
33652: "insect" ,
33653: "inset" ,
33654: "insult" ,
33655: "intel" ,
33656: "intend" ,
33661: "inter" ,
33662: "into" ,
33663: "intro" ,
33664: "invoke" ,
33665: "io" ,
33666: "ion" ,
34111: "ions" ,
34112: "iota" ,
34113: "iou" ,
34114: "iowa" ,
34115: "ip" ,
34116: "iq" ,
34121: "ir" ,
34122: "ira" ,
34123: "iran" ,
34124: "iraq" ,
34125: "iraqi" ,
34126: "irate" ,
34131: "ire" ,
34132: "irene" ,
34133: "iris" ,
34134: "irish" ,
34135: "irk" ,
34136: "irked" ,
34141: "irma" ,
34142: "iron" ,
34143: "irons" ,
34144: "irony" ,
34145: "irvin" ,
34146: "is" ,
34151: "isaac" ,
34152: "isabel" ,
34153: "islam" ,
34154: "island" ,
34155: "isle" ,
34156: "ism" ,
34161: "isn't" ,
34162: "israel" ,
34163: "issue" ,
34164: "isuzu" ,
34165: "it" ,
34166: "it'd" ,
34211: "it'll" ,
34212: "it's" ,
34213: "italy" ,
34214: "itch" ,
34215: "itchy" ,
34216: "item" ,
34221: "items" ,
34222: "iu" ,
34223: "iud" ,
34224: "iv" ,
34225: "ivan" ,
34226: "ivory" ,
34231: "ivy" ,
34232: "iw" ,
34233: "ix" ,
34234: "iy" ,
34235: "iz" ,
34236: "j" ,
34241: "j's" ,
34242: "ja" ,
34243: "jab" ,
34244: "jack" ,
34245: "jackal" ,
34246: "jacob" ,
34251: "jade" ,
34252: "jaded" ,
34253: "jag" ,
34254: "jaguar" ,
34255: "jail" ,
34256: "jam" ,
34261: "jamb" ,
34262: "james" ,
34263: "jan" ,
34264: "jane" ,
34265: "janet" ,
34266: "janis" ,
34311: "japan" ,
34312: "jar" ,
34313: "jars" ,
34314: "jason" ,
34315: "jaunt" ,
34316: "java" ,
34321: "jaw" ,
34322: "jaws" ,
34323: "jay" ,
34324: "jazz" ,
34325: "jazzy" ,
34326: "jb" ,
34331: "jc" ,
34332: "jd" ,
34333: "je" ,
34334: "jean" ,
34335: "jeans" ,
34336: "jed" ,
34341: "jedi" ,
34342: "jeep" ,
34343: "jeer" ,
34344: "jeers" ,
34345: "jeff" ,
34346: "jello" ,
34351: "jelly" ,
34352: "jenny" ,
34353: "jerk" ,
34354: "jerks" ,
34355: "jerky" ,
34356: "jerry" ,
34361: "jersey" ,
34362: "jesse" ,
34363: "jest" ,
34364: "jesus" ,
34365: "jet" ,
34366: "jets" ,
34411: "jew" ,
34412: "jewel" ,
34413: "jewish" ,
34414: "jf" ,
34415: "jfk" ,
34416: "jg" ,
34421: "jh" ,
34422: "ji" ,
34423: "jiffy" ,
34424: "jig" ,
34425: "jiggle" ,
34426: "jigs" ,
34431: "jill" ,
34432: "jilt" ,
34433: "jim" ,
34434: "jimmy" ,
34435: "jinx" ,
34436: "jive" ,
34441: "jj" ,
34442: "jjj" ,
34443: "jjjj" ,
34444: "jk" ,
34445: "jkl" ,
34446: "jl" ,
34451: "jm" ,
34452: "jn" ,
34453: "jo" ,
34454: "joan" ,
34455: "job" ,
34456: "jobs" ,
34461: "jock" ,
34462: "jockey" ,
34463: "jody" ,
34464: "joe" ,
34465: "joel" ,
34466: "joey" ,
34511: "jog" ,
34512: "jogs" ,
34513: "john" ,
34514: "join" ,
34515: "joins" ,
34516: "joint" ,
34521: "joke" ,
34522: "joker" ,
34523: "jokes" ,
34524: "jolly" ,
34525: "jolt" ,
34526: "jonas" ,
34531: "jones" ,
34532: "jose" ,
34533: "josef" ,
34534: "josh" ,
34535: "joshua" ,
34536: "jostle" ,
34541: "jot" ,
34542: "jots" ,
34543: "joust" ,
34544: "jove" ,
34545: "jowl" ,
34546: "jowls" ,
34551: "joy" ,
34552: "joyce" ,
34553: "jp" ,
34554: "jq" ,
34555: "jr" ,
34556: "js" ,
34561: "jt" ,
34562: "ju" ,
34563: "juan" ,
34564: "judas" ,
34565: "jude" ,
34566: "judge" ,
34611: "judo" ,
34612: "judy" ,
34613: "jug" ,
34614: "juggle" ,
34615: "jugs" ,
34616: "juice" ,
34621: "juicy" ,
34622: "jul" ,
34623: "julep" ,
34624: "jules" ,
34625: "julia" ,
34626: "julie" ,
34631: "julio" ,
34632: "july" ,
34633: "jumbo" ,
34634: "jump" ,
34635: "jumps" ,
34636: "jumpy" ,
34641: "jun" ,
34642: "june" ,
34643: "jung" ,
34644: "junk" ,
34645: "junky" ,
34646: "juno" ,
34651: "junta" ,
34652: "juror" ,
34653: "jury" ,
34654: "just" ,
34655: "jut" ,
34656: "jute" ,
34661: "jv" ,
34662: "jw" ,
34663: "jx" ,
34664: "jy" ,
34665: "jz" ,
34666: "k" ,
35111: "k's" ,
35112: "ka" ,
35113: "kafka" ,
35114: "kale" ,
35115: "kane" ,
35116: "kansas" ,
35121: "kant" ,
35122: "kappa" ,
35123: "kaput" ,
35124: "karate" ,
35125: "karen" ,
35126: "karl" ,
35131: "karma" ,
35132: "karol" ,
35133: "kate" ,
35134: "kathy" ,
35135: "katie" ,
35136: "kay" ,
35141: "kayak" ,
35142: "kayo" ,
35143: "kazoo" ,
35144: "kb" ,
35145: "kc" ,
35146: "kd" ,
35151: "ke" ,
35152: "keats" ,
35153: "kebob" ,
35154: "keel" ,
35155: "keen" ,
35156: "keep" ,
35161: "keeps" ,
35162: "keg" ,
35163: "kegs" ,
35164: "keith" ,
35165: "kelly" ,
35166: "kelp" ,
35211: "ken" ,
35212: "kennel" ,
35213: "kent" ,
35214: "kept" ,
35215: "kerry" ,
35216: "kettle" ,
35221: "kevin" ,
35222: "key" ,
35223: "keyed" ,
35224: "keys" ,
35225: "kf" ,
35226: "kg" ,
35231: "kgb" ,
35232: "kh" ,
35233: "khaki" ,
35234: "khan" ,
35235: "khz" ,
35236: "ki" ,
35241: "kibitz" ,
35242: "kick" ,
35243: "kicks" ,
35244: "kid" ,
35245: "kidney" ,
35246: "kids" ,
35251: "kill" ,
35252: "kills" ,
35253: "kiln" ,
35254: "kilo" ,
35255: "kilt" ,
35256: "kilts" ,
35261: "kim" ,
35262: "kin" ,
35263: "kind" ,
35264: "kinds" ,
35265: "king" ,
35266: "kings" ,
35311: "kink" ,
35312: "kinky" ,
35313: "kiosk" ,
35314: "kirby" ,
35315: "kirk" ,
35316: "kiss" ,
35321: "kit" ,
35322: "kite" ,
35323: "kites" ,
35324: "kitty" ,
35325: "kiwi" ,
35326: "kj" ,
35331: "kk" ,
35332: "kkk" ,
35333: "kkkk" ,
35334: "kl" ,
35335: "klan" ,
35336: "klaus" ,
35341: "klaxon" ,
35342: "klein" ,
35343: "klm" ,
35344: "klutz" ,
35345: "km" ,
35346: "kn" ,
35351: "knack" ,
35352: "knave" ,
35353: "knead" ,
35354: "knee" ,
35355: "kneel" ,
35356: "knees" ,
35361: "knelt" ,
35362: "knew" ,
35363: "knife" ,
35364: "knight" ,
35365: "knit" ,
35366: "knits" ,
35411: "knob" ,
35412: "knobs" ,
35413: "knock" ,
35414: "knot" ,
35415: "knots" ,
35416: "know" ,
35421: "known" ,
35422: "knows" ,
35423: "knox" ,
35424: "ko" ,
35425: "koala" ,
35426: "koan" ,
35431: "kodak" ,
35432: "kong" ,
35433: "kook" ,
35434: "kooks" ,
35435: "kooky" ,
35436: "koran" ,
35441: "korea" ,
35442: "kp" ,
35443: "kq" ,
35444: "kr" ,
35445: "kraft" ,
35446: "kraut" ,
35451: "kris" ,
35452: "ks" ,
35453: "kt" ,
35454: "ku" ,
35455: "kudo" ,
35456: "kudos" ,
35461: "kudzu" ,
35462: "kurt" ,
35463: "kv" ,
35464: "kw" ,
35465: "kx" ,
35466: "ky" ,
35511: "kz" ,
35512: "l" ,
35513: "l's" ,
35514: "la" ,
35515: "lab" ,
35516: "label" ,
35521: "labor" ,
35522: "labs" ,
35523: "lace" ,
35524: "laces" ,
35525: "lack" ,
35526: "lacks" ,
35531: "lacy" ,
35532: "lad" ,
35533: "ladder" ,
35534: "ladle" ,
35535: "lads" ,
35536: "lady" ,
35541: "lag" ,
35542: "lager" ,
35543: "lagoon" ,
35544: "lags" ,
35545: "laid" ,
35546: "lair" ,
35551: "lake" ,
35552: "lakes" ,
35553: "lam" ,
35554: "lamar" ,
35555: "lamb" ,
35556: "lambs" ,
35561: "lame" ,
35562: "lamp" ,
35563: "lamps" ,
35564: "lana" ,
35565: "lance" ,
35566: "land" ,
35611: "lands" ,
35612: "lane" ,
35613: "lanky" ,
35614: "laos" ,
35615: "lap" ,
35616: "lapel" ,
35621: "laps" ,
35622: "lapse" ,
35623: "lara" ,
35624: "lard" ,
35625: "large" ,
35626: "lark" ,
35631: "larks" ,
35632: "larry" ,
35633: "larva" ,
35634: "larynx" ,
35635: "laser" ,
35636: "lash" ,
35641: "lass" ,
35642: "lasso" ,
35643: "last" ,
35644: "latch" ,
35645: "late" ,
35646: "later" ,
35651: "latest" ,
35652: "latex" ,
35653: "lathe" ,
35654: "latin" ,
35655: "laud" ,
35656: "laugh" ,
35661: "launch" ,
35662: "laura" ,
35663: "lava" ,
35664: "law" ,
35665: "lawn" ,
35666: "lawns" ,
36111: "laws" ,
36112: "lawson" ,
36113: "lax" ,
36114: "lay" ,
36115: "layer" ,
36116: "layla" ,
36121: "lays" ,
36122: "lazy" ,
36123: "lb" ,
36124: "lbj" ,
36125: "lbs" ,
36126: "lc" ,
36131: "lcd" ,
36132: "ld" ,
36133: "le" ,
36134: "lead" ,
36135: "leads" ,
36136: "leaf" ,
36141: "leafy" ,
36142: "leah" ,
36143: "leak" ,
36144: "leaks" ,
36145: "leaky" ,
36146: "lean" ,
36151: "leap" ,
36152: "leaps" ,
36153: "lear" ,
36154: "learn" ,
36155: "leary" ,
36156: "lease" ,
36161: "leash" ,
36162: "least" ,
36163: "leave" ,
36164: "led" ,
36165: "leda" ,
36166: "ledge" ,
36211: "lee" ,
36212: "leech" ,
36213: "leer" ,
36214: "leers" ,
36215: "leery" ,
36216: "leeway" ,
36221: "left" ,
36222: "lefty" ,
36223: "leg" ,
36224: "legacy" ,
36225: "legal" ,
36226: "legion" ,
36231: "legs" ,
36232: "lei" ,
36233: "lemon" ,
36234: "len" ,
36235: "lend" ,
36236: "lends" ,
36241: "length" ,
36242: "lenin" ,
36243: "lenny" ,
36244: "lens" ,
36245: "lent" ,
36246: "leo" ,
36251: "leon" ,
36252: "leona" ,
36253: "leper" ,
36254: "leroy" ,
36255: "less" ,
36256: "lest" ,
36261: "let" ,
36262: "let's" ,
36263: "lets" ,
36264: "letter" ,
36265: "levee" ,
36266: "level" ,
36311: "lever" ,
36312: "levis" ,
36313: "levy" ,
36314: "lewd" ,
36315: "lewis" ,
36316: "lf" ,
36321: "lg" ,
36322: "lh" ,
36323: "li" ,
36324: "liar" ,
36325: "liars" ,
36326: "lib" ,
36331: "libel" ,
36332: "libido" ,
36333: "libya" ,
36334: "lice" ,
36335: "lick" ,
36336: "licks" ,
36341: "lid" ,
36342: "lids" ,
36343: "lie" ,
36344: "lied" ,
36345: "lien" ,
36346: "lies" ,
36351: "lieu" ,
36352: "lieut" ,
36353: "life" ,
36354: "lift" ,
36355: "light" ,
36356: "like" ,
36361: "liked" ,
36362: "likes" ,
36363: "lil" ,
36364: "lilac" ,
36365: "lilt" ,
36366: "lily" ,
36411: "lima" ,
36412: "limb" ,
36413: "limbo" ,
36414: "limbs" ,
36415: "lime" ,
36416: "limit" ,
36421: "limp" ,
36422: "limps" ,
36423: "linda" ,
36424: "line" ,
36425: "linen" ,
36426: "lines" ,
36431: "lingo" ,
36432: "link" ,
36433: "lint" ,
36434: "linus" ,
36435: "lion" ,
36436: "lip" ,
36441: "lips" ,
36442: "liquid" ,
36443: "lira" ,
36444: "lisa" ,
36445: "lisp" ,
36446: "list" ,
36451: "listen" ,
36452: "lists" ,
36453: "liszt" ,
36454: "lit" ,
36455: "litton" ,
36456: "live" ,
36461: "liver" ,
36462: "livid" ,
36463: "liz" ,
36464: "liza" ,
36465: "lizzie" ,
36466: "lj" ,
36511: "lk" ,
36512: "ll" ,
36513: "lll" ,
36514: "llll" ,
36515: "lloyd" ,
36516: "lm" ,
36521: "lmn" ,
36522: "ln" ,
36523: "lo" ,
36524: "load" ,
36525: "loaf" ,
36526: "loam" ,
36531: "loamy" ,
36532: "loan" ,
36533: "lob" ,
36534: "lobby" ,
36535: "lobe" ,
36536: "lobs" ,
36541: "local" ,
36542: "loch" ,
36543: "lock" ,
36544: "locks" ,
36545: "lode" ,
36546: "lodge" ,
36551: "loft" ,
36552: "lofty" ,
36553: "log" ,
36554: "logan" ,
36555: "logic" ,
36556: "logo" ,
36561: "logs" ,
36562: "loin" ,
36563: "loins" ,
36564: "lois" ,
36565: "loiter" ,
36566: "loki" ,
36611: "lola" ,
36612: "loll" ,
36613: "lone" ,
36614: "loner" ,
36615: "long" ,
36616: "longs" ,
36621: "look" ,
36622: "looks" ,
36623: "loom" ,
36624: "loon" ,
36625: "loony" ,
36626: "loop" ,
36631: "loose" ,
36632: "loot" ,
36633: "lop" ,
36634: "lopez" ,
36635: "lops" ,
36636: "lord" ,
36641: "lore" ,
36642: "loren" ,
36643: "lose" ,
36644: "loser" ,
36645: "loses" ,
36646: "loss" ,
36651: "lost" ,
36652: "lot" ,
36653: "lots" ,
36654: "lotto" ,
36655: "lotus" ,
36656: "lou" ,
36661: "loud" ,
36662: "louis" ,
36663: "louise" ,
36664: "louse" ,
36665: "lousy" ,
36666: "lout" ,
41111: "love" ,
41112: "loved" ,
41113: "lover" ,
41114: "low" ,
41115: "lower" ,
41116: "lowry" ,
41121: "lox" ,
41122: "loyal" ,
41123: "lp" ,
41124: "lq" ,
41125: "lr" ,
41126: "ls" ,
41131: "lsd" ,
41132: "lt" ,
41133: "ltd" ,
41134: "lu" ,
41135: "luau" ,
41136: "lucas" ,
41141: "luce" ,
41142: "lucia" ,
41143: "lucid" ,
41144: "luck" ,
41145: "lucky" ,
41146: "lucy" ,
41151: "ludwig" ,
41152: "lug" ,
41153: "luger" ,
41154: "lugs" ,
41155: "luis" ,
41156: "luke" ,
41161: "lull" ,
41162: "lulu" ,
41163: "lump" ,
41164: "lumps" ,
41165: "lumpy" ,
41166: "luna" ,
41211: "lunar" ,
41212: "lunch" ,
41213: "lung" ,
41214: "lunge" ,
41215: "lungs" ,
41216: "lurch" ,
41221: "lure" ,
41222: "lurid" ,
41223: "lurk" ,
41224: "lurks" ,
41225: "lush" ,
41226: "lust" ,
41231: "lusty" ,
41232: "lute" ,
41233: "luxury" ,
41234: "lv" ,
41235: "lw" ,
41236: "lx" ,
41241: "ly" ,
41242: "lye" ,
41243: "lying" ,
41244: "lyle" ,
41245: "lymph" ,
41246: "lynch" ,
41251: "lynn" ,
41252: "lynx" ,
41253: "lyre" ,
41254: "lyric" ,
41255: "lz" ,
41256: "m" ,
41261: "m&m" ,
41262: "m's" ,
41263: "m-16" ,
41264: "ma" ,
41265: "ma'am" ,
41266: "mabel" ,
41311: "mac" ,
41312: "macaw" ,
41313: "mace" ,
41314: "macho" ,
41315: "macro" ,
41316: "mad" ,
41321: "madam" ,
41322: "made" ,
41323: "madly" ,
41324: "madman" ,
41325: "mafia" ,
41326: "magic" ,
41331: "magma" ,
41332: "magnet" ,
41333: "magoo" ,
41334: "magpie" ,
41335: "maid" ,
41336: "maids" ,
41341: "mail" ,
41342: "maim" ,
41343: "maims" ,
41344: "main" ,
41345: "maine" ,
41346: "maize" ,
41351: "maj" ,
41352: "major" ,
41353: "make" ,
41354: "malady" ,
41355: "male" ,
41356: "malice" ,
41361: "mall" ,
41362: "malls" ,
41363: "malt" ,
41364: "mama" ,
41365: "mambo" ,
41366: "mammal" ,
41411: "man" ,
41412: "mane" ,
41413: "mango" ,
41414: "mania" ,
41415: "manic" ,
41416: "manly" ,
41421: "manna" ,
41422: "manor" ,
41423: "mantle" ,
41424: "many" ,
41425: "mao" ,
41426: "map" ,
41431: "maple" ,
41432: "maps" ,
41433: "mar" ,
41434: "marble" ,
41435: "march" ,
41436: "marco" ,
41441: "mare" ,
41442: "mares" ,
41443: "marge" ,
41444: "margo" ,
41445: "maria" ,
41446: "marie" ,
41451: "marine" ,
41452: "mario" ,
41453: "mark" ,
41454: "marks" ,
41455: "marlin" ,
41456: "marrow" ,
41461: "marry" ,
41462: "mars" ,
41463: "marsh" ,
41464: "mart" ,
41465: "marty" ,
41466: "martyr" ,
41511: "marx" ,
41512: "mary" ,
41513: "mash" ,
41514: "mask" ,
41515: "masks" ,
41516: "mason" ,
41521: "mass" ,
41522: "mast" ,
41523: "masts" ,
41524: "mat" ,
41525: "match" ,
41526: "mate" ,
41531: "mated" ,
41532: "mates" ,
41533: "math" ,
41534: "mats" ,
41535: "matt" ,
41536: "matzo" ,
41541: "maud" ,
41542: "maude" ,
41543: "maul" ,
41544: "mauls" ,
41545: "maw" ,
41546: "max" ,
41551: "maxim" ,
41552: "may" ,
41553: "maybe" ,
41554: "mayhem" ,
41555: "mayo" ,
41556: "mayor" ,
41561: "mazda" ,
41562: "maze" ,
41563: "mazes" ,
41564: "mb" ,
41565: "mba" ,
41566: "mc" ,
41611: "mccoy" ,
41612: "mcgee" ,
41613: "md" ,
41614: "me" ,
41615: "meadow" ,
41616: "meal" ,
41621: "meals" ,
41622: "mean" ,
41623: "means" ,
41624: "meant" ,
41625: "meat" ,
41626: "meaty" ,
41631: "mecca" ,
41632: "medal" ,
41633: "media" ,
41634: "medic" ,
41635: "medley" ,
41636: "meek" ,
41641: "meet" ,
41642: "meets" ,
41643: "meg" ,
41644: "meld" ,
41645: "melee" ,
41646: "mellow" ,
41651: "melody" ,
41652: "melon" ,
41653: "melt" ,
41654: "melts" ,
41655: "memo" ,
41656: "memoir" ,
41661: "men" ,
41662: "mend" ,
41663: "mends" ,
41664: "menu" ,
41665: "meow" ,
41666: "mercy" ,
42111: "mere" ,
42112: "merge" ,
42113: "merit" ,
42114: "merry" ,
42115: "mesa" ,
42116: "mesh" ,
42121: "mess" ,
42122: "messy" ,
42123: "met" ,
42124: "metal" ,
42125: "meteor" ,
42126: "meter" ,
42131: "metro" ,
42132: "meyer" ,
42133: "mf" ,
42134: "mg" ,
42135: "mgm" ,
42136: "mgmt" ,
42141: "mh" ,
42142: "mi" ,
42143: "mia" ,
42144: "miami" ,
42145: "mice" ,
42146: "mickey" ,
42151: "micro" ,
42152: "mid" ,
42153: "midas" ,
42154: "midst" ,
42155: "mig" ,
42156: "might" ,
42161: "migs" ,
42162: "mike" ,
42163: "mild" ,
42164: "mildew" ,
42165: "mile" ,
42166: "miles" ,
42211: "milk" ,
42212: "milky" ,
42213: "mill" ,
42214: "mills" ,
42215: "milo" ,
42216: "mime" ,
42221: "mimes" ,
42222: "mimi" ,
42223: "mimic" ,
42224: "mince" ,
42225: "mind" ,
42226: "minds" ,
42231: "mine" ,
42232: "mined" ,
42233: "miner" ,
42234: "mines" ,
42235: "mini" ,
42236: "mink" ,
42241: "minnow" ,
42242: "minor" ,
42243: "mint" ,
42244: "mints" ,
42245: "minty" ,
42246: "minus" ,
42251: "mirage" ,
42252: "mire" ,
42253: "mired" ,
42254: "mirth" ,
42255: "mirv" ,
42256: "misc" ,
42261: "miser" ,
42262: "misery" ,
42263: "miss" ,
42264: "mist" ,
42265: "mists" ,
42266: "misty" ,
42311: "mit" ,
42312: "mite" ,
42313: "mites" ,
42314: "mitt" ,
42315: "mitts" ,
42316: "mix" ,
42321: "mixed" ,
42322: "mixer" ,
42323: "mixes" ,
42324: "mixup" ,
42325: "mj" ,
42326: "mk" ,
42331: "ml" ,
42332: "mm" ,
42333: "mmm" ,
42334: "mmmm" ,
42335: "mn" ,
42336: "mno" ,
42341: "mo" ,
42342: "moan" ,
42343: "moans" ,
42344: "moat" ,
42345: "mob" ,
42346: "mobil" ,
42351: "mobs" ,
42352: "moby" ,
42353: "mock" ,
42354: "mocks" ,
42355: "mod" ,
42356: "mode" ,
42361: "model" ,
42362: "modem" ,
42363: "moe" ,
42364: "mogul" ,
42365: "moist" ,
42366: "mojo" ,
42411: "molar" ,
42412: "mold" ,
42413: "molds" ,
42414: "mole" ,
42415: "moles" ,
42416: "molly" ,
42421: "molt" ,
42422: "molten" ,
42423: "mom" ,
42424: "momma" ,
42425: "mommy" ,
42426: "mon" ,
42431: "mona" ,
42432: "money" ,
42433: "monk" ,
42434: "monkey" ,
42435: "mono" ,
42436: "month" ,
42441: "monty" ,
42442: "moo" ,
42443: "mooch" ,
42444: "mood" ,
42445: "moods" ,
42446: "moody" ,
42451: "moon" ,
42452: "moons" ,
42453: "moor" ,
42454: "moore" ,
42455: "moose" ,
42456: "mop" ,
42461: "mope" ,
42462: "mopes" ,
42463: "mops" ,
42464: "moral" ,
42465: "morale" ,
42466: "morbid" ,
42511: "more" ,
42512: "morn" ,
42513: "moron" ,
42514: "morph" ,
42515: "morse" ,
42516: "morsel" ,
42521: "mort" ,
42522: "mosaic" ,
42523: "moses" ,
42524: "moss" ,
42525: "mossy" ,
42526: "most" ,
42531: "mote" ,
42532: "motel" ,
42533: "moth" ,
42534: "mother" ,
42535: "moths" ,
42536: "motif" ,
42541: "motor" ,
42542: "motto" ,
42543: "mound" ,
42544: "mount" ,
42545: "mourn" ,
42546: "mouse" ,
42551: "mousy" ,
42552: "mouth" ,
42553: "move" ,
42554: "moved" ,
42555: "moves" ,
42556: "movie" ,
42561: "mow" ,
42562: "mowed" ,
42563: "mower" ,
42564: "mows" ,
42565: "moxie" ,
42566: "mp" ,
42611: "mpg" ,
42612: "mph" ,
42613: "mq" ,
42614: "mr" ,
42615: "mrs" ,
42616: "ms" ,
42621: "msdos" ,
42622: "msg" ,
42623: "mt" ,
42624: "mu" ,
42625: "much" ,
42626: "muck" ,
42631: "mucus" ,
42632: "mud" ,
42633: "muddy" ,
42634: "muff" ,
42635: "muffin" ,
42636: "mug" ,
42641: "muggy" ,
42642: "mugs" ,
42643: "mulch" ,
42644: "mule" ,
42645: "mules" ,
42646: "mull" ,
42651: "mum" ,
42652: "mumble" ,
42653: "mummy" ,
42654: "mumps" ,
42655: "munch" ,
42656: "mural" ,
42661: "muriel" ,
42662: "murk" ,
42663: "murky" ,
42664: "muse" ,
42665: "muses" ,
42666: "mush" ,
43111: "mushy" ,
43112: "music" ,
43113: "musk" ,
43114: "musky" ,
43115: "muslim" ,
43116: "muss" ,
43121: "must" ,
43122: "musty" ,
43123: "mute" ,
43124: "muted" ,
43125: "mutt" ,
43126: "muzak" ,
43131: "mv" ,
43132: "mw" ,
43133: "mx" ,
43134: "my" ,
43135: "mylar" ,
43136: "mynah" ,
43141: "myob" ,
43142: "myopia" ,
43143: "myra" ,
43144: "myron" ,
43145: "myself" ,
43146: "myth" ,
43151: "myths" ,
43152: "mz" ,
43153: "n" ,
43154: "n's" ,
43155: "na" ,
43156: "nab" ,
43161: "nabs" ,
43162: "nacl" ,
43163: "nag" ,
43164: "nags" ,
43165: "nail" ,
43166: "nails" ,
43211: "naive" ,
43212: "naked" ,
43213: "name" ,
43214: "named" ,
43215: "names" ,
43216: "nan" ,
43221: "nancy" ,
43222: "naomi" ,
43223: "nap" ,
43224: "nape" ,
43225: "napkin" ,
43226: "naps" ,
43231: "nasa" ,
43232: "nasal" ,
43233: "nash" ,
43234: "nasty" ,
43235: "nat" ,
43236: "natal" ,
43241: "nate" ,
43242: "nato" ,
43243: "nature" ,
43244: "nausea" ,
43245: "naval" ,
43246: "navel" ,
43251: "navy" ,
43252: "nay" ,
43253: "nazi" ,
43254: "nb" ,
43255: "nc" ,
43256: "nd" ,
43261: "ne" ,
43262: "near" ,
43263: "nearby" ,
43264: "neat" ,
43265: "neck" ,
43266: "necks" ,
43311: "ned" ,
43312: "need" ,
43313: "needs" ,
43314: "needy" ,
43315: "negate" ,
43316: "negro" ,
43321: "neigh" ,
43322: "neil" ,
43323: "nell" ,
43324: "neon" ,
43325: "nerd" ,
43326: "nerve" ,
43331: "nest" ,
43332: "nests" ,
43333: "net" ,
43334: "nets" ,
43335: "never" ,
43336: "new" ,
43341: "newly" ,
43342: "news" ,
43343: "newt" ,
43344: "next" ,
43345: "nf" ,
43346: "ng" ,
43351: "nguyen" ,
43352: "nh" ,
43353: "ni" ,
43354: "nice" ,
43355: "nicer" ,
43356: "nick" ,
43361: "nickel" ,
43362: "nico" ,
43363: "niece" ,
43364: "nifty" ,
43365: "night" ,
43366: "nil" ,
43411: "nile" ,
43412: "nina" ,
43413: "nine" ,
43414: "ninja" ,
43415: "ninth" ,
43416: "niobe" ,
43421: "nip" ,
43422: "nips" ,
43423: "nitwit" ,
43424: "nix" ,
43425: "nixon" ,
43426: "nj" ,
43431: "nk" ,
43432: "nl" ,
43433: "nm" ,
43434: "nn" ,
43435: "nne" ,
43436: "nnn" ,
43441: "nnnn" ,
43442: "nnw" ,
43443: "no" ,
43444: "noah" ,
43445: "noble" ,
43446: "nod" ,
43451: "node" ,
43452: "nods" ,
43453: "noel" ,
43454: "noise" ,
43455: "noisy" ,
43456: "nomad" ,
43461: "none" ,
43462: "nono" ,
43463: "nook" ,
43464: "noon" ,
43465: "noose" ,
43466: "nop" ,
43511: "nope" ,
43512: "nor" ,
43513: "nora" ,
43514: "norm" ,
43515: "norma" ,
43516: "north" ,
43521: "norway" ,
43522: "nose" ,
43523: "nosy" ,
43524: "not" ,
43525: "notch" ,
43526: "note" ,
43531: "noted" ,
43532: "notes" ,
43533: "noun" ,
43534: "nouns" ,
43535: "nov" ,
43536: "nova" ,
43541: "novak" ,
43542: "novel" ,
43543: "now" ,
43544: "np" ,
43545: "nq" ,
43546: "nr" ,
43551: "ns" ,
43552: "nt" ,
43553: "nu" ,
43554: "nuance" ,
43555: "nude" ,
43556: "nudge" ,
43561: "nuke" ,
43562: "null" ,
43563: "numb" ,
43564: "nun" ,
43565: "nuns" ,
43566: "nurse" ,
43611: "nut" ,
43612: "nutmeg" ,
43613: "nuts" ,
43614: "nutty" ,
43615: "nv" ,
43616: "nw" ,
43621: "nx" ,
43622: "ny" ,
43623: "nyc" ,
43624: "nylon" ,
43625: "nymph" ,
43626: "nz" ,
43631: "o" ,
43632: "o's" ,
43633: "oa" ,
43634: "oaf" ,
43635: "oak" ,
43636: "oaken" ,
43641: "oar" ,
43642: "oars" ,
43643: "oasis" ,
43644: "oat" ,
43645: "oath" ,
43646: "oats" ,
43651: "ob" ,
43652: "obese" ,
43653: "obey" ,
43654: "obeys" ,
43655: "obit" ,
43656: "object" ,
43661: "oboe" ,
43662: "oc" ,
43663: "occur" ,
43664: "ocean" ,
43665: "ocr" ,
43666: "oct" ,
44111: "octal" ,
44112: "octave" ,
44113: "od" ,
44114: "odd" ,
44115: "odds" ,
44116: "ode" ,
44121: "odor" ,
44122: "odors" ,
44123: "oe" ,
44124: "of" ,
44125: "off" ,
44126: "offend" ,
44131: "offer" ,
44132: "often" ,
44133: "og" ,
44134: "ogle" ,
44135: "ogled" ,
44136: "ogles" ,
44141: "ogre" ,
44142: "oh" ,
44143: "ohio" ,
44144: "oho" ,
44145: "oi" ,
44146: "oil" ,
44151: "oiled" ,
44152: "oils" ,
44153: "oily" ,
44154: "oink" ,
44155: "oj" ,
44156: "ok" ,
44161: "okay" ,
44162: "okays" ,
44163: "okra" ,
44164: "ol" ,
44165: "olaf" ,
44166: "old" ,
44211: "older" ,
44212: "ole" ,
44213: "olga" ,
44214: "olive" ,
44215: "olson" ,
44216: "om" ,
44221: "omaha" ,
44222: "omega" ,
44223: "omen" ,
44224: "omens" ,
44225: "omit" ,
44226: "omits" ,
44231: "on" ,
44232: "once" ,
44233: "one" ,
44234: "onion" ,
44235: "only" ,
44236: "onset" ,
44241: "onto" ,
44242: "onward" ,
44243: "oo" ,
44244: "ooo" ,
44245: "oooo" ,
44246: "oops" ,
44251: "ooze" ,
44252: "oozed" ,
44253: "op" ,
44254: "opal" ,
44255: "opals" ,
44256: "opec" ,
44261: "open" ,
44262: "opens" ,
44263: "opera" ,
44264: "opium" ,
44265: "opq" ,
44266: "opt" ,
44311: "optic" ,
44312: "opus" ,
44313: "oq" ,
44314: "or" ,
44315: "oral" ,
44316: "orb" ,
44321: "orbit" ,
44322: "orbs" ,
44323: "orchid" ,
44324: "order" ,
44325: "ore" ,
44326: "organ" ,
44331: "orgy" ,
44332: "ornery" ,
44333: "orphan" ,
44334: "os" ,
44335: "oscar" ,
44336: "ot" ,
44341: "other" ,
44342: "otis" ,
44343: "otter" ,
44344: "otto" ,
44345: "ou" ,
44346: "ouch" ,
44351: "ought" ,
44352: "ouija" ,
44353: "ounce" ,
44354: "our" ,
44355: "ours" ,
44356: "oust" ,
44361: "out" ,
44362: "outdo" ,
44363: "outer" ,
44364: "outlaw" ,
44365: "ov" ,
44366: "oval" ,
44411: "ovals" ,
44412: "ovary" ,
44413: "oven" ,
44414: "ovens" ,
44415: "over" ,
44416: "overt" ,
44421: "ow" ,
44422: "owe" ,
44423: "owed" ,
44424: "owens" ,
44425: "owes" ,
44426: "owing" ,
44431: "owl" ,
44432: "owls" ,
44433: "own" ,
44434: "owned" ,
44435: "owner" ,
44436: "owns" ,
44441: "ox" ,
44442: "oxen" ,
44443: "oxide" ,
44444: "oy" ,
44445: "oz" ,
44446: "ozone" ,
44451: "p" ,
44452: "p's" ,
44453: "pa" ,
44454: "pablo" ,
44455: "pace" ,
44456: "paces" ,
44461: "pack" ,
44462: "packet" ,
44463: "packs" ,
44464: "pact" ,
44465: "pad" ,
44466: "paddy" ,
44511: "pads" ,
44512: "pagan" ,
44513: "page" ,
44514: "pages" ,
44515: "paid" ,
44516: "pail" ,
44521: "pain" ,
44522: "pains" ,
44523: "paint" ,
44524: "pair" ,
44525: "pajama" ,
44526: "pal" ,
44531: "pale" ,
44532: "palm" ,
44533: "palms" ,
44534: "pals" ,
44535: "pam" ,
44536: "pan" ,
44541: "panama" ,
44542: "panda" ,
44543: "pane" ,
44544: "panel" ,
44545: "pang" ,
44546: "panic" ,
44551: "pans" ,
44552: "pansy" ,
44553: "pant" ,
44554: "pants" ,
44555: "papa" ,
44556: "paper" ,
44561: "pappy" ,
44562: "par" ,
44563: "pardon" ,
44564: "pare" ,
44565: "paris" ,
44566: "park" ,
44611: "parks" ,
44612: "parse" ,
44613: "part" ,
44614: "parts" ,
44615: "party" ,
44616: "pascal" ,
44621: "pass" ,
44622: "past" ,
44623: "paste" ,
44624: "pasty" ,
44625: "pat" ,
44626: "patch" ,
44631: "path" ,
44632: "paths" ,
44633: "patio" ,
44634: "pats" ,
44635: "patsy" ,
44636: "patton" ,
44641: "patty" ,
44642: "paul" ,
44643: "paula" ,
44644: "pause" ,
44645: "pave" ,
44646: "paved" ,
44651: "paves" ,
44652: "paw" ,
44653: "pawed" ,
44654: "pawn" ,
44655: "pawns" ,
44656: "paws" ,
44661: "pay" ,
44662: "payday" ,
44663: "pb" ,
44664: "pc" ,
44665: "pd" ,
44666: "pdq" ,
45111: "pe" ,
45112: "pea" ,
45113: "peace" ,
45114: "peach" ,
45115: "peak" ,
45116: "peaks" ,
45121: "pear" ,
45122: "pearl" ,
45123: "pears" ,
45124: "peas" ,
45125: "pebble" ,
45126: "pecan" ,
45131: "peck" ,
45132: "pecks" ,
45133: "pedal" ,
45134: "pedro" ,
45135: "pee" ,
45136: "peed" ,
45141: "peek" ,
45142: "peel" ,
45143: "peep" ,
45144: "peer" ,
45145: "peeve" ,
45146: "peg" ,
45151: "peggy" ,
45152: "pegs" ,
45153: "pelt" ,
45154: "pen" ,
45155: "penal" ,
45156: "pencil" ,
45161: "penn" ,
45162: "penny" ,
45163: "pens" ,
45164: "peony" ,
45165: "people" ,
45166: "pep" ,
45211: "peppy" ,
45212: "pepsi" ,
45213: "per" ,
45214: "perch" ,
45215: "percy" ,
45216: "perez" ,
45221: "peril" ,
45222: "period" ,
45223: "perk" ,
45224: "perks" ,
45225: "perky" ,
45226: "perm" ,
45231: "perry" ,
45232: "pert" ,
45233: "peru" ,
45234: "peso" ,
45235: "pest" ,
45236: "pests" ,
45241: "pet" ,
45242: "petal" ,
45243: "pete" ,
45244: "peter" ,
45245: "pets" ,
45246: "petty" ,
45251: "pf" ,
45252: "pfc" ,
45253: "pg" ,
45254: "ph" ,
45255: "phase" ,
45256: "phd" ,
45261: "phi" ,
45262: "phil" ,
45263: "phlox" ,
45264: "phone" ,
45265: "phony" ,
45266: "photo" ,
45311: "phrase" ,
45312: "pi" ,
45313: "piano" ,
45314: "pick" ,
45315: "picks" ,
45316: "pickup" ,
45321: "picky" ,
45322: "picnic" ,
45323: "pie" ,
45324: "piece" ,
45325: "pier" ,
45326: "pierce" ,
45331: "piers" ,
45332: "pies" ,
45333: "piety" ,
45334: "pig" ,
45335: "piggy" ,
45336: "pigs" ,
45341: "pike" ,
45342: "pile" ,
45343: "piles" ,
45344: "pill" ,
45345: "pills" ,
45346: "pilot" ,
45351: "pimp" ,
45352: "pimple" ,
45353: "pin" ,
45354: "pinch" ,
45355: "pine" ,
45356: "pines" ,
45361: "ping" ,
45362: "pink" ,
45363: "pinko" ,
45364: "pins" ,
45365: "pint" ,
45366: "pinto" ,
45411: "pinup" ,
45412: "pious" ,
45413: "pip" ,
45414: "pipe" ,
45415: "piper" ,
45416: "pirate" ,
45421: "pit" ,
45422: "pita" ,
45423: "pitch" ,
45424: "pith" ,
45425: "pithy" ,
45426: "pits" ,
45431: "pity" ,
45432: "pivot" ,
45433: "pixel" ,
45434: "pixie" ,
45435: "pizza" ,
45436: "pj" ,
45441: "pk" ,
45442: "pl" ,
45443: "place" ,
45444: "plague" ,
45445: "plaid" ,
45446: "plain" ,
45451: "plan" ,
45452: "plane" ,
45453: "planet" ,
45454: "plank" ,
45455: "plant" ,
45456: "plate" ,
45461: "plato" ,
45462: "play" ,
45463: "plays" ,
45464: "plaza" ,
45465: "plea" ,
45466: "plead" ,
45511: "pleas" ,
45512: "pleat" ,
45513: "pledge" ,
45514: "plod" ,
45515: "plods" ,
45516: "plop" ,
45521: "plot" ,
45522: "plots" ,
45523: "plow" ,
45524: "plows" ,
45525: "ploy" ,
45526: "ploys" ,
45531: "pluck" ,
45532: "plug" ,
45533: "plugs" ,
45534: "plum" ,
45535: "plume" ,
45536: "plump" ,
45541: "plums" ,
45542: "plus" ,
45543: "plush" ,
45544: "pluto" ,
45545: "ply" ,
45546: "pm" ,
45551: "pms" ,
45552: "pn" ,
45553: "po" ,
45554: "poach" ,
45555: "pobox" ,
45556: "pod" ,
45561: "pods" ,
45562: "poe" ,
45563: "poem" ,
45564: "poems" ,
45565: "poet" ,
45566: "poetry" ,
45611: "pogo" ,
45612: "poi" ,
45613: "point" ,
45614: "poise" ,
45615: "poison" ,
45616: "poke" ,
45621: "poked" ,
45622: "pokes" ,
45623: "pol" ,
45624: "polar" ,
45625: "pole" ,
45626: "poles" ,
45631: "police" ,
45632: "polio" ,
45633: "polk" ,
45634: "polka" ,
45635: "poll" ,
45636: "polls" ,
45641: "polo" ,
45642: "pomp" ,
45643: "pond" ,
45644: "ponds" ,
45645: "pony" ,
45646: "pooch" ,
45651: "pooh" ,
45652: "pool" ,
45653: "pools" ,
45654: "poop" ,
45655: "poor" ,
45656: "pop" ,
45661: "pope" ,
45662: "poppy" ,
45663: "pops" ,
45664: "porch" ,
45665: "pore" ,
45666: "pores" ,
46111: "pork" ,
46112: "porn" ,
46113: "porous" ,
46114: "port" ,
46115: "pose" ,
46116: "posed" ,
46121: "poses" ,
46122: "posh" ,
46123: "posse" ,
46124: "post" ,
46125: "posts" ,
46126: "posy" ,
46131: "pot" ,
46132: "potato" ,
46133: "pots" ,
46134: "potts" ,
46135: "pouch" ,
46136: "pound" ,
46141: "pour" ,
46142: "pours" ,
46143: "pout" ,
46144: "pouts" ,
46145: "pow" ,
46146: "powder" ,
46151: "power" ,
46152: "pox" ,
46153: "pp" ,
46154: "ppm" ,
46155: "ppp" ,
46156: "pppp" ,
46161: "pq" ,
46162: "pqr" ,
46163: "pr" ,
46164: "prank" ,
46165: "prawn" ,
46166: "pray" ,
46211: "prays" ,
46212: "preen" ,
46213: "prefix" ,
46214: "prep" ,
46215: "press" ,
46216: "prexy" ,
46221: "prey" ,
46222: "price" ,
46223: "prick" ,
46224: "pride" ,
46225: "prig" ,
46226: "prim" ,
46231: "prime" ,
46232: "prince" ,
46233: "print" ,
46234: "prior" ,
46235: "prism" ,
46236: "prissy" ,
46241: "privy" ,
46242: "prize" ,
46243: "pro" ,
46244: "probe" ,
46245: "prod" ,
46246: "prods" ,
46251: "prof" ,
46252: "prom" ,
46253: "promo" ,
46254: "prone" ,
46255: "prong" ,
46256: "proof" ,
46261: "prop" ,
46262: "propel" ,
46263: "props" ,
46264: "prose" ,
46265: "proud" ,
46266: "prove" ,
46311: "prow" ,
46312: "prowl" ,
46313: "proxy" ,
46314: "prude" ,
46315: "prune" ,
46316: "pry" ,
46321: "ps" ,
46322: "psalm" ,
46323: "psi" ,
46324: "psych" ,
46325: "pt" ,
46326: "pu" ,
46331: "pub" ,
46332: "pubic" ,
46333: "pubs" ,
46334: "puck" ,
46335: "pucker" ,
46336: "puddle" ,
46341: "pudgy" ,
46342: "puff" ,
46343: "puffs" ,
46344: "puffy" ,
46345: "pug" ,
46346: "puke" ,
46351: "pull" ,
46352: "pulls" ,
46353: "pulp" ,
46354: "pulse" ,
46355: "puma" ,
46356: "pump" ,
46361: "pumps" ,
46362: "pun" ,
46363: "punch" ,
46364: "punish" ,
46365: "punk" ,
46366: "punks" ,
46411: "punky" ,
46412: "puns" ,
46413: "punt" ,
46414: "punts" ,
46415: "puny" ,
46416: "pup" ,
46421: "pupil" ,
46422: "puppy" ,
46423: "pure" ,
46424: "purge" ,
46425: "purr" ,
46426: "purse" ,
46431: "pus" ,
46432: "push" ,
46433: "pushy" ,
46434: "pussy" ,
46435: "put" ,
46436: "puts" ,
46441: "putt" ,
46442: "putty" ,
46443: "puzzle" ,
46444: "pv" ,
46445: "pvc" ,
46446: "pw" ,
46451: "px" ,
46452: "py" ,
46453: "pygmy" ,
46454: "pyre" ,
46455: "pyrex" ,
46456: "pz" ,
46461: "q" ,
46462: "q&a" ,
46463: "q's" ,
46464: "qa" ,
46465: "qb" ,
46466: "qc" ,
46511: "qd" ,
46512: "qe" ,
46513: "qed" ,
46514: "qf" ,
46515: "qg" ,
46516: "qh" ,
46521: "qi" ,
46522: "qj" ,
46523: "qk" ,
46524: "ql" ,
46525: "qm" ,
46526: "qn" ,
46531: "qo" ,
46532: "qp" ,
46533: "qq" ,
46534: "qqq" ,
46535: "qqqq" ,
46536: "qr" ,
46541: "qrs" ,
46542: "qs" ,
46543: "qt" ,
46544: "qu" ,
46545: "quack" ,
46546: "quad" ,
46551: "quail" ,
46552: "quake" ,
46553: "quarry" ,
46554: "quart" ,
46555: "queasy" ,
46556: "queen" ,
46561: "query" ,
46562: "quest" ,
46563: "queue" ,
46564: "quick" ,
46565: "quiet" ,
46566: "quill" ,
46611: "quilt" ,
46612: "quinn" ,
46613: "quip" ,
46614: "quips" ,
46615: "quirk" ,
46616: "quit" ,
46621: "quite" ,
46622: "quits" ,
46623: "quiver" ,
46624: "quiz" ,
46625: "quota" ,
46626: "quote" ,
46631: "qv" ,
46632: "qw" ,
46633: "qx" ,
46634: "qy" ,
46635: "qz" ,
46636: "r" ,
46641: "r&b" ,
46642: "r&d" ,
46643: "r&r" ,
46644: "r's" ,
46645: "ra" ,
46646: "rabbi" ,
46651: "rabbit" ,
46652: "rabid" ,
46653: "race" ,
46654: "raced" ,
46655: "races" ,
46656: "rack" ,
46661: "racy" ,
46662: "radar" ,
46663: "radio" ,
46664: "radish" ,
46665: "raft" ,
46666: "rafts" ,
51111: "rag" ,
51112: "rage" ,
51113: "raged" ,
51114: "rags" ,
51115: "raid" ,
51116: "raids" ,
51121: "rail" ,
51122: "rails" ,
51123: "rain" ,
51124: "rains" ,
51125: "rainy" ,
51126: "raise" ,
51131: "rake" ,
51132: "raked" ,
51133: "rakes" ,
51134: "rally" ,
51135: "ralph" ,
51136: "ram" ,
51141: "rambo" ,
51142: "ramp" ,
51143: "rams" ,
51144: "ramsey" ,
51145: "ran" ,
51146: "ranch" ,
51151: "rand" ,
51152: "randy" ,
51153: "rang" ,
51154: "range" ,
51155: "rank" ,
51156: "ranks" ,
51161: "rant" ,
51162: "rants" ,
51163: "raoul" ,
51164: "rap" ,
51165: "rape" ,
51166: "raped" ,
51211: "rapid" ,
51212: "raps" ,
51213: "rare" ,
51214: "rascal" ,
51215: "rash" ,
51216: "rat" ,
51221: "rate" ,
51222: "rated" ,
51223: "rates" ,
51224: "ratio" ,
51225: "rats" ,
51226: "rattle" ,
51231: "rave" ,
51232: "raved" ,
51233: "raven" ,
51234: "raw" ,
51235: "ray" ,
51236: "rayon" ,
51241: "rays" ,
51242: "raze" ,
51243: "razor" ,
51244: "rb" ,
51245: "rc" ,
51246: "rd" ,
51251: "re" ,
51252: "reach" ,
51253: "read" ,
51254: "reads" ,
51255: "ready" ,
51256: "reagan" ,
51261: "real" ,
51262: "realm" ,
51263: "reap" ,
51264: "rear" ,
51265: "rebel" ,
51266: "rebut" ,
51311: "recap" ,
51312: "recipe" ,
51313: "recur" ,
51314: "red" ,
51315: "redeem" ,
51316: "redo" ,
51321: "reduce" ,
51322: "reed" ,
51323: "reeds" ,
51324: "reef" ,
51325: "reek" ,
51326: "reeks" ,
51331: "reel" ,
51332: "reels" ,
51333: "ref" ,
51334: "refer" ,
51335: "refs" ,
51336: "regal" ,
51341: "regs" ,
51342: "rehab" ,
51343: "reich" ,
51344: "reid" ,
51345: "reign" ,
51346: "rein" ,
51351: "reins" ,
51352: "reject" ,
51353: "relax" ,
51354: "relay" ,
51355: "relic" ,
51356: "rely" ,
51361: "rem" ,
51362: "remedy" ,
51363: "remit" ,
51364: "remix" ,
51365: "rena" ,
51366: "rend" ,
51411: "renee" ,
51412: "renew" ,
51413: "reno" ,
51414: "renown" ,
51415: "rent" ,
51416: "rents" ,
51421: "rep" ,
51422: "repay" ,
51423: "repel" ,
51424: "repent" ,
51425: "reply" ,
51426: "reps" ,
51431: "rerun" ,
51432: "reset" ,
51433: "resin" ,
51434: "resort" ,
51435: "rest" ,
51436: "rests" ,
51441: "retch" ,
51442: "return" ,
51443: "reuse" ,
51444: "rev" ,
51445: "reveal" ,
51446: "revel" ,
51451: "review" ,
51452: "rex" ,
51453: "rf" ,
51454: "rg" ,
51455: "rh" ,
51456: "rhino" ,
51461: "rho" ,
51462: "rhoda" ,
51463: "rhyme" ,
51464: "ri" ,
51465: "rib" ,
51466: "ribs" ,
51511: "rice" ,
51512: "rich" ,
51513: "rick" ,
51514: "ricky" ,
51515: "rico" ,
51516: "rid" ,
51521: "ride" ,
51522: "rider" ,
51523: "ridge" ,
51524: "rif" ,
51525: "rifle" ,
51526: "rift" ,
51531: "rig" ,
51532: "riggs" ,
51533: "right" ,
51534: "rigid" ,
51535: "rigs" ,
51536: "riley" ,
51541: "rim" ,
51542: "rims" ,
51543: "rind" ,
51544: "ring" ,
51545: "ringo" ,
51546: "rings" ,
51551: "rink" ,
51552: "rinse" ,
51553: "rio" ,
51554: "riot" ,
51555: "riots" ,
51556: "rip" ,
51561: "ripe" ,
51562: "ripen" ,
51563: "ripley" ,
51564: "rips" ,
51565: "rise" ,
51566: "risen" ,
51611: "risk" ,
51612: "risky" ,
51613: "rite" ,
51614: "ritual" ,
51615: "rival" ,
51616: "river" ,
51621: "rivet" ,
51622: "rj" ,
51623: "rk" ,
51624: "rl" ,
51625: "rm" ,
51626: "rn" ,
51631: "rna" ,
51632: "ro" ,
51633: "roach" ,
51634: "road" ,
51635: "roads" ,
51636: "roam" ,
51641: "roar" ,
51642: "roast" ,
51643: "rob" ,
51644: "robe" ,
51645: "robin" ,
51646: "robot" ,
51651: "robs" ,
51652: "rock" ,
51653: "rocket" ,
51654: "rocks" ,
51655: "rocky" ,
51656: "rod" ,
51661: "rode" ,
51662: "rodeo" ,
51663: "rods" ,
51664: "roger" ,
51665: "rogue" ,
51666: "role" ,
52111: "roll" ,
52112: "rolls" ,
52113: "roman" ,
52114: "rome" ,
52115: "romeo" ,
52116: "romp" ,
52121: "ron" ,
52122: "roof" ,
52123: "rook" ,
52124: "rookie" ,
52125: "room" ,
52126: "rooms" ,
52131: "roomy" ,
52132: "roost" ,
52133: "root" ,
52134: "roots" ,
52135: "rope" ,
52136: "rosa" ,
52141: "rose" ,
52142: "ross" ,
52143: "rosy" ,
52144: "rot" ,
52145: "rote" ,
52146: "roth" ,
52151: "rots" ,
52152: "rouge" ,
52153: "rough" ,
52154: "round" ,
52155: "rouse" ,
52156: "rout" ,
52161: "route" ,
52162: "rover" ,
52163: "row" ,
52164: "rowdy" ,
52165: "rows" ,
52166: "roy" ,
52211: "royal" ,
52212: "rp" ,
52213: "rpg" ,
52214: "rq" ,
52215: "rr" ,
52216: "rrr" ,
52221: "rrrr" ,
52222: "rs" ,
52223: "rst" ,
52224: "rsvp" ,
52225: "rt" ,
52226: "ru" ,
52231: "rub" ,
52232: "rube" ,
52233: "rubs" ,
52234: "ruby" ,
52235: "rude" ,
52236: "rudy" ,
52241: "rufus" ,
52242: "rug" ,
52243: "rugged" ,
52244: "rugs" ,
52245: "ruin" ,
52246: "ruins" ,
52251: "rule" ,
52252: "ruler" ,
52253: "rules" ,
52254: "rum" ,
52255: "rummy" ,
52256: "rumor" ,
52261: "rump" ,
52262: "rumpus" ,
52263: "run" ,
52264: "rune" ,
52265: "runes" ,
52266: "rung" ,
52311: "runs" ,
52312: "runt" ,
52313: "runway" ,
52314: "rural" ,
52315: "ruse" ,
52316: "rush" ,
52321: "russ" ,
52322: "rust" ,
52323: "rusts" ,
52324: "rusty" ,
52325: "rut" ,
52326: "ruth" ,
52331: "ruts" ,
52332: "rv" ,
52333: "rw" ,
52334: "rx" ,
52335: "ry" ,
52336: "ryan" ,
52341: "rye" ,
52342: "rz" ,
52343: "s" ,
52344: "s's" ,
52345: "sa" ,
52346: "saber" ,
52351: "sable" ,
52352: "sac" ,
52353: "sack" ,
52354: "sacks" ,
52355: "sacred" ,
52356: "sad" ,
52361: "saddle" ,
52362: "sadly" ,
52363: "safari" ,
52364: "safe" ,
52365: "safer" ,
52366: "safes" ,
52411: "sag" ,
52412: "saga" ,
52413: "sagas" ,
52414: "sage" ,
52415: "sags" ,
52416: "said" ,
52421: "sail" ,
52422: "sails" ,
52423: "saint" ,
52424: "sake" ,
52425: "sal" ,
52426: "salad" ,
52431: "salami" ,
52432: "sale" ,
52433: "sales" ,
52434: "salk" ,
52435: "sally" ,
52436: "salon" ,
52441: "salt" ,
52442: "salts" ,
52443: "salty" ,
52444: "salvo" ,
52445: "sam" ,
52446: "same" ,
52451: "sammy" ,
52452: "samuel" ,
52453: "sand" ,
52454: "sandal" ,
52455: "sands" ,
52456: "sandy" ,
52461: "sane" ,
52462: "sang" ,
52463: "sank" ,
52464: "santa" ,
52465: "sap" ,
52466: "sappy" ,
52511: "saps" ,
52512: "sara" ,
52513: "sarah" ,
52514: "saran" ,
52515: "sase" ,
52516: "sash" ,
52521: "sat" ,
52522: "satan" ,
52523: "satin" ,
52524: "sauce" ,
52525: "saucy" ,
52526: "saudi" ,
52531: "saul" ,
52532: "sauna" ,
52533: "saute" ,
52534: "save" ,
52535: "saved" ,
52536: "saves" ,
52541: "savvy" ,
52542: "saw" ,
52543: "saws" ,
52544: "sawyer" ,
52545: "sax" ,
52546: "say" ,
52551: "says" ,
52552: "sb" ,
52553: "sc" ,
52554: "scab" ,
52555: "scald" ,
52556: "scale" ,
52561: "scalp" ,
52562: "scam" ,
52563: "scamp" ,
52564: "scan" ,
52565: "scans" ,
52566: "scar" ,
52611: "scare" ,
52612: "scarf" ,
52613: "scars" ,
52614: "scary" ,
52615: "scat" ,
52616: "scene" ,
52621: "scent" ,
52622: "school" ,
52623: "scoff" ,
52624: "scold" ,
52625: "scoop" ,
52626: "scoot" ,
52631: "scope" ,
52632: "scorch" ,
52633: "score" ,
52634: "scorn" ,
52635: "scot" ,
52636: "scott" ,
52641: "scour" ,
52642: "scout" ,
52643: "scow" ,
52644: "scowl" ,
52645: "scram" ,
52646: "scrap" ,
52651: "scrape" ,
52652: "screw" ,
52653: "scrip" ,
52654: "scrod" ,
52655: "scrub" ,
52656: "scuba" ,
52661: "scuff" ,
52662: "scum" ,
52663: "scurry" ,
52664: "sd" ,
52665: "sdi" ,
52666: "se" ,
53111: "sea" ,
53112: "seal" ,
53113: "seals" ,
53114: "seam" ,
53115: "seams" ,
53116: "seamy" ,
53121: "sean" ,
53122: "sear" ,
53123: "sears" ,
53124: "seas" ,
53125: "season" ,
53126: "seat" ,
53131: "seats" ,
53132: "sect" ,
53133: "sects" ,
53134: "sedan" ,
53135: "seduce" ,
53136: "see" ,
53141: "seed" ,
53142: "seeds" ,
53143: "seedy" ,
53144: "seek" ,
53145: "seeks" ,
53146: "seem" ,
53151: "seems" ,
53152: "seen" ,
53153: "seep" ,
53154: "seer" ,
53155: "seers" ,
53156: "sees" ,
53161: "seethe" ,
53162: "seize" ,
53163: "self" ,
53164: "sell" ,
53165: "sells" ,
53166: "semen" ,
53211: "semi" ,
53212: "send" ,
53213: "sends" ,
53214: "sense" ,
53215: "sent" ,
53216: "sentry" ,
53221: "sep" ,
53222: "sepia" ,
53223: "sequel" ,
53224: "sequin" ,
53225: "serb" ,
53226: "serf" ,
53231: "serum" ,
53232: "serve" ,
53233: "servo" ,
53234: "set" ,
53235: "seth" ,
53236: "sets" ,
53241: "setup" ,
53242: "seven" ,
53243: "sever" ,
53244: "severe" ,
53245: "sew" ,
53246: "sewed" ,
53251: "sewer" ,
53252: "sewn" ,
53253: "sews" ,
53254: "sex" ,
53255: "sexy" ,
53256: "sf" ,
53261: "sg" ,
53262: "sgt" ,
53263: "sh" ,
53264: "shack" ,
53265: "shade" ,
53266: "shady" ,
53311: "shaft" ,
53312: "shaggy" ,
53313: "shake" ,
53314: "shaken" ,
53315: "shaky" ,
53316: "shall" ,
53321: "sham" ,
53322: "shame" ,
53323: "shank" ,
53324: "shape" ,
53325: "share" ,
53326: "shari" ,
53331: "shark" ,
53332: "sharp" ,
53333: "shave" ,
53334: "shaw" ,
53335: "shawl" ,
53336: "she" ,
53341: "she'd" ,
53342: "she's" ,
53343: "shea" ,
53344: "sheaf" ,
53345: "shear" ,
53346: "sheath" ,
53351: "shed" ,
53352: "sheds" ,
53353: "sheep" ,
53354: "sheer" ,
53355: "sheet" ,
53356: "sheik" ,
53361: "shelf" ,
53362: "shell" ,
53363: "shh" ,
53364: "shift" ,
53365: "shifty" ,
53366: "shin" ,
53411: "shine" ,
53412: "shins" ,
53413: "shiny" ,
53414: "ship" ,
53415: "ships" ,
53416: "shirk" ,
53421: "shirt" ,
53422: "shock" ,
53423: "shoe" ,
53424: "shoes" ,
53425: "shone" ,
53426: "shoo" ,
53431: "shook" ,
53432: "shoot" ,
53433: "shop" ,
53434: "shops" ,
53435: "shore" ,
53436: "short" ,
53441: "shot" ,
53442: "shots" ,
53443: "shout" ,
53444: "shove" ,
53445: "show" ,
53446: "shown" ,
53451: "shows" ,
53452: "shrank" ,
53453: "shred" ,
53454: "shrew" ,
53455: "shriek" ,
53456: "shrub" ,
53461: "shrug" ,
53462: "shuck" ,
53463: "shun" ,
53464: "shut" ,
53465: "shuts" ,
53466: "shy" ,
53511: "shyly" ,
53512: "si" ,
53513: "sic" ,
53514: "sick" ,
53515: "sicko" ,
53516: "sid" ,
53521: "side" ,
53522: "siege" ,
53523: "siesta" ,
53524: "sieve" ,
53525: "sift" ,
53526: "sifts" ,
53531: "sigh" ,
53532: "sighs" ,
53533: "sight" ,
53534: "sigma" ,
53535: "sign" ,
53536: "signal" ,
53541: "signs" ,
53542: "silk" ,
53543: "silks" ,
53544: "silky" ,
53545: "sill" ,
53546: "silly" ,
53551: "silo" ,
53552: "silt" ,
53553: "silver" ,
53554: "simms" ,
53555: "simon" ,
53556: "simons" ,
53561: "sims" ,
53562: "sin" ,
53563: "since" ,
53564: "sinew" ,
53565: "sing" ,
53566: "sings" ,
53611: "sink" ,
53612: "sinks" ,
53613: "sins" ,
53614: "sinus" ,
53615: "sip" ,
53616: "sips" ,
53621: "sir" ,
53622: "sire" ,
53623: "siren" ,
53624: "sis" ,
53625: "sit" ,
53626: "site" ,
53631: "sites" ,
53632: "sits" ,
53633: "six" ,
53634: "sixgun" ,
53635: "sixth" ,
53636: "sixty" ,
53641: "size" ,
53642: "sizes" ,
53643: "sj" ,
53644: "sk" ,
53645: "skate" ,
53646: "skew" ,
53651: "ski" ,
53652: "skid" ,
53653: "skids" ,
53654: "skies" ,
53655: "skill" ,
53656: "skim" ,
53661: "skimpy" ,
53662: "skims" ,
53663: "skin" ,
53664: "skip" ,
53665: "skips" ,
53666: "skirt" ,
54111: "skis" ,
54112: "skit" ,
54113: "skits" ,
54114: "skulk" ,
54115: "skull" ,
54116: "skunk" ,
54121: "sky" ,
54122: "sl" ,
54123: "slab" ,
54124: "slabs" ,
54125: "slack" ,
54126: "slain" ,
54131: "slam" ,
54132: "slams" ,
54133: "slang" ,
54134: "slant" ,
54135: "slap" ,
54136: "slaps" ,
54141: "slash" ,
54142: "slate" ,
54143: "slater" ,
54144: "slave" ,
54145: "slaw" ,
54146: "slay" ,
54151: "sled" ,
54152: "sleds" ,
54153: "sleek" ,
54154: "sleep" ,
54155: "sleet" ,
54156: "slept" ,
54161: "slew" ,
54162: "slice" ,
54163: "slick" ,
54164: "slid" ,
54165: "slide" ,
54166: "slim" ,
54211: "slime" ,
54212: "slimy" ,
54213: "sling" ,
54214: "slip" ,
54215: "slips" ,
54216: "slit" ,
54221: "sliver" ,
54222: "slob" ,
54223: "slog" ,
54224: "sloop" ,
54225: "slop" ,
54226: "slope" ,
54231: "sloppy" ,
54232: "slops" ,
54233: "slosh" ,
54234: "slot" ,
54235: "sloth" ,
54236: "slots" ,
54241: "slow" ,
54242: "slows" ,
54243: "slug" ,
54244: "slugs" ,
54245: "slum" ,
54246: "slump" ,
54251: "slums" ,
54252: "slung" ,
54253: "slur" ,
54254: "slurp" ,
54255: "slurs" ,
54256: "sly" ,
54261: "slyly" ,
54262: "sm" ,
54263: "smack" ,
54264: "small" ,
54265: "smart" ,
54266: "smash" ,
54311: "smear" ,
54312: "smell" ,
54313: "smile" ,
54314: "smirk" ,
54315: "smith" ,
54316: "smock" ,
54321: "smog" ,
54322: "smoke" ,
54323: "smoky" ,
54324: "smooth" ,
54325: "smug" ,
54326: "smut" ,
54331: "sn" ,
54332: "snack" ,
54333: "snafu" ,
54334: "snag" ,
54335: "snail" ,
54336: "snake" ,
54341: "snap" ,
54342: "snaps" ,
54343: "snare" ,
54344: "snarl" ,
54345: "snatch" ,
54346: "sneak" ,
54351: "sneer" ,
54352: "sniff" ,
54353: "snip" ,
54354: "snipe" ,
54355: "snob" ,
54356: "snobs" ,
54361: "snoop" ,
54362: "snore" ,
54363: "snort" ,
54364: "snot" ,
54365: "snout" ,
54366: "snow" ,
54411: "snows" ,
54412: "snowy" ,
54413: "snub" ,
54414: "snubs" ,
54415: "snuff" ,
54416: "snug" ,
54421: "so" ,
54422: "soak" ,
54423: "soaks" ,
54424: "soap" ,
54425: "soapy" ,
54426: "soar" ,
54431: "soars" ,
54432: "sob" ,
54433: "sober" ,
54434: "sobs" ,
54435: "social" ,
54436: "sock" ,
54441: "socks" ,
54442: "sod" ,
54443: "soda" ,
54444: "sofa" ,
54445: "soft" ,
54446: "soften" ,
54451: "soggy" ,
54452: "soil" ,
54453: "soils" ,
54454: "sol" ,
54455: "solar" ,
54456: "sold" ,
54461: "sole" ,
54462: "solemn" ,
54463: "solid" ,
54464: "solo" ,
54465: "solve" ,
54466: "somber" ,
54511: "some" ,
54512: "son" ,
54513: "sonar" ,
54514: "song" ,
54515: "songs" ,
54516: "sonny" ,
54521: "sons" ,
54522: "sony" ,
54523: "soon" ,
54524: "soot" ,
54525: "sop" ,
54526: "sore" ,
54531: "sorry" ,
54532: "sort" ,
54533: "sorts" ,
54534: "sos" ,
54535: "sot" ,
54536: "soul" ,
54541: "sound" ,
54542: "soup" ,
54543: "soupy" ,
54544: "sour" ,
54545: "source" ,
54546: "south" ,
54551: "sow" ,
54552: "sown" ,
54553: "sows" ,
54554: "sox" ,
54555: "soy" ,
54556: "soyuz" ,
54561: "sp" ,
54562: "spa" ,
54563: "space" ,
54564: "spade" ,
54565: "spain" ,
54566: "spam" ,
54611: "span" ,
54612: "spank" ,
54613: "spans" ,
54614: "spar" ,
54615: "spare" ,
54616: "spark" ,
54621: "sparks" ,
54622: "spas" ,
54623: "spasm" ,
54624: "spat" ,
54625: "spawn" ,
54626: "spay" ,
54631: "speak" ,
54632: "spear" ,
54633: "spec" ,
54634: "speck" ,
54635: "sped" ,
54636: "speed" ,
54641: "spell" ,
54642: "spend" ,
54643: "spent" ,
54644: "sperm" ,
54645: "spew" ,
54646: "sphinx" ,
54651: "spice" ,
54652: "spicy" ,
54653: "spies" ,
54654: "spike" ,
54655: "spiky" ,
54656: "spill" ,
54661: "spin" ,
54662: "spine" ,
54663: "spins" ,
54664: "spiny" ,
54665: "spire" ,
54666: "spit" ,
55111: "spite" ,
55112: "spits" ,
55113: "spitz" ,
55114: "splat" ,
55115: "split" ,
55116: "spock" ,
55121: "spoil" ,
55122: "spoke" ,
55123: "sponge" ,
55124: "spoof" ,
55125: "spook" ,
55126: "spooky" ,
55131: "spool" ,
55132: "spoon" ,
55133: "spore" ,
55134: "sport" ,
55135: "spot" ,
55136: "spots" ,
55141: "spout" ,
55142: "sprain" ,
55143: "spray" ,
55144: "spree" ,
55145: "sprig" ,
55146: "spruce" ,
55151: "spry" ,
55152: "spud" ,
55153: "spun" ,
55154: "spunk" ,
55155: "spur" ,
55156: "spurn" ,
55161: "spurs" ,
55162: "spurt" ,
55163: "spy" ,
55164: "sq" ,
55165: "squad" ,
55166: "squat" ,
55211: "squid" ,
55212: "squint" ,
55213: "squirm" ,
55214: "sr" ,
55215: "ss" ,
55216: "sse" ,
55221: "sss" ,
55222: "ssss" ,
55223: "sst" ,
55224: "ssw" ,
55225: "st" ,
55226: "stab" ,
55231: "stabs" ,
55232: "stack" ,
55233: "stacy" ,
55234: "staff" ,
55235: "stag" ,
55236: "stage" ,
55241: "stain" ,
55242: "stair" ,
55243: "stake" ,
55244: "stale" ,
55245: "stalk" ,
55246: "stall" ,
55251: "stamp" ,
55252: "stan" ,
55253: "stance" ,
55254: "stand" ,
55255: "stank" ,
55256: "star" ,
55261: "stare" ,
55262: "stark" ,
55263: "starr" ,
55264: "stars" ,
55265: "start" ,
55266: "stash" ,
55311: "stat" ,
55312: "state" ,
55313: "stats" ,
55314: "statue" ,
55315: "stay" ,
55316: "stays" ,
55321: "steady" ,
55322: "steak" ,
55323: "steal" ,
55324: "steam" ,
55325: "steed" ,
55326: "steel" ,
55331: "steep" ,
55332: "steer" ,
55333: "stein" ,
55334: "stella" ,
55335: "stem" ,
55336: "stems" ,
55341: "step" ,
55342: "steps" ,
55343: "stern" ,
55344: "steve" ,
55345: "stew" ,
55346: "stick" ,
55351: "stiff" ,
55352: "still" ,
55353: "sting" ,
55354: "stingy" ,
55355: "stink" ,
55356: "stint" ,
55361: "stir" ,
55362: "stirs" ,
55363: "stock" ,
55364: "stoke" ,
55365: "stole" ,
55366: "stomp" ,
55411: "stone" ,
55412: "stony" ,
55413: "stood" ,
55414: "stool" ,
55415: "stoop" ,
55416: "stop" ,
55421: "stops" ,
55422: "store" ,
55423: "stork" ,
55424: "storm" ,
55425: "stormy" ,
55426: "story" ,
55431: "stout" ,
55432: "stove" ,
55433: "stow" ,
55434: "strafe" ,
55435: "strap" ,
55436: "straw" ,
55441: "stray" ,
55442: "strep" ,
55443: "strike" ,
55444: "strip" ,
55445: "stroll" ,
55446: "strum" ,
55451: "strut" ,
55452: "stu" ,
55453: "stuart" ,
55454: "stub" ,
55455: "stuck" ,
55456: "stud" ,
55461: "study" ,
55462: "stuff" ,
55463: "stuffy" ,
55464: "stump" ,
55465: "stun" ,
55466: "stung" ,
55511: "stunk" ,
55512: "stuns" ,
55513: "stunt" ,
55514: "sty" ,
55515: "style" ,
55516: "styx" ,
55521: "su" ,
55522: "suave" ,
55523: "sub" ,
55524: "subs" ,
55525: "subtle" ,
55526: "such" ,
55531: "suck" ,
55532: "sucks" ,
55533: "suds" ,
55534: "sue" ,
55535: "sued" ,
55536: "suede" ,
55541: "sues" ,
55542: "suey" ,
55543: "sugar" ,
55544: "suit" ,
55545: "suite" ,
55546: "suits" ,
55551: "sulk" ,
55552: "sulks" ,
55553: "sulky" ,
55554: "sultry" ,
55555: "sum" ,
55556: "sumac" ,
55561: "summon" ,
55562: "sumo" ,
55563: "sums" ,
55564: "sun" ,
55565: "sung" ,
55566: "sunk" ,
55611: "sunny" ,
55612: "suns" ,
55613: "sunset" ,
55614: "sunup" ,
55615: "sup" ,
55616: "super" ,
55621: "supt" ,
55622: "sure" ,
55623: "surf" ,
55624: "surge" ,
55625: "susan" ,
55626: "sushi" ,
55631: "susie" ,
55632: "sutton" ,
55633: "suzy" ,
55634: "sv" ,
55635: "sven" ,
55636: "sw" ,
55641: "swab" ,
55642: "swag" ,
55643: "swam" ,
55644: "swami" ,
55645: "swamp" ,
55646: "swampy" ,
55651: "swan" ,
55652: "swank" ,
55653: "swans" ,
55654: "swap" ,
55655: "swarm" ,
55656: "swat" ,
55661: "sway" ,
55662: "sways" ,
55663: "swear" ,
55664: "sweat" ,
55665: "sweaty" ,
55666: "swede" ,
56111: "sweep" ,
56112: "sweet" ,
56113: "swell" ,
56114: "swept" ,
56115: "swift" ,
56116: "swig" ,
56121: "swim" ,
56122: "swims" ,
56123: "swine" ,
56124: "swing" ,
56125: "swipe" ,
56126: "swirl" ,
56131: "swish" ,
56132: "swiss" ,
56133: "swoop" ,
56134: "sword" ,
56135: "swore" ,
56136: "sworn" ,
56141: "swum" ,
56142: "swung" ,
56143: "sx" ,
56144: "sy" ,
56145: "sybil" ,
56146: "symbol" ,
56151: "syrup" ,
56152: "sz" ,
56153: "t" ,
56154: "t&a" ,
56155: "t's" ,
56156: "ta" ,
56161: "tab" ,
56162: "table" ,
56163: "tablet" ,
56164: "taboo" ,
56165: "tabs" ,
56166: "tabu" ,
56211: "tack" ,
56212: "tacky" ,
56213: "taco" ,
56214: "tact" ,
56215: "tactic" ,
56216: "tad" ,
56221: "taffy" ,
56222: "taft" ,
56223: "tag" ,
56224: "tags" ,
56225: "tail" ,
56226: "tails" ,
56231: "taint" ,
56232: "take" ,
56233: "taken" ,
56234: "takes" ,
56235: "tale" ,
56236: "tales" ,
56241: "talk" ,
56242: "talks" ,
56243: "tall" ,
56244: "tally" ,
56245: "talon" ,
56246: "tame" ,
56251: "tamer" ,
56252: "tamper" ,
56253: "tan" ,
56254: "tang" ,
56255: "tango" ,
56256: "tangy" ,
56261: "tank" ,
56262: "tanks" ,
56263: "tans" ,
56264: "tanya" ,
56265: "tao" ,
56266: "tap" ,
56311: "tape" ,
56312: "taped" ,
56313: "taper" ,
56314: "tapes" ,
56315: "taps" ,
56316: "tar" ,
56321: "tardy" ,
56322: "target" ,
56323: "tarp" ,
56324: "tarry" ,
56325: "tart" ,
56326: "tarts" ,
56331: "task" ,
56332: "taste" ,
56333: "tasty" ,
56334: "tate" ,
56335: "tater" ,
56336: "tattle" ,
56341: "tau" ,
56342: "taunt" ,
56343: "taut" ,
56344: "tavern" ,
56345: "tax" ,
56346: "taxi" ,
56351: "tb" ,
56352: "tba" ,
56353: "tbsp" ,
56354: "tc" ,
56355: "td" ,
56356: "te" ,
56361: "tea" ,
56362: "teach" ,
56363: "teacup" ,
56364: "teak" ,
56365: "team" ,
56366: "teams" ,
56411: "tear" ,
56412: "tease" ,
56413: "tech" ,
56414: "ted" ,
56415: "teddy" ,
56416: "tee" ,
56421: "teen" ,
56422: "teens" ,
56423: "tees" ,
56424: "teeth" ,
56425: "tell" ,
56426: "tells" ,
56431: "temp" ,
56432: "temper" ,
56433: "temple" ,
56434: "tempo" ,
56435: "temps" ,
56436: "tempt" ,
56441: "ten" ,
56442: "tend" ,
56443: "tends" ,
56444: "tenor" ,
56445: "tens" ,
56446: "tense" ,
56451: "tent" ,
56452: "tenth" ,
56453: "tents" ,
56454: "term" ,
56455: "terms" ,
56456: "terra" ,
56461: "terry" ,
56462: "terse" ,
56463: "test" ,
56464: "tests" ,
56465: "testy" ,
56466: "tex" ,
56511: "texan" ,
56512: "texas" ,
56513: "text" ,
56514: "tf" ,
56515: "tg" ,
56516: "tgif" ,
56521: "th" ,
56522: "thai" ,
56523: "than" ,
56524: "thank" ,
56525: "that" ,
56526: "thaw" ,
56531: "thaws" ,
56532: "the" ,
56533: "theft" ,
56534: "their" ,
56535: "them" ,
56536: "theme" ,
56541: "then" ,
56542: "there" ,
56543: "these" ,
56544: "theta" ,
56545: "they" ,
56546: "thick" ,
56551: "thief" ,
56552: "thigh" ,
56553: "thin" ,
56554: "thing" ,
56555: "think" ,
56556: "thins" ,
56561: "third" ,
56562: "this" ,
56563: "tho" ,
56564: "thong" ,
56565: "thor" ,
56566: "thorn" ,
56611: "thorny" ,
56612: "those" ,
56613: "thread" ,
56614: "three" ,
56615: "threw" ,
56616: "throb" ,
56621: "throw" ,
56622: "throws" ,
56623: "thru" ,
56624: "thu" ,
56625: "thud" ,
56626: "thug" ,
56631: "thumb" ,
56632: "thump" ,
56633: "thur" ,
56634: "thus" ,
56635: "thyme" ,
56636: "ti" ,
56641: "tiara" ,
56642: "tibet" ,
56643: "tic" ,
56644: "tick" ,
56645: "ticket" ,
56646: "ticks" ,
56651: "tics" ,
56652: "tidal" ,
56653: "tidbit" ,
56654: "tide" ,
56655: "tidy" ,
56656: "tie" ,
56661: "tied" ,
56662: "tier" ,
56663: "ties" ,
56664: "tiger" ,
56665: "tight" ,
56666: "tile" ,
61111: "tiled" ,
61112: "tiles" ,
61113: "till" ,
61114: "tilt" ,
61115: "tim" ,
61116: "time" ,
61121: "times" ,
61122: "timex" ,
61123: "timid" ,
61124: "tin" ,
61125: "tina" ,
61126: "tinge" ,
61131: "tinny" ,
61132: "tint" ,
61133: "tiny" ,
61134: "tip" ,
61135: "tipoff" ,
61136: "tips" ,
61141: "tipsy" ,
61142: "tire" ,
61143: "tired" ,
61144: "tires" ,
61145: "title" ,
61146: "tj" ,
61151: "tk" ,
61152: "tl" ,
61153: "tlc" ,
61154: "tm" ,
61155: "tn" ,
61156: "tnt" ,
61161: "to" ,
61162: "toad" ,
61163: "toads" ,
61164: "toast" ,
61165: "toby" ,
61166: "today" ,
61211: "todd" ,
61212: "toe" ,
61213: "toes" ,
61214: "tofu" ,
61215: "toga" ,
61216: "toil" ,
61221: "toilet" ,
61222: "toils" ,
61223: "token" ,
61224: "tokyo" ,
61225: "told" ,
61226: "toll" ,
61231: "tolls" ,
61232: "tom" ,
61233: "tomb" ,
61234: "tombs" ,
61235: "tommy" ,
61236: "ton" ,
61241: "tonal" ,
61242: "tone" ,
61243: "toni" ,
61244: "tonic" ,
61245: "tons" ,
61246: "tonsil" ,
61251: "tony" ,
61252: "too" ,
61253: "took" ,
61254: "tool" ,
61255: "tools" ,
61256: "toot" ,
61261: "tooth" ,
61262: "top" ,
61263: "topaz" ,
61264: "topic" ,
61265: "topple" ,
61266: "tops" ,
61311: "topsy" ,
61312: "torah" ,
61313: "torch" ,
61314: "tore" ,
61315: "torn" ,
61316: "torso" ,
61321: "tort" ,
61322: "tory" ,
61323: "toss" ,
61324: "tot" ,
61325: "total" ,
61326: "tote" ,
61331: "totem" ,
61332: "tots" ,
61333: "touch" ,
61334: "tough" ,
61335: "tour" ,
61336: "tours" ,
61341: "tout" ,
61342: "tow" ,
61343: "towel" ,
61344: "tower" ,
61345: "town" ,
61346: "tows" ,
61351: "toxic" ,
61352: "toy" ,
61353: "toys" ,
61354: "tp" ,
61355: "tq" ,
61356: "tr" ,
61361: "trace" ,
61362: "track" ,
61363: "tract" ,
61364: "tracy" ,
61365: "trade" ,
61366: "trail" ,
61411: "train" ,
61412: "trait" ,
61413: "tramp" ,
61414: "trap" ,
61415: "traps" ,
61416: "trash" ,
61421: "tray" ,
61422: "trays" ,
61423: "tread" ,
61424: "treat" ,
61425: "treble" ,
61426: "tree" ,
61431: "trees" ,
61432: "trek" ,
61433: "trench" ,
61434: "trend" ,
61435: "trial" ,
61436: "tribe" ,
61441: "trick" ,
61442: "tricky" ,
61443: "tried" ,
61444: "tries" ,
61445: "trig" ,
61446: "trill" ,
61451: "trim" ,
61452: "trims" ,
61453: "trio" ,
61454: "trip" ,
61455: "tripe" ,
61456: "trips" ,
61461: "trite" ,
61462: "troll" ,
61463: "troop" ,
61464: "trot" ,
61465: "trots" ,
61466: "trout" ,
61511: "troy" ,
61512: "truce" ,
61513: "truck" ,
61514: "trudge" ,
61515: "trudy" ,
61516: "true" ,
61521: "truly" ,
61522: "trunk" ,
61523: "truss" ,
61524: "trust" ,
61525: "truth" ,
61526: "try" ,
61531: "ts" ,
61532: "tsar" ,
61533: "tsp" ,
61534: "tt" ,
61535: "ttt" ,
61536: "tttt" ,
61541: "tu" ,
61542: "tub" ,
61543: "tuba" ,
61544: "tube" ,
61545: "tubes" ,
61546: "tubs" ,
61551: "tuck" ,
61552: "tue" ,
61553: "tues" ,
61554: "tuft" ,
61555: "tufts" ,
61556: "tug" ,
61561: "tugs" ,
61562: "tulip" ,
61563: "tumble" ,
61564: "tuna" ,
61565: "tune" ,
61566: "tuned" ,
61611: "tunic" ,
61612: "tunnel" ,
61613: "turf" ,
61614: "turk" ,
61615: "turkey" ,
61616: "turn" ,
61621: "tush" ,
61622: "tusk" ,
61623: "tusks" ,
61624: "tut" ,
61625: "tutor" ,
61626: "tutu" ,
61631: "tuv" ,
61632: "tux" ,
61633: "tv" ,
61634: "tw" ,
61635: "twa" ,
61636: "twain" ,
61641: "tweak" ,
61642: "tweed" ,
61643: "twice" ,
61644: "twig" ,
61645: "twigs" ,
61646: "twin" ,
61651: "twine" ,
61652: "twins" ,
61653: "twirl" ,
61654: "twist" ,
61655: "twisty" ,
61656: "twit" ,
61661: "two" ,
61662: "twos" ,
61663: "tx" ,
61664: "ty" ,
61665: "tycoon" ,
61666: "tying" ,
62111: "tyke" ,
62112: "tyler" ,
62113: "type" ,
62114: "typed" ,
62115: "types" ,
62116: "typo" ,
62121: "tz" ,
62122: "u" ,
62123: "u's" ,
62124: "u-2" ,
62125: "ua" ,
62126: "ub" ,
62131: "uc" ,
62132: "ud" ,
62133: "ue" ,
62134: "uf" ,
62135: "ufo" ,
62136: "ug" ,
62141: "ugh" ,
62142: "ugly" ,
62143: "uh" ,
62144: "ui" ,
62145: "uj" ,
62146: "uk" ,
62151: "ul" ,
62152: "ulcer" ,
62153: "um" ,
62154: "umpire" ,
62155: "un" ,
62156: "uncle" ,
62161: "uncut" ,
62162: "under" ,
62163: "undo" ,
62164: "undue" ,
62165: "unfit" ,
62166: "unify" ,
62211: "union" ,
62212: "unit" ,
62213: "unite" ,
62214: "units" ,
62215: "unity" ,
62216: "unix" ,
62221: "untie" ,
62222: "until" ,
62223: "unto" ,
62224: "unwed" ,
62225: "uo" ,
62226: "up" ,
62231: "uphill" ,
62232: "uphold" ,
62233: "upi" ,
62234: "upon" ,
62235: "upper" ,
62236: "uproar" ,
62241: "ups" ,
62242: "upset" ,
62243: "uptake" ,
62244: "uq" ,
62245: "ur" ,
62246: "urban" ,
62251: "urge" ,
62252: "urged" ,
62253: "urges" ,
62254: "urine" ,
62255: "urn" ,
62256: "us" ,
62261: "usa" ,
62262: "usaf" ,
62263: "usage" ,
62264: "use" ,
62265: "used" ,
62266: "useful" ,
62311: "uses" ,
62312: "usher" ,
62313: "usia" ,
62314: "ussr" ,
62315: "usual" ,
62316: "usurp" ,
62321: "ut" ,
62322: "utah" ,
62323: "utmost" ,
62324: "utter" ,
62325: "uu" ,
62326: "uuu" ,
62331: "uuuu" ,
62332: "uv" ,
62333: "uvula" ,
62334: "uvw" ,
62335: "uw" ,
62336: "ux" ,
62341: "uy" ,
62342: "uz" ,
62343: "v" ,
62344: "v's" ,
62345: "v-8" ,
62346: "va" ,
62351: "vacuum" ,
62352: "vague" ,
62353: "vain" ,
62354: "val" ,
62355: "vale" ,
62356: "valet" ,
62361: "valid" ,
62362: "valor" ,
62363: "value" ,
62364: "valve" ,
62365: "vamp" ,
62366: "van" ,
62411: "vance" ,
62412: "vane" ,
62413: "vans" ,
62414: "vapor" ,
62415: "vary" ,
62416: "vase" ,
62421: "vases" ,
62422: "vast" ,
62423: "vat" ,
62424: "vats" ,
62425: "vault" ,
62426: "vb" ,
62431: "vc" ,
62432: "vcr" ,
62433: "vd" ,
62434: "ve" ,
62435: "veal" ,
62436: "veep" ,
62441: "veer" ,
62442: "veers" ,
62443: "veggie" ,
62444: "veil" ,
62445: "vein" ,
62446: "veins" ,
62451: "venal" ,
62452: "vend" ,
62453: "vendor" ,
62454: "vends" ,
62455: "venom" ,
62456: "vent" ,
62461: "vents" ,
62462: "venus" ,
62463: "vera" ,
62464: "verb" ,
62465: "verbs" ,
62466: "verdi" ,
62511: "verge" ,
62512: "verify" ,
62513: "vern" ,
62514: "verna" ,
62515: "verne" ,
62516: "verse" ,
62521: "verve" ,
62522: "very" ,
62523: "vessel" ,
62524: "vest" ,
62525: "vests" ,
62526: "vet" ,
62531: "veto" ,
62532: "vets" ,
62533: "vex" ,
62534: "vexed" ,
62535: "vexes" ,
62536: "vf" ,
62541: "vg" ,
62542: "vh" ,
62543: "vi" ,
62544: "via" ,
62545: "vial" ,
62546: "vibes" ,
62551: "vic" ,
62552: "vice" ,
62553: "vices" ,
62554: "vicky" ,
62555: "video" ,
62556: "vie" ,
62561: "viet" ,
62562: "view" ,
62563: "vigil" ,
62564: "vigor" ,
62565: "vii" ,
62566: "viii" ,
62611: "vile" ,
62612: "vinci" ,
62613: "vine" ,
62614: "vines" ,
62615: "vinyl" ,
62616: "viola" ,
62621: "violet" ,
62622: "vip" ,
62623: "virgil" ,
62624: "virgo" ,
62625: "virus" ,
62626: "visa" ,
62631: "vise" ,
62632: "visit" ,
62633: "visor" ,
62634: "vista" ,
62635: "vital" ,
62636: "vito" ,
62641: "viva" ,
62642: "vivian" ,
62643: "vivid" ,
62644: "vixen" ,
62645: "vj" ,
62646: "vk" ,
62651: "vl" ,
62652: "vlad" ,
62653: "vm" ,
62654: "vn" ,
62655: "vo" ,
62656: "vocal" ,
62661: "vodka" ,
62662: "vogue" ,
62663: "voice" ,
62664: "void" ,
62665: "volt" ,
62666: "volts" ,
63111: "volvo" ,
63112: "vomit" ,
63113: "vote" ,
63114: "vouch" ,
63115: "vow" ,
63116: "vowel" ,
63121: "vows" ,
63122: "vp" ,
63123: "vq" ,
63124: "vr" ,
63125: "vs" ,
63126: "vt" ,
63131: "vtol" ,
63132: "vu" ,
63133: "vulcan" ,
63134: "vv" ,
63135: "vvv" ,
63136: "vvvv" ,
63141: "vw" ,
63142: "vwx" ,
63143: "vx" ,
63144: "vy" ,
63145: "vz" ,
63146: "w" ,
63151: "w's" ,
63152: "w/o" ,
63153: "wa" ,
63154: "wacko" ,
63155: "wacky" ,
63156: "wad" ,
63161: "wade" ,
63162: "wades" ,
63163: "wafer" ,
63164: "waffle" ,
63165: "wag" ,
63166: "wage" ,
63211: "wager" ,
63212: "wages" ,
63213: "wagon" ,
63214: "wags" ,
63215: "wahoo" ,
63216: "waif" ,
63221: "wail" ,
63222: "wails" ,
63223: "waist" ,
63224: "wait" ,
63225: "wake" ,
63226: "waken" ,
63231: "waldo" ,
63232: "walk" ,
63233: "wall" ,
63234: "walls" ,
63235: "wally" ,
63236: "walrus" ,
63241: "walsh" ,
63242: "walt" ,
63243: "walton" ,
63244: "waltz" ,
63245: "wand" ,
63246: "wang" ,
63251: "want" ,
63252: "wants" ,
63253: "war" ,
63254: "ward" ,
63255: "warm" ,
63256: "warmth" ,
63261: "warn" ,
63262: "warns" ,
63263: "warp" ,
63264: "warren" ,
63265: "wars" ,
63266: "wart" ,
63311: "warts" ,
63312: "wary" ,
63313: "was" ,
63314: "wash" ,
63315: "wasp" ,
63316: "wasps" ,
63321: "waste" ,
63322: "watch" ,
63323: "water" ,
63324: "watt" ,
63325: "watts" ,
63326: "wave" ,
63331: "waved" ,
63332: "waver" ,
63333: "waves" ,
63334: "wavy" ,
63335: "wax" ,
63336: "waxy" ,
63341: "way" ,
63342: "wayne" ,
63343: "ways" ,
63344: "wb" ,
63345: "wc" ,
63346: "wd" ,
63351: "we" ,
63352: "we'd" ,
63353: "we'll" ,
63354: "we're" ,
63355: "we've" ,
63356: "weak" ,
63361: "wealth" ,
63362: "wear" ,
63363: "wears" ,
63364: "weary" ,
63365: "weave" ,
63366: "web" ,
63411: "webb" ,
63412: "webs" ,
63413: "wed" ,
63414: "wedge" ,
63415: "weds" ,
63416: "wee" ,
63421: "weed" ,
63422: "weedy" ,
63423: "week" ,
63424: "weeks" ,
63425: "weep" ,
63426: "weeps" ,
63431: "weigh" ,
63432: "weird" ,
63433: "welch" ,
63434: "weld" ,
63435: "well" ,
63436: "wells" ,
63441: "welsh" ,
63442: "wendy" ,
63443: "went" ,
63444: "wept" ,
63445: "were" ,
63446: "wes" ,
63451: "west" ,
63452: "wet" ,
63453: "wets" ,
63454: "wf" ,
63455: "wg" ,
63456: "wh" ,
63461: "whale" ,
63462: "wham" ,
63463: "wharf" ,
63464: "what" ,
63465: "wheat" ,
63466: "whee" ,
63511: "wheel" ,
63512: "when" ,
63513: "where" ,
63514: "whew" ,
63515: "which" ,
63516: "whiff" ,
63521: "while" ,
63522: "whim" ,
63523: "whine" ,
63524: "whinny" ,
63525: "whip" ,
63526: "whips" ,
63531: "whir" ,
63532: "whirl" ,
63533: "white" ,
63534: "whiz" ,
63535: "who" ,
63536: "who'd" ,
63541: "whoa" ,
63542: "whole" ,
63543: "whom" ,
63544: "whoop" ,
63545: "whoosh" ,
63546: "whose" ,
63551: "why" ,
63552: "wi" ,
63553: "wick" ,
63554: "wide" ,
63555: "widen" ,
63556: "wider" ,
63561: "widow" ,
63562: "width" ,
63563: "wield" ,
63564: "wife" ,
63565: "wig" ,
63566: "wigs" ,
63611: "wild" ,
63612: "wiley" ,
63613: "wilkes" ,
63614: "will" ,
63615: "wills" ,
63616: "willy" ,
63621: "wilma" ,
63622: "wilt" ,
63623: "wily" ,
63624: "wimp" ,
63625: "wimpy" ,
63626: "win" ,
63631: "wince" ,
63632: "winch" ,
63633: "wind" ,
63634: "windy" ,
63635: "wine" ,
63636: "wines" ,
63641: "wing" ,
63642: "wings" ,
63643: "wink" ,
63644: "winks" ,
63645: "winnie" ,
63646: "wino" ,
63651: "wins" ,
63652: "winter" ,
63653: "wipe" ,
63654: "wire" ,
63655: "wires" ,
63656: "wiry" ,
63661: "wise" ,
63662: "wiser" ,
63663: "wish" ,
63664: "wisp" ,
63665: "wispy" ,
63666: "wit" ,
64111: "witch" ,
64112: "with" ,
64113: "wits" ,
64114: "witty" ,
64115: "wj" ,
64116: "wk" ,
64121: "wl" ,
64122: "wm" ,
64123: "wn" ,
64124: "wnw" ,
64125: "wo" ,
64126: "woe" ,
64131: "woes" ,
64132: "wok" ,
64133: "woke" ,
64134: "wolf" ,
64135: "wolff" ,
64136: "woman" ,
64141: "womb" ,
64142: "women" ,
64143: "won" ,
64144: "won't" ,
64145: "wonder" ,
64146: "wong" ,
64151: "woo" ,
64152: "wood" ,
64153: "woods" ,
64154: "woody" ,
64155: "woof" ,
64156: "wool" ,
64161: "woos" ,
64162: "word" ,
64163: "words" ,
64164: "wordy" ,
64165: "wore" ,
64166: "work" ,
64211: "world" ,
64212: "worm" ,
64213: "worms" ,
64214: "wormy" ,
64215: "worn" ,
64216: "worry" ,
64221: "worse" ,
64222: "worst" ,
64223: "worth" ,
64224: "would" ,
64225: "wound" ,
64226: "wove" ,
64231: "woven" ,
64232: "wow" ,
64233: "wp" ,
64234: "wq" ,
64235: "wr" ,
64236: "wrap" ,
64241: "wrath" ,
64242: "wreak" ,
64243: "wreck" ,
64244: "wren" ,
64245: "wring" ,
64246: "wrist" ,
64251: "write" ,
64252: "writhe" ,
64253: "wrong" ,
64254: "wrote" ,
64255: "wry" ,
64256: "ws" ,
64261: "wsw" ,
64262: "wt" ,
64263: "wu" ,
64264: "wv" ,
64265: "ww" ,
64266: "wwi" ,
64311: "wwii" ,
64312: "www" ,
64313: "wwww" ,
64314: "wx" ,
64315: "wxy" ,
64316: "wy" ,
64321: "wyatt" ,
64322: "wylie" ,
64323: "wyman" ,
64324: "wynn" ,
64325: "wz" ,
64326: "x" ,
64331: "x's" ,
64332: "xa" ,
64333: "xb" ,
64334: "xc" ,
64335: "xd" ,
64336: "xe" ,
64341: "xerox" ,
64342: "xf" ,
64343: "xg" ,
64344: "xh" ,
64345: "xi" ,
64346: "xii" ,
64351: "xiii" ,
64352: "xiv" ,
64353: "xj" ,
64354: "xk" ,
64355: "xl" ,
64356: "xm" ,
64361: "xmas" ,
64362: "xn" ,
64363: "xo" ,
64364: "xp" ,
64365: "xq" ,
64366: "xr" ,
64411: "xray" ,
64412: "xrays" ,
64413: "xs" ,
64414: "xt" ,
64415: "xu" ,
64416: "xv" ,
64421: "xvi" ,
64422: "xvii" ,
64423: "xw" ,
64424: "xx" ,
64425: "xxx" ,
64426: "xxxx" ,
64431: "xy" ,
64432: "xyz" ,
64433: "xz" ,
64434: "y" ,
64435: "y'all" ,
64436: "y's" ,
64441: "ya" ,
64442: "yacht" ,
64443: "yahoo" ,
64444: "yak" ,
64445: "yale" ,
64446: "yam" ,
64451: "yamaha" ,
64452: "yams" ,
64453: "yang" ,
64454: "yank" ,
64455: "yanks" ,
64456: "yap" ,
64461: "yard" ,
64462: "yards" ,
64463: "yarn" ,
64464: "yawn" ,
64465: "yawns" ,
64466: "yb" ,
64511: "yc" ,
64512: "yd" ,
64513: "ye" ,
64514: "yea" ,
64515: "yeah" ,
64516: "year" ,
64521: "yearn" ,
64522: "yeast" ,
64523: "yeats" ,
64524: "yell" ,
64525: "yellow" ,
64526: "yelp" ,
64531: "yen" ,
64532: "yep" ,
64533: "yes" ,
64534: "yet" ,
64535: "yew" ,
64536: "yews" ,
64541: "yf" ,
64542: "yg" ,
64543: "yh" ,
64544: "yi" ,
64545: "yield" ,
64546: "yin" ,
64551: "yip" ,
64552: "yips" ,
64553: "yj" ,
64554: "yk" ,
64555: "yl" ,
64556: "ym" ,
64561: "yn" ,
64562: "yo" ,
64563: "yodel" ,
64564: "yoga" ,
64565: "yogi" ,
64566: "yoke" ,
64611: "yokel" ,
64612: "yolk" ,
64613: "yore" ,
64614: "york" ,
64615: "you" ,
64616: "you'd" ,
64621: "young" ,
64622: "your" ,
64623: "yours" ,
64624: "youth" ,
64625: "yoyo" ,
64626: "yp" ,
64631: "yq" ,
64632: "yr" ,
64633: "yrs" ,
64634: "ys" ,
64635: "yt" ,
64636: "ytd" ,
64641: "yu" ,
64642: "yucca" ,
64643: "yuck" ,
64644: "yukon" ,
64645: "yule" ,
64646: "yv" ,
64651: "yw" ,
64652: "yx" ,
64653: "yy" ,
64654: "yyy" ,
64655: "yyyy" ,
64656: "yz" ,
64661: "z" ,
64662: "z's" ,
64663: "za" ,
64664: "zag" ,
64665: "zap" ,
64666: "zaps" ,
65111: "zb" ,
65112: "zc" ,
65113: "zd" ,
65114: "ze" ,
65115: "zeal" ,
65116: "zealot" ,
65121: "zebra" ,
65122: "zeke" ,
65123: "zen" ,
65124: "zero" ,
65125: "zest" ,
65126: "zesty" ,
65131: "zeta" ,
65132: "zf" ,
65133: "zg" ,
65134: "zh" ,
65135: "zi" ,
65136: "zig" ,
65141: "ziggy" ,
65142: "zigzag" ,
65143: "zilch" ,
65144: "zinc" ,
65145: "zing" ,
65146: "zion" ,
65151: "zip" ,
65152: "zips" ,
65153: "ziti" ,
65154: "zj" ,
65155: "zk" ,
65156: "zl" ,
65161: "zm" ,
65162: "zn" ,
65163: "zo" ,
65164: "zoe" ,
65165: "zone" ,
65166: "zoned" ,
65211: "zoo" ,
65212: "zoom" ,
65213: "zooms" ,
65214: "zoos" ,
65215: "zowie" ,
65216: "zp" ,
65221: "zq" ,
65222: "zr" ,
65223: "zs" ,
65224: "zt" ,
65225: "zu" ,
65226: "zulu" ,
65231: "zv" ,
65232: "zw" ,
65233: "zx" ,
65234: "zy" ,
65235: "zz" ,
65236: "zzz" ,
65241: "zzzz" ,
65242: "!" ,
65243: "!!" ,
65244: """""" ,
65245: "#" ,
65246: "##" ,
65251: "$" ,
65252: "$$" ,
65253: "%" ,
65254: "%%" ,
65255: "&" ,
65256: "(" ,
65261: "()" ,
65262: "(c)" ,
65263: "(r)" ,
65264: "(tm)" ,
65265: ")" ,
65266: "*" ,
65311: "**" ,
65312: "+" ,
65313: "-" ,
65314: "0" ,
65315: "007" ,
65316: "1" ,
65321: "1%" ,
65322: "1/2" ,
65323: "1/3" ,
65324: "1/4" ,
65325: "1/8" ,
65326: "10" ,
65331: "10%" ,
65332: "100" ,
65333: "100%" ,
65334: "1000" ,
65335: "100th" ,
65336: "101" ,
65341: "101st" ,
65342: "10:00" ,
65343: "10:30" ,
65344: "10th" ,
65345: "11" ,
65346: "111" ,
65351: "1111" ,
65352: "11:00" ,
65353: "11:30" ,
65354: "11th" ,
65355: "12" ,
65356: "123" ,
65361: "1234" ,
65362: "12:00" ,
65363: "12:30" ,
65364: "12th" ,
65365: "13" ,
65366: "13th" ,
65411: "14" ,
65412: "1492" ,
65413: "14th" ,
65414: "15" ,
65415: "15%" ,
65416: "1500" ,
65421: "15th" ,
65422: "16" ,
65423: "1600" ,
65424: "16th" ,
65425: "17" ,
65426: "1700" ,
65431: "1776" ,
65432: "17th" ,
65433: "18" ,
65434: "1800" ,
65435: "18th" ,
65436: "19" ,
65441: "1900" ,
65442: "1910" ,
65443: "1920" ,
65444: "1925" ,
65445: "1930" ,
65446: "1935" ,
65451: "1940" ,
65452: "1945" ,
65453: "1950" ,
65454: "1955" ,
65455: "1960" ,
65456: "1965" ,
65461: "1970" ,
65462: "1975" ,
65463: "1980" ,
65464: "1985" ,
65465: "1990" ,
65466: "1991" ,
65511: "1992" ,
65512: "1993" ,
65513: "1994" ,
65514: "1995" ,
65515: "1996" ,
65516: "1997" ,
65521: "19th" ,
65522: "1:00" ,
65523: "1:30" ,
65524: "1st" ,
65525: "2" ,
65526: "2%" ,
65531: "2/3" ,
65532: "20" ,
65533: "20%" ,
65534: "200" ,
65535: "2000" ,
65536: "2001" ,
65541: "2020" ,
65542: "20th" ,
65543: "21" ,
65544: "21st" ,
65545: "22" ,
65546: "222" ,
65551: "2222" ,
65552: "22nd" ,
65553: "23" ,
65554: "234" ,
65555: "2345" ,
65556: "23rd" ,
65561: "24" ,
65562: "2468" ,
65563: "24th" ,
65564: "25" ,
65565: "25%" ,
65566: "25th" ,
65611: "26" ,
65612: "26th" ,
65613: "27" ,
65614: "27th" ,
65615: "28" ,
65616: "28th" ,
65621: "29" ,
65622: "29th" ,
65623: "2:00" ,
65624: "2:30" ,
65625: "2nd" ,
65626: "3" ,
65631: "3%" ,
65632: "3/4" ,
65633: "3/8" ,
65634: "30" ,
65635: "30%" ,
65636: "300" ,
65641: "3000" ,
65642: "30th" ,
65643: "31" ,
65644: "31st" ,
65645: "32" ,
65646: "32nd" ,
65651: "33" ,
65652: "333" ,
65653: "3333" ,
65654: "33rd" ,
65655: "34" ,
65656: "345" ,
65661: "3456" ,
65662: "34th" ,
65663: "35" ,
65664: "35%" ,
65665: "35th" ,
65666: "36" ,
66111: "36th" ,
66112: "37" ,
66113: "37th" ,
66114: "38" ,
66115: "38th" ,
66116: "39" ,
66121: "39th" ,
66122: "3:00" ,
66123: "3:30" ,
66124: "3rd" ,
66125: "4" ,
66126: "4%" ,
66131: "40" ,
66132: "40%" ,
66133: "400" ,
66134: "4000" ,
66135: "40th" ,
66136: "41" ,
66141: "41st" ,
66142: "42" ,
66143: "42nd" ,
66144: "43" ,
66145: "4321" ,
66146: "43rd" ,
66151: "44" ,
66152: "444" ,
66153: "4444" ,
66154: "44th" ,
66155: "45" ,
66156: "45%" ,
66161: "456" ,
66162: "4567" ,
66163: "45th" ,
66164: "46" ,
66165: "46th" ,
66166: "47" ,
66211: "47th" ,
66212: "48" ,
66213: "48th" ,
66214: "49" ,
66215: "49th" ,
66216: "4:00" ,
66221: "4:30" ,
66222: "4th" ,
66223: "5" ,
66224: "5%" ,
66225: "5/8" ,
66226: "50" ,
66231: "50%" ,
66232: "500" ,
66233: "5000" ,
66234: "50th" ,
66235: "51" ,
66236: "51st" ,
66241: "52" ,
66242: "52nd" ,
66243: "53" ,
66244: "53rd" ,
66245: "54" ,
66246: "54th" ,
66251: "55" ,
66252: "55%" ,
66253: "555" ,
66254: "5555" ,
66255: "55th" ,
66256: "56" ,
66261: "567" ,
66262: "5678" ,
66263: "56th" ,
66264: "57" ,
66265: "57th" ,
66266: "58" ,
66311: "58th" ,
66312: "59" ,
66313: "59th" ,
66314: "5:00" ,
66315: "5:30" ,
66316: "5th" ,
66321: "6" ,
66322: "6%" ,
66323: "60" ,
66324: "60%" ,
66325: "600" ,
66326: "6000" ,
66331: "60th" ,
66332: "61" ,
66333: "61st" ,
66334: "62" ,
66335: "62nd" ,
66336: "63" ,
66341: "63rd" ,
66342: "64" ,
66343: "65" ,
66344: "65%" ,
66345: "65th" ,
66346: "66" ,
66351: "666" ,
66352: "6666" ,
66353: "66th" ,
66354: "67" ,
66355: "678" ,
66356: "6789" ,
66361: "67th" ,
66362: "68" ,
66363: "68th" ,
66364: "69" ,
66365: "69th" ,
66366: "6:00" ,
66411: "6:30" ,
66412: "6th" ,
66413: "7" ,
66414: "7%" ,
66415: "7/8" ,
66416: "70" ,
66421: "70%" ,
66422: "700" ,
66423: "7000" ,
66424: "70th" ,
66425: "71" ,
66426: "71st" ,
66431: "72" ,
66432: "72nd" ,
66433: "73" ,
66434: "73rd" ,
66435: "74" ,
66436: "74th" ,
66441: "75" ,
66442: "75%" ,
66443: "75th" ,
66444: "76" ,
66445: "76th" ,
66446: "77" ,
66451: "777" ,
66452: "7777" ,
66453: "77th" ,
66454: "78" ,
66455: "789" ,
66456: "78th" ,
66461: "79" ,
66462: "79th" ,
66463: "7:00" ,
66464: "7:30" ,
66465: "7th" ,
66466: "8" ,
66511: "8%" ,
66512: "80" ,
66513: "80%" ,
66514: "800" ,
66515: "8000" ,
66516: "80th" ,
66521: "81" ,
66522: "81st" ,
66523: "82" ,
66524: "82nd" ,
66525: "83" ,
66526: "83rd" ,
66531: "84" ,
66532: "84th" ,
66533: "85" ,
66534: "85%" ,
66535: "85th" ,
66536: "86" ,
66541: "86th" ,
66542: "87" ,
66543: "87th" ,
66544: "88" ,
66545: "888" ,
66546: "8888" ,
66551: "88th" ,
66552: "89" ,
66553: "89th" ,
66554: "8:00" ,
66555: "8:30" ,
66556: "8th" ,
66561: "9" ,
66562: "9%" ,
66563: "9-5" ,
66564: "90" ,
66565: "90%" ,
66566: "900" ,
66611: "9000" ,
66612: "90th" ,
66613: "91" ,
66614: "91st" ,
66615: "92" ,
66616: "92nd" ,
66621: "93" ,
66622: "93rd" ,
66623: "94" ,
66624: "94th" ,
66625: "95" ,
66626: "95%" ,
66631: "95th" ,
66632: "96" ,
66633: "96th" ,
66634: "97" ,
66635: "97th" ,
66636: "98" ,
66641: "98%" ,
66642: "98.6" ,
66643: "9876" ,
66644: "98th" ,
66645: "99" ,
66646: "99%" ,
66651: "999" ,
66652: "9999" ,
66653: "99th" ,
66654: "9:00" ,
66655: "9:30" ,
66656: "9th" ,
66661: ":" ,
66662: ";" ,
66663: "=" ,
66664: "?" ,
66665: "??" ,
66666: "@"}
|
jthomp888999/Diceware
|
dicts/dice_dict2.py
|
Python
|
mit
| 133,998
|
[
"Amber",
"BLAST",
"Brian",
"Elk",
"GULP",
"Galaxy",
"Jaguar",
"MOE",
"MOOSE",
"SIESTA",
"VisIt"
] |
32dd0ab3d63fe2238d9912fbcf517fb0a1dcdcbd6c72a88c0cb1fbf44037f273
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
"""Helper for defining custom MooseDocs logging."""
import logging
import traceback
import multiprocessing
import collections
import mooseutils
import moosesqa
import MooseDocs
class MooseDocsFormatter(logging.Formatter):
    """
    A formatter that is aware of the class hierarchy of the MooseDocs library.
    Call the init_logging function to initialize the use of this custom formatter.
    """
    # Maps logging level names to mooseutils.colorText color names.
    COLOR = {'DEBUG': 'CYAN',
             'INFO': 'RESET',
             'WARNING': 'LIGHT_YELLOW',
             'ERROR': 'LIGHT_RED',
             'CRITICAL': 'MAGENTA'}

    def format(self, record):
        """Format the supplied logging record and count the occurrences."""
        base = logging.Formatter.format(self, record)
        proc_name = multiprocessing.current_process().name
        text = '{} ({}): {}'.format(record.name, proc_name, base)
        return mooseutils.colorText(text, self.COLOR[record.levelname])
class MultiprocessingHandler(logging.StreamHandler):
    """
    A StreamHandler that is safe to use with multiprocessing.

    Per-level record counts are stored in shared-memory values so they remain
    accurate across worker processes, and the base class threading lock is
    replaced with a multiprocessing lock.
    """
    # Shared (process-safe) counters of the number of records handled at each
    # logging level; shared by all instances of this handler.
    COUNTS = {logging.CRITICAL: multiprocessing.Value('I', 0, lock=True),
              logging.ERROR: multiprocessing.Value('I', 0, lock=True),
              logging.WARNING: multiprocessing.Value('I', 0, lock=True),
              logging.INFO: multiprocessing.Value('I', 0, lock=True),
              logging.DEBUG: multiprocessing.Value('I', 0, lock=True)}

    def getCount(self, level):
        """Return the number of records handled at the supplied logging level."""
        return MultiprocessingHandler.COUNTS[level].value

    def handle(self, record):
        """Handle the record, then increment the shared per-level counter."""
        super().handle(record)
        with MultiprocessingHandler.COUNTS[record.levelno].get_lock():
            MultiprocessingHandler.COUNTS[record.levelno].value += 1

    def flush(self):
        """Lock when flushing logging messages."""
        if self._lock:
            with self._lock:
                super(MultiprocessingHandler, self).flush()
        else:
            super(MultiprocessingHandler, self).flush()

    def createLock(self):
        """logging by default uses threading, use a multiprocessing lock instead."""
        self.lock = None  # disable the base class threading lock
        self._lock = multiprocessing.Lock()

    def acquire(self):
        """Disable."""
        # BUGFIX: this method was previously misspelled 'aquire', so it never
        # actually overrode logging.Handler.acquire as intended.
        pass

    def release(self):
        """Disable."""
        pass
def init_logging(level=logging.INFO, silent=False):
    """
    Call this function to initialize the MooseDocs logging formatter.
    """
    # Custom format that colors and counts errors/warnings
    handler = moosesqa.SilentRecordHandler() if silent else MultiprocessingHandler()
    handler.setFormatter(MooseDocsFormatter())

    # Attach the custom handler to the MooseDocs root logger
    moosedocs_log = logging.getLogger('MooseDocs')
    moosedocs_log.addHandler(handler)
    moosedocs_log.setLevel(level)
    MooseDocs.LOG_LEVEL = level
def report_exception(msg, *args):
    """Helper to output exceptions in logs."""
    trace = mooseutils.colorText(traceback.format_exc(), 'GREY')
    return '{}\n{}\n'.format(msg.format(*args), trace)
|
harterj/moose
|
python/MooseDocs/common/log.py
|
Python
|
lgpl-2.1
| 3,295
|
[
"MOOSE"
] |
10fc20638ebc527a78862195d8a298d5d467693f40f37f32bab38fc3fde287fa
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import collections
import numpy as np
import operator
import os
import functools
from math import exp, sqrt
from monty.serialization import loadfn
from pymatgen.core.periodic_table import Element, Specie
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.core.periodic_table import get_el_sp
"""
This module implements classes to perform bond valence analyses.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Oct 26, 2012"

# Let's initialize some module level properties.
# List of electronegative elements specified in M. O'Keefe, & N. Brese,
# JACS, 1991, 113(9), 3226-3229. doi:10.1021/ja00009a002.
ELECTRONEG = [Element(sym) for sym in ["H", "B", "C", "Si",
                                       "N", "P", "As", "Sb",
                                       "O", "S", "Se", "Te",
                                       "F", "Cl", "Br", "I"]]

# Directory containing this module; used to locate the bundled data files.
module_dir = os.path.dirname(os.path.abspath(__file__))

# Read in BV parameters.
# Maps Element -> parameter dict (keys "r" and "c" are used below).
BV_PARAMS = {}
for k, v in loadfn(os.path.join(module_dir, "bvparam_1991.yaml")).items():
    BV_PARAMS[Element(k)] = v

# Read in yaml containing data-mined ICSD BV data.
all_data = loadfn(os.path.join(module_dir, "icsd_bv.yaml"))
# Maps Specie -> bond-valence-sum statistics mined from the ICSD.
ICSD_BV_DATA = {Specie.from_string(sp): data
                for sp, data in all_data["bvsum"].items()}
# Maps Specie -> occurrence-frequency data mined from the ICSD (used as prior).
PRIOR_PROB = {Specie.from_string(sp): data
              for sp, data in all_data["occurrence"].items()}
def calculate_bv_sum(site, nn_list, scale_factor=1.0):
    """
    Calculates the BV sum of a site.

    Args:
        site:
            The site
        nn_list:
            List of nearest neighbors in the format [(nn_site, dist), ...].
        scale_factor:
            A scale factor to be applied. This is useful for scaling distance,
            esp in the case of calculation-relaxed structures which may tend
            to under (GGA) or over bind (LDA).
    """
    central = Element(site.specie.symbol)
    total = 0
    for neighbor, dist in nn_list:
        other = Element(neighbor.specie.symbol)
        # Only pairs of distinct elements where at least one element is
        # electronegative contribute to the sum.
        if central == other or (central not in ELECTRONEG
                                and other not in ELECTRONEG):
            continue
        p1, p2 = BV_PARAMS[central], BV_PARAMS[other]
        r1, r2 = p1["r"], p2["r"]
        c1, c2 = p1["c"], p2["c"]
        R = r1 + r2 - r1 * r2 * (sqrt(c1) - sqrt(c2)) ** 2 / \
            (c1 * r1 + c2 * r2)
        vij = exp((R - dist * scale_factor) / 0.31)
        # Sign of the contribution follows the electronegativity ordering.
        total += vij if central.X < other.X else -vij
    return total
def calculate_bv_sum_unordered(site, nn_list, scale_factor=1):
    """
    Calculates the BV sum of a site for unordered structures.

    Args:
        site:
            The site
        nn_list:
            List of nearest neighbors in the format [(nn_site, dist), ...].
        scale_factor:
            A scale factor to be applied. This is useful for scaling distance,
            esp in the case of calculation-relaxed structures which may tend
            to under (GGA) or over bind (LDA).
    """
    # If the site "site" has N partial occupations as : f_{site}_0,
    # f_{site}_1, ... f_{site}_N of elements
    # X_{site}_0, X_{site}_1, ... X_{site}_N, and each neighbors nn_i in nn
    # has N_{nn_i} partial occupations as :
    # f_{nn_i}_0, f_{nn_i}_1, ..., f_{nn_i}_{N_{nn_i}}, then the bv sum of
    # site "site" is obtained as :
    # \sum_{nn} \sum_j^N \sum_k^{N_{nn}} f_{site}_j f_{nn_i}_k vij_full
    # where vij_full is the valence bond of the fully occupied bond
    total = 0
    for specie1, occu1 in site.species.items():
        el1 = Element(specie1.symbol)
        for neighbor, dist in nn_list:
            for specie2, occu2 in neighbor.species.items():
                el2 = Element(specie2.symbol)
                # Skip same-element pairs and pairs with no electronegative
                # member, matching the ordered-structure rule.
                if el1 == el2 or (el1 not in ELECTRONEG
                                  and el2 not in ELECTRONEG):
                    continue
                r1, c1 = BV_PARAMS[el1]["r"], BV_PARAMS[el1]["c"]
                r2, c2 = BV_PARAMS[el2]["r"], BV_PARAMS[el2]["c"]
                R = r1 + r2 - r1 * r2 * (sqrt(c1) - sqrt(c2)) ** 2 / \
                    (c1 * r1 + c2 * r2)
                vij = exp((R - dist * scale_factor) / 0.31)
                weighted = occu1 * occu2 * vij
                total += weighted if el1.X < el2.X else -weighted
    return total
class BVAnalyzer:
    """
    This class implements a maximum a posteriori (MAP) estimation method to
    determine oxidation states in a structure. The algorithm is as follows:

    1) The bond valence sum of all symmetrically distinct sites in a structure
    is calculated using the element-based parameters in M. O'Keefe, & N. Brese,
    JACS, 1991, 113(9), 3226-3229. doi:10.1021/ja00009a002.

    2) The posterior probabilities of all oxidation states is then calculated
    using: P(oxi_state/BV) = K * P(BV/oxi_state) * P(oxi_state), where K is
    a constant factor for each element. P(BV/oxi_state) is calculated as a
    Gaussian with mean and std deviation determined from an analysis of
    the ICSD. The posterior P(oxi_state) is determined from a frequency
    analysis of the ICSD.

    3) The oxidation states are then ranked in order of decreasing probability
    and the oxidation state combination that result in a charge neutral cell
    is selected.
    """

    # Tolerance on the net cell charge for unordered structures to be
    # considered charge neutral.
    CHARGE_NEUTRALITY_TOLERANCE = 0.00001

    def __init__(self, symm_tol=0.1, max_radius=4, max_permutations=100000,
                 distance_scale_factor=1.015,
                 charge_neutrality_tolerance=CHARGE_NEUTRALITY_TOLERANCE,
                 forbidden_species=None):
        """
        Initializes the BV analyzer, with useful defaults.

        Args:
            symm_tol:
                Symmetry tolerance used to determine which sites are
                symmetrically equivalent. Set to 0 to turn off symmetry.
            max_radius:
                Maximum radius in Angstrom used to find nearest neighbors.
            max_permutations:
                The maximum number of permutations of oxidation states to test.
            distance_scale_factor:
                A scale factor to be applied. This is useful for scaling
                distances, esp in the case of calculation-relaxed structures
                which may tend to under (GGA) or over bind (LDA). The default
                of 1.015 works for GGA. For experimental structure, set this to
                1.
            charge_neutrality_tolerance:
                Tolerance on the charge neutrality when unordered structures
                are at stake.
            forbidden_species:
                List of species that are forbidden (example : ["O-"] cannot be
                used) It is used when e.g. someone knows that some oxidation
                state cannot occur for some atom in a structure or list of
                structures.
        """
        self.symm_tol = symm_tol
        self.max_radius = max_radius
        self.max_permutations = max_permutations
        self.dist_scale_factor = distance_scale_factor
        self.charge_neutrality_tolerance = charge_neutrality_tolerance
        forbidden_species = [get_el_sp(sp) for sp in forbidden_species] if \
            forbidden_species else []
        # Restrict the ICSD statistics to the allowed species only.
        self.icsd_bv_data = {get_el_sp(specie): data
                             for specie, data in ICSD_BV_DATA.items()
                             if specie not in forbidden_species} \
            if len(forbidden_species) > 0 else ICSD_BV_DATA

    def _calc_site_probabilities(self, site, nn):
        """Return {oxi_state: probability} for an ordered site given its
        neighbor list *nn*."""
        el = site.specie.symbol
        bv_sum = calculate_bv_sum(site, nn,
                                  scale_factor=self.dist_scale_factor)
        prob = {}
        for sp, data in self.icsd_bv_data.items():
            if sp.symbol == el and sp.oxi_state != 0 and data["std"] > 0:
                u = data["mean"]
                sigma = data["std"]
                # Calculate posterior probability. Note that constant
                # factors are ignored. They have no effect on the results.
                prob[sp.oxi_state] = exp(-(bv_sum - u) ** 2 / 2 /
                                         (sigma ** 2)) \
                    / sigma * PRIOR_PROB[sp]
        # Normalize the probabilities
        try:
            prob = {k: v / sum(prob.values()) for k, v in prob.items()}
        except ZeroDivisionError:
            prob = {k: 0.0 for k in prob}
        return prob

    def _calc_site_probabilities_unordered(self, site, nn):
        """Return {element_symbol: {oxi_state: probability}} for a partially
        occupied site given its neighbor list *nn*."""
        bv_sum = calculate_bv_sum_unordered(
            site, nn, scale_factor=self.dist_scale_factor)
        prob = {}
        for specie, occu in site.species.items():
            el = specie.symbol
            prob[el] = {}
            for sp, data in self.icsd_bv_data.items():
                if sp.symbol == el and sp.oxi_state != 0 and data["std"] > 0:
                    u = data["mean"]
                    sigma = data["std"]
                    # Calculate posterior probability. Note that constant
                    # factors are ignored. They have no effect on the results.
                    prob[el][sp.oxi_state] = exp(-(bv_sum - u) ** 2 / 2 /
                                                 (sigma ** 2)) \
                        / sigma * PRIOR_PROB[sp]
            # Normalize the probabilities
            try:
                prob[el] = {k: v / sum(prob[el].values())
                            for k, v in prob[el].items()}
            except ZeroDivisionError:
                prob[el] = {k: 0.0 for k in prob[el]}
        return prob

    def get_valences(self, structure):
        """
        Returns a list of valences for the structure.

        Args:
            structure: Structure to analyze

        Returns:
            A list of valences for each site in the structure (for an ordered
            structure), e.g., [1, 1, -2] or a list of lists with the
            valences for each fractional element of each site in the
            structure (for an unordered structure),
            e.g., [[2, 4], [3], [-2], [-2], [-2]]

        Raises:
            A ValueError if the valences cannot be determined.
        """
        els = [Element(el.symbol) for el in structure.composition.elements]
        if not set(els).issubset(set(BV_PARAMS.keys())):
            raise ValueError(
                "Structure contains elements not in set of BV parameters!"
            )
        # Perform symmetry determination and get sites grouped by symmetry.
        if self.symm_tol:
            finder = SpacegroupAnalyzer(structure, self.symm_tol)
            symm_structure = finder.get_symmetrized_structure()
            equi_sites = symm_structure.equivalent_sites
        else:
            equi_sites = [[site] for site in structure]
        # Sort the equivalent sites by decreasing electronegativity.
        equi_sites = sorted(equi_sites,
                            key=lambda sites: -sites[0].species
                            .average_electroneg)
        # Get a list of valences and probabilities for each symmetrically
        # distinct site.
        valences = []
        all_prob = []
        if structure.is_ordered:
            for sites in equi_sites:
                test_site = sites[0]
                nn = structure.get_neighbors(test_site, self.max_radius)
                prob = self._calc_site_probabilities(test_site, nn)
                all_prob.append(prob)
                val = list(prob.keys())
                # Sort valences in order of decreasing probability.
                val = sorted(val, key=lambda v: -prob[v])
                # Retain probabilities that are at least 1/100 of highest prob.
                valences.append(
                    list(filter(lambda v: prob[v] > 0.01 * prob[val[0]],
                                val)))
        else:
            full_all_prob = []
            for sites in equi_sites:
                test_site = sites[0]
                nn = structure.get_neighbors(test_site, self.max_radius)
                prob = self._calc_site_probabilities_unordered(test_site, nn)
                all_prob.append(prob)
                full_all_prob.extend(prob.values())
                vals = []
                for (elsp, occ) in get_z_ordered_elmap(
                        test_site.species):
                    val = list(prob[elsp.symbol].keys())
                    # Sort valences in order of decreasing probability.
                    val = sorted(val, key=lambda v: -prob[elsp.symbol][v])
                    # Retain probabilities that are at least 1/1000 of highest
                    # prob.
                    vals.append(
                        list(filter(
                            lambda v: prob[elsp.symbol][v] > 0.001 * prob[
                                elsp.symbol][val[0]], val)))
                valences.append(vals)
        # make variables needed for recursion
        if structure.is_ordered:
            nsites = np.array([len(i) for i in equi_sites])
            vmin = np.array([min(i) for i in valences])
            vmax = np.array([max(i) for i in valences])
            self._n = 0
            self._best_score = 0
            self._best_vset = None

            def evaluate_assignment(v_set):
                # Reject assignments where a single element is given oxidation
                # states differing by more than 1; otherwise keep the highest
                # scoring (product of per-site probabilities) assignment.
                el_oxi = collections.defaultdict(list)
                for i, sites in enumerate(equi_sites):
                    el_oxi[sites[0].specie.symbol].append(v_set[i])
                max_diff = max([max(v) - min(v) for v in el_oxi.values()])
                if max_diff > 1:
                    return
                score = functools.reduce(
                    operator.mul, [all_prob[i][v] for i, v in enumerate(v_set)])
                if score > self._best_score:
                    self._best_vset = v_set
                    self._best_score = score

            def _recurse(assigned=()):
                # recurses to find permutations of valences based on whether a
                # charge balanced assignment can still be found
                # (tuple default avoids the mutable-default pitfall)
                if self._n > self.max_permutations:
                    return
                i = len(assigned)
                highest = vmax.copy()
                highest[:i] = assigned
                highest *= nsites
                highest = np.sum(highest)
                lowest = vmin.copy()
                lowest[:i] = assigned
                lowest *= nsites
                lowest = np.sum(lowest)
                # Prune: even the extreme remaining choices cannot reach
                # charge neutrality.
                if highest < 0 or lowest > 0:
                    self._n += 1
                    return
                if i == len(valences):
                    evaluate_assignment(assigned)
                    self._n += 1
                    return
                for v in valences[i]:
                    new_assigned = list(assigned)
                    _recurse(new_assigned + [v])
        else:
            nsites = np.array([len(i) for i in equi_sites])
            tmp = []
            attrib = []
            # Flatten the per-site, per-species valence lists; attrib maps
            # each flattened entry back to its site index.
            for insite, nsite in enumerate(nsites):
                for val in valences[insite]:
                    tmp.append(nsite)
                    attrib.append(insite)
            new_nsites = np.array(tmp)
            fractions = []
            elements = []
            for sites in equi_sites:
                for sp, occu in get_z_ordered_elmap(sites[0].species):
                    elements.append(sp.symbol)
                    fractions.append(occu)
            # BUGFIX: np.float is a removed numpy alias; the builtin float is
            # the equivalent dtype.
            fractions = np.array(fractions, float)
            new_valences = []
            for vals in valences:
                for val in vals:
                    new_valences.append(val)
            vmin = np.array([min(i) for i in new_valences], float)
            vmax = np.array([max(i) for i in new_valences], float)
            self._n = 0
            self._best_score = 0
            self._best_vset = None

            def evaluate_assignment(v_set):
                # As in the ordered case, but each element may span oxidation
                # states differing by up to 2.
                el_oxi = collections.defaultdict(list)
                jj = 0
                for i, sites in enumerate(equi_sites):
                    for specie, occu in get_z_ordered_elmap(
                            sites[0].species):
                        el_oxi[specie.symbol].append(v_set[jj])
                        jj += 1
                max_diff = max([max(v) - min(v) for v in el_oxi.values()])
                if max_diff > 2:
                    return
                # BUGFIX: was six.moves.reduce, but six is never imported in
                # this module (NameError at runtime); functools.reduce is the
                # direct equivalent and functools is imported at file top.
                score = functools.reduce(
                    operator.mul,
                    [all_prob[attrib[iv]][elements[iv]][vv]
                     for iv, vv in enumerate(v_set)])
                if score > self._best_score:
                    self._best_vset = v_set
                    self._best_score = score

            def _recurse(assigned=()):
                # recurses to find permutations of valences based on whether a
                # charge balanced assignment can still be found
                if self._n > self.max_permutations:
                    return
                i = len(assigned)
                highest = vmax.copy()
                highest[:i] = assigned
                highest *= new_nsites
                highest *= fractions
                highest = np.sum(highest)
                lowest = vmin.copy()
                lowest[:i] = assigned
                lowest *= new_nsites
                lowest *= fractions
                lowest = np.sum(lowest)
                # Prune against the charge-neutrality tolerance (occupancies
                # make exact neutrality unreachable in general).
                if (highest < -self.charge_neutrality_tolerance or
                        lowest > self.charge_neutrality_tolerance):
                    self._n += 1
                    return
                if i == len(new_valences):
                    evaluate_assignment(assigned)
                    self._n += 1
                    return
                for v in new_valences[i]:
                    new_assigned = list(assigned)
                    _recurse(new_assigned + [v])
        _recurse()
        if self._best_vset:
            if structure.is_ordered:
                assigned = {}
                for val, sites in zip(self._best_vset, equi_sites):
                    for site in sites:
                        assigned[site] = val
                return [int(assigned[site]) for site in structure]
            # Unordered: un-flatten the best assignment back into one list of
            # valences per symmetrically distinct site.
            assigned = {}
            new_best_vset = [[] for _ in range(len(equi_sites))]
            for ival, val in enumerate(self._best_vset):
                new_best_vset[attrib[ival]].append(val)
            for val, sites in zip(new_best_vset, equi_sites):
                for site in sites:
                    assigned[site] = val
            return [[int(frac_site) for frac_site in assigned[site]]
                    for site in structure]
        raise ValueError("Valences cannot be assigned!")

    def get_oxi_state_decorated_structure(self, structure):
        """
        Get an oxidation state decorated structure.

        Args:
            structure: Structure to analyze

        Returns:
            A modified structure that is oxidation state decorated.

        Raises:
            ValueError if the valences cannot be determined.
        """
        s = structure.copy()
        valences = self.get_valences(s)
        if s.is_ordered:
            s.add_oxidation_state_by_site(valences)
        else:
            s = add_oxidation_state_by_site_fraction(s, valences)
        return s
def get_z_ordered_elmap(comp):
    """
    Arbitrary ordered elmap on the elements/species of a composition of a
    given site in an unordered structure. Returns a list of tuples (
    element_or_specie: occupation) in the arbitrary order.

    The arbitrary order is based on the Z of the element and the smallest
    fractional occupations first.
    Example : {"Ni3+": 0.2, "Ni4+": 0.2, "Cr3+": 0.15, "Zn2+": 0.34,
    "Cr4+": 0.11} will yield the species in the following order :
    Cr4+, Cr3+, Ni3+, Ni4+, Zn2+ ... or
    Cr4+, Cr3+, Ni4+, Ni3+, Zn2+
    """
    return sorted((species, comp[species]) for species in comp.keys())
def add_oxidation_state_by_site_fraction(structure, oxidation_states):
    """
    Add oxidation states to a structure by fractional site.

    Args:
        oxidation_states (list): List of list of oxidation states for each
            site fraction for each site.
            E.g., [[2, 4], [3], [-2], [-2], [-2]]
    """
    try:
        for isite, site in enumerate(structure):
            # Build a new species map where each fractional occupant carries
            # the corresponding oxidation state.
            species_map = collections.defaultdict(float)
            for ispec, (el, occu) in enumerate(
                    get_z_ordered_elmap(site.species)):
                species_map[Specie(el.symbol,
                                   oxidation_states[isite][ispec])] += occu
            structure[isite] = species_map
        return structure
    except IndexError:
        raise ValueError("Oxidation state of all sites must be "
                         "specified in the list.")
|
dongsenfo/pymatgen
|
pymatgen/analysis/bond_valence.py
|
Python
|
mit
| 21,312
|
[
"Gaussian",
"pymatgen"
] |
64b38a4e68d324c643b2da6357e159915c91ab4d03476baab5fc22d5df90d355
|
# (c) 2012-2018, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
# standard python libraries
import logging
from django.contrib.auth import get_user_model
from galaxy.main.models import (Role, ImportTask,
ImportTaskMessage, RoleVersion,
NotificationSecret, Notification,
Subscription, Stargazer)
logger = logging.getLogger('galaxy.api.access')
__all__ = ['check_user_access']
User = get_user_model()
access_registry = {
# <model_class>: [<access_class>, ...],
# ...
}
def register_access(model_class, access_class):
    """Register an access-policy class to be consulted for model_class."""
    access_registry.setdefault(model_class, []).append(access_class)
def check_user_access(user, model_class, action, *args, **kwargs):
    '''
    Return True if user can perform action against model_class with the
    provided parameters.
    '''
    method_name = 'can_%s' % action
    for access_class in access_registry.get(model_class, []):
        checker = access_class(user)
        access_method = getattr(checker, method_name, None)
        if access_method is None:
            # This policy class has no opinion on the requested action.
            continue
        result = access_method(*args, **kwargs)
        logger.debug('%s.%s %r returned %r', checker.__class__.__name__,
                     access_method.__name__, args, result)
        if result:
            # First policy that grants access wins.
            return result
    return False
def get_pk_from_dict(_dict, key):
    '''
    Helper for obtaining a pk from user data dict or None if not present.
    '''
    # Fetch and convert in two steps; any failure (missing key,
    # unsubscriptable container, non-numeric value) yields None.
    try:
        raw = _dict[key]
    except (TypeError, KeyError):
        return None
    try:
        return int(raw)
    except (TypeError, ValueError):
        return None
class BaseAccess(object):
    '''
    Base class for checking user access to a given model. Subclasses should
    define the model attribute, override the get_queryset method to return only
    the instances the user should be able to view, and override/define can_*
    methods to verify a user's permission to perform a particular action.
    '''
    model = None

    def __init__(self, user):
        self.user = user

    def get_queryset(self):
        # Default queryset: active instances only.
        return self.model.objects.filter(active=True).distinct()

    def can_read(self, obj):
        # With no object (or a falsy one), reading is permitted.
        if not obj:
            return True
        if getattr(obj, "active", False):
            # Active objects are readable unless explicitly marked invalid.
            return bool(getattr(obj, "is_valid", True))
        if getattr(obj, "is_active", False):
            return True
        return False

    def can_add(self, data):
        return self.user.is_staff

    def can_change(self, obj, data):
        # Owned objects may be changed by their owner; staff may change anything.
        if hasattr(obj, 'owner_id'):
            return obj.owner == self.user or self.user.is_staff
        return self.user.is_staff

    def can_write(self, obj, data):
        # Alias for change.
        return self.can_change(obj, data)

    def can_admin(self, obj, data):
        # Alias for can_change. Can be overridden if admin vs. user change
        # permissions need to be different.
        return self.can_change(obj, data)

    def can_delete(self, obj):
        return self.user.is_staff

    def can_attach(self, obj, sub_obj, relationship, data,
                   skip_sub_obj_read_check=False):
        if skip_sub_obj_read_check:
            return self.can_change(obj, None)
        # Attaching requires both change rights on obj and read rights
        # on the object being attached.
        return bool(self.can_change(obj, None) and
                    check_user_access(self.user, type(sub_obj), 'read', sub_obj))

    def can_unattach(self, obj, sub_obj, relationship):
        return self.can_change(obj, None)
class UserAccess(BaseAccess):
    '''
    I can see user records when:
     - always
    I can change some fields for a user (mainly password) when I am that user.
    I can change all fields for a user (admin access) or delete when:
     - I'm an admin/staff
    '''
    model = User

    def get_queryset(self):
        # Only active, non-admin accounts are listable.
        return self.model.objects.filter(is_active=True, is_admin=False).distinct()

    def can_change(self, obj, data):
        # Users may edit themselves (limited fields such as password);
        # staff may edit anyone.
        return bool(self.user == obj or self.user.is_staff)

    def can_delete(self, obj):
        # Self-deletion is never allowed.
        if obj == self.user:
            return False
        return self.user.is_staff
class RoleAccess(BaseAccess):
    # Access policy for Role objects.
    model = Role

    def can_attach(self, obj, sub_obj, relationship, data,
                   skip_sub_obj_read_check=False):
        # Roles never accept attachments through the API.
        return False

    def get_queryset(self):
        queryset = self.model.objects.filter(active=True)
        return queryset.distinct()
class RoleVersionAccess(BaseAccess):
    # Access policy for RoleVersion objects.
    model = RoleVersion

    def get_queryset(self):
        # A version is visible only while both it and its role are active.
        queryset = self.model.objects.filter(active=True, role__active=True)
        return queryset.distinct()
class NotificationSecretAccess(BaseAccess):
    # Access policy for NotificationSecret objects: visible and editable
    # only by their authenticated owner.
    model = NotificationSecret

    def _is_active_owner(self, obj):
        # Shared check used by read/change/delete: the requesting user must
        # be authenticated and own this still-active secret.
        return bool(self.user.is_authenticated() and obj.active and
                    obj.owner.id == self.user.id)

    def can_read(self, obj):
        return self._is_active_owner(obj)

    def can_add(self, data):
        # Any authenticated user may create a secret.
        return self.user.is_authenticated()

    def can_change(self, obj, data):
        return self._is_active_owner(obj)

    def can_delete(self, obj):
        # Bug fix: the original fell through and returned None (implicitly)
        # when the owner check failed; now returns an explicit False,
        # matching can_read/can_change.
        return self._is_active_owner(obj)
class ImportTaskAccess(BaseAccess):
    # Access policy for ImportTask objects.
    model = ImportTask

    def can_add(self, data):
        # Any authenticated user may launch an import.
        return self.user.is_authenticated()

    def can_change(self, obj, data):
        # Tasks are immutable once created.
        return False

    def can_attach(self, obj, sub_obj, relationship, data,
                   skip_sub_obj_read_check=False):
        return False
class ImportTaskMessageAccess(BaseAccess):
    # Access policy for ImportTaskMessage objects: read-only system output.
    model = ImportTaskMessage

    def can_add(self, data):
        return False

    def can_change(self, obj, data):
        return False

    def can_attach(self, obj, sub_obj, relationship, data,
                   skip_sub_obj_read_check=False):
        return False

    def get_queryset(self):
        # Messages are visible only while both they and their task are active.
        queryset = self.model.objects.filter(active=True, task__active=True)
        return queryset.distinct()
class NotificationAccess(BaseAccess):
    # Bug fix: `model` was never set here (unlike every sibling access
    # class), so the inherited get_queryset() would fail on None.objects.
    model = Notification

    def can_add(self, data):
        # Notifications are system-generated; creation is unrestricted.
        return True

    def can_change(self, obj, data):
        return False

    def can_attach(self, obj, sub_obj, relationship, data,
                   skip_sub_obj_read_check=False):
        return False
class SubscriptionAccess(BaseAccess):
    # Bug fix: `model` was missing, breaking the inherited get_queryset().
    model = Subscription

    def can_add(self, data):
        return self.user.is_authenticated()

    def can_change(self, obj=None, data=None):
        # Subscriptions cannot be modified, only created or deleted.
        # Signature widened (with defaults, so existing single-argument
        # callers still work) to match BaseAccess.can_change(obj, data),
        # which the original override mismatched.
        return False

    def can_delete(self, obj=None):
        return self.user.is_authenticated()
class StargazerAccess(BaseAccess):
    # Bug fix: `model` was missing, breaking the inherited get_queryset().
    model = Stargazer

    def can_add(self, data):
        return self.user.is_authenticated()

    def can_change(self, obj=None, data=None):
        # Stars cannot be modified, only added or removed.
        # Signature widened (with defaults, so existing single-argument
        # callers still work) to match BaseAccess.can_change(obj, data),
        # which the original override mismatched.
        return False

    def can_delete(self, obj=None):
        return self.user.is_authenticated()
# Wire each model to its access-policy class (registration order preserved).
for _model_cls, _access_cls in (
        (User, UserAccess),
        (Role, RoleAccess),
        (RoleVersion, RoleVersionAccess),
        (ImportTask, ImportTaskAccess),
        (ImportTaskMessage, ImportTaskMessageAccess),
        (NotificationSecret, NotificationSecretAccess),
        (Notification, NotificationAccess),
        (Subscription, SubscriptionAccess),
        (Stargazer, StargazerAccess)):
    register_access(_model_cls, _access_cls)
|
chouseknecht/galaxy
|
galaxy/api/access.py
|
Python
|
apache-2.0
| 8,206
|
[
"Galaxy"
] |
2dabe225b128b49f66181f28b35e45f9ca3f861d19ef5108348a4ca4218c69a5
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pathlib
from setuptools import find_packages, setup
# Directory containing this setup.py; requirements.txt lives alongside it.
here = pathlib.Path(__file__).parent.resolve()

# Runtime dependencies are maintained one per line in requirements.txt.
requirements = (here / 'requirements.txt').read_text(encoding='utf-8').splitlines()

setup(
    install_requires=requirements,
    package_dir={'': 'lib',
                 'ansible_test': 'test/lib/ansible_test'},
    packages=find_packages('lib') + find_packages('test/lib'),
    entry_points={
        'console_scripts': [
            'ansible=ansible.cli.adhoc:main',
            'ansible-config=ansible.cli.config:main',
            'ansible-console=ansible.cli.console:main',
            'ansible-doc=ansible.cli.doc:main',
            'ansible-galaxy=ansible.cli.galaxy:main',
            'ansible-inventory=ansible.cli.inventory:main',
            'ansible-playbook=ansible.cli.playbook:main',
            'ansible-pull=ansible.cli.pull:main',
            'ansible-vault=ansible.cli.vault:main',
            'ansible-connection=ansible.cli.scripts.ansible_connection_cli_stub:main',
        ],
    },
)
|
sysadmin75/ansible
|
setup.py
|
Python
|
gpl-3.0
| 1,116
|
[
"Galaxy"
] |
880b819d96cafe1a2c564030c295a0e83b398fd4cadf79c9af6f27399db0db7d
|
__author__ = 'joon'
import sys
sys.path.insert(0, 'src')
sys.path.insert(0, 'lib')
sys.path.insert(0, 'ResearchTools')
from imports.import_caffe import *
from caffetools.netblocks import get_learned_param, get_frozen_param, conv_relu, max_pool, conv
param = (get_frozen_param(), get_learned_param())
def vgg_conv1s(n, bottom, learn=1):
    """Append VGG conv1 (two 3x3/64 conv+ReLU layers) to `n`; return the top."""
    p = param[learn]
    n.conv1_1, n.relu1_1 = conv_relu(bottom, 3, 64, pad=1, param=p)
    n.conv1_2, n.relu1_2 = conv_relu(n.relu1_1, 3, 64, pad=1, param=p)
    return n.relu1_2
def vgg_conv2s(n, bottom, learn=1):
    """Append VGG conv2 (two 3x3/128 conv+ReLU layers) to `n`; return the top."""
    p = param[learn]
    n.conv2_1, n.relu2_1 = conv_relu(bottom, 3, 128, pad=1, param=p)
    n.conv2_2, n.relu2_2 = conv_relu(n.relu2_1, 3, 128, pad=1, param=p)
    return n.relu2_2
def vgg_conv3s(n, bottom, learn=1):
    """Append VGG conv3 (three 3x3/256 conv+ReLU layers) to `n`; return the top."""
    p = param[learn]
    n.conv3_1, n.relu3_1 = conv_relu(bottom, 3, 256, pad=1, param=p)
    n.conv3_2, n.relu3_2 = conv_relu(n.relu3_1, 3, 256, pad=1, param=p)
    n.conv3_3, n.relu3_3 = conv_relu(n.relu3_2, 3, 256, pad=1, param=p)
    return n.relu3_3
def vgg_conv4s(n, bottom, learn=1):
    """Append VGG conv4 (three 3x3/512 conv+ReLU layers) to `n`; return the top."""
    p = param[learn]
    n.conv4_1, n.relu4_1 = conv_relu(bottom, 3, 512, pad=1, param=p)
    n.conv4_2, n.relu4_2 = conv_relu(n.relu4_1, 3, 512, pad=1, param=p)
    n.conv4_3, n.relu4_3 = conv_relu(n.relu4_2, 3, 512, pad=1, param=p)
    return n.relu4_3
def deeplab_conv5s(n, bottom, learn=1):
    """Append conv5 as three dilated (rate 2) 3x3/512 conv+ReLU layers."""
    p = param[learn]
    n.conv5_1, n.relu5_1 = conv_relu(bottom, 3, 512, pad=2, dilation=2, param=p)
    n.conv5_2, n.relu5_2 = conv_relu(n.relu5_1, 3, 512, pad=2, dilation=2, param=p)
    n.conv5_3, n.relu5_3 = conv_relu(n.relu5_2, 3, 512, pad=2, dilation=2, param=p)
    return n.relu5_3
def deeplab_fc6(n, bottom, learn=1):
    """fc6 as a dilated (rate 12) 3x3/1024 conv + ReLU, then dropout."""
    weights = param[learn]
    n.fc6, n.relu6 = conv_relu(bottom, 3, 1024, pad=12, dilation=12, param=weights)
    n.drop6 = L.Dropout(n.relu6)
    return n.drop6
def deeplab_fc7(n, bottom, learn=1):
    """fc7 as a 1x1/1024 conv + ReLU, then dropout."""
    weights = param[learn]
    n.fc7, n.relu7 = conv_relu(bottom, 1, 1024, param=weights)
    n.drop7 = L.Dropout(n.relu7)
    return n.drop7
def deeplab_fc8(n, bottom, learn=1):
    """Final 1x1 conv scoring 21 classes (PASCAL VOC); returns the score map.

    The classifier layer uses 10x/20x the base learning rates, scaled by
    `learn` so learn=0 freezes it entirely.
    """
    n.fc8_voc12 = L.Convolution(
        bottom,
        kernel_size=1,
        num_output=21,
        param=[dict(lr_mult=10 * learn, decay_mult=1 * learn),
               dict(lr_mult=20 * learn, decay_mult=0 * learn)],
        weight_filler=dict(type='gaussian', std=0.01),
        bias_filler=dict(type='constant', value=0),
    )
    return n.fc8_voc12
def deeplab_layers(n):
    """Assemble the VGG16/DeepLab trunk on `n.data`; return the score map."""
    top = vgg_conv1s(n, n.data)
    n.pool1 = max_pool(top, ks=3, stride=2, pad=1)
    top = vgg_conv2s(n, n.pool1)
    n.pool2 = max_pool(top, ks=3, stride=2, pad=1)
    top = vgg_conv3s(n, n.pool2)
    n.pool3 = max_pool(top, ks=3, stride=2, pad=1)
    top = vgg_conv4s(n, n.pool3)
    # Later pools use stride 1, so there is no further downsampling.
    n.pool4 = max_pool(top, ks=3, stride=1, pad=1)
    top = deeplab_conv5s(n, n.pool4)
    n.pool5 = max_pool(top, ks=3, stride=1, pad=1)
    top = deeplab_fc6(n, n.pool5)
    top = deeplab_fc7(n, top)
    return deeplab_fc8(n, top)
def densesoftmaxloss(n, scoremap, label):
    """Attach a dense-softmax python loss layer over `scoremap` vs `label`."""
    n.loss = L.Python(
        scoremap,
        label,
        module='caffetools.losslayers',
        layer='DenseSoftmax',
        ntop=1,
        param_str=str(dict()),
        loss_weight=1,
    )
    return n.loss
def deeplab(conf, control, phase):
    """Build the DeepLab network definition as a prototxt string.

    Args:
        conf: configuration dict; 'input_size' sizes the test-time dummy blob.
        control: experiment-control dict forwarded to the python data layer.
        phase: 'train' (python data layer + dense softmax loss) or
            'test' (single dummy data blob, no loss).

    Returns:
        str: the serialized network prototxt.

    Raises:
        NotImplementedError: for any phase other than 'train' or 'test'.
    """
    # setup the python data layer
    n = caffe.NetSpec()
    if phase == 'train':
        n.data, n.label = L.Python(module='caffetools.datalayers', layer='DeepLabData', ntop=2, param_str=str(dict(
            control=control,
            conf=conf,
        )))
    elif phase == 'test':
        n.data = L.DummyData(num=1, channels=3, height=conf['input_size'], width=conf['input_size'])
    else:
        raise NotImplementedError
    scoremap = deeplab_layers(n)
    if phase == 'train':
        # The loss layer registers itself on `n`; the return value was
        # previously bound to an unused local, now dropped.
        densesoftmaxloss(n, scoremap, n.label)
    return str(n.to_proto())
|
coallaoh/GuidedLabelling
|
src/segmentation/networks.py
|
Python
|
mit
| 3,993
|
[
"Gaussian"
] |
934586785699cd5e63f2babe2ae41e0a76fd883edfe8c8cbaedcbc46ccd1086c
|
import cherrypy
import SOAPpy
import re
import random
from blast_html import *
server = SOAPpy.SOAPProxy("hatfull12.bio.pitt.edu:31415/")
###########################################################
class orderedList:
    """Sorted container: wraps each item in a Node so that Node's
    comparison logic (Python 2 __cmp__) determines the ordering."""

    def __init__(self, items):
        # Wrap and sort immediately; the list stays sorted thereafter.
        self.myList = sorted(Node(entry) for entry in items)

    def get_list(self):
        # Unwrap the Nodes back into raw items, preserving sorted order.
        return [node.item for node in self.myList]
###########################################################
class Node:
    """Comparison wrapper used by orderedList (Python 2 __cmp__ protocol).

    Handles two payload kinds: numeric strings (compared as ints) and
    tuples whose first element is a name string, ordered by the portion
    before "gp" and then by the (numeric when possible) "gp" suffix.
    """
    def __init__(self,item):
        # item: the raw payload being ordered (str or tuple; see __cmp__).
        self.item = item
    def __cmp__(self,other):
        # NOTE(review): only str and tuple payloads are handled; any other
        # type falls off the end and implicitly returns None.
        if type(self.item) == str:
            # Numeric strings compare by integer value.
            this = int(self.item)
            other = int(other.item)
            if this>other:
                return 1
            if this<other:
                return -1
            else:
                return 0
        if type(self.item) == tuple:
            # Tuples compare by their first element only.
            this = self.item[0]
            other = other.item[0]
            if this.rfind("gp")!= -1 and other.rfind("gp")!= -1:
                # Both names contain "gp": compare the prefix before "gp"
                # first, then the suffix after it.
                thisTuple = this.split("gp")
                otherTuple = other.split("gp")
                thisFirst = thisTuple[0]
                otherFirst = otherTuple[0]
                # Suffixes (with any trailing ")" stripped) are coerced to
                # float when possible; the bare except keeps the raw string
                # as a fallback for non-numeric suffixes.
                try: thisLast = float(thisTuple[1].replace(")",""))
                except: thisLast = thisTuple[1].replace(")","")
                try: otherLast = float(otherTuple[1].replace(")",""))
                except: otherLast = otherTuple[1].replace(")","")
                if thisFirst<otherFirst:
                    return -1
                elif thisFirst>otherFirst:
                    return 1
                elif thisLast==otherLast:
                    return 0
                elif thisLast<otherLast:
                    return -1
                elif thisLast>otherLast:
                    return 1
            else:
                # No "gp" marker in one of the names: plain lexicographic
                # comparison of the full strings.
                if this>other:
                    return 1
                if this<other:
                    return -1
                else:
                    return 0
class webPham:
###########################################################
head = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"><html><head>
<style type="text/css">
/*
Theme Name: Pool
Theme URI: http://www.lamateporunyogur.net/pool
Description: A two columns blue theme for the best CMS, WordPress.
Author: Borja Fernandez
Author URI: http://www.lamateporunyogur.net
Version: 1.0.7
The CSS, XHTML and design is released under GPL:
http://www.opensource.org/licenses/gpl-license.php
Changelog:
v1.0
First Release
v1.0.1
Fixed search bug position
v1.0.2
Fixed search bug
Added links.php
Changed archives.php
v1.0.3
Remove cursor: pointer; from header
v1.0.4
Bug report from Nilson Cain fixed
Class image center fixed
Search form moved from header
Changelog are now in style.css. Changelog.txt removed.
Added logo with .psd file
Other changes in css
v1.0.5
Move comments in index
Other changes in css
v1.0.6
Changed sidebar
v1.0.7
Fixed rss feed and trackack uri if comments are closed (Thanks soteke)
*/
body {
background: url(http://hatfull12.bio.pitt.edu:80/static/css/pool/images/bg.gif);
color: #333;
font-family: "Trebuchet MS", "Bitstream Vera Serif", Utopia, "Times New Roman", times, serif;
margin: 0;
padding-top: 31px;
}
/* Structure Divs */
#content {
background: #fff;
border: 1px solid #9C9C9C;
margin: 0 auto;
padding: 5px;
/*width: 795px;*/
}
#header {
background: #8EBAFD url(http://hatfull12.bio.pitt.edu:80/static/css/pool/images/logo.gif) no-repeat;
height: 150px;
margin: 0;
padding: 0;
}
#headerimg {
margin: 0;
height: 200px;
width: 100%;
}
.narrowcolumn {
float: left;
padding: 0 0 20px 45px;
margin: 0px 0 0;
width: 555px;
}
.widecolumn {
padding: 10px 0 20px 0;
margin: 5px 0 0 150px;
width: 450px;
}
.post {
margin: 0 0 4px;
text-align: justify;
}
.widecolumn .post {
margin: 0;
}
.narrowcolumn .postmetadata {
padding-top: 5px;
}
.widecolumn .postmetadata {
margin: 30px 0;
}
.widecolumn .smallattachment {
text-align: center;
float: left;
width: 128px;
margin: 5px 5px 5px 0px;
}
.widecolumn .attachment {
text-align: center;
margin: 5px 0px;
}
.postmetadata {
clear: left;
}
#footer {
padding: 0 0 0 1px;
margin: 0 auto;
width: 760px;
clear: both;
}
#footer p {
margin: 0;
padding: 20px 0;
text-align: center;
}
/* End Structure */
/* Begin Headers */
h1 {
padding-top: 15px;
padding-bottom: 5px;
margin: 0;
}
.description {
text-align: center;
}
h2 {
margin: 30px 0 0;
}
h2.pagetitle {
margin-top: 30px;
text-align: center;
}
#sidebar h2 {
margin: 5px 0 0;
padding: 0;
}
h3 {
padding: 0;
margin: 30px 0 0;
}
h3.comments {
padding: 0;
margin: 40px auto 20px ;
}
/* End Headers */
/* Begin Images */
p img {
padding: 0;
max-width: 100%;
}
/* Using 'class="alignright"' on an image will (who would've
thought?!) align the image to the right. And using 'class="centered',
will of course center the image. This is much better than using
align="center", being much more futureproof (and valid) */
img.centered {
display: block;
margin-left: auto;
margin-right: auto;
}
img.alignright {
padding: 4px;
margin: 0 0 2px 7px;
display: inline;
}
img.alignleft {
padding: 4px;
margin: 0 7px 2px 0;
display: inline;
}
.alignright {
float: right;
}
.alignleft {
float: left
}
/* End Images */
#nav {
margin: 0 auto;
padding: 0;
top: 0px;
right: 0px;
/*padding: 3px;*/
font-size: 14pt;
}
.navitem, li.navitem {
border: 1px solid gray;
display:inline;
list-style-type: none;
color: white;
background: goldenrod;
}
a.navitem, a.selected_navitem {
border: 0px;
padding-left: 5px;
padding-right: 5px;
}
li.navitem:hover{
blackground:black;
}
.selected_navitem{
display:inline;
list-style-type: none;
/*padding-left: 5px;
padding-right: 5px;*/
color: #0090DA;
border: 1px solid gray;
background: blanchedalmond;
}
li.selected_navitem{
color: #0090DA;
/*border-top: 1px solid black;
border-left: 1px solid black;
border-right: 1px solid black; */
}
a.navitem:hover, .navitem:hover {
display:inline;
color: white;
/*border: 1px dashed black;*/
background: #0090DA;
}
#pages {
background: #B8D4FF;
font-size: 12px;
margin: 0;
padding: 15px 0 6px 20px;
}
#page {
background-color: #B8D4FF;
margin: 0 auto;
padding: 0;
width: 760px;
border: 1px solid #959596;
}
.post {
margin: 0 0 40px;
text-align: justify;
}
#searchform {
float: right;
margin: 0;
padding: 0;
position: relative;
right: 10px;
top: 0px;
/*top: -22px;*/
}
#noticias {
float: left;
margin: 0;
padding: 0 0 20px 20px;
width: 550px;
}
#sidebar {
float: right;
font-size: 11px;
line-height: 1.5em;
margin: 0;
padding: 0 10px;
width: 170px;
}
#credits {
background: #D5E5FE;
font-family: Small Fonts, VT100, Arial, Helvetica;
font-size: 9px;
margin: 0;
padding: 5px 20px;
text-align: center;
text-transform: uppercase;
}
/* Config Structure Divs */
/* Header */
#header h1 {
font-size: 26px;
letter-spacing: 0.1em;
margin: 0;
padding: 20px 0 20px 30px;
width: 300px;
}
#header a, #header a:hover {
background: transparent;
color: #fff;
text-decoration: none;
}
/* Pages */
#pages li {
display: inline;
list-style-type: none;
}
#pages ul, ol {
margin: 0;
padding: 0;
}
#pages a {
background: #fff;
color: #1E4C62;
font-weight: bold;
margin: 0 3px 0 0;
padding: 6px 10px;
}
#pages a:hover {
background: #8EBAFD;
color: #fff;
}
.current_page_item a, .current_page_item a:hover {
background: #8EBAFD !important;
color: #fff !important;
}
/* Search */
#searchform input {
border: 1px solid #66A8CC;
font-size: 12px;
padding: 2px;
width: 160px;
}
/* Noticias */
#noticias p, #noticias ul, #noticias ol {
font-size: 13px;
line-height: 1.6em;
}
#noticias ul {
list-style-type: circle;
margin: 0 0 0 30px;
padding: 0;
}
#noticias li {
margin: 0;
padding: 0;
}
#noticias h2, #noticias h2 a {
color: #0090DA;
font-size: 18px;
font-weight: normal;
margin: 50px 0 0 0;
padding: 0;
text-decoration: none;
}
#noticias h2 a:hover {
background: transparent;
color: #6EB9E0;
}
#noticias h3 {
color: #016CA3;
font-size: 15px;
font-weight: normal;
margin: 0;
padding: 20px 0 5px 0;
}
#noticias small {
font-family: Arial, Helvetica, Sans-Serif;
font-size: 11px;
}
.feedback {
color: #898A8A;
font-size: 12px;
margin: 0;
padding: 0 20px;
text-align: center;
}
/* Entrada */
.entrada {
margin: 0;
padding: 0;
}
/* Comments */
#commentlist {
list-style-type: none;
margin: 0;
padding: 0;
}
#commentlist li {
margin: 10px 0;
padding: 5px 10px;
}
#commentlist p {
margin: 0;
padding: 0;
}
#commentlist small {
font-size: 11px;
}
.class_comment1 { background: #E9E9EA; border: 1px solid #E0DEDE; }
.class_comment2 { background: #F4F3F3; border: 1px solid #E0DEDE; }
#comments, #postcomment {
color: #0090DA;
font-size: 14px !important;
font-weight: normal;
margin: 40px 0 10px 10px;
text-transform: uppercase;
}
#commentform {
background: #D3E4FF;
border: 1px solid #D8D8D8;
padding: 5px 20px;
}
#commentform input, #commentform textarea {
background: #F9FBFF;
border: 1px solid #B8D4FF;
font-size: 12px;
padding: 1px;
width: 100%;
}
#commentform input:focus, #commentform textarea:focus {
background: #EEF5FF;
}
#commentform #submit {
margin: 0;
width: 30%;
}
/* Sidebar */
#sidebar h3 {
background: url(http://hatfull12.bio.pitt.edu:80/static/css/pool/images/dot.gif) repeat-x bottom;
color: #174B65;
font-size: 11px;
font-weight: normal;
letter-spacing: 0.2em;
margin: 0;
padding: 0;
text-transform: uppercase;
}
#sidebar ul, #sidebar ol {
list-style: square;
margin: 0;
padding: 5px;
}
#sidebar li, #sidebar li:hover {
/*border: 1px dashed;*/
margin: 0;
padding: 0;
}
#sidebar a {
color: #0B76AE;
}
#sidebar a:hover {
background: url(http://hatfull12.bio.pitt.edu:80/static/css/pool/images/dot.gif) repeat-x bottom;
color: #0B76AE;
}
#sidebar div {
margin: 20px 0;
padding: 0;
}
/* Credits */
#credits a {
color: #3E708A;
}
#credits a:hover {
background: transparent;
color: #0090DA;
}
#credits p {
margin: 0;
padding: 0;
}
/* General */
a {
color: #0B76AE;
text-decoration: none;
}
a:hover {
background: #0090DA;
color: #fff;
}
acronym, abbr, span.caps {
cursor: help;
border-bottom: 1px dotted #000;
}
blockquote {
background: #E3F5FE url(http://hatfull12.bio.pitt.edu:80/static/css/pool/images/blockquote.png) no-repeat bottom left;
padding: 5px 20px 30px 20px;
margin: 1em;
} /* Idea from ShadedGrey of http://wpthemes.info/ */
cite {
text-decoration: none;
}
code {
font-family: 'Courier New', Courier, Fixed, sans-serif;
font-size: 1.1em;
}
img {
border: 0;
}
h4 {
color: #858585;
}
/* Float and Clear */
div.floatleft {
float: left;
}
div.floatright {
float: right;
}
div.both {
clear: both;
}
/* Images align */
img.border {
border: 1px solid #C6C6C6;
padding: 4px;
margin: 0;
}
img.border:hover {
background: #E3F5FE;
}
img.center {
display: block;
margin: auto;
}
img.alignright {
float: right;
padding: 4px;
margin: 0 0 2px 7px;
display: inline;
}
img.alignleft {
float: left;
padding: 4px;
margin: 0 7px 2px 0;
display: inline;
}
/* Text align */
.center {
text-align: center;
}
.alignright {
text-align: right;
}
.alignleft {
text-align: left;
}
/* from here to bottom was pasted from default style.css */
.navigation {
display: block;
text-align: center;
margin-top: 10px;
margin-bottom: 60px;
}
/* End Various Tags & Classes*/
#wpcombar {
position: absolute;
top: 0;
left: 0;
background: #14568a;
width: 100%;
height: 30px;
font-family: "Lucida Grande", "Lucida Sans Unicode", Tahoma, Verdana;
font-size: 12px;
}
#quicklinks ul {
list-style: none;
margin: 0;
padding: 0;
}
#quicklinks li {
float: left;
}
#quicklinks a {
display: block;
padding: .5em 1em;
color: #c3def1;
text-decoration: none;
font-weight: normal;
}
#quicklinks a:hover {
background: #6da6d1;
color: black;
}
#loginout {
position: absolute;
right: 1em;
top: 7px;
margin: 0;
padding: 0;
color: #c3def1;
}
#loginout strong {
color: #c3def1;
}
#loginout a, #loginout a:hover {
color: white;
}
#statusmessage {
position: absolute;
top: -1px;
left:200px;
right: 200px;
z-index: 5000;
}
#statusmessage div {
width: 400px;
margin: 0px auto;
height: 50px;
padding: 35px 10px 10px 55px;
background-repeat: no-repeat;
background-position: left;
font-size: 18px;
opacity: .75;
filter: alpha(opacity=75);
}
#statusmessage div.success {
background-color: #99CC99;
border: 1px solid #006633;
background-image: url("http://hatfull12.bio.pitt.edu:80/static/images/dialog-information.png");
}
#statusmessage div.error {
background-color: #C00;
border: 1px solid #600;
background-image: url("http://hatfull12.bio.pitt.edu:80/static/images/dialog-error.png");
}
.btn { background-color: transparent; border: 0; padding: 0;
color: #1E4C62;
font-weight: bold;
margin: 0 3px 0 0;
padding: 6px 10px;}
.sectionBorder {
border: 2px dashed #1E4C62;
margin: 5%;
padding: 5%;
text-align: left;
}
.sectionOuterBorder {
border: 1px solid grey;
text-align: center;
margin: 5%;
padding: 5%;
}
</style>
<title>The Phameration Station</title>
</head>"""
###########################################################
sidebar = ( """
<script type="text/javascript">
function getVar(name)
{
get_string = document.location.search;
return_value = '';
do { //This loop is made to catch all instances of any get variable.
name_index = get_string.indexOf(name + '=');
if(name_index != -1)
{
get_string = get_string.substr(name_index + name.length + 1, get_string.length - name_index);
end_of_value = get_string.indexOf('&');
if(end_of_value != -1)
value = get_string.substr(0, end_of_value);
else
value = get_string;
if(return_value == '' || value == '')
return_value += value;
else
return_value += ', ' + value;
}
} while(name_index != -1)
//Restores all the blank spaces.
space = return_value.indexOf('+');
while(space != -1)
{
return_value = return_value.substr(0, space) + ' ' +
return_value.substr(space + 1, return_value.length);
space = return_value.indexOf('+');
}
return(return_value);
}
</script>
<script type="text/javascript">
function update_size_n_adj(){
try{
var past_trans = getVar("transparency");
var past_size = getVar("size");
//alert(past_trans + ":" + past_size);
if (past_trans == "ON"){
document.getElementById("adjustment_on").checked = true
}
else{
document.getElementById("adjustment_off").checked = true
}
if (past_size == "BIG"){
document.getElementById("size_big").checked = true
}
else if (past_size == "MID"){
document.getElementById("size_mid").checked = true
}
else{
document.getElementById("size_sml").checked = true
}
}
catch(e){alert(e);}
}
</script>
<script type="text/javascript">
function get_size_n_adj(action,id,target){
var form=document.getElementById(id);
form.action = action;
if (target == 'none'){
form.target = "";
}
else{
form.target = target;
}
var adjustment=document.getElementsByName("adjustment");
var size = document.getElementsByName("size");
for (i=0;i<adjustment.length;i++){
if (adjustment[i].checked == true){
var retAdj = adjustment[i].value;
}
}
for (i=0;i<size.length;i++){
if (size[i].checked == true){
var retSize = size[i].value;
}
}
try{
document.getElementById("pham_input_trans").value=retAdj;
}
catch(e){}
try{
document.getElementById("pham_input_size").value=retSize;
}
catch(e){}
try{
document.getElementById("multiple_genome_input_trans").value=retAdj;
}
catch(e){}
try{
document.getElementById("multiple_genome_input_size").value=retSize;
}
catch(e){}
try{
document.getElementById("genome_input_trans").value=retAdj;
}
catch(e){}
try{
document.getElementById("genome_input_size").value=retSize;
}
catch(e){}
try{
document.getElementById("list_trans").value=retAdj;
}
catch(e){}
try{
document.getElementById("list_size").value=retSize;
}
catch(e){}
form.submit();
}
</script>""" +
'''<div id="sidebar">''' +
"""
<h3>Transparency</h3>
<br/>
<FORM id="select_adjustment" action = "">
On:
<input type="radio"
name="adjustment" id="adjustment_on" value="ON"/>
<br/>
Off:
<input type="radio" checked="checked"
name="adjustment" id="adjustment_off" value="OFF"/>
<br/>
</FORM>""" +
"<br/>" +
"""
<h3>PhamCircle Size</h3>
<br/>
<FORM id="select_size" action="">
Small:
<input type="radio" checked="checked"
name="size" id="size_sml" value="SML"/>
<br/>
Medium:
<input type="radio"
name="size" id="size_mid" value="MID"/>
<br/>
Large:
<input type="radio"
name="size" id="size_big" value="BIG"/>
<br/>
</FORM>""" +
"</div>")
###########################################################
body = """<body onLoad="update_size_n_adj();"><div id="page"><div id="header"><h1><a href="http://hatfull12.bio.pitt.edu:80/">PhageHunter Program</a></h1></div>"""
###########################################################
foot = """<div id="footer"><p><a>Phameration Station version 1.0</a></p></div></div></body></html>"""
###########################################################
linkbar = """<ul id="nav">
<li class="navitem"><a href="http://hatfull12.bio.pitt.edu" class="navitem">Blog</a></li>
<li class="navitem"><a href="http://hatfull12.bio.pitt.edu/wiki" class="navitem">Wiki</a></li>
<li class="selected_navitem"><a href="/" class="selected_navitem">Phamerator</a></li>
<li class="navitem"><a href="http://hatfull12.bio.pitt.edu/PhageHuntingWorkshop2007/calendar.html" class="navitem">Calendar</a></li>
</ul>"""
###########################################################
java = """
<script type="text/javascript">
function check_uncheck_all(){
var box = document.getElementsByName("box");
var boxList=document.getElementsByName("checked");
var bool = box[0].checked;
for (i=0;i<boxList.length;i++){
boxList[i].checked = bool;
}
}
function get_action(button){
var form=document.getElementById("choose_seq")
var boxList=document.getElementsByName("checked");
var numChecked = 0;
for (i=0;i<boxList.length;i++){
if (boxList[i].checked == true){
numChecked++;
}
}
if (button == "blast"){
if(numChecked > 1){
var r=confirm("Realise you are attempting to blast multiple sequences(this may take a while). Many pop-up windows may ensue (make sure your browser is set to accept pop-ups from this site). Do you want to continue?"); if(r == true){
form.action = "blast_page";
form.submit();
}
}
else{
form.action = "blast_page";
form.submit();
}
}
else{
form.action = "get_fasta";
form.submit();
}
}
</script>"""
###########################################################
def index(self):
return (self.head + self.body + self.linkbar + self.sidebar +
'''<div id="content" class="narrowcolumn"> '''+
'''<div class="post" id="post-1">''' +
"<br/>" + self.pham_input() +
"<br/>" + self.genome_input() +
"<br/>" + self.multiple_genome_input() +
"</div>" +
"</div>" +
self.foot)
index.exposed = True
###########################################################
def circle(self,*args,**kw):
pham = kw["pham"]
trans = kw["transparency"]
size = kw["circle_size"]
if trans == "OFF":
adjustment = 0.0
else:
adjustment = 1.0
if size == "SML":
radius = 150
elif size == "MID":
radius = 200
else:
radius = 300
phams = []
for item in server.get_unique_phams():
item = str(item)
phams.append(item)
if pham not in phams:
return "Pham " + pham + " Does Not Exist"
server.create_pham_circle(pham,False,adjustment,radius)
circle = server.get_phamCircle()
#cherrypy.response.headers['Content-Type'] = 'application/xhtml+xml'
cherrypy.response.headers['Content-Type'] = 'image/svg+xml'
return circle
circle.exposed = True
###########################################################
def genomeMap(self,*args,**kw):
try:
genome = kw["genome"]
except:
return "Please Select At Least One Genome..."
if type(genome) == str:
genome = [genome]
phageID = []
for gen in genome:
ID = server.get_PhageID_from_name(gen)
phageID.append({"PhageID":ID,"display_reversed":False})
server.create_genome_map(phageID)
genMap = server.get_genome_map()
cherrypy.response.headers['Content-Type'] = 'image/svg+xml'
return genMap
genomeMap.exposed = True
###########################################################
    def phamList(self,*args,**kw):
        """Render the list of phamilies for kw['genome'].

        Multi-member phams become submit buttons that open the pham circle
        in a new tab; single-member phams are listed as plain text.
        """
        genome = kw["genome"]
        phageID = server.get_PhageID_from_name(genome)
        # Phams sorted via orderedList's Node comparison.
        phamList = orderedList(server.get_phams_from_PhageID(phageID)).get_list()
        head = """<h1>Phamilies for """ + genome + """</h1><ul>"""
        foot = """</ul>"""
        middle = '''<form name="phamList" id="listPhams" action="circle" method="GET" target="_blank" >'''
        for item in phamList:
            if server.get_number_of_pham_members(item) >= 2:
                #<a href="''' + "/circle/?pham=" + item + '''">''' + item + """</a>
                middle = middle + '''<li><a>''' + '''<input type="submit" class="btn" onclick="get_size_n_adj('circle','listPhams','_blank')" name="pham" value="''' + item + '''">''' +"""</a></li>""" + "<br/>"
            else:
                middle = middle + '''<li>''' + item + """ (single member)</li>""" + "<br/>"
        # Hidden fields are filled in by get_size_n_adj() before submit.
        middle = middle + """<input type="hidden" id="list_trans" name="transparency" value=""/><input type="hidden" id="list_size" name="circle_size" value=""/></form>"""
        return (self.head + self.body + self.linkbar + self.sidebar + '''<div id="content" class="narrowcolumn"> '''+
            '''<div class="post" id="post-1">''' + head + middle + foot + """</div></div>""" + self.foot)
    phamList.exposed = True
###########################################################
    def geneList(self,*args,**kw):
        """Render the gene list for kw['genome'], each linking (via submit
        button) to its pham's circle when the pham has 2+ members."""
        # Trailing number of a gene name, e.g. the "12" or "12.1" in "...gp12.1".
        exp = re.compile('\d+[.]*\d*$')
        genome = kw["genome"]
        phageID = server.get_PhageID_from_name(genome)
        geneList = server.get_genes_from_PhageID(phageID)
        head = """<html><body><h1>Genes for """ + genome + """</h1><ul>"""
        foot = """</ul></body></html>"""
        middle = '''<form name="geneList" id="listGenes" action="circle" method="GET" target="_blank" >'''
        tempList = []
        for item in geneList:
            # Pair a display label ("gp<number>") with the gene id, then sort
            # via orderedList's tuple comparison.
            tempList.append(("gp" + str((exp.search(server.get_gene_name_from_GeneID(item))).group().strip()),item))
        geneList = orderedList(tempList).get_list()
        for item in geneList:
            if server.get_number_of_pham_members(server.get_pham_from_GeneID(item[1])) >=2:
                middle = middle + '''<li>''' + item[0] + " Pham" + '''<a><input type="submit" class="btn" onclick="get_size_n_adj('circle','listGenes','_blank')" name="pham" value="''' + str(server.get_pham_from_GeneID(item[1])) + '''"></a>''' + """</li>""" + "<br>"
            else:
                middle = middle + '''<li>''' + item[0] + """ (single member pham)</li>""" + "<br/>"
        # Hidden fields are filled in by get_size_n_adj() before submit.
        middle = middle + """<input type="hidden" id="list_trans" name="transparency" value=""/><input type="hidden" id="list_size" name="circle_size" value=""/></form>"""
        return (self.head + self.body + self.linkbar + self.sidebar + '''<div id="content" class="narrowcolumn"> '''+
            '''<div class="post" id="post-1">''' + head + middle + foot + """</div></div>""" + self.foot)
    geneList.exposed = True
###########################################################
def choose_seq_by_pham(self, *args, **kw):
    """Render the gene list of one pham with checkboxes for FASTA/BLAST export.

    kw["pham"] is the pham number as a string; unknown phams return an
    error message instead of a page.
    """
    pham = kw["pham"]
    # Validate against the known pham numbers; the server returns non-string
    # ids, so normalize to str before the membership test.
    # (The original had an unreachable `pass` after this `return`.)
    phams = [str(item) for item in server.get_unique_phams()]
    if pham not in phams:
        return "Pham " + pham + " Does Not Exist"
    head = """<html>""" + self.java + """<body><h1>Genes for Phamily """ + pham + """</h1><form id="choose_seq" action="" method="GET" target="_blank"><ul>"""
    foot = """</ul><input type="button" value="Get Fasta" onclick="get_action('fasta')" /><input type="checkBox" name="box" onclick="check_uncheck_all()"/><a>check/uncheck all</a><br/><input type="button" value="Blast" onclick="get_action('blast')" /></form></body></html>"""
    middle = ""
    members = server.get_members_of_pham(pham)
    for item in members:
        middle = middle + '''<li><input type="checkBox" name="checked" value="''' + item + '''"/><a>''' + server.get_phage_name_from_PhageID(server.get_PhageID_from_GeneID(item)) + " : " + server.get_gene_name_from_GeneID(item).replace(server.get_phage_name_from_GeneID(item), "") + '''</a></li>''' + "<br/>"
    return (self.head + self.body + self.linkbar + self.sidebar + '''<div id="content" class="narrowcolumn"> ''' +
            '''<div class="post" id="post-1">''' + head + middle + foot + """</div></div>""" + self.foot)
choose_seq_by_pham.exposed = True
###########################################################
def choose_seq_by_genome(self, *args, **kw):
    """Render a genome's gene list with checkboxes for FASTA/BLAST export.

    kw["genome"] is the phage name; each gene links to /get_fasta for a
    single-gene download.
    """
    genome = kw["genome"]
    head = """<html>""" + self.java + """<body><h1>Genes for """ + genome + """</h1><form id="choose_seq" action="" method="GET" target="_blank"><ul>"""
    foot = """</ul><input type="button" value="Get Fasta" onclick="get_action('fasta')" /><input type="checkBox" name="box" onclick="check_uncheck_all()"/><a>check/uncheck all</a><br/><input type="button" value="Blast" onclick="get_action('blast')" /></form></body></html>"""
    middle = ""
    phageID = server.get_PhageID_from_name(genome)
    geneList = server.get_genes_from_PhageID(phageID)
    # Raw string: '\d' is an invalid escape sequence in a plain literal.
    exp = re.compile(r'\d+[.]*\d*$')
    # Sort the genes by their trailing gpNN number.
    tempList = []
    for item in geneList:
        tempList.append(("gp" + str((exp.search(server.get_gene_name_from_GeneID(item)).group().strip())), item))
    geneList = orderedList(tempList).get_list()
    for item in geneList:
        middle = middle + '''<li><input type="checkBox" name="checked" value="''' + item[1] + '''"/><a href="''' + "/get_fasta/?checked=" + item[1] + '''">''' + item[0] + """</a></li>""" + "<br/>"
    return (self.head + self.body + self.linkbar + self.sidebar + '''<div id="content" class="narrowcolumn"> ''' +
            '''<div class="post" id="post-1">''' + head + middle + foot + """</div></div>""" + self.foot)
choose_seq_by_genome.exposed = True
###########################################################
def blast_page(self, *args, **kw):
    """Serve a BLAST page for one gene, or spawn one window per selected gene.

    kw["checked"] is either a single GeneID (str) or a list of GeneIDs.
    Returns an error message when nothing was selected.
    """
    # Only the missing-selection case should produce the friendly message;
    # the original bare `except:` also hid genuine server errors.
    try:
        checked = kw["checked"]
    except KeyError:
        return "Please Select at Least One Gene..."
    if isinstance(checked, str):
        # Single gene: rewrite the page title to "<Phage>_<gene>".
        title = " " + server.get_phage_name_from_GeneID(checked) + "_" + server.get_gene_name_from_GeneID(checked).replace(server.get_phage_name_from_GeneID(checked), "") + "</title>"
        html = blast_html().get_blast_page(server.get_translation_from_GeneID(checked))
        html = html.replace('''</title>''', title)
        return html
    # Multiple genes: emit a page that opens one /blast_page window per
    # gene, then closes itself.
    returnString = """<html><body onLoad="open_multiple();">
<script type="text/javascript">
function open_multiple() {"""
    for item in checked:
        returnString = returnString + '''window.open("/blast_page?checked=''' + item + '''");'''
    returnString = returnString + """window.close();}</script></body></html>"""
    return returnString
blast_page.exposed = True
###########################################################
def get_fasta(self, *args, **kw):
    """Return FASTA-formatted protein translations for the selected genes.

    kw["checked"] is a single GeneID (str) or a list of them.  Headers are
    '>Phage|gene' with the genus prefixes stripped from the phage name.
    """
    # Narrowed from a bare `except:`: only a missing selection should
    # produce the friendly message.
    try:
        checked = kw["checked"]
    except KeyError:
        return "Please Select at Least One Gene..."
    if isinstance(checked, str):
        checked = [checked]
    # Collect pieces and join once instead of quadratic string +=.
    parts = []
    for item in checked:
        phage = server.get_phage_name_from_GeneID(item)
        header = phage.replace('Mycobacterium phage', '').replace('Mycobacteriophage', '')
        gene = server.get_gene_name_from_GeneID(item).replace(phage, "")
        parts.append('>' + header + '|' + gene + '\n' + '<br/>' + server.get_translation_from_GeneID(item) + '\n' + '<br/>')
    return "".join(parts)
get_fasta.exposed = True
###########################################################
def pham_input(self):
    """Return the 'Phamily Input' sidebar form: a pham-number text box
    plus draw-circle and get/blast-sequence actions."""
    form_html = """<div class="sectionOuterBorder"><h2>Phamily Input</h2><div class="sectionBorder"><form name="pham_input" form id="phamInput" action="circle" method="GET" target="_blank">Pham:
<input type="text" name="pham" /><br/>
<input type="submit" value="Draw Pham Circle" onclick="get_size_n_adj('circle','phamInput','_blank')"/>
<input type="button" value="Get/Blast Sequence" onclick="get_size_n_adj('choose_seq_by_pham','phamInput','none')"/>
<input type="hidden" id="pham_input_trans" name="transparency" value=""/>
<input type="hidden" id="pham_input_size" name="circle_size" value=""/>
</form></div></div>"""
    return form_html
###########################################################
def multiple_genome_input(self):
    """Render the multi-select genome form feeding the genomeMap page.

    Made consistent with genome_input(): phage names are sorted when the
    server returns a sortable list, and a placeholder <option> is shown
    otherwise (the previous version left this handling commented out).
    """
    head = """<div class="sectionOuterBorder"><h2>Multiple Genome Input</h2><div class="sectionBorder"><form action="genomeMap" method="GET" target="_blank"><select multiple size="20" name="genome">"""
    foot = """</select><input type="submit" value="Generate Map"/></form></div></div>"""
    phages = server.get_phages(name=True)
    middle = ""
    try:
        phages.sort()
        for name in phages:
            middle = middle + "<option>" + name + "</option>"
    except Exception:
        errMsg = "no phages were found in the database"
        middle = "<option>%s</option>" % errMsg
    return head + middle + foot
###########################################################
def genome_input(self):
    """Render the single-genome form: a <select> of all phage names plus
    buttons submitting to phamList / geneList / choose_seq_by_genome."""
    head = """<div class="sectionOuterBorder"><h2>Genome Input</h2><div class="sectionBorder"><form name="genome_input" id="genomeInput" action="" method="GET" ><select name="genome">"""
    foot = """</select><br/>
<input type="button" value="List Phams" onclick="get_size_n_adj('phamList','genomeInput','none')"/>
<input type="button" value="List Genes" onclick="get_size_n_adj('geneList','genomeInput','none')" />
<input type="button" value="Get/Blast Sequence" onclick="get_size_n_adj('choose_seq_by_genome','genomeInput','none')"/>
<input type="hidden" id="genome_input_trans" name="transparency" value=""/>
<input type="hidden" id="genome_input_size" name="circle_size" value=""/>
</form></div></div>"""
    phages = server.get_phages(name=True)
    middle = ""
    try:
        phages.sort()
        for name in phages:
            middle = middle + "<option>" + name + "</option>"
    except Exception:
        # Narrowed from a bare `except:` (which would also swallow
        # SystemExit/KeyboardInterrupt): server returned no usable list.
        errMsg = "no phages were found in the database"
        middle = "<option>%s</option>" % errMsg
    return head + middle + foot
###########################################################
# Mount the application root; legacy CherryPy 2.x API (cherrypy.root /
# cherrypy.server.start).
cherrypy.root = webPham()
if __name__ == '__main__':
    # Load server settings from the adjacent config file, then serve.
    cherrypy.config.update(file = 'web-pham.conf')
    cherrypy.server.start()
|
byuphamerator/phamerator-dev
|
phamerator/web-pham.py
|
Python
|
gpl-2.0
| 36,182
|
[
"BLAST"
] |
989f325b909f784b04ddf9b05dd2f73431fd0d74a8286382117df1ec1b954f24
|
#!/usr/bin/python
"""generate_functions.py
Yo dawg, I heard you like code generation so I wrote a code
generator to write your code generators!
"""
import sys
import os
# Table of Octave functions to generate, one per line, pipe-delimited:
#   name|spname|test_in_val[,test_out_val]|extra_code
# Empty fields fall back to defaults: spname defaults to name, the test
# input defaults to '1', and when no explicit expected output is given the
# reference value is computed by calling Octave's own function.  Parsed by
# process_input_list() below.
input_list = """sqrt
exp
log
abs|Abs|-1
floor
ceil|ceiling|3/2
sin
sinh
asin
asinh
cos
cosh
acos
acosh
tan
tanh
atan
atanh||1/2
csc
sec
cot
coth
acot
acoth||2
sign
factorial
gamma
erf
erfc
erfinv||1/2
erfcinv|||% Note: the erfcinv unit test fails on Octave < 3.8
erfi||0,0|
heaviside|Heaviside|1,1
dirac|DiracDelta|1,0
nextprime||123,127
"""
# todo:
#psi(x)|polygamma(0,x)
#psi(k,x)|polygamma(k,x)
# sec, csc don't have hyperbolic or arc
#sech asec asech
#csch acsc acsch
# GPL header prepended verbatim to every generated .m file; '%%' is the
# Octave comment marker, so this block is a comment in the output files.
copyright_block = \
"""%% Copyright (C) 2015 Colin B. Macdonald
%%
%% This file is part of OctSymPy.
%%
%% OctSymPy is free software; you can redistribute it and/or modify
%% it under the terms of the GNU General Public License as published
%% by the Free Software Foundation; either version 3 of the License,
%% or (at your option) any later version.
%%
%% This software is distributed in the hope that it will be useful,
%% but WITHOUT ANY WARRANTY; without even the implied warranty
%% of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
%% the GNU General Public License for more details.
%%
%% You should have received a copy of the GNU General Public
%% License along with this software; see the file COPYING.
%% If not, see <http://www.gnu.org/licenses/>.
"""
def process_input_list(L):
    """Parse the pipe-delimited function table into a list of dicts.

    Each line of *L* has the form ``name[|spname[|testvals[|extra_code]]]``:
    the SymPy name defaults to the Octave name, the test input defaults to
    '1', and unless an explicit "in,out" pair is given the expected test
    output is computed by Octave itself (out_val_from_oct=True).
    """
    entries = []
    for line in L.splitlines():
        fields = line.split('|')
        name = fields[0]
        entry = {'name': name}
        # SymPy function name (field 2) defaults to the Octave name.
        if len(fields) >= 2 and fields[1] != '':
            entry['spname'] = fields[1]
        else:
            entry['spname'] = name
        # Test values (field 3): "in,out" pins an expected output, a lone
        # "in" (or nothing, default '1') defers to Octave's own result.
        if len(fields) >= 3 and fields[2] != '':
            testvals = fields[2].split(',')
            if len(testvals) == 2:
                entry['test_in_val'], entry['test_out_val'] = testvals
                entry['out_val_from_oct'] = False
            else:
                (entry['test_in_val'],) = testvals
                entry['out_val_from_oct'] = True
                entry['octname'] = name
        else:
            entry['test_in_val'] = '1'
            entry['out_val_from_oct'] = True
            entry['octname'] = name
        # Optional literal code (field 4) inserted into the generated file.
        entry['extra_code'] = fields[3] if len(fields) >= 4 else ''
        entries.append(entry)
    return entries
def remove_all(L):
    """Delete the generated ../inst/@sym/<name>.m file for each entry of *L*.

    Best-effort: files that were never generated (or already removed) are
    silently skipped.  Narrowed from a bare `except:` whose body was a
    bare `True` no-op statement.
    """
    for d in L:
        fname = '../inst/@sym/%s.m' % d['name']
        try:
            os.unlink(fname)
        except OSError:
            # Missing file or missing directory -- nothing to clean up.
            pass
def autogen_functions(L, where):
    """Write one autogenerated @sym/<name>.m wrapper per entry of *L*.

    L:     list of dicts as produced by process_input_list().
    where: destination tree; files land in '<where>/@sym/'.

    Uses a `with` block so the file is closed even if a write fails, and
    the parenthesized print form (valid in both Python 2 and 3).
    """
    for d in L:
        f = d['name']
        fname = '%s/@sym/%s.m' % (where, f)
        print(fname)
        with open(fname, "w") as fd:
            fd.write(copyright_block)
            fd.write('\n%% -*- texinfo -*-\n')
            fd.write("%%%% @deftypefn {Function File} {@var{y} =} %s (@var{x})\n" % f)
            fd.write("%%%% Symbolic %s function.\n" % f)
            fd.write( \
"""%%
%% Note: this file is autogenerated: if you want to edit it, you might
%% want to make changes to 'generate_functions.py' instead.
%%
%% @end deftypefn
%% Author: Colin B. Macdonald
%% Keywords: symbolic
""")
            fd.write("function y = %s(x)\n" % f)
            if len(d['extra_code']) > 0:
                fd.write("\n %s\n\n" % d['extra_code'])
            fd.write(" y = uniop_helper (x, '%s');\n" % d['spname'])
            fd.write("end\n")
            # Embedded Octave %! test blocks: scalar case first, then
            # elementwise application on a 2x2 matrix.
            fd.write("\n\n%!shared x, d\n")
            fd.write("%%! d = %s;\n" % d['test_in_val'])
            fd.write("%%! x = sym('%s');\n\n" % d['test_in_val'])
            fd.write("%!test\n")
            fd.write("%%! f1 = %s(x);\n" % f)
            if d['out_val_from_oct']:
                fd.write("%%! f2 = %s(d);\n" % f)
            else:
                fd.write("%%! f2 = %s;\n" % d['test_out_val'])
            fd.write("%! assert( abs(double(f1) - f2) < 1e-15 )\n\n")
            fd.write("%!test\n")
            fd.write("%! D = [d d; d d];\n")
            fd.write("%! A = [x x; x x];\n")
            fd.write("%%! f1 = %s(A);\n" % f)
            if d['out_val_from_oct']:
                fd.write("%%! f2 = %s(D);\n" % f)
            else:
                fd.write("%%! f2 = %s;\n" % d['test_out_val'])
            fd.write("%! f2 = [f2 f2; f2 f2];\n")
            fd.write("%! assert( all(all( abs(double(f1) - f2) < 1e-15 )))\n")
def print_usage():
    """Print the command-line help for this script.

    Uses the parenthesized print form, which behaves identically under
    Python 2 (single argument) and Python 3.
    """
    print("""
Run this script with one argument:
python generate_functions install: make m files in ../inst/@sym
python generate_functions clean: remove them from above
""")
if __name__ == "__main__":
    # Parse the function table, then dispatch on the single CLI argument:
    #   install -> generate the .m files under ../inst/@sym
    #   clean   -> delete them again
    L = process_input_list(input_list)
    # Debug: show how the script was invoked.
    print sys.argv
    if len(sys.argv) <= 1:
        print_usage()
    elif sys.argv[1] == 'install':
        print "***** Generating code for .m files from template ****"
        autogen_functions(L, '../inst')
    elif sys.argv[1] == 'clean':
        print "cleaning up"
        remove_all(L)
    else:
        print_usage()
|
maprieto/octsympy
|
src/generate_functions.py
|
Python
|
gpl-3.0
| 5,011
|
[
"DIRAC"
] |
3e5ce8db649e86613c20838f3a6bdf80384683c79a10ece4ec3de356ae462685
|
#!/usr/bin/python2.6
import numpy
import sys
from osgeo import gdal
from osgeo.gdalconst import *
"""
.. module:: read_file.py
:synopsis: Module used to read input file, and convert data to numpy array
.. moduleauthor:: Daniel Wild <daniel.wild@ga.gov.au>
"""
def readASC(inputFile):
    """
    Reads .asc DEM into a GDALDataSet

    :param inputFile: The path to an input file (DEM)
    :returns: A numpy array
    """
    # Open read-only; convertToNumpyArray handles a failed (None) open.
    dataset = gdal.Open(inputFile, GA_ReadOnly)
    return convertToNumpyArray(dataset)
def readNetCDF(inputFile, ncVar):
    """
    Reads an NETCDF file into a GDALDataSet

    :param inputFile: The path to an input file (NETCDF)
    :param ncVar: The name of var to read from NETCDF file
    :returns: A numpy array
    """
    # Have GDAL raise Python exceptions instead of returning error codes,
    # so failures surface with a meaningful message.
    gdal.UseExceptions()
    # GDAL's NetCDF subdataset syntax: NETCDF:"<file>":<variable>
    dataset = gdal.Open('NETCDF:"'+ inputFile+'":'+ncVar)
    return convertToNumpyArray(dataset)
def convertToNumpyArray(gdalDataSet):
    """
    Reads GDALDataSet into a numpy array

    :param gdalDataSet: a GDAL DataSet to be converted to numpy.array
    :returns: A numpy array (rows x cols, float64)
    """
    if gdalDataSet is None:
        # gdal.Open returns None on failure rather than raising.
        print('Could not open %s' % gdalDataSet)
        sys.exit(1)

    # Raster dimensions.
    rows = gdalDataSet.RasterYSize
    cols = gdalDataSet.RasterXSize

    # numpy.float was a deprecated alias for the builtin float and was
    # removed in NumPy 1.24; builtin float (-> float64 dtype) is the
    # drop-in replacement.
    data = numpy.zeros((rows, cols), dtype=float)

    # Read block by block (using the band's native block size) so very
    # large rasters are not pulled in with one monolithic ReadAsArray.
    inBand = gdalDataSet.GetRasterBand(1)
    blockSizes = inBand.GetBlockSize()
    xBlockSize = blockSizes[0]
    yBlockSize = blockSizes[1]

    for i in range(0, rows, yBlockSize):
        # Clamp the final (possibly partial) row block.
        if i + yBlockSize < rows:
            numRows = yBlockSize
        else:
            numRows = rows - i
        for j in range(0, cols, xBlockSize):
            # Clamp the final (possibly partial) column block.
            if j + xBlockSize < cols:
                numCols = xBlockSize
            else:
                numCols = cols - j
            dd = inBand.ReadAsArray(j, i, numCols, numRows).astype(float)
            data[i:i + numRows, j:j + numCols] = dd
    return data
|
dynaryu/Wind_multipliers
|
tests_characterisation/read_file.py
|
Python
|
gpl-3.0
| 2,378
|
[
"NetCDF"
] |
905ae1fb1f5c5eef3d56fc66b677b74eb7b3ec2d0df58f521701dbb613f058e4
|
"""User-friendly public interface to polynomial functions. """
from sympy.core import (
S, Basic, Expr, I, Integer, Add, Mul, Dummy, Tuple, Rational
)
from sympy.core.mul import _keep_coeff
from sympy.core.sympify import (
sympify, SympifyError,
)
from sympy.core.decorators import (
_sympifyit,
)
from sympy.polys.polyclasses import DMP
from sympy.polys.polyutils import (
basic_from_dict,
_sort_gens,
_unify_gens,
_dict_reorder,
_dict_from_expr,
_parallel_dict_from_expr,
)
from sympy.polys.rationaltools import (
together,
)
from sympy.polys.rootisolation import (
dup_isolate_real_roots_list,
)
from sympy.polys.distributedpolys import (
sdp_from_dict, sdp_div,
)
from sympy.polys.groebnertools import (
sdp_groebner, matrix_fglm,
)
from sympy.polys.monomialtools import (
Monomial, monomial_key,
)
from sympy.polys.polyerrors import (
OperationNotSupported, DomainError,
CoercionFailed, UnificationFailed,
GeneratorsNeeded, PolynomialError,
MultivariatePolynomialError,
ExactQuotientFailed,
PolificationFailed,
ComputationFailed,
GeneratorsError,
)
from sympy.utilities import group
import sympy.polys
import sympy.mpmath
from sympy.polys.domains import FF, QQ
from sympy.polys.constructor import construct_domain
from sympy.polys import polyoptions as options
from sympy.core.compatibility import iterable
class Poly(Expr):
"""Generic class for representing polynomial expressions. """
__slots__ = ['rep', 'gens']
is_commutative = True
is_Poly = True
def __new__(cls, rep, *gens, **args):
"""Create a new polynomial instance out of something useful. """
opt = options.build_options(gens, args)
if 'order' in opt:
raise NotImplementedError("'order' keyword is not implemented yet")
if iterable(rep, exclude=str):
if isinstance(rep, dict):
return cls._from_dict(rep, opt)
else:
return cls._from_list(list(rep), opt)
else:
rep = sympify(rep)
if rep.is_Poly:
return cls._from_poly(rep, opt)
else:
return cls._from_expr(rep, opt)
@classmethod
def new(cls, rep, *gens):
"""Construct :class:`Poly` instance from raw representation. """
if not isinstance(rep, DMP):
raise PolynomialError("invalid polynomial representation: %s" % rep)
elif rep.lev != len(gens)-1:
raise PolynomialError("invalid arguments: %s, %s" % (rep, gens))
obj = Basic.__new__(cls)
obj.rep = rep
obj.gens = gens
return obj
@classmethod
def from_dict(cls, rep, *gens, **args):
"""Construct a polynomial from a ``dict``. """
opt = options.build_options(gens, args)
return cls._from_dict(rep, opt)
@classmethod
def from_list(cls, rep, *gens, **args):
"""Construct a polynomial from a ``list``. """
opt = options.build_options(gens, args)
return cls._from_list(rep, opt)
@classmethod
def from_poly(cls, rep, *gens, **args):
"""Construct a polynomial from a polynomial. """
opt = options.build_options(gens, args)
return cls._from_poly(rep, opt)
@classmethod
def from_expr(cls, rep, *gens, **args):
"""Construct a polynomial from an expression. """
opt = options.build_options(gens, args)
return cls._from_expr(rep, opt)
@classmethod
def _from_dict(cls, rep, opt):
"""Construct a polynomial from a ``dict``. """
gens = opt.gens
if not gens:
raise GeneratorsNeeded("can't initialize from 'dict' without generators")
level = len(gens)-1
domain = opt.domain
if domain is None:
domain, rep = construct_domain(rep, opt=opt)
else:
for monom, coeff in rep.iteritems():
rep[monom] = domain.convert(coeff)
return cls.new(DMP.from_dict(rep, level, domain), *gens)
@classmethod
def _from_list(cls, rep, opt):
"""Construct a polynomial from a ``list``. """
gens = opt.gens
if not gens:
raise GeneratorsNeeded("can't initialize from 'list' without generators")
elif len(gens) != 1:
raise MultivariatePolynomialError("'list' representation not supported")
level = len(gens)-1
domain = opt.domain
if domain is None:
domain, rep = construct_domain(rep, opt=opt)
else:
rep = map(domain.convert, rep)
return cls.new(DMP.from_list(rep, level, domain), *gens)
@classmethod
def _from_poly(cls, rep, opt):
"""Construct a polynomial from a polynomial. """
if cls != rep.__class__:
rep = cls.new(rep.rep, *rep.gens)
gens = opt.gens
field = opt.field
domain = opt.domain
if gens and rep.gens != gens:
if set(rep.gens) != set(gens):
return cls._from_expr(rep.as_expr(), opt)
else:
rep = rep.reorder(*gens)
if 'domain' in opt and domain:
rep = rep.set_domain(domain)
elif field is True:
rep = rep.to_field()
return rep
@classmethod
def _from_expr(cls, rep, opt):
"""Construct a polynomial from an expression. """
rep, opt = _dict_from_expr(rep, opt)
return cls._from_dict(rep, opt)
def _hashable_content(self):
"""Allow SymPy to hash Poly instances. """
return (self.rep, self.gens)
def __hash__(self):
return super(Poly, self).__hash__()
@property
def free_symbols(self):
"""
Free symbols of a polynomial expression.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 1).free_symbols
set([x])
>>> Poly(x**2 + y).free_symbols
set([x, y])
>>> Poly(x**2 + y, x).free_symbols
set([x, y])
"""
symbols = set([])
for gen in self.gens:
symbols |= gen.free_symbols
return symbols | self.free_symbols_in_domain
@property
def free_symbols_in_domain(self):
"""
Free symbols of the domain of ``self``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 1).free_symbols_in_domain
set()
>>> Poly(x**2 + y).free_symbols_in_domain
set()
>>> Poly(x**2 + y, x).free_symbols_in_domain
set([y])
"""
domain, symbols = self.rep.dom, set()
if domain.is_Composite:
for gen in domain.gens:
symbols |= gen.free_symbols
elif domain.is_EX:
for coeff in self.coeffs():
symbols |= coeff.free_symbols
return symbols
@property
def args(self):
"""
Don't mess up with the core.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).args
(x**2 + 1,)
"""
return (self.as_expr(),)
@property
def gen(self):
"""
Return the principal generator.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x).gen
x
"""
return self.gens[0]
@property
def domain(self):
"""Get the ground domain of ``self``. """
return self.get_domain()
@property
def zero(self):
"""Return zero polynomial with ``self``'s properties. """
return self.new(self.rep.zero(self.rep.lev, self.rep.dom), *self.gens)
@property
def one(self):
"""Return one polynomial with ``self``'s properties. """
return self.new(self.rep.one(self.rep.lev, self.rep.dom), *self.gens)
@property
def unit(self):
"""Return unit polynomial with ``self``'s properties. """
return self.new(self.rep.unit(self.rep.lev, self.rep.dom), *self.gens)
def unify(f, g):
"""
Make ``f`` and ``g`` belong to the same domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f, g = Poly(x/2 + 1), Poly(2*x + 1)
>>> f
Poly(1/2*x + 1, x, domain='QQ')
>>> g
Poly(2*x + 1, x, domain='ZZ')
>>> F, G = f.unify(g)
>>> F
Poly(1/2*x + 1, x, domain='QQ')
>>> G
Poly(2*x + 1, x, domain='QQ')
"""
_, per, F, G = f._unify(g)
return per(F), per(G)
def _unify(f, g):
g = sympify(g)
if not g.is_Poly:
try:
return f.rep.dom, f.per, f.rep, f.rep.per(f.rep.dom.from_sympy(g))
except CoercionFailed:
raise UnificationFailed("can't unify %s with %s" % (f, g))
if isinstance(f.rep, DMP) and isinstance(g.rep, DMP):
gens = _unify_gens(f.gens, g.gens)
dom, lev = f.rep.dom.unify(g.rep.dom, gens), len(gens)-1
if f.gens != gens:
f_monoms, f_coeffs = _dict_reorder(f.rep.to_dict(), f.gens, gens)
if f.rep.dom != dom:
f_coeffs = [ dom.convert(c, f.rep.dom) for c in f_coeffs ]
F = DMP(dict(zip(f_monoms, f_coeffs)), dom, lev)
else:
F = f.rep.convert(dom)
if g.gens != gens:
g_monoms, g_coeffs = _dict_reorder(g.rep.to_dict(), g.gens, gens)
if g.rep.dom != dom:
g_coeffs = [ dom.convert(c, g.rep.dom) for c in g_coeffs ]
G = DMP(dict(zip(g_monoms, g_coeffs)), dom, lev)
else:
G = g.rep.convert(dom)
else:
raise UnificationFailed("can't unify %s with %s" % (f, g))
cls = f.__class__
def per(rep, dom=dom, gens=gens, remove=None):
if remove is not None:
gens = gens[:remove]+gens[remove+1:]
if not gens:
return dom.to_sympy(rep)
return cls.new(rep, *gens)
return dom, per, F, G
def per(f, rep, gens=None, remove=None):
"""
Create a Poly out of the given representation.
Examples
========
>>> from sympy import Poly, ZZ
>>> from sympy.abc import x, y
>>> from sympy.polys.polyclasses import DMP
>>> a = Poly(x**2 + 1)
>>> a.per(DMP([ZZ(1), ZZ(1)], ZZ), gens=[y])
Poly(y + 1, y, domain='ZZ')
"""
if gens is None:
gens = f.gens
if remove is not None:
gens = gens[:remove]+gens[remove+1:]
if not gens:
return f.rep.dom.to_sympy(rep)
return f.__class__.new(rep, *gens)
def set_domain(f, domain):
"""Set the ground domain of ``f``. """
opt = options.build_options(f.gens, {'domain': domain})
return f.per(f.rep.convert(opt.domain))
def get_domain(f):
"""Get the ground domain of ``f``. """
return f.rep.dom
def set_modulus(f, modulus):
"""
Set the modulus of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(5*x**2 + 2*x - 1, x).set_modulus(2)
Poly(x**2 + 1, x, modulus=2)
"""
modulus = options.Modulus.preprocess(modulus)
return f.set_domain(FF(modulus))
def get_modulus(f):
"""
Get the modulus of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 1, modulus=2).get_modulus()
2
"""
domain = f.get_domain()
if not domain.has_CharacteristicZero:
return Integer(domain.characteristic())
else:
raise PolynomialError("not a polynomial over a Galois field")
def _eval_subs(f, old, new):
"""Internal implementation of :func:`subs`. """
if old in f.gens:
if new.is_number:
return f.eval(old, new)
else:
try:
return f.replace(old, new)
except PolynomialError:
pass
return f.as_expr().subs(old, new)
def exclude(f):
"""
Remove unnecessary generators from ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import a, b, c, d, x
>>> Poly(a + x, a, b, c, d, x).exclude()
Poly(a + x, a, x, domain='ZZ')
"""
J, new = f.rep.exclude()
gens = []
for j in range(len(f.gens)):
if j not in J:
gens.append(f.gens[j])
return f.per(new, gens=gens)
def replace(f, x, y=None):
"""
Replace ``x`` with ``y`` in generators list.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 1, x).replace(x, y)
Poly(y**2 + 1, y, domain='ZZ')
"""
if y is None:
if f.is_univariate:
x, y = f.gen, x
else:
raise PolynomialError("syntax supported only in univariate case")
if x == y:
return f
if x in f.gens and y not in f.gens:
dom = f.get_domain()
if not dom.is_Composite or y not in dom.gens:
gens = list(f.gens)
gens[gens.index(x)] = y
return f.per(f.rep, gens=gens)
raise PolynomialError("can't replace %s with %s in %s" % (x, y, f))
def reorder(f, *gens, **args):
"""
Efficiently apply new order of generators.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + x*y**2, x, y).reorder(y, x)
Poly(y**2*x + x**2, y, x, domain='ZZ')
"""
opt = options.Options((), args)
if not gens:
gens = _sort_gens(f.gens, opt=opt)
elif set(f.gens) != set(gens):
raise PolynomialError("generators list can differ only up to order of elements")
rep = dict(zip(*_dict_reorder(f.rep.to_dict(), f.gens, gens)))
return f.per(DMP(rep, f.rep.dom, len(gens)-1), gens=gens)
def ltrim(f, gen):
"""
Remove dummy generators from the "left" of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> Poly(y**2 + y*z**2, x, y, z).ltrim(y)
Poly(y**2 + y*z**2, y, z, domain='ZZ')
"""
rep = f.as_dict(native=True)
j = f._gen_to_level(gen)
terms = {}
for monom, coeff in rep.iteritems():
monom = monom[j:]
if monom not in terms:
terms[monom] = coeff
else:
raise PolynomialError("can't left trim %s" % f)
gens = f.gens[j:]
return f.new(DMP.from_dict(terms, len(gens)-1, f.rep.dom), *gens)
def has_only_gens(f, *gens):
"""
Return ``True`` if ``Poly(f, *gens)`` retains ground domain.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y, z
>>> Poly(x*y + 1, x, y, z).has_only_gens(x, y)
True
>>> Poly(x*y + z, x, y, z).has_only_gens(x, y)
False
"""
f_gens = list(f.gens)
indices = set([])
for gen in gens:
try:
index = f_gens.index(gen)
except ValueError:
raise GeneratorsError("%s doesn't have %s as generator" % (f, gen))
else:
indices.add(index)
for monom in f.monoms():
for i, elt in enumerate(monom):
if i not in indices and elt:
return False
return True
def to_ring(f):
"""
Make the ground domain a ring.
Examples
========
>>> from sympy import Poly, QQ
>>> from sympy.abc import x
>>> Poly(x**2 + 1, domain=QQ).to_ring()
Poly(x**2 + 1, x, domain='ZZ')
"""
if hasattr(f.rep, 'to_ring'):
result = f.rep.to_ring()
else: # pragma: no cover
raise OperationNotSupported(f, 'to_ring')
return f.per(result)
def to_field(f):
"""
Make the ground domain a field.
Examples
========
>>> from sympy import Poly, ZZ
>>> from sympy.abc import x
>>> Poly(x**2 + 1, x, domain=ZZ).to_field()
Poly(x**2 + 1, x, domain='QQ')
"""
if hasattr(f.rep, 'to_field'):
result = f.rep.to_field()
else: # pragma: no cover
raise OperationNotSupported(f, 'to_field')
return f.per(result)
def to_exact(f):
"""
Make the ground domain exact.
Examples
========
>>> from sympy import Poly, RR
>>> from sympy.abc import x
>>> Poly(x**2 + 1.0, x, domain=RR).to_exact()
Poly(x**2 + 1, x, domain='QQ')
"""
if hasattr(f.rep, 'to_exact'):
result = f.rep.to_exact()
else: # pragma: no cover
raise OperationNotSupported(f, 'to_exact')
return f.per(result)
def retract(f, field=None):
"""
Recalculate the ground domain of a polynomial.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2 + 1, x, domain='QQ[y]')
>>> f
Poly(x**2 + 1, x, domain='QQ[y]')
>>> f.retract()
Poly(x**2 + 1, x, domain='ZZ')
>>> f.retract(field=True)
Poly(x**2 + 1, x, domain='QQ')
"""
dom, rep = construct_domain(f.as_dict(zero=True), field=field)
return f.from_dict(rep, f.gens, domain=dom)
def slice(f, x, m, n=None):
"""Take a continuous subsequence of terms of ``f``. """
if n is None:
j, m, n = 0, x, m
else:
j = f._gen_to_level(x)
m, n = int(m), int(n)
if hasattr(f.rep, 'slice'):
result = f.rep.slice(m, n, j)
else: # pragma: no cover
raise OperationNotSupported(f, 'slice')
return f.per(result)
def coeffs(f, order=None):
"""
Returns all non-zero coefficients from ``f`` in lex order.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x + 3, x).coeffs()
[1, 2, 3]
"""
return [ f.rep.dom.to_sympy(c) for c in f.rep.coeffs(order=order) ]
def monoms(f, order=None):
"""
Returns all non-zero monomials from ``f`` in lex order.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x*y**2 + x*y + 3*y, x, y).monoms()
[(2, 0), (1, 2), (1, 1), (0, 1)]
"""
return f.rep.monoms(order=order)
def terms(f, order=None):
"""
Returns all non-zero terms from ``f`` in lex order.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x*y**2 + x*y + 3*y, x, y).terms()
[((2, 0), 1), ((1, 2), 2), ((1, 1), 1), ((0, 1), 3)]
"""
return [ (m, f.rep.dom.to_sympy(c)) for m, c in f.rep.terms(order=order) ]
def all_coeffs(f):
"""
Returns all coefficients from a univariate polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x - 1, x).all_coeffs()
[1, 0, 2, -1]
"""
return [ f.rep.dom.to_sympy(c) for c in f.rep.all_coeffs() ]
def all_monoms(f):
"""
Returns all monomials from a univariate polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x - 1, x).all_monoms()
[(3,), (2,), (1,), (0,)]
"""
return f.rep.all_monoms()
def all_terms(f):
"""
Returns all terms from a univariate polynomial ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**3 + 2*x - 1, x).all_terms()
[((3,), 1), ((2,), 0), ((1,), 2), ((0,), -1)]
"""
return [ (m, f.rep.dom.to_sympy(c)) for m, c in f.rep.all_terms() ]
def termwise(f, func, *gens, **args):
"""
Apply a function to all terms of ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> def func((k,), coeff):
... return coeff//10**(2-k)
>>> Poly(x**2 + 20*x + 400).termwise(func)
Poly(x**2 + 2*x + 4, x, domain='ZZ')
"""
terms = {}
for monom, coeff in f.terms():
result = func(monom, coeff)
if isinstance(result, tuple):
monom, coeff = result
else:
coeff = result
if coeff:
if monom not in terms:
terms[monom] = coeff
else:
raise PolynomialError("%s monomial was generated twice" % monom)
return f.from_dict(terms, *(gens or f.gens), **args)
def length(f):
"""
Returns the number of non-zero terms in ``f``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> Poly(x**2 + 2*x - 1).length()
3
"""
return len(f.as_dict())
def as_dict(f, native=False, zero=False):
"""
Switch to a ``dict`` representation.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**2 + 2*x*y**2 - y, x, y).as_dict()
{(0, 1): -1, (1, 2): 2, (2, 0): 1}
"""
if native:
return f.rep.to_dict(zero=zero)
else:
return f.rep.to_sympy_dict(zero=zero)
def as_list(f, native=False):
"""Switch to a ``list`` representation. """
if native:
return f.rep.to_list()
else:
return f.rep.to_sympy_list()
def as_expr(f, *gens):
"""
Convert a polynomial an expression.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> f = Poly(x**2 + 2*x*y**2 - y, x, y)
>>> f.as_expr()
x**2 + 2*x*y**2 - y
>>> f.as_expr({x: 5})
10*y**2 - y + 25
>>> f.as_expr(5, 6)
379
"""
if not gens:
gens = f.gens
elif len(gens) == 1 and isinstance(gens[0], dict):
mapping = gens[0]
gens = list(f.gens)
for gen, value in mapping.iteritems():
try:
index = gens.index(gen)
except ValueError:
raise GeneratorsError("%s doesn't have %s as generator" % (f, gen))
else:
gens[index] = value
return basic_from_dict(f.rep.to_sympy_dict(), *gens)
def lift(f):
"""
Convert algebraic coefficients to rationals.
Examples
========
>>> from sympy import Poly, I
>>> from sympy.abc import x
>>> Poly(x**2 + I*x + 1, x, extension=I).lift()
Poly(x**4 + 3*x**2 + 1, x, domain='QQ')
"""
if hasattr(f.rep, 'lift'):
result = f.rep.lift()
else: # pragma: no cover
raise OperationNotSupported(f, 'lift')
return f.per(result)
def deflate(f):
"""
Reduce degree of ``f`` by mapping ``x_i**m`` to ``y_i``.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x, y
>>> Poly(x**6*y**2 + x**3 + 1, x, y).deflate()
((3, 2), Poly(x**2*y + x + 1, x, y, domain='ZZ'))
"""
if hasattr(f.rep, 'deflate'):
J, result = f.rep.deflate()
else: # pragma: no cover
raise OperationNotSupported(f, 'deflate')
return J, f.per(result)
def inject(f, front=False):
    """
    Inject ground domain generators into ``f``.

    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> f = Poly(x**2*y + x*y**3 + x*y + 1, x)
    >>> f.inject()
    Poly(x**2*y + x*y**3 + x*y + 1, x, y, domain='ZZ')
    >>> f.inject(front=True)
    Poly(y**3*x + y*x**2 + y*x + 1, y, x, domain='ZZ')
    """
    dom = f.rep.dom
    # Nothing to inject over a purely numerical domain.
    if dom.is_Numerical:
        return f
    elif not dom.is_Poly:
        raise DomainError("can't inject generators over %s" % dom)
    if not hasattr(f.rep, 'inject'):  # pragma: no cover
        raise OperationNotSupported(f, 'inject')
    result = f.rep.inject(front=front)
    # ``front`` controls whether the ground generators lead or trail.
    gens = (dom.gens + f.gens) if front else (f.gens + dom.gens)
    return f.new(result, *gens)
def eject(f, *gens):
    """
    Eject selected generators into the ground domain.

    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> f = Poly(x**2*y + x*y**3 + x*y + 1, x, y)
    >>> f.eject(x)
    Poly(x*y**3 + (x**2 + x)*y + 1, y, domain='ZZ[x]')
    >>> f.eject(y)
    Poly(y*x**2 + (y**3 + y)*x + 1, x, domain='ZZ[y]')
    """
    dom = f.rep.dom
    if not dom.is_Numerical:
        raise DomainError("can't eject generators over %s" % dom)
    n, k = len(f.gens), len(gens)
    # Only a leading or trailing run of generators can be ejected.
    if f.gens[:k] == gens:
        _gens, front = f.gens[n - k:], True
    elif f.gens[-k:] == gens:
        _gens, front = f.gens[:n - k], False
    else:
        raise NotImplementedError("can only eject front or back generators")
    dom = dom.inject(*gens)
    if not hasattr(f.rep, 'eject'):  # pragma: no cover
        raise OperationNotSupported(f, 'eject')
    return f.new(f.rep.eject(dom, front=front), *_gens)
def terms_gcd(f):
    """
    Remove GCD of terms from the polynomial ``f``.

    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> Poly(x**6*y**2 + x**3*y, x, y).terms_gcd()
    ((3, 1), Poly(x**3*y + 1, x, y, domain='ZZ'))
    """
    if not hasattr(f.rep, 'terms_gcd'):  # pragma: no cover
        raise OperationNotSupported(f, 'terms_gcd')
    # ``J`` holds the extracted monomial exponents.
    J, result = f.rep.terms_gcd()
    return J, f.per(result)
def add_ground(f, coeff):
    """
    Add an element of the ground domain to ``f``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x + 1).add_ground(2)
    Poly(x + 3, x, domain='ZZ')
    """
    if not hasattr(f.rep, 'add_ground'):  # pragma: no cover
        raise OperationNotSupported(f, 'add_ground')
    return f.per(f.rep.add_ground(coeff))
def sub_ground(f, coeff):
    """
    Subtract an element of the ground domain from ``f``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x + 1).sub_ground(2)
    Poly(x - 1, x, domain='ZZ')
    """
    if not hasattr(f.rep, 'sub_ground'):  # pragma: no cover
        raise OperationNotSupported(f, 'sub_ground')
    return f.per(f.rep.sub_ground(coeff))
def mul_ground(f, coeff):
    """
    Multiply ``f`` by an element of the ground domain.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x + 1).mul_ground(2)
    Poly(2*x + 2, x, domain='ZZ')
    """
    if not hasattr(f.rep, 'mul_ground'):  # pragma: no cover
        raise OperationNotSupported(f, 'mul_ground')
    return f.per(f.rep.mul_ground(coeff))
def quo_ground(f, coeff):
    """
    Quotient of ``f`` by an element of the ground domain.

    Over a ring the division truncates rather than failing.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(2*x + 4).quo_ground(2)
    Poly(x + 2, x, domain='ZZ')
    >>> Poly(2*x + 3).quo_ground(2)
    Poly(x + 1, x, domain='ZZ')
    """
    if not hasattr(f.rep, 'quo_ground'):  # pragma: no cover
        raise OperationNotSupported(f, 'quo_ground')
    return f.per(f.rep.quo_ground(coeff))
def exquo_ground(f, coeff):
    """
    Exact quotient of ``f`` by an element of the ground domain.

    Raises ``ExactQuotientFailed`` when the division is inexact.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(2*x + 4).exquo_ground(2)
    Poly(x + 2, x, domain='ZZ')
    >>> Poly(2*x + 3).exquo_ground(2)
    Traceback (most recent call last):
    ...
    ExactQuotientFailed: 2 does not divide 3 in ZZ
    """
    if not hasattr(f.rep, 'exquo_ground'):  # pragma: no cover
        raise OperationNotSupported(f, 'exquo_ground')
    return f.per(f.rep.exquo_ground(coeff))
def abs(f):
    """
    Make all coefficients in ``f`` positive.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 - 1, x).abs()
    Poly(x**2 + 1, x, domain='ZZ')
    """
    if not hasattr(f.rep, 'abs'):  # pragma: no cover
        raise OperationNotSupported(f, 'abs')
    return f.per(f.rep.abs())
def neg(f):
    """
    Negate all coefficients in ``f``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 - 1, x).neg()
    Poly(-x**2 + 1, x, domain='ZZ')
    >>> -Poly(x**2 - 1, x)
    Poly(-x**2 + 1, x, domain='ZZ')
    """
    if not hasattr(f.rep, 'neg'):  # pragma: no cover
        raise OperationNotSupported(f, 'neg')
    return f.per(f.rep.neg())
def add(f, g):
    """
    Add two polynomials ``f`` and ``g``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 + 1, x).add(Poly(x - 2, x))
    Poly(x**2 + x - 1, x, domain='ZZ')
    >>> Poly(x**2 + 1, x) + Poly(x - 2, x)
    Poly(x**2 + x - 1, x, domain='ZZ')
    """
    g = sympify(g)
    # Non-Poly operands are treated as ground-domain elements.
    if not g.is_Poly:
        return f.add_ground(g)
    _, per, F, G = f._unify(g)
    if not hasattr(f.rep, 'add'):  # pragma: no cover
        raise OperationNotSupported(f, 'add')
    return per(F.add(G))
def sub(f, g):
    """
    Subtract two polynomials ``f`` and ``g``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 + 1, x).sub(Poly(x - 2, x))
    Poly(x**2 - x + 3, x, domain='ZZ')
    >>> Poly(x**2 + 1, x) - Poly(x - 2, x)
    Poly(x**2 - x + 3, x, domain='ZZ')
    """
    g = sympify(g)
    # Non-Poly operands are treated as ground-domain elements.
    if not g.is_Poly:
        return f.sub_ground(g)
    _, per, F, G = f._unify(g)
    if not hasattr(f.rep, 'sub'):  # pragma: no cover
        raise OperationNotSupported(f, 'sub')
    return per(F.sub(G))
def mul(f, g):
    """
    Multiply two polynomials ``f`` and ``g``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 + 1, x).mul(Poly(x - 2, x))
    Poly(x**3 - 2*x**2 + x - 2, x, domain='ZZ')
    >>> Poly(x**2 + 1, x)*Poly(x - 2, x)
    Poly(x**3 - 2*x**2 + x - 2, x, domain='ZZ')
    """
    g = sympify(g)
    # Non-Poly operands are treated as ground-domain elements.
    if not g.is_Poly:
        return f.mul_ground(g)
    _, per, F, G = f._unify(g)
    if not hasattr(f.rep, 'mul'):  # pragma: no cover
        raise OperationNotSupported(f, 'mul')
    return per(F.mul(G))
def sqr(f):
    """
    Square a polynomial ``f``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x - 2, x).sqr()
    Poly(x**2 - 4*x + 4, x, domain='ZZ')
    >>> Poly(x - 2, x)**2
    Poly(x**2 - 4*x + 4, x, domain='ZZ')
    """
    if not hasattr(f.rep, 'sqr'):  # pragma: no cover
        raise OperationNotSupported(f, 'sqr')
    return f.per(f.rep.sqr())
def pow(f, n):
    """
    Raise ``f`` to a non-negative power ``n``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x - 2, x).pow(3)
    Poly(x**3 - 6*x**2 + 12*x - 8, x, domain='ZZ')
    >>> Poly(x - 2, x)**3
    Poly(x**3 - 6*x**2 + 12*x - 8, x, domain='ZZ')
    """
    # Coerce the exponent to a plain int for the low-level representation.
    n = int(n)
    if not hasattr(f.rep, 'pow'):  # pragma: no cover
        raise OperationNotSupported(f, 'pow')
    return f.per(f.rep.pow(n))
def pdiv(f, g):
    """
    Polynomial pseudo-division of ``f`` by ``g``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 + 1, x).pdiv(Poly(2*x - 4, x))
    (Poly(2*x + 4, x, domain='ZZ'), Poly(20, x, domain='ZZ'))
    """
    _, per, F, G = f._unify(g)
    if not hasattr(f.rep, 'pdiv'):  # pragma: no cover
        raise OperationNotSupported(f, 'pdiv')
    q, r = F.pdiv(G)
    return per(q), per(r)
def prem(f, g):
    """
    Polynomial pseudo-remainder of ``f`` by ``g``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 + 1, x).prem(Poly(2*x - 4, x))
    Poly(20, x, domain='ZZ')
    """
    _, per, F, G = f._unify(g)
    if not hasattr(f.rep, 'prem'):  # pragma: no cover
        raise OperationNotSupported(f, 'prem')
    return per(F.prem(G))
def pquo(f, g):
    """
    Polynomial pseudo-quotient of ``f`` by ``g``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 + 1, x).pquo(Poly(2*x - 4, x))
    Poly(2*x + 4, x, domain='ZZ')
    >>> Poly(x**2 - 1, x).pquo(Poly(2*x - 2, x))
    Poly(2*x + 2, x, domain='ZZ')
    """
    _, per, F, G = f._unify(g)
    if not hasattr(f.rep, 'pquo'):  # pragma: no cover
        raise OperationNotSupported(f, 'pquo')
    return per(F.pquo(G))
def pexquo(f, g):
    """
    Polynomial exact pseudo-quotient of ``f`` by ``g``.

    Raises ``ExactQuotientFailed`` when the pseudo-division is inexact.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 - 1, x).pexquo(Poly(2*x - 2, x))
    Poly(2*x + 2, x, domain='ZZ')
    >>> Poly(x**2 + 1, x).pexquo(Poly(2*x - 4, x))
    Traceback (most recent call last):
    ...
    ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
    """
    _, per, F, G = f._unify(g)
    if hasattr(f.rep, 'pexquo'):
        try:
            result = F.pexquo(G)
        # ``except ... as ...`` (instead of the legacy comma form) is valid
        # on Python 2.6+ and required on Python 3.
        except ExactQuotientFailed as exc:
            # Re-raise with the user-level expressions for a readable message.
            raise exc.new(f.as_expr(), g.as_expr())
    else:  # pragma: no cover
        raise OperationNotSupported(f, 'pexquo')
    return per(result)
def div(f, g, auto=True):
    """
    Polynomial division with remainder of ``f`` by ``g``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 + 1, x).div(Poly(2*x - 4, x))
    (Poly(1/2*x + 1, x, domain='QQ'), Poly(5, x, domain='QQ'))
    >>> Poly(x**2 + 1, x).div(Poly(2*x - 4, x), auto=False)
    (Poly(0, x, domain='ZZ'), Poly(x**2 + 1, x, domain='ZZ'))
    """
    dom, per, F, G = f._unify(g)
    # With ``auto`` we divide over the fraction field and retract afterwards.
    retract = bool(auto and dom.has_Ring and not dom.has_Field)
    if retract:
        F, G = F.to_field(), G.to_field()
    if not hasattr(f.rep, 'div'):  # pragma: no cover
        raise OperationNotSupported(f, 'div')
    q, r = F.div(G)
    if retract:
        try:
            # Both conversions happen before assignment, so a failure
            # leaves ``q`` and ``r`` untouched.
            q, r = q.to_ring(), r.to_ring()
        except CoercionFailed:
            pass
    return per(q), per(r)
def rem(f, g, auto=True):
    """
    Computes the polynomial remainder of ``f`` by ``g``.

    >>> from sympy import Poly, ZZ, QQ
    >>> from sympy.abc import x
    >>> Poly(x**2 + 1, x).rem(Poly(2*x - 4, x))
    Poly(5, x, domain='ZZ')
    >>> Poly(x**2 + 1, x).rem(Poly(2*x - 4, x), auto=False)
    Poly(x**2 + 1, x, domain='ZZ')
    """
    dom, per, F, G = f._unify(g)
    # With ``auto`` we divide over the fraction field and retract afterwards.
    retract = bool(auto and dom.has_Ring and not dom.has_Field)
    if retract:
        F, G = F.to_field(), G.to_field()
    if not hasattr(f.rep, 'rem'):  # pragma: no cover
        raise OperationNotSupported(f, 'rem')
    r = F.rem(G)
    if retract:
        try:
            r = r.to_ring()
        except CoercionFailed:
            pass
    return per(r)
def quo(f, g, auto=True):
    """
    Computes polynomial quotient of ``f`` by ``g``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 + 1, x).quo(Poly(2*x - 4, x))
    Poly(1/2*x + 1, x, domain='QQ')
    >>> Poly(x**2 - 1, x).quo(Poly(x - 1, x))
    Poly(x + 1, x, domain='ZZ')
    """
    dom, per, F, G = f._unify(g)
    # With ``auto`` we divide over the fraction field and retract afterwards.
    retract = bool(auto and dom.has_Ring and not dom.has_Field)
    if retract:
        F, G = F.to_field(), G.to_field()
    if not hasattr(f.rep, 'quo'):  # pragma: no cover
        raise OperationNotSupported(f, 'quo')
    q = F.quo(G)
    if retract:
        try:
            q = q.to_ring()
        except CoercionFailed:
            pass
    return per(q)
def exquo(f, g, auto=True):
    """
    Computes polynomial exact quotient of ``f`` by ``g``.

    Raises ``ExactQuotientFailed`` when the division is inexact.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 - 1, x).exquo(Poly(x - 1, x))
    Poly(x + 1, x, domain='ZZ')
    >>> Poly(x**2 + 1, x).exquo(Poly(2*x - 4, x))
    Traceback (most recent call last):
    ...
    ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
    """
    dom, per, F, G = f._unify(g)
    retract = False
    # With ``auto`` we divide over the fraction field and retract afterwards.
    if auto and dom.has_Ring and not dom.has_Field:
        F, G = F.to_field(), G.to_field()
        retract = True
    if hasattr(f.rep, 'exquo'):
        try:
            q = F.exquo(G)
        # ``except ... as ...`` (instead of the legacy comma form) is valid
        # on Python 2.6+ and required on Python 3.
        except ExactQuotientFailed as exc:
            # Re-raise with the user-level expressions for a readable message.
            raise exc.new(f.as_expr(), g.as_expr())
    else:  # pragma: no cover
        raise OperationNotSupported(f, 'exquo')
    if retract:
        try:
            q = q.to_ring()
        except CoercionFailed:
            pass
    return per(q)
def _gen_to_level(f, gen):
    """Returns level associated with the given generator. """
    if isinstance(gen, int):
        length = len(f.gens)
        # Negative indices count from the end, as with sequences.
        if not (-length <= gen < length):
            raise PolynomialError("-%s <= gen < %s expected, got %s" % (length, length, gen))
        return length + gen if gen < 0 else gen
    try:
        return list(f.gens).index(sympify(gen))
    except ValueError:
        raise PolynomialError("a valid generator expected, got %s" % gen)
def degree(f, gen=0):
    """
    Returns degree of ``f`` in ``x_j``.

    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> Poly(x**2 + y*x + 1, x, y).degree()
    2
    >>> Poly(x**2 + y*x + y, x, y).degree(y)
    1
    """
    j = f._gen_to_level(gen)
    if not hasattr(f.rep, 'degree'):  # pragma: no cover
        raise OperationNotSupported(f, 'degree')
    return f.rep.degree(j)
def degree_list(f):
    """
    Returns a list of degrees of ``f``, one per generator.

    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> Poly(x**2 + y*x + 1, x, y).degree_list()
    (2, 1)
    """
    if not hasattr(f.rep, 'degree_list'):  # pragma: no cover
        raise OperationNotSupported(f, 'degree_list')
    return f.rep.degree_list()
def total_degree(f):
    """
    Returns the total degree of ``f``.

    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> Poly(x**2 + y*x + 1, x, y).total_degree()
    2
    >>> Poly(x + y**5, x, y).total_degree()
    5
    """
    if not hasattr(f.rep, 'total_degree'):  # pragma: no cover
        raise OperationNotSupported(f, 'total_degree')
    return f.rep.total_degree()
def homogeneous_order(f):
    """
    Returns the homogeneous order of ``f``.

    A homogeneous polynomial is a polynomial whose all monomials with
    non-zero coefficients have the same total degree; that degree is the
    homogeneous order.  To merely test homogeneity use
    :func:`Poly.is_homogeneous`.

    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> f = Poly(x**5 + 2*x**3*y**2 + 9*x*y**4)
    >>> f.homogeneous_order()
    5
    """
    if not hasattr(f.rep, 'homogeneous_order'):  # pragma: no cover
        raise OperationNotSupported(f, 'homogeneous_order')
    return f.rep.homogeneous_order()
def LC(f, order=None):
    """
    Returns the leading coefficient of ``f``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(4*x**3 + 2*x**2 + 3*x, x).LC()
    4
    """
    # An explicit monomial order delegates to the ordered coefficient list.
    if order is not None:
        return f.coeffs(order)[0]
    if not hasattr(f.rep, 'LC'):  # pragma: no cover
        raise OperationNotSupported(f, 'LC')
    return f.rep.dom.to_sympy(f.rep.LC())
def TC(f):
    """
    Returns the trailing coefficient of ``f``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**3 + 2*x**2 + 3*x, x).TC()
    0
    """
    if not hasattr(f.rep, 'TC'):  # pragma: no cover
        raise OperationNotSupported(f, 'TC')
    return f.rep.dom.to_sympy(f.rep.TC())
def EC(f, order=None):
    """
    Returns the last non-zero coefficient of ``f``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**3 + 2*x**2 + 3*x, x).EC()
    3
    """
    if not hasattr(f.rep, 'coeffs'):  # pragma: no cover
        raise OperationNotSupported(f, 'EC')
    return f.coeffs(order)[-1]
def nth(f, *N):
    """
    Returns the ``n``-th coefficient of ``f``.

    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> Poly(x**3 + 2*x**2 + 3*x, x).nth(2)
    2
    >>> Poly(x**3 + 2*x*y**2 + y**2, x, y).nth(1, 2)
    2
    """
    if not hasattr(f.rep, 'nth'):  # pragma: no cover
        raise OperationNotSupported(f, 'nth')
    # Exponents are coerced to plain ints for the low-level representation.
    result = f.rep.nth(*map(int, N))
    return f.rep.dom.to_sympy(result)
def LM(f, order=None):
    """
    Returns the leading monomial of ``f``.

    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).LM()
    x**2*y**0
    """
    monoms = f.monoms(order)
    return Monomial(monoms[0], f.gens)
def EM(f, order=None):
    """
    Returns the last non-zero monomial of ``f``.

    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).EM()
    x**0*y**1
    """
    monoms = f.monoms(order)
    return Monomial(monoms[-1], f.gens)
def LT(f, order=None):
    """
    Returns the leading term (monomial, coefficient) of ``f``.

    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).LT()
    (x**2*y**0, 4)
    """
    monom, coeff = f.terms(order)[0]
    return Monomial(monom, f.gens), coeff
def ET(f, order=None):
    """
    Returns the last non-zero term (monomial, coefficient) of ``f``.

    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> Poly(4*x**2 + 2*x*y**2 + x*y + 3*y, x, y).ET()
    (x**0*y**1, 3)
    """
    monom, coeff = f.terms(order)[-1]
    return Monomial(monom, f.gens), coeff
def max_norm(f):
    """
    Returns maximum norm of ``f`` (largest absolute coefficient).

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(-x**2 + 2*x - 3, x).max_norm()
    3
    """
    if not hasattr(f.rep, 'max_norm'):  # pragma: no cover
        raise OperationNotSupported(f, 'max_norm')
    return f.rep.dom.to_sympy(f.rep.max_norm())
def l1_norm(f):
    """
    Returns l1 norm of ``f`` (sum of absolute coefficients).

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(-x**2 + 2*x - 3, x).l1_norm()
    6
    """
    if not hasattr(f.rep, 'l1_norm'):  # pragma: no cover
        raise OperationNotSupported(f, 'l1_norm')
    return f.rep.dom.to_sympy(f.rep.l1_norm())
def clear_denoms(f, convert=False):
    """
    Clear denominators, but keep the ground domain.

    >>> from sympy import Poly, S, QQ
    >>> from sympy.abc import x
    >>> f = Poly(x/2 + S(1)/3, x, domain=QQ)
    >>> f.clear_denoms()
    (6, Poly(3*x + 2, x, domain='QQ'))
    >>> f.clear_denoms(convert=True)
    (6, Poly(3*x + 2, x, domain='ZZ'))
    """
    # Over a ring there are no denominators to clear.
    if not f.rep.dom.has_Field:
        return S.One, f
    dom = f.rep.dom.get_ring()
    if not hasattr(f.rep, 'clear_denoms'):  # pragma: no cover
        raise OperationNotSupported(f, 'clear_denoms')
    coeff, result = f.rep.clear_denoms()
    coeff, f = dom.to_sympy(coeff), f.per(result)
    # ``convert`` switches the result to the associated ring domain.
    if convert:
        return coeff, f.to_ring()
    return coeff, f
def rat_clear_denoms(f, g):
    """
    Clear denominators in a rational function ``f/g``.

    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> f = Poly(x**2/y + 1, x)
    >>> g = Poly(x**3 + y, x)
    >>> p, q = f.rat_clear_denoms(g)
    >>> p
    Poly(x**2 + y, x, domain='ZZ[y]')
    >>> q
    Poly(y*x**3 + y**2, x, domain='ZZ[y]')
    """
    dom, per, f, g = f._unify(g)
    f, g = per(f), per(g)
    # Clearing makes sense only over a field with an associated ring.
    if not (dom.has_Field and dom.has_assoc_Ring):
        return f, g
    a, f = f.clear_denoms(convert=True)
    b, g = g.clear_denoms(convert=True)
    # Cross-multiply so that f/g is preserved: (b*f)/(a*g) == f/g * (b/a) * (a/b).
    return f.mul_ground(b), g.mul_ground(a)
def integrate(f, *specs, **args):
    """
    Computes indefinite integral of ``f``.

    Each spec is either a generator or a ``(gen, m)`` pair giving the
    order of integration in that generator.

    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> Poly(x**2 + 2*x + 1, x).integrate()
    Poly(1/3*x**3 + x**2 + x, x, domain='QQ')
    >>> Poly(x*y**2 + x, x, y).integrate((0, 1), (1, 0))
    Poly(1/2*x**2*y**2 + 1/2*x**2, x, y, domain='QQ')
    """
    # Integration generally requires field coefficients.
    if args.get('auto', True) and f.rep.dom.has_Ring:
        f = f.to_field()
    if not hasattr(f.rep, 'integrate'):  # pragma: no cover
        raise OperationNotSupported(f, 'integrate')
    if not specs:
        return f.per(f.rep.integrate(m=1))
    rep = f.rep
    for spec in specs:
        gen, m = spec if type(spec) is tuple else (spec, 1)
        rep = rep.integrate(int(m), f._gen_to_level(gen))
    return f.per(rep)
def diff(f, *specs):
    """
    Computes partial derivative of ``f``.

    Each spec is either a generator or a ``(gen, m)`` pair giving the
    order of differentiation in that generator.

    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> Poly(x**2 + 2*x + 1, x).diff()
    Poly(2*x + 2, x, domain='ZZ')
    >>> Poly(x*y**2 + x, x, y).diff((0, 0), (1, 1))
    Poly(2*x*y, x, y, domain='ZZ')
    """
    if not hasattr(f.rep, 'diff'):  # pragma: no cover
        raise OperationNotSupported(f, 'diff')
    if not specs:
        return f.per(f.rep.diff(m=1))
    rep = f.rep
    for spec in specs:
        gen, m = spec if type(spec) is tuple else (spec, 1)
        rep = rep.diff(int(m), f._gen_to_level(gen))
    return f.per(rep)
def eval(f, x, a=None, auto=True):
    """
    Evaluate ``f`` at ``a`` in the given variable.

    ``x`` may be a generator (with value ``a``), a mapping of generators
    to values, or a sequence of values matched positionally against
    ``f.gens``.  Partial evaluation returns a :class:`Poly` in the
    remaining generators.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x, y, z
    >>> Poly(x**2 + 2*x + 3, x).eval(2)
    11
    >>> Poly(2*x*y + 3*x + y + 2, x, y).eval(x, 2)
    Poly(5*y + 8, y, domain='ZZ')
    >>> f = Poly(2*x*y + 3*x + y + 2*z, x, y, z)
    >>> f.eval({x: 2})
    Poly(5*y + 2*z + 6, y, z, domain='ZZ')
    >>> f.eval({x: 2, y: 5})
    Poly(2*z + 31, z, domain='ZZ')
    >>> f.eval({x: 2, y: 5, z: 7})
    45
    >>> f.eval((2, 5))
    Poly(2*z + 31, z, domain='ZZ')
    >>> f(2, 5)
    Poly(2*z + 31, z, domain='ZZ')
    """
    if a is None:
        if isinstance(x, dict):
            mapping = x
            # ``items()`` instead of Python 2-only ``iteritems()`` so the
            # method also works under Python 3.
            for gen, value in mapping.items():
                f = f.eval(gen, value)
            return f
        elif isinstance(x, (tuple, list)):
            values = x
            if len(values) > len(f.gens):
                raise ValueError("too many values provided")
            for gen, value in zip(f.gens, values):
                f = f.eval(gen, value)
            return f
        else:
            # Single value given: evaluate in the first generator.
            j, a = 0, x
    else:
        j = f._gen_to_level(x)
    if not hasattr(f.rep, 'eval'):  # pragma: no cover
        raise OperationNotSupported(f, 'eval')
    try:
        result = f.rep.eval(a, j)
    except CoercionFailed:
        if not auto:
            raise DomainError("can't evaluate at %s in %s" % (a, f.rep.dom))
        else:
            # Retry after widening the domain to one that contains ``a``.
            domain, [a] = construct_domain([a])
            f = f.set_domain(domain)
            result = f.rep.eval(a, j)
    return f.per(result, remove=j)
def __call__(f, *values):
    """
    Evaluate ``f`` at the given values (shorthand for :meth:`eval`).

    >>> from sympy import Poly
    >>> from sympy.abc import x, y, z
    >>> f = Poly(2*x*y + 3*x + y + 2*z, x, y, z)
    >>> f(2)
    Poly(5*y + 2*z + 6, y, z, domain='ZZ')
    >>> f(2, 5)
    Poly(2*z + 31, z, domain='ZZ')
    >>> f(2, 5, 7)
    45
    """
    # Delegate to ``eval`` with the values as a positional tuple.
    return f.eval(values)
def half_gcdex(f, g, auto=True):
    """
    Half extended Euclidean algorithm of ``f`` and ``g``.

    Returns ``(s, h)`` such that ``h = gcd(f, g)`` and ``s*f = h (mod g)``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
    >>> g = x**3 + x**2 - 4*x - 4
    >>> Poly(f).half_gcdex(Poly(g))
    (Poly(-1/5*x + 3/5, x, domain='QQ'), Poly(x + 1, x, domain='QQ'))
    """
    dom, per, F, G = f._unify(g)
    # The algorithm requires field coefficients.
    if auto and dom.has_Ring:
        F, G = F.to_field(), G.to_field()
    if not hasattr(f.rep, 'half_gcdex'):  # pragma: no cover
        raise OperationNotSupported(f, 'half_gcdex')
    s, h = F.half_gcdex(G)
    return per(s), per(h)
def gcdex(f, g, auto=True):
    """
    Extended Euclidean algorithm of ``f`` and ``g``.

    Returns ``(s, t, h)`` such that ``h = gcd(f, g)`` and ``s*f + t*g = h``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> f = x**4 - 2*x**3 - 6*x**2 + 12*x + 15
    >>> g = x**3 + x**2 - 4*x - 4
    >>> Poly(f).gcdex(Poly(g))
    (Poly(-1/5*x + 3/5, x, domain='QQ'),
    Poly(1/5*x**2 - 6/5*x + 2, x, domain='QQ'),
    Poly(x + 1, x, domain='QQ'))
    """
    dom, per, F, G = f._unify(g)
    # The algorithm requires field coefficients.
    if auto and dom.has_Ring:
        F, G = F.to_field(), G.to_field()
    if not hasattr(f.rep, 'gcdex'):  # pragma: no cover
        raise OperationNotSupported(f, 'gcdex')
    s, t, h = F.gcdex(G)
    return per(s), per(t), per(h)
def invert(f, g, auto=True):
    """
    Invert ``f`` modulo ``g`` when possible.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 - 1, x).invert(Poly(2*x - 1, x))
    Poly(-4/3, x, domain='QQ')
    >>> Poly(x**2 - 1, x).invert(Poly(x - 1, x))
    Traceback (most recent call last):
    ...
    NotInvertible: zero divisor
    """
    dom, per, F, G = f._unify(g)
    # Inversion requires field coefficients.
    if auto and dom.has_Ring:
        F, G = F.to_field(), G.to_field()
    if not hasattr(f.rep, 'invert'):  # pragma: no cover
        raise OperationNotSupported(f, 'invert')
    return per(F.invert(G))
def revert(f, n):
    """Compute ``f**(-1)`` mod ``x**n``. """
    if not hasattr(f.rep, 'revert'):  # pragma: no cover
        raise OperationNotSupported(f, 'revert')
    return f.per(f.rep.revert(int(n)))
def subresultants(f, g):
    """
    Computes the subresultant PRS sequence of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 + 1, x).subresultants(Poly(x**2 - 1, x))
    [Poly(x**2 + 1, x, domain='ZZ'),
    Poly(x**2 - 1, x, domain='ZZ'),
    Poly(-2, x, domain='ZZ')]
    """
    _, per, F, G = f._unify(g)
    if hasattr(f.rep, 'subresultants'):
        result = F.subresultants(G)
    else:  # pragma: no cover
        raise OperationNotSupported(f, 'subresultants')
    # Explicit list comprehension (not ``map``) so a list is returned on
    # Python 3 as well, consistent with e.g. ``gff_list``.
    return [per(g) for g in result]
def resultant(f, g):
    """
    Computes the resultant of ``f`` and ``g`` via PRS.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 + 1, x).resultant(Poly(x**2 - 1, x))
    4
    """
    _, per, F, G = f._unify(g)
    if not hasattr(f.rep, 'resultant'):  # pragma: no cover
        raise OperationNotSupported(f, 'resultant')
    # ``remove=0``: the resultant does not involve the main generator.
    return per(F.resultant(G), remove=0)
def discriminant(f):
    """
    Computes the discriminant of ``f``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 + 2*x + 3, x).discriminant()
    -8
    """
    if not hasattr(f.rep, 'discriminant'):  # pragma: no cover
        raise OperationNotSupported(f, 'discriminant')
    # ``remove=0``: the discriminant does not involve the main generator.
    return f.per(f.rep.discriminant(), remove=0)
def cofactors(f, g):
    """
    Returns the GCD of ``f`` and ``g`` and their cofactors.

    Returns polynomials ``(h, cff, cfg)`` such that ``h = gcd(f, g)``,
    ``cff = quo(f, h)`` and ``cfg = quo(g, h)``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 - 1, x).cofactors(Poly(x**2 - 3*x + 2, x))
    (Poly(x - 1, x, domain='ZZ'),
    Poly(x + 1, x, domain='ZZ'),
    Poly(x - 2, x, domain='ZZ'))
    """
    _, per, F, G = f._unify(g)
    if not hasattr(f.rep, 'cofactors'):  # pragma: no cover
        raise OperationNotSupported(f, 'cofactors')
    h, cff, cfg = F.cofactors(G)
    return per(h), per(cff), per(cfg)
def gcd(f, g):
    """
    Returns the polynomial GCD of ``f`` and ``g``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 - 1, x).gcd(Poly(x**2 - 3*x + 2, x))
    Poly(x - 1, x, domain='ZZ')
    """
    _, per, F, G = f._unify(g)
    if not hasattr(f.rep, 'gcd'):  # pragma: no cover
        raise OperationNotSupported(f, 'gcd')
    return per(F.gcd(G))
def lcm(f, g):
    """
    Returns polynomial LCM of ``f`` and ``g``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 - 1, x).lcm(Poly(x**2 - 3*x + 2, x))
    Poly(x**3 - 2*x**2 - x + 2, x, domain='ZZ')
    """
    _, per, F, G = f._unify(g)
    if not hasattr(f.rep, 'lcm'):  # pragma: no cover
        raise OperationNotSupported(f, 'lcm')
    return per(F.lcm(G))
def trunc(f, p):
    """
    Reduce ``f`` modulo a constant ``p`` (symmetric representation).

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(2*x**3 + 3*x**2 + 5*x + 7, x).trunc(3)
    Poly(-x**3 - x + 1, x, domain='ZZ')
    """
    # Coerce the modulus into the ground domain first.
    p = f.rep.dom.convert(p)
    if not hasattr(f.rep, 'trunc'):  # pragma: no cover
        raise OperationNotSupported(f, 'trunc')
    return f.per(f.rep.trunc(p))
def monic(f, auto=True):
    """
    Divides all coefficients by ``LC(f)``.

    >>> from sympy import Poly, ZZ
    >>> from sympy.abc import x
    >>> Poly(3*x**2 + 6*x + 9, x, domain=ZZ).monic()
    Poly(x**2 + 2*x + 3, x, domain='QQ')
    >>> Poly(3*x**2 + 4*x + 2, x, domain=ZZ).monic()
    Poly(x**2 + 4/3*x + 2/3, x, domain='QQ')
    """
    # Dividing by the leading coefficient requires field coefficients.
    if auto and f.rep.dom.has_Ring:
        f = f.to_field()
    if not hasattr(f.rep, 'monic'):  # pragma: no cover
        raise OperationNotSupported(f, 'monic')
    return f.per(f.rep.monic())
def content(f):
    """
    Returns the GCD of polynomial coefficients.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(6*x**2 + 8*x + 12, x).content()
    2
    """
    if not hasattr(f.rep, 'content'):  # pragma: no cover
        raise OperationNotSupported(f, 'content')
    return f.rep.dom.to_sympy(f.rep.content())
def primitive(f):
    """
    Returns the content and a primitive form of ``f``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(2*x**2 + 8*x + 12, x).primitive()
    (2, Poly(x**2 + 4*x + 6, x, domain='ZZ'))
    """
    if not hasattr(f.rep, 'primitive'):  # pragma: no cover
        raise OperationNotSupported(f, 'primitive')
    cont, result = f.rep.primitive()
    return f.rep.dom.to_sympy(cont), f.per(result)
def compose(f, g):
    """
    Computes the functional composition of ``f`` and ``g``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 + x, x).compose(Poly(x - 1, x))
    Poly(x**2 - x, x, domain='ZZ')
    """
    _, per, F, G = f._unify(g)
    if not hasattr(f.rep, 'compose'):  # pragma: no cover
        raise OperationNotSupported(f, 'compose')
    return per(F.compose(G))
def decompose(f):
    """
    Computes a functional decomposition of ``f``.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**4 + 2*x**3 - x - 1, x, domain='ZZ').decompose()
    [Poly(x**2 - x - 1, x, domain='ZZ'), Poly(x**2 + x, x, domain='ZZ')]
    """
    if hasattr(f.rep, 'decompose'):
        result = f.rep.decompose()
    else:  # pragma: no cover
        raise OperationNotSupported(f, 'decompose')
    # Explicit list comprehension (not ``map``) so a list is returned on
    # Python 3 as well, consistent with e.g. ``gff_list``.
    return [f.per(g) for g in result]
def shift(f, a):
    """
    Efficiently compute Taylor shift ``f(x + a)``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 - 2*x + 1, x).shift(2)
    Poly(x**2 + 2*x + 1, x, domain='ZZ')
    """
    if not hasattr(f.rep, 'shift'):  # pragma: no cover
        raise OperationNotSupported(f, 'shift')
    return f.per(f.rep.shift(a))
def sturm(f, auto=True):
    """
    Computes the Sturm sequence of ``f``.

    Examples
    ========

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**3 - 2*x**2 + x - 3, x).sturm()
    [Poly(x**3 - 2*x**2 + x - 3, x, domain='QQ'),
    Poly(3*x**2 - 4*x + 1, x, domain='QQ'),
    Poly(2/9*x + 25/9, x, domain='QQ'),
    Poly(-2079/4, x, domain='QQ')]
    """
    # The sequence is computed over the fraction field.
    if auto and f.rep.dom.has_Ring:
        f = f.to_field()
    if hasattr(f.rep, 'sturm'):
        result = f.rep.sturm()
    else:  # pragma: no cover
        raise OperationNotSupported(f, 'sturm')
    # Explicit list comprehension (not ``map``) so a list is returned on
    # Python 3 as well, consistent with e.g. ``gff_list``.
    return [f.per(g) for g in result]
def gff_list(f):
    """
    Computes greatest factorial factorization of ``f``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> f = x**5 + 2*x**4 - x**3 - 2*x**2
    >>> Poly(f).gff_list()
    [(Poly(x, x, domain='ZZ'), 1), (Poly(x + 2, x, domain='ZZ'), 4)]
    """
    if not hasattr(f.rep, 'gff_list'):  # pragma: no cover
        raise OperationNotSupported(f, 'gff_list')
    return [(f.per(g), k) for g, k in f.rep.gff_list()]
def sqf_norm(f):
    """
    Computes square-free norm of ``f``.

    Returns ``s``, ``f``, ``r``, such that ``g(x) = f(x-sa)`` and
    ``r(x) = Norm(g(x))`` is a square-free polynomial over ``K``,
    where ``a`` is the algebraic extension of the ground domain.

    >>> from sympy import Poly, sqrt
    >>> from sympy.abc import x
    >>> s, f, r = Poly(x**2 + 1, x, extension=[sqrt(3)]).sqf_norm()
    >>> s
    1
    >>> f
    Poly(x**2 - 2*sqrt(3)*x + 4, x, domain='QQ<sqrt(3)>')
    >>> r
    Poly(x**4 - 4*x**2 + 16, x, domain='QQ')
    """
    if not hasattr(f.rep, 'sqf_norm'):  # pragma: no cover
        raise OperationNotSupported(f, 'sqf_norm')
    s, g, r = f.rep.sqf_norm()
    return s, f.per(g), f.per(r)
def sqf_part(f):
    """
    Computes square-free part of ``f``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**3 - 3*x - 2, x).sqf_part()
    Poly(x**2 - x - 2, x, domain='ZZ')
    """
    if not hasattr(f.rep, 'sqf_part'):  # pragma: no cover
        raise OperationNotSupported(f, 'sqf_part')
    return f.per(f.rep.sqf_part())
def sqf_list(f, all=False):
    """
    Returns a list of square-free factors of ``f``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> f = 2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16
    >>> Poly(f).sqf_list()
    (2, [(Poly(x + 1, x, domain='ZZ'), 2),
    (Poly(x + 2, x, domain='ZZ'), 3)])
    >>> Poly(f).sqf_list(all=True)
    (2, [(Poly(1, x, domain='ZZ'), 1),
    (Poly(x + 1, x, domain='ZZ'), 2),
    (Poly(x + 2, x, domain='ZZ'), 3)])
    """
    if not hasattr(f.rep, 'sqf_list'):  # pragma: no cover
        raise OperationNotSupported(f, 'sqf_list')
    # ``all`` also reports trivial (multiplicity-gap) factors.
    coeff, factors = f.rep.sqf_list(all)
    return f.rep.dom.to_sympy(coeff), [(f.per(g), k) for g, k in factors]
def sqf_list_include(f, all=False):
    """
    Returns a list of square-free factors of ``f``, content included.

    >>> from sympy import Poly, expand
    >>> from sympy.abc import x
    >>> f = expand(2*(x + 1)**3*x**4)
    >>> f
    2*x**7 + 6*x**6 + 6*x**5 + 2*x**4
    >>> Poly(f).sqf_list_include()
    [(Poly(2, x, domain='ZZ'), 1),
    (Poly(x + 1, x, domain='ZZ'), 3),
    (Poly(x, x, domain='ZZ'), 4)]
    >>> Poly(f).sqf_list_include(all=True)
    [(Poly(2, x, domain='ZZ'), 1),
    (Poly(1, x, domain='ZZ'), 2),
    (Poly(x + 1, x, domain='ZZ'), 3),
    (Poly(x, x, domain='ZZ'), 4)]
    """
    if not hasattr(f.rep, 'sqf_list_include'):  # pragma: no cover
        raise OperationNotSupported(f, 'sqf_list_include')
    # ``all`` also reports trivial (multiplicity-gap) factors.
    return [(f.per(g), k) for g, k in f.rep.sqf_list_include(all)]
def factor_list(f):
    """Return ``(coeff, factors)`` -- the irreducible factorization of ``f``.

    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> Poly(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y).factor_list()
    (2, [(Poly(x + y, x, y, domain='ZZ'), 1),
         (Poly(x**2 + 1, x, y, domain='ZZ'), 2)])
    """
    if not hasattr(f.rep, 'factor_list'):  # pragma: no cover
        raise OperationNotSupported(f, 'factor_list')
    try:
        coeff, factors = f.rep.factor_list()
    except DomainError:
        # No factorization algorithm over this domain: treat ``f`` itself
        # as the single (trivial) factor.
        return S.One, [(f, 1)]
    return f.rep.dom.to_sympy(coeff), [(f.per(g), k) for g, k in factors]

def factor_list_include(f):
    """Return irreducible factors of ``f`` with the content folded into
    the factor list (no separate leading coefficient).

    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> Poly(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y).factor_list_include()
    [(Poly(2*x + 2*y, x, y, domain='ZZ'), 1),
     (Poly(x**2 + 1, x, y, domain='ZZ'), 2)]
    """
    if not hasattr(f.rep, 'factor_list_include'):  # pragma: no cover
        raise OperationNotSupported(f, 'factor_list_include')
    try:
        factors = f.rep.factor_list_include()
    except DomainError:
        return [(f, 1)]
    return [(f.per(g), k) for g, k in factors]
def intervals(f, all=False, eps=None, inf=None, sup=None, fast=False, sqf=False):
    """Compute isolating intervals for the roots of ``f``.

    With ``all=True`` complex roots are isolated too (as rectangles);
    with ``sqf=True`` multiplicities are omitted from the result.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 - 3, x).intervals()
    [((-2, -1), 1), ((1, 2), 1)]
    """
    # Normalize rational parameters up front.
    if eps is not None:
        eps = QQ.convert(eps)
        if eps <= 0:
            raise ValueError("'eps' must be a positive rational")
    if inf is not None:
        inf = QQ.convert(inf)
    if sup is not None:
        sup = QQ.convert(sup)
    if not hasattr(f.rep, 'intervals'):  # pragma: no cover
        raise OperationNotSupported(f, 'intervals')
    result = f.rep.intervals(all=all, eps=eps, inf=inf, sup=sup, fast=fast, sqf=sqf)
    if sqf:
        # Square-free mode: bare intervals, no multiplicities attached.
        def _real(interval):
            s, t = interval
            return (QQ.to_sympy(s), QQ.to_sympy(t))
        if not all:
            return map(_real, result)
        def _complex(rectangle):
            (u, v), (s, t) = rectangle
            return (QQ.to_sympy(u) + I*QQ.to_sympy(v),
                    QQ.to_sympy(s) + I*QQ.to_sympy(t))
        real_part, complex_part = result
        return map(_real, real_part), map(_complex, complex_part)
    else:
        # General mode: every interval/rectangle carries a multiplicity ``k``.
        def _real(interval):
            (s, t), k = interval
            return ((QQ.to_sympy(s), QQ.to_sympy(t)), k)
        if not all:
            return map(_real, result)
        def _complex(rectangle):
            ((u, v), (s, t)), k = rectangle
            return ((QQ.to_sympy(u) + I*QQ.to_sympy(v),
                     QQ.to_sympy(s) + I*QQ.to_sympy(t)), k)
        real_part, complex_part = result
        return map(_real, real_part), map(_complex, complex_part)
def refine_root(f, s, t, eps=None, steps=None, fast=False, check_sqf=False):
    """Refine the isolating interval ``(s, t)`` of a real root of ``f``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 - 3, x).refine_root(1, 2, eps=1e-2)
    (19/11, 26/15)
    """
    if check_sqf and not f.is_sqf:
        raise PolynomialError("only square-free polynomials supported")
    s, t = QQ.convert(s), QQ.convert(t)
    if eps is not None:
        eps = QQ.convert(eps)
        if eps <= 0:
            raise ValueError("'eps' must be a positive rational")
    if steps is not None:
        steps = int(steps)
    elif eps is None:
        steps = 1  # neither bound given: perform a single refinement step
    if not hasattr(f.rep, 'refine_root'):  # pragma: no cover
        raise OperationNotSupported(f, 'refine_root')
    # Local names chosen so as not to shadow the ``S`` singleton.
    lo, hi = f.rep.refine_root(s, t, eps=eps, steps=steps, fast=fast)
    return QQ.to_sympy(lo), QQ.to_sympy(hi)
def count_roots(f, inf=None, sup=None):
    """Return the number of roots of ``f`` in the ``[inf, sup]`` interval.

    Complex bounds switch the count to roots inside the rectangle they span.

    >>> from sympy import Poly, I
    >>> from sympy.abc import x
    >>> Poly(x**4 - 4, x).count_roots(-3, 3)
    2
    >>> Poly(x**4 - 4, x).count_roots(0, 1 + 3*I)
    1
    """
    inf_real, sup_real = True, True
    if inf is not None:
        inf = sympify(inf)
        if inf is S.NegativeInfinity:
            inf = None  # unbounded below
        else:
            re, im = inf.as_real_imag()
            if not im:
                inf = QQ.convert(inf)
            else:
                # Complex bound: keep it as a (re, im) pair over QQ.
                inf, inf_real = map(QQ.convert, (re, im)), False
    if sup is not None:
        sup = sympify(sup)
        if sup is S.Infinity:
            sup = None  # unbounded above
        else:
            re, im = sup.as_real_imag()
            if not im:
                sup = QQ.convert(sup)
            else:
                sup, sup_real = map(QQ.convert, (re, im)), False
    if inf_real and sup_real:
        if not hasattr(f.rep, 'count_real_roots'):  # pragma: no cover
            raise OperationNotSupported(f, 'count_real_roots')
        count = f.rep.count_real_roots(inf=inf, sup=sup)
    else:
        # Mixed case: lift any remaining real bound onto the real axis of QQ^2.
        if inf_real and inf is not None:
            inf = (inf, QQ.zero)
        if sup_real and sup is not None:
            sup = (sup, QQ.zero)
        if not hasattr(f.rep, 'count_complex_roots'):  # pragma: no cover
            raise OperationNotSupported(f, 'count_complex_roots')
        count = f.rep.count_complex_roots(inf=inf, sup=sup)
    return Integer(count)
def root(f, index, radicals=True):
    """Return the ``index``-th root of ``f`` (real roots first, then complex).

    Negative indices count from the end; an out-of-range index raises
    ``IndexError``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(2*x**3 - 7*x**2 + 4*x + 4).root(0)
    -1/2
    """
    RootOf = sympy.polys.rootoftools.RootOf
    return RootOf(f, index, radicals=radicals)
def real_roots(f, multiple=True, radicals=True):
    """Return the real roots of ``f`` with multiplicities.

    With ``multiple=False`` roots are grouped as ``(root, multiplicity)``
    pairs instead of being repeated.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(2*x**3 - 7*x**2 + 4*x + 4).real_roots()
    [-1/2, 2, 2]
    """
    reals = sympy.polys.rootoftools.RootOf.real_roots(f, radicals=radicals)
    if not multiple:
        return group(reals, multiple=False)
    return reals

def all_roots(f, multiple=True, radicals=True):
    """Return all (real and complex) roots of ``f`` with multiplicities.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(2*x**3 - 7*x**2 + 4*x + 4).all_roots()
    [-1/2, 2, 2]
    """
    roots = sympy.polys.rootoftools.RootOf.all_roots(f, radicals=radicals)
    if not multiple:
        return group(roots, multiple=False)
    return roots
def nroots(f, n=15, maxsteps=50, cleanup=True, error=False):
    """Compute numerical approximations of the roots of ``f``.

    ``n`` is the working precision in digits; with ``error=True`` the
    estimated error is returned alongside the roots.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**2 - 3).nroots(n=15)
    [-1.73205080756888, 1.73205080756888]
    """
    if f.is_multivariate:
        raise MultivariatePolynomialError("can't compute numerical roots of %s" % f)
    if f.degree() <= 0:
        return []
    coeffs = [coeff.evalf(n=n).as_real_imag() for coeff in f.all_coeffs()]
    # Save mpmath's global precision; it is restored unconditionally below.
    dps = sympy.mpmath.mp.dps
    sympy.mpmath.mp.dps = n
    try:
        try:
            coeffs = [sympy.mpmath.mpc(*coeff) for coeff in coeffs]
        except TypeError:
            raise DomainError("numerical domain expected, got %s" % f.rep.dom)
        result = sympy.mpmath.polyroots(coeffs, maxsteps=maxsteps, cleanup=cleanup, error=error)
        if error:
            roots, error = result
        else:
            roots, error = result, None
        # Deterministic ordering: sort by real part, then imaginary part.
        roots = map(sympify, sorted(roots, key=lambda r: (r.real, r.imag)))
    finally:
        sympy.mpmath.mp.dps = dps
    if error is not None:
        return roots, sympify(error)
    else:
        return roots
def ground_roots(f):
    """Compute roots of ``f`` that lie in the ground domain, via factorization.

    Returns a ``{root: multiplicity}`` mapping built from the linear factors.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**6 - 4*x**4 + 4*x**3 - x**2).ground_roots()
    {0: 2, 1: 2}
    """
    if f.is_multivariate:
        raise MultivariatePolynomialError("can't compute ground roots of %s" % f)
    roots = {}
    for factor, k in f.factor_list()[1]:
        if not factor.is_linear:
            continue  # only linear factors a*x + b contribute ground roots
        a, b = factor.all_coeffs()
        roots[-b/a] = k
    return roots
def nth_power_roots_poly(f, n):
    """Construct the polynomial whose roots are the ``n``-th powers of the
    roots of ``f``, via a resultant with ``x**n - t``.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(x**4 - x**2 + 1).nth_power_roots_poly(2)
    Poly(x**4 - 2*x**3 + 3*x**2 - 2*x + 1, x, domain='ZZ')
    """
    if f.is_multivariate:
        raise MultivariatePolynomialError("must be a univariate polynomial")
    N = sympify(n)
    if not (N.is_Integer and N >= 1):
        raise ValueError("'n' must an integer and n >= 1, got %s" % n)
    n = int(N)
    x, t = f.gen, Dummy('t')
    # res_x(f(x), x**n - t) eliminates x, leaving a polynomial in t whose
    # roots are the n-th powers of the roots of f.
    r = f.resultant(f.__class__.from_expr(x**n - t, x, t))
    return r.replace(t, x)
def cancel(f, g, include=False):
    """Cancel common factors in the rational function ``f/g``.

    Returns ``(coeff, p, q)``, or just ``(p, q)`` with ``include=True``
    where the coefficient is folded into the polynomials.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> Poly(2*x**2 - 2, x).cancel(Poly(x**2 - 2*x + 1, x))
    (1, Poly(2*x + 2, x, domain='ZZ'), Poly(x - 1, x, domain='ZZ'))
    """
    dom, per, F, G = f._unify(g)
    if not hasattr(F, 'cancel'):  # pragma: no cover
        raise OperationNotSupported(f, 'cancel')
    result = F.cancel(G, include=include)
    if include:
        return tuple(map(per, result))
    # Convert the extracted content to SymPy over the associated ring.
    if dom.has_assoc_Ring:
        dom = dom.get_ring()
    cp, cq, p, q = result
    return dom.to_sympy(cp)/dom.to_sympy(cq), per(p), per(q)
@property
def is_zero(f):
    """True if ``f`` is the zero polynomial. """
    return f.rep.is_zero

@property
def is_one(f):
    """True if ``f`` is the unit polynomial. """
    return f.rep.is_one

@property
def is_sqf(f):
    """True if ``f`` is a square-free polynomial. """
    return f.rep.is_sqf

@property
def is_monic(f):
    """True if the leading coefficient of ``f`` is one. """
    return f.rep.is_monic

@property
def is_primitive(f):
    """True if the GCD of the coefficients of ``f`` is one. """
    return f.rep.is_primitive
@property
def is_ground(f):
    """True if ``f`` is an element of the ground domain (constant in its
    generators; other symbols may still appear in the coefficients). """
    return f.rep.is_ground

@property
def is_linear(f):
    """True if ``f`` has total degree at most one. """
    return f.rep.is_linear

@property
def is_quadratic(f):
    """True if ``f`` has total degree at most two. """
    return f.rep.is_quadratic

@property
def is_monomial(f):
    """True if ``f`` is zero or consists of a single term. """
    return f.rep.is_monomial
@property
def is_homogeneous(f):
    """True if ``f`` is a homogeneous polynomial, i.e. all monomials with
    non-zero coefficients share the same total degree.

    Use :func:`Poly.homogeneous_order` to also obtain that degree.

    >>> from sympy import Poly
    >>> from sympy.abc import x, y
    >>> Poly(x**2 + x*y, x, y).is_homogeneous
    True
    >>> Poly(x**3 + x*y, x, y).is_homogeneous
    False
    """
    return f.rep.is_homogeneous

@property
def is_irreducible(f):
    """True if ``f`` has no non-trivial factors over its domain. """
    return f.rep.is_irreducible

@property
def is_univariate(f):
    """True if ``f`` is a polynomial in exactly one generator. """
    return len(f.gens) == 1

@property
def is_multivariate(f):
    """True if ``f`` is a polynomial in more than one generator. """
    return len(f.gens) != 1

@property
def is_cyclotomic(f):
    """True if ``f`` is a cyclotomic polynomial.

    >>> from sympy import Poly
    >>> from sympy.abc import x
    >>> g = x**16 + x**14 - x**10 - x**8 - x**6 + x**2 + 1
    >>> Poly(g).is_cyclotomic
    True
    """
    return f.rep.is_cyclotomic
def __abs__(f):
    return f.abs()

def __neg__(f):
    return f.neg()

@_sympifyit('g', NotImplemented)
def __add__(f, g):
    if g.is_Poly:
        return f.add(g)
    try:
        g = f.__class__(g, *f.gens)
    except PolynomialError:
        # Not polynomial in f's generators: fall back to Expr arithmetic.
        return f.as_expr() + g
    return f.add(g)

@_sympifyit('g', NotImplemented)
def __radd__(f, g):
    if g.is_Poly:
        return g.add(f)
    try:
        g = f.__class__(g, *f.gens)
    except PolynomialError:
        return g + f.as_expr()
    return g.add(f)

@_sympifyit('g', NotImplemented)
def __sub__(f, g):
    if g.is_Poly:
        return f.sub(g)
    try:
        g = f.__class__(g, *f.gens)
    except PolynomialError:
        return f.as_expr() - g
    return f.sub(g)

@_sympifyit('g', NotImplemented)
def __rsub__(f, g):
    if g.is_Poly:
        return g.sub(f)
    try:
        g = f.__class__(g, *f.gens)
    except PolynomialError:
        return g - f.as_expr()
    return g.sub(f)

@_sympifyit('g', NotImplemented)
def __mul__(f, g):
    if g.is_Poly:
        return f.mul(g)
    try:
        g = f.__class__(g, *f.gens)
    except PolynomialError:
        return f.as_expr()*g
    return f.mul(g)

@_sympifyit('g', NotImplemented)
def __rmul__(f, g):
    if g.is_Poly:
        return g.mul(f)
    try:
        g = f.__class__(g, *f.gens)
    except PolynomialError:
        return g*f.as_expr()
    return g.mul(f)
@_sympifyit('n', NotImplemented)
def __pow__(f, n):
    # Only non-negative integer powers stay within the polynomial ring.
    if not (n.is_Integer and n >= 0):
        return f.as_expr()**n
    return f.pow(n)

@_sympifyit('g', NotImplemented)
def __divmod__(f, g):
    if not g.is_Poly:
        g = f.__class__(g, *f.gens)
    return f.div(g)

@_sympifyit('g', NotImplemented)
def __rdivmod__(f, g):
    if not g.is_Poly:
        g = f.__class__(g, *f.gens)
    return g.div(f)

@_sympifyit('g', NotImplemented)
def __mod__(f, g):
    if not g.is_Poly:
        g = f.__class__(g, *f.gens)
    return f.rem(g)

@_sympifyit('g', NotImplemented)
def __rmod__(f, g):
    if not g.is_Poly:
        g = f.__class__(g, *f.gens)
    return g.rem(f)

@_sympifyit('g', NotImplemented)
def __floordiv__(f, g):
    if not g.is_Poly:
        g = f.__class__(g, *f.gens)
    return f.quo(g)

@_sympifyit('g', NotImplemented)
def __rfloordiv__(f, g):
    if not g.is_Poly:
        g = f.__class__(g, *f.gens)
    return g.quo(f)

@_sympifyit('g', NotImplemented)
def __div__(f, g):
    # True division generally leaves the polynomial ring, so work on Expr.
    return f.as_expr()/g.as_expr()

@_sympifyit('g', NotImplemented)
def __rdiv__(f, g):
    return g.as_expr()/f.as_expr()

__truediv__ = __div__
__rtruediv__ = __rdiv__
@_sympifyit('g', NotImplemented)
def __eq__(f, g):
    # Structural equality: same generators, unified domain, equal reps.
    if not g.is_Poly:
        try:
            g = f.__class__(g, f.gens, domain=f.get_domain())
        except (PolynomialError, DomainError, CoercionFailed):
            return False
    if f.gens != g.gens:
        return False
    if f.rep.dom != g.rep.dom:
        try:
            dom = f.rep.dom.unify(g.rep.dom, f.gens)
        except UnificationFailed:
            return False
        f, g = f.set_domain(dom), g.set_domain(dom)
    return f.rep == g.rep

@_sympifyit('g', NotImplemented)
def __ne__(f, g):
    return not f.__eq__(g)

def __nonzero__(f):
    return not f.is_zero

def eq(f, g, strict=False):
    """Equality test; ``strict=True`` additionally requires an identical
    internal representation. """
    if strict:
        return f._strict_eq(sympify(g))
    return f.__eq__(g)

def ne(f, g, strict=False):
    """Negation of :meth:`eq`. """
    return not f.eq(g, strict=strict)

def _strict_eq(f, g):
    return isinstance(g, f.__class__) and f.gens == g.gens and f.rep.eq(g.rep, strict=True)
class PurePoly(Poly):
    """Class for representing pure polynomials: the generators are treated
    as anonymous, so only their count (not their names) matters. """

    def _hashable_content(self):
        """Allow SymPy to hash Poly instances. """
        # Generators are deliberately excluded: two PurePoly instances with
        # the same rep hash equal regardless of generator names.
        return (self.rep,)

    def __hash__(self):
        return super(PurePoly, self).__hash__()

    @property
    def free_symbols(self):
        """Free symbols of a polynomial: only symbols appearing in the
        coefficient domain count, never the generators themselves.

        >>> from sympy import PurePoly
        >>> from sympy.abc import x, y
        >>> PurePoly(x**2 + y).free_symbols
        set()
        >>> PurePoly(x**2 + y, x).free_symbols
        set([y])
        """
        return self.free_symbols_in_domain

    @_sympifyit('g', NotImplemented)
    def __eq__(f, g):
        if not g.is_Poly:
            try:
                g = f.__class__(g, f.gens, domain=f.get_domain())
            except (PolynomialError, DomainError, CoercionFailed):
                return False
        # Unlike Poly, only the number of generators must agree.
        if len(f.gens) != len(g.gens):
            return False
        if f.rep.dom != g.rep.dom:
            try:
                dom = f.rep.dom.unify(g.rep.dom, f.gens)
            except UnificationFailed:
                return False
            f, g = f.set_domain(dom), g.set_domain(dom)
        return f.rep == g.rep

    def _strict_eq(f, g):
        return isinstance(g, f.__class__) and f.rep.eq(g.rep, strict=True)

    def _unify(f, g):
        g = sympify(g)
        if not g.is_Poly:
            try:
                return f.rep.dom, f.per, f.rep, f.rep.per(f.rep.dom.from_sympy(g))
            except CoercionFailed:
                raise UnificationFailed("can't unify %s with %s" % (f, g))
        if len(f.gens) != len(g.gens):
            raise UnificationFailed("can't unify %s with %s" % (f, g))
        if not (isinstance(f.rep, DMP) and isinstance(g.rep, DMP)):
            raise UnificationFailed("can't unify %s with %s" % (f, g))
        cls, gens = f.__class__, f.gens
        dom = f.rep.dom.unify(g.rep.dom, gens)
        F = f.rep.convert(dom)
        G = g.rep.convert(dom)
        def per(rep, dom=dom, gens=gens, remove=None):
            if remove is not None:
                gens = gens[:remove] + gens[remove + 1:]
                if not gens:
                    return dom.to_sympy(rep)
            return cls.new(rep, *gens)
        return dom, per, F, G
def poly_from_expr(expr, *gens, **args):
    """Construct a polynomial from an expression.

    Thin public wrapper: builds an options object from ``gens``/``args``
    and delegates to :func:`_poly_from_expr`.
    """
    return _poly_from_expr(expr, options.build_options(gens, args))
def _poly_from_expr(expr, opt):
    """Construct a polynomial from an expression, given built options.

    Returns ``(poly, opt)`` with ``opt`` updated to reflect the generators
    and domain actually used.  Raises ``PolificationFailed`` when ``expr``
    cannot be turned into a polynomial.
    """
    orig, expr = expr, sympify(expr)
    if not isinstance(expr, Basic):
        raise PolificationFailed(opt, orig, expr)
    elif expr.is_Poly:
        # Already a Poly: re-wrap under the requested options.
        poly = expr.__class__._from_poly(expr, opt)
        opt['gens'] = poly.gens
        opt['domain'] = poly.domain
        if opt.polys is None:
            opt['polys'] = True
        return poly, opt
    elif opt.expand:
        expr = expr.expand()
    try:
        rep, opt = _dict_from_expr(expr, opt)
    except GeneratorsNeeded:
        raise PolificationFailed(opt, orig, expr)
    monoms, coeffs = zip(*rep.items())
    domain = opt.domain
    if domain is None:
        # No explicit domain: infer the smallest one containing all coeffs.
        domain, coeffs = construct_domain(coeffs, opt=opt)
    else:
        coeffs = map(domain.from_sympy, coeffs)
    level = len(opt.gens) - 1
    poly = Poly.new(DMP.from_monoms_coeffs(monoms, coeffs, level, domain), *opt.gens)
    opt['domain'] = domain
    if opt.polys is None:
        opt['polys'] = False
    return poly, opt
def parallel_poly_from_expr(exprs, *gens, **args):
    """Construct polynomials from expressions.

    Thin public wrapper: builds an options object from ``gens``/``args``
    and delegates to :func:`_parallel_poly_from_expr`.
    """
    return _parallel_poly_from_expr(exprs, options.build_options(gens, args))
def _parallel_poly_from_expr(exprs, opt):
    """Construct polynomials from expressions over a common set of
    generators and a common domain.

    Returns ``(polys, opt)``; raises ``PolificationFailed`` when any of
    the inputs cannot be polified.
    """
    if len(exprs) == 2:
        f, g = exprs
        if isinstance(f, Poly) and isinstance(g, Poly):
            # Fast path for a pair of Polys: unify directly.
            f = f.__class__._from_poly(f, opt)
            g = g.__class__._from_poly(g, opt)
            f, g = f.unify(g)
            opt['gens'] = f.gens
            opt['domain'] = f.domain
            if opt.polys is None:
                opt['polys'] = True
            return [f, g], opt
    origs, exprs = list(exprs), []
    _exprs, _polys = [], []
    failed = False
    for i, expr in enumerate(origs):
        expr = sympify(expr)
        if isinstance(expr, Basic):
            if expr.is_Poly:
                _polys.append(i)
            else:
                _exprs.append(i)
                if opt.expand:
                    expr = expr.expand()
        else:
            failed = True
        exprs.append(expr)
    if failed:
        raise PolificationFailed(opt, origs, exprs, True)
    if _polys:
        # XXX: this is a temporary solution
        for i in _polys:
            exprs[i] = exprs[i].as_expr()
    try:
        reps, opt = _parallel_dict_from_expr(exprs, opt)
    except GeneratorsNeeded:
        raise PolificationFailed(opt, origs, exprs, True)
    # Flatten all coefficients so a single domain covers every polynomial.
    coeffs_list, lengths = [], []
    all_monoms = []
    all_coeffs = []
    for rep in reps:
        monoms, coeffs = zip(*rep.items())
        coeffs_list.extend(coeffs)
        all_monoms.append(monoms)
        lengths.append(len(coeffs))
    domain = opt.domain
    if domain is None:
        domain, coeffs_list = construct_domain(coeffs_list, opt=opt)
    else:
        coeffs_list = map(domain.from_sympy, coeffs_list)
    # Re-split the flat coefficient list back per polynomial.
    for k in lengths:
        all_coeffs.append(coeffs_list[:k])
        coeffs_list = coeffs_list[k:]
    polys, level = [], len(opt.gens) - 1
    for monoms, coeffs in zip(all_monoms, all_coeffs):
        rep = DMP.from_monoms_coeffs(monoms, coeffs, level, domain)
        polys.append(Poly.new(rep, *opt.gens))
    opt['domain'] = domain
    if opt.polys is None:
        opt['polys'] = bool(_polys)
    return polys, opt
def _update_args(args, key, value):
"""Add a new ``(key, value)`` pair to arguments ``dict``. """
args = dict(args)
if key not in args:
args[key] = value
return args
def degree(f, *gens, **args):
"""
Return the degree of ``f`` in the given variable.
Examples
========
>>> from sympy import degree
>>> from sympy.abc import x, y
>>> degree(x**2 + y*x + 1, gen=x)
2
>>> degree(x**2 + y*x + 1, gen=y)
1
"""
options.allowed_flags(args, ['gen', 'polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('degree', 1, exc)
return Integer(F.degree(opt.gen))
def degree_list(f, *gens, **args):
"""
Return a list of degrees of ``f`` in all variables.
Examples
========
>>> from sympy import degree_list
>>> from sympy.abc import x, y
>>> degree_list(x**2 + y*x + 1)
(2, 1)
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('degree_list', 1, exc)
degrees = F.degree_list()
return tuple(map(Integer, degrees))
def LC(f, *gens, **args):
"""
Return the leading coefficient of ``f``.
Examples
========
>>> from sympy import LC
>>> from sympy.abc import x, y
>>> LC(4*x**2 + 2*x*y**2 + x*y + 3*y)
4
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('LC', 1, exc)
return F.LC(order=opt.order)
def LM(f, *gens, **args):
"""
Return the leading monomial of ``f``.
Examples
========
>>> from sympy import LM
>>> from sympy.abc import x, y
>>> LM(4*x**2 + 2*x*y**2 + x*y + 3*y)
x**2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('LM', 1, exc)
monom = F.LM(order=opt.order)
return monom.as_expr()
def LT(f, *gens, **args):
"""
Return the leading term of ``f``.
Examples
========
>>> from sympy import LT
>>> from sympy.abc import x, y
>>> LT(4*x**2 + 2*x*y**2 + x*y + 3*y)
4*x**2
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('LT', 1, exc)
monom, coeff = F.LT(order=opt.order)
return coeff*monom.as_expr()
def pdiv(f, g, *gens, **args):
"""
Compute polynomial pseudo-division of ``f`` and ``g``.
Examples
========
>>> from sympy import pdiv
>>> from sympy.abc import x
>>> pdiv(x**2 + 1, 2*x - 4)
(2*x + 4, 20)
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('pdiv', 2, exc)
q, r = F.pdiv(G)
if not opt.polys:
return q.as_expr(), r.as_expr()
else:
return q, r
def prem(f, g, *gens, **args):
"""
Compute polynomial pseudo-remainder of ``f`` and ``g``.
Examples
========
>>> from sympy import prem
>>> from sympy.abc import x
>>> prem(x**2 + 1, 2*x - 4)
20
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('prem', 2, exc)
r = F.prem(G)
if not opt.polys:
return r.as_expr()
else:
return r
def pquo(f, g, *gens, **args):
"""
Compute polynomial pseudo-quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import pquo
>>> from sympy.abc import x
>>> pquo(x**2 + 1, 2*x - 4)
2*x + 4
>>> pquo(x**2 - 1, 2*x - 1)
2*x + 1
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('pquo', 2, exc)
q = F.pquo(G)
if not opt.polys:
return q.as_expr()
else:
return q
def pexquo(f, g, *gens, **args):
"""
Compute polynomial exact pseudo-quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import pexquo
>>> from sympy.abc import x
>>> pexquo(x**2 - 1, 2*x - 2)
2*x + 2
>>> pexquo(x**2 + 1, 2*x - 4)
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('pexquo', 2, exc)
q = F.pexquo(G)
if not opt.polys:
return q.as_expr()
else:
return q
def div(f, g, *gens, **args):
"""
Compute polynomial division of ``f`` and ``g``.
Examples
========
>>> from sympy import div, ZZ, QQ
>>> from sympy.abc import x
>>> div(x**2 + 1, 2*x - 4, domain=ZZ)
(0, x**2 + 1)
>>> div(x**2 + 1, 2*x - 4, domain=QQ)
(x/2 + 1, 5)
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('div', 2, exc)
q, r = F.div(G, auto=opt.auto)
if not opt.polys:
return q.as_expr(), r.as_expr()
else:
return q, r
def rem(f, g, *gens, **args):
"""
Compute polynomial remainder of ``f`` and ``g``.
Examples
========
>>> from sympy import rem, ZZ, QQ
>>> from sympy.abc import x
>>> rem(x**2 + 1, 2*x - 4, domain=ZZ)
x**2 + 1
>>> rem(x**2 + 1, 2*x - 4, domain=QQ)
5
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('rem', 2, exc)
r = F.rem(G, auto=opt.auto)
if not opt.polys:
return r.as_expr()
else:
return r
def quo(f, g, *gens, **args):
"""
Compute polynomial quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import quo
>>> from sympy.abc import x
>>> quo(x**2 + 1, 2*x - 4)
x/2 + 1
>>> quo(x**2 - 1, x - 1)
x + 1
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('quo', 2, exc)
q = F.quo(G, auto=opt.auto)
if not opt.polys:
return q.as_expr()
else:
return q
def exquo(f, g, *gens, **args):
"""
Compute polynomial exact quotient of ``f`` and ``g``.
Examples
========
>>> from sympy import exquo
>>> from sympy.abc import x
>>> exquo(x**2 - 1, x - 1)
x + 1
>>> exquo(x**2 + 1, 2*x - 4)
Traceback (most recent call last):
...
ExactQuotientFailed: 2*x - 4 does not divide x**2 + 1
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('exquo', 2, exc)
q = F.exquo(G, auto=opt.auto)
if not opt.polys:
return q.as_expr()
else:
return q
def half_gcdex(f, g, *gens, **args):
"""
Half extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, h)`` such that ``h = gcd(f, g)`` and ``s*f = h (mod g)``.
Examples
========
>>> from sympy import half_gcdex
>>> from sympy.abc import x
>>> half_gcdex(x**4 - 2*x**3 - 6*x**2 + 12*x + 15, x**3 + x**2 - 4*x - 4)
(-x/5 + 3/5, x + 1)
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
s, h = domain.half_gcdex(a, b)
except NotImplementedError:
raise ComputationFailed('half_gcdex', 2, exc)
else:
return domain.to_sympy(s), domain.to_sympy(h)
s, h = F.half_gcdex(G, auto=opt.auto)
if not opt.polys:
return s.as_expr(), h.as_expr()
else:
return s, h
def gcdex(f, g, *gens, **args):
"""
Extended Euclidean algorithm of ``f`` and ``g``.
Returns ``(s, t, h)`` such that ``h = gcd(f, g)`` and ``s*f + t*g = h``.
Examples
========
>>> from sympy import gcdex
>>> from sympy.abc import x
>>> gcdex(x**4 - 2*x**3 - 6*x**2 + 12*x + 15, x**3 + x**2 - 4*x - 4)
(-x/5 + 3/5, x**2/5 - 6*x/5 + 2, x + 1)
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
s, t, h = domain.gcdex(a, b)
except NotImplementedError:
raise ComputationFailed('gcdex', 2, exc)
else:
return domain.to_sympy(s), domain.to_sympy(t),domain.to_sympy(h)
s, t, h = F.gcdex(G, auto=opt.auto)
if not opt.polys:
return s.as_expr(), t.as_expr(), h.as_expr()
else:
return s, t, h
def invert(f, g, *gens, **args):
"""
Invert ``f`` modulo ``g`` when possible.
Examples
========
>>> from sympy import invert
>>> from sympy.abc import x
>>> invert(x**2 - 1, 2*x - 1)
-4/3
>>> invert(x**2 - 1, x - 1)
Traceback (most recent call last):
...
NotInvertible: zero divisor
"""
options.allowed_flags(args, ['auto', 'polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
return domain.to_sympy(domain.invert(a, b))
except NotImplementedError:
raise ComputationFailed('invert', 2, exc)
h = F.invert(G, auto=opt.auto)
if not opt.polys:
return h.as_expr()
else:
return h
def subresultants(f, g, *gens, **args):
"""
Compute subresultant PRS of ``f`` and ``g``.
Examples
========
>>> from sympy import subresultants
>>> from sympy.abc import x
>>> subresultants(x**2 + 1, x**2 - 1)
[x**2 + 1, x**2 - 1, -2]
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('subresultants', 2, exc)
result = F.subresultants(G)
if not opt.polys:
return [ r.as_expr() for r in result ]
else:
return result
def resultant(f, g, *gens, **args):
"""
Compute resultant of ``f`` and ``g``.
Examples
========
>>> from sympy import resultant
>>> from sympy.abc import x
>>> resultant(x**2 + 1, x**2 - 1)
4
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('resultant', 2, exc)
result = F.resultant(G)
if not opt.polys:
return result.as_expr()
else:
return result
def discriminant(f, *gens, **args):
"""
Compute discriminant of ``f``.
Examples
========
>>> from sympy import discriminant
>>> from sympy.abc import x
>>> discriminant(x**2 + 2*x + 3)
-8
"""
options.allowed_flags(args, ['polys'])
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed, exc:
raise ComputationFailed('discriminant', 1, exc)
result = F.discriminant()
if not opt.polys:
return result.as_expr()
else:
return result
def cofactors(f, g, *gens, **args):
"""
Compute GCD and cofactors of ``f`` and ``g``.
Returns polynomials ``(h, cff, cfg)`` such that ``h = gcd(f, g)``, and
``cff = quo(f, h)`` and ``cfg = quo(g, h)`` are, so called, cofactors
of ``f`` and ``g``.
Examples
========
>>> from sympy import cofactors
>>> from sympy.abc import x
>>> cofactors(x**2 - 1, x**2 - 3*x + 2)
(x - 1, x + 1, x - 2)
"""
options.allowed_flags(args, ['polys'])
try:
(F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
except PolificationFailed, exc:
domain, (a, b) = construct_domain(exc.exprs)
try:
h, cff, cfg = domain.cofactors(a, b)
except NotImplementedError:
raise ComputationFailed('cofactors', 2, exc)
else:
return domain.to_sympy(h), domain.to_sympy(cff), domain.to_sympy(cfg)
h, cff, cfg = F.cofactors(G)
if not opt.polys:
return h.as_expr(), cff.as_expr(), cfg.as_expr()
else:
return h, cff, cfg
def gcd_list(seq, *gens, **args):
    """
    Compute GCD of a list of polynomials.

    Examples
    ========

    >>> from sympy import gcd_list
    >>> from sympy.abc import x

    >>> gcd_list([x**3 - 1, x**2 - 1, x**2 - 3*x + 2])
    x - 1

    """
    seq = sympify(seq)

    if not gens and not args:
        # Fast path: with no generators and no options, purely numerical
        # input is handled directly in the constructed ground domain.
        domain, numbers = construct_domain(seq)

        if not numbers:
            return domain.zero
        elif domain.is_Numerical:
            result, numbers = numbers[0], numbers[1:]

            for number in numbers:
                result = domain.gcd(result, number)

                if domain.is_one(result):
                    break  # GCD can't shrink below one

            return domain.to_sympy(result)

    options.allowed_flags(args, ['polys'])

    try:
        polys, opt = parallel_poly_from_expr(seq, *gens, **args)
    except PolificationFailed as exc:  # Py3-compatible exception binding
        raise ComputationFailed('gcd_list', len(seq), exc)

    if not polys:
        if not opt.polys:
            return S.Zero
        else:
            return Poly(0, opt=opt)

    result, polys = polys[0], polys[1:]

    for poly in polys:
        result = result.gcd(poly)

        if result.is_one:
            break

    if not opt.polys:
        return result.as_expr()
    else:
        return result
def gcd(f, g=None, *gens, **args):
    """
    Compute GCD of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import gcd
    >>> from sympy.abc import x

    >>> gcd(x**2 - 1, x**2 - 3*x + 2)
    x - 1

    """
    if hasattr(f, '__iter__'):
        # A sequence was given: ``g`` (if any) is really the first generator.
        if g is not None:
            gens = (g,) + gens

        return gcd_list(f, *gens, **args)
    elif g is None:
        raise TypeError("gcd() takes 2 arguments or a sequence of arguments")

    options.allowed_flags(args, ['polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed as exc:
        # Numerical input: delegate to the ground domain's GCD.
        domain, (a, b) = construct_domain(exc.exprs)

        try:
            return domain.to_sympy(domain.gcd(a, b))
        except NotImplementedError:
            raise ComputationFailed('gcd', 2, exc)

    result = F.gcd(G)

    if not opt.polys:
        return result.as_expr()
    else:
        return result
def lcm_list(seq, *gens, **args):
    """
    Compute LCM of a list of polynomials.

    Examples
    ========

    >>> from sympy import lcm_list
    >>> from sympy.abc import x

    >>> lcm_list([x**3 - 1, x**2 - 1, x**2 - 3*x + 2])
    x**5 - x**4 - 2*x**3 - x**2 + x + 2

    """
    seq = sympify(seq)

    if not gens and not args:
        # Fast path for purely numerical input (mirrors ``gcd_list``).
        domain, numbers = construct_domain(seq)

        if not numbers:
            return domain.one
        elif domain.is_Numerical:
            result, numbers = numbers[0], numbers[1:]

            for number in numbers:
                result = domain.lcm(result, number)

            return domain.to_sympy(result)

    options.allowed_flags(args, ['polys'])

    try:
        polys, opt = parallel_poly_from_expr(seq, *gens, **args)
    except PolificationFailed as exc:  # Py3-compatible exception binding
        raise ComputationFailed('lcm_list', len(seq), exc)

    if not polys:
        if not opt.polys:
            return S.One
        else:
            return Poly(1, opt=opt)

    result, polys = polys[0], polys[1:]

    for poly in polys:
        result = result.lcm(poly)

    if not opt.polys:
        return result.as_expr()
    else:
        return result
def lcm(f, g=None, *gens, **args):
    """
    Compute LCM of ``f`` and ``g``.

    Examples
    ========

    >>> from sympy import lcm
    >>> from sympy.abc import x

    >>> lcm(x**2 - 1, x**2 - 3*x + 2)
    x**3 - 2*x**2 - x + 2

    """
    if hasattr(f, '__iter__'):
        # A sequence was given: ``g`` (if any) is really the first generator.
        if g is not None:
            gens = (g,) + gens

        return lcm_list(f, *gens, **args)
    elif g is None:
        raise TypeError("lcm() takes 2 arguments or a sequence of arguments")

    options.allowed_flags(args, ['polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed as exc:
        # Numerical input: delegate to the ground domain's LCM.
        domain, (a, b) = construct_domain(exc.exprs)

        try:
            return domain.to_sympy(domain.lcm(a, b))
        except NotImplementedError:
            raise ComputationFailed('lcm', 2, exc)

    result = F.lcm(G)

    if not opt.polys:
        return result.as_expr()
    else:
        return result
def terms_gcd(f, *gens, **args):
    """
    Remove GCD of terms from ``f``.

    If the ``deep`` flag is True, then the arguments of ``f`` will have
    terms_gcd applied to them.

    If a fraction is factored out of ``f`` and ``f`` is an Add, then
    an unevaluated Mul will be returned so that automatic simplification
    does not redistribute it. The hint ``clear``, when set to False, can be
    used to prevent such factoring when all coefficients are not fractions.

    Examples
    ========

    >>> from sympy import terms_gcd, cos, pi
    >>> from sympy.abc import x, y
    >>> terms_gcd(x**6*y**2 + x**3*y, x, y)
    x**3*y*(x**3*y + 1)

    The default action of polys routines is to expand the expression
    given to them. terms_gcd follows this behavior:

    >>> terms_gcd((3+3*x)*(x+x*y))
    3*x*(x*y + x + y + 1)

    If this is not desired then the hint ``expand`` can be set to False.
    In this case the expression will be treated as though it were comprised
    of one or more terms:

    >>> terms_gcd((3+3*x)*(x+x*y), expand=False)
    (3*x + 3)*(x*y + x)

    In order to traverse factors of a Mul or the arguments of other
    functions, the ``deep`` hint can be used:

    >>> terms_gcd((3 + 3*x)*(x + x*y), expand=False, deep=True)
    3*x*(x + 1)*(y + 1)
    >>> terms_gcd(cos(x + x*y), deep=True)
    cos(x*(y + 1))

    Rationals are factored out by default:

    >>> terms_gcd(x + y/2)
    (2*x + y)/2

    Only the y-term had a coefficient that was a fraction; if one
    does not want to factor out the 1/2 in cases like this, the
    flag ``clear`` can be set to False:

    >>> terms_gcd(x + y/2, clear=False)
    x + y/2

    The ``clear`` flag is ignored if all coefficients are fractions:

    >>> terms_gcd(x/3 + y/2, clear=False)
    (2*x + 3*y)/6

    See Also
    ========
    sympy.core.exprtools.gcd_terms, sympy.core.exprtools.factor_terms
    """
    if not isinstance(f, Expr) or f.is_Atom:
        return sympify(f)

    if args.get('deep', False):
        # Recurse into the arguments first, then redo the top level
        # without re-expanding what the recursion produced.
        new = f.func(*[terms_gcd(a, *gens, **args) for a in f.args])
        args.pop('deep')
        args['expand'] = False
        return terms_gcd(new, *gens, **args)

    clear = args.pop('clear', True)
    options.allowed_flags(args, ['polys'])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:  # Py3-compatible exception binding
        return exc.expr

    J, f = F.terms_gcd()

    if opt.domain.has_Ring:
        if opt.domain.has_Field:
            denom, f = f.clear_denoms(convert=True)

        coeff, f = f.primitive()

        if opt.domain.has_Field:
            coeff /= denom
    else:
        coeff = S.One

    # Reassemble the extracted monomial x1**j1 * x2**j2 * ...
    term = Mul(*[x**j for x, j in zip(f.gens, J)])

    return _keep_coeff(coeff, term*f.as_expr(), clear=clear)
def trunc(f, p, *gens, **args):
    """
    Reduce ``f`` modulo a constant ``p``.

    Examples
    ========

    >>> from sympy import trunc
    >>> from sympy.abc import x

    >>> trunc(2*x**3 + 3*x**2 + 5*x + 7, 3)
    -x**3 - x + 1

    """
    options.allowed_flags(args, ['auto', 'polys'])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:  # Py3-compatible exception binding
        raise ComputationFailed('trunc', 1, exc)

    result = F.trunc(sympify(p))

    if not opt.polys:
        return result.as_expr()
    else:
        return result
def monic(f, *gens, **args):
    """
    Divide all coefficients of ``f`` by ``LC(f)``.

    Examples
    ========

    >>> from sympy import monic
    >>> from sympy.abc import x

    >>> monic(3*x**2 + 4*x + 2)
    x**2 + 4*x/3 + 2/3

    """
    options.allowed_flags(args, ['auto', 'polys'])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:  # Py3-compatible exception binding
        raise ComputationFailed('monic', 1, exc)

    result = F.monic(auto=opt.auto)

    if not opt.polys:
        return result.as_expr()
    else:
        return result
def content(f, *gens, **args):
    """
    Compute GCD of coefficients of ``f``.

    Examples
    ========

    >>> from sympy import content
    >>> from sympy.abc import x

    >>> content(6*x**2 + 8*x + 12)
    2

    """
    options.allowed_flags(args, ['polys'])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:  # Py3-compatible exception binding
        raise ComputationFailed('content', 1, exc)

    return F.content()
def primitive(f, *gens, **args):
    """
    Compute content and the primitive form of ``f``.

    Examples
    ========

    >>> from sympy.polys.polytools import primitive
    >>> from sympy.abc import x, y

    >>> primitive(6*x**2 + 8*x + 12)
    (2, 3*x**2 + 4*x + 6)

    >>> eq = (2 + 2*x)*x + 2

    Expansion is performed by default:

    >>> primitive(eq)
    (2, x**2 + x + 1)

    Set ``expand`` to False to shut this off. Note that the
    extraction will not be recursive; use the as_content_primitive method
    for recursive, non-destructive Rational extraction.

    >>> primitive(eq, expand=False)
    (1, x*(2*x + 2) + 2)

    >>> eq.as_content_primitive()
    (2, x*(x + 1) + 1)

    """
    options.allowed_flags(args, ['polys'])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:  # Py3-compatible exception binding
        raise ComputationFailed('primitive', 1, exc)

    cont, result = F.primitive()

    if not opt.polys:
        return cont, result.as_expr()
    else:
        return cont, result
def compose(f, g, *gens, **args):
    """
    Compute functional composition ``f(g)``.

    Examples
    ========

    >>> from sympy import compose
    >>> from sympy.abc import x

    >>> compose(x**2 + x, x - 1)
    x**2 - x

    """
    options.allowed_flags(args, ['polys'])

    try:
        (F, G), opt = parallel_poly_from_expr((f, g), *gens, **args)
    except PolificationFailed as exc:  # Py3-compatible exception binding
        raise ComputationFailed('compose', 2, exc)

    result = F.compose(G)

    if not opt.polys:
        return result.as_expr()
    else:
        return result
def decompose(f, *gens, **args):
    """
    Compute functional decomposition of ``f``.

    Examples
    ========

    >>> from sympy import decompose
    >>> from sympy.abc import x

    >>> decompose(x**4 + 2*x**3 - x - 1)
    [x**2 - x - 1, x**2 + x]

    """
    options.allowed_flags(args, ['polys'])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:  # Py3-compatible exception binding
        raise ComputationFailed('decompose', 1, exc)

    result = F.decompose()

    if not opt.polys:
        return [r.as_expr() for r in result]
    else:
        return result
def sturm(f, *gens, **args):
    """
    Compute Sturm sequence of ``f``.

    Examples
    ========

    >>> from sympy import sturm
    >>> from sympy.abc import x

    >>> sturm(x**3 - 2*x**2 + x - 3)
    [x**3 - 2*x**2 + x - 3, 3*x**2 - 4*x + 1, 2*x/9 + 25/9, -2079/4]

    """
    options.allowed_flags(args, ['auto', 'polys'])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:  # Py3-compatible exception binding
        raise ComputationFailed('sturm', 1, exc)

    result = F.sturm(auto=opt.auto)

    if not opt.polys:
        return [r.as_expr() for r in result]
    else:
        return result
def gff_list(f, *gens, **args):
    """
    Compute a list of greatest factorial factors of ``f``.

    Examples
    ========

    >>> from sympy import gff_list, ff
    >>> from sympy.abc import x

    >>> f = x**5 + 2*x**4 - x**3 - 2*x**2

    >>> gff_list(f)
    [(x, 1), (x + 2, 4)]

    >>> (ff(x, 1)*ff(x + 2, 4)).expand() == f
    True

    """
    options.allowed_flags(args, ['polys'])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:  # Py3-compatible exception binding
        raise ComputationFailed('gff_list', 1, exc)

    factors = F.gff_list()

    if not opt.polys:
        return [(g.as_expr(), k) for g, k in factors]
    else:
        return factors
def gff(f, *gens, **args):
    """Compute greatest factorial factorization of ``f``. """
    # Not implemented: requires symbolic falling-factorial support.
    raise NotImplementedError('symbolic falling factorial')
def sqf_norm(f, *gens, **args):
    """
    Compute square-free norm of ``f``.

    Returns ``s``, ``f``, ``r``, such that ``g(x) = f(x-sa)`` and
    ``r(x) = Norm(g(x))`` is a square-free polynomial over ``K``,
    where ``a`` is the algebraic extension of the ground domain.

    Examples
    ========

    >>> from sympy import sqf_norm, sqrt
    >>> from sympy.abc import x

    >>> sqf_norm(x**2 + 1, extension=[sqrt(3)])
    (1, x**2 - 2*sqrt(3)*x + 4, x**4 - 4*x**2 + 16)

    """
    options.allowed_flags(args, ['polys'])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:  # Py3-compatible exception binding
        raise ComputationFailed('sqf_norm', 1, exc)

    s, g, r = F.sqf_norm()

    if not opt.polys:
        return Integer(s), g.as_expr(), r.as_expr()
    else:
        return Integer(s), g, r
def sqf_part(f, *gens, **args):
    """
    Compute square-free part of ``f``.

    Examples
    ========

    >>> from sympy import sqf_part
    >>> from sympy.abc import x

    >>> sqf_part(x**3 - 3*x - 2)
    x**2 - x - 2

    """
    options.allowed_flags(args, ['polys'])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:  # Py3-compatible exception binding
        raise ComputationFailed('sqf_part', 1, exc)

    result = F.sqf_part()

    if not opt.polys:
        return result.as_expr()
    else:
        return result
def _sorted_factors(factors, method):
"""Sort a list of ``(expr, exp)`` pairs. """
if method == 'sqf':
def key(obj):
poly, exp = obj
rep = poly.rep.rep
return (exp, len(rep), rep)
else:
def key(obj):
poly, exp = obj
rep = poly.rep.rep
return (len(rep), exp, rep)
return sorted(factors, key=key)
def _factors_product(factors):
    """Multiply a list of ``(expr, exp)`` pairs into a single expression. """
    return Mul(*(base.as_expr()**exp for base, exp in factors))
def _symbolic_factor_list(expr, opt, method):
    """Helper function for :func:`_symbolic_factor`.

    Splits ``expr`` into a numeric coefficient and a list of
    ``(factor, exponent)`` pairs, applying the Poly method named by
    ``method`` (``'sqf'`` or ``'factor'``) to each polifiable base.
    """
    coeff, factors = S.One, []

    for arg in Mul.make_args(expr):
        if arg.is_Number:
            coeff *= arg
            continue
        elif arg.is_Pow:
            base, exp = arg.args

            if base.is_Number:
                factors.append((base, exp))
                continue
        else:
            base, exp = arg, S.One

        try:
            poly, _ = _poly_from_expr(base, opt)
        except PolificationFailed as exc:  # Py3-compatible exception binding
            factors.append((exc.expr, exp))
        else:
            func = getattr(poly, method + '_list')
            _coeff, _factors = func()

            if _coeff is not S.One:
                if exp.is_Integer:
                    coeff *= _coeff**exp
                elif _coeff.is_positive:
                    factors.append((_coeff, exp))
                else:
                    # Negative non-integer-power coefficient: keep it with
                    # the factors (marked with a ``None`` exponent).
                    _factors.append((_coeff, None))

            if exp is S.One:
                factors.extend(_factors)
            elif exp.is_integer or len(_factors) == 1:
                factors.extend([(f, k*exp) for f, k in _factors])
            else:
                # Fractional power of a product: only provably positive
                # factors may be distributed under the exponent.
                other = []

                for f, k in _factors:
                    if f.as_expr().is_positive:
                        factors.append((f, k*exp))
                    elif k is not None:
                        other.append((f, k))
                    else:
                        other.append((f, S.One))

                if len(other) == 1:
                    f, k = other[0]
                    factors.append((f, k*exp))
                else:
                    factors.append((_factors_product(other), exp))

    return coeff, factors
def _symbolic_factor(expr, opt, method):
    """Helper function for :func:`_factor`. """
    # Genuine (non-relational) expressions are factored directly.
    if isinstance(expr, Expr) and not expr.is_Relational:
        coeff, factors = _symbolic_factor_list(together(expr), opt, method)
        return _keep_coeff(coeff, _factors_product(factors))

    # Compound objects (relationals, etc.): rebuild from factored arguments.
    if hasattr(expr, 'args'):
        rebuilt = [_symbolic_factor(arg, opt, method) for arg in expr.args]
        return expr.func(*rebuilt)

    # Plain iterables: map over the elements, preserving the container type.
    if hasattr(expr, '__iter__'):
        return expr.__class__([_symbolic_factor(arg, opt, method) for arg in expr])

    return expr
def _generic_factor_list(expr, gens, args, method):
    """Helper function for :func:`sqf_list` and :func:`factor_list`.

    Factors numerator and denominator separately and returns either
    ``(coeff, fp)`` or, with the ``frac`` flag, ``(coeff, fp, fq)``.
    Raises :class:`PolynomialError` when ``expr`` is not polynomial-like.
    """
    options.allowed_flags(args, ['frac', 'polys'])
    opt = options.build_options(gens, args)

    expr = sympify(expr)

    if isinstance(expr, Expr) and not expr.is_Relational:
        numer, denom = together(expr).as_numer_denom()

        cp, fp = _symbolic_factor_list(numer, opt, method)
        cq, fq = _symbolic_factor_list(denom, opt, method)

        # A non-trivial denominator is only allowed when 'frac' was requested.
        if fq and not opt.frac:
            raise PolynomialError("a polynomial expected, got %s" % expr)

        _opt = opt.clone(dict(expand=True))

        # Ensure every factor is a Poly before sorting (in-place rewrite).
        for factors in (fp, fq):
            for i, (f, k) in enumerate(factors):
                if not f.is_Poly:
                    f, _ = _poly_from_expr(f, _opt)
                    factors[i] = (f, k)

        fp = _sorted_factors(fp, method)
        fq = _sorted_factors(fq, method)

        if not opt.polys:
            fp = [ (f.as_expr(), k) for f, k in fp ]
            fq = [ (f.as_expr(), k) for f, k in fq ]

        coeff = cp/cq

        if not opt.frac:
            return coeff, fp
        else:
            return coeff, fp, fq
    else:
        raise PolynomialError("a polynomial expected, got %s" % expr)
def _generic_factor(expr, gens, args, method):
    """Helper function for :func:`sqf` and :func:`factor`. """
    options.allowed_flags(args, [])
    opt = options.build_options(gens, args)
    expr = sympify(expr)
    return _symbolic_factor(expr, opt, method)
def sqf_list(f, *gens, **args):
    """
    Compute a list of square-free factors of ``f``.

    Examples
    ========

    >>> from sympy import sqf_list
    >>> from sympy.abc import x

    >>> sqf_list(2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16)
    (2, [(x + 1, 2), (x + 2, 3)])

    """
    # Delegate to the shared factor-list machinery in square-free mode.
    result = _generic_factor_list(f, gens, args, method='sqf')
    return result
def sqf(f, *gens, **args):
    """
    Compute square-free factorization of ``f``.

    Examples
    ========

    >>> from sympy import sqf
    >>> from sympy.abc import x

    >>> sqf(2*x**5 + 16*x**4 + 50*x**3 + 76*x**2 + 56*x + 16)
    2*(x + 1)**2*(x + 2)**3

    """
    # Delegate to the shared factoring machinery in square-free mode.
    result = _generic_factor(f, gens, args, method='sqf')
    return result
def factor_list(f, *gens, **args):
    """
    Compute a list of irreducible factors of ``f``.

    Examples
    ========

    >>> from sympy import factor_list
    >>> from sympy.abc import x, y

    >>> factor_list(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y)
    (2, [(x + y, 1), (x**2 + 1, 2)])

    """
    # Delegate to the shared factor-list machinery in full-factor mode.
    result = _generic_factor_list(f, gens, args, method='factor')
    return result
def factor(f, *gens, **args):
    """
    Compute the factorization of ``f`` into irreducibles. (Use factorint to
    factor an integer.)

    There two modes implemented: symbolic and formal. If ``f`` is not an
    instance of :class:`Poly` and generators are not specified, then the
    former mode is used. Otherwise, the formal mode is used.

    In symbolic mode, :func:`factor` will traverse the expression tree and
    factor its components without any prior expansion, unless an instance
    of :class:`Add` is encountered (in this case formal factorization is
    used). This way :func:`factor` can handle large or symbolic exponents.

    By default, the factorization is computed over the rationals. To factor
    over other domain, e.g. an algebraic or finite field, use appropriate
    options: ``extension``, ``modulus`` or ``domain``.

    Examples
    ========

    >>> from sympy import factor, sqrt
    >>> from sympy.abc import x, y

    >>> factor(2*x**5 + 2*x**4*y + 4*x**3 + 4*x**2*y + 2*x + 2*y)
    2*(x + y)*(x**2 + 1)**2

    >>> factor(x**2 + 1)
    x**2 + 1
    >>> factor(x**2 + 1, modulus=2)
    (x + 1)**2
    >>> factor(x**2 + 1, gaussian=True)
    (x - I)*(x + I)

    >>> factor(x**2 - 2, extension=sqrt(2))
    (x - sqrt(2))*(x + sqrt(2))

    >>> factor((x**2 - 1)/(x**2 + 4*x + 4))
    (x - 1)*(x + 1)/(x + 2)**2
    >>> factor((x**2 + 4*x + 4)**10000000*(x**2 + 1))
    (x + 2)**20000000*(x**2 + 1)

    """
    try:
        return _generic_factor(f, gens, args, method='factor')
    except PolynomialError as msg:
        # ``except E as msg`` works on Python 2.6+ and 3.x (PEP 3110).
        # Non-commutative expressions get a dedicated fallback.
        if not f.is_commutative:
            from sympy.core.exprtools import factor_nc
            return factor_nc(f)
        else:
            raise PolynomialError(msg)
def intervals(F, all=False, eps=None, inf=None, sup=None, strict=False, fast=False, sqf=False):
    """
    Compute isolating intervals for roots of ``f``.

    ``F`` may be a single polynomial/expression or an iterable of them;
    the iterable form isolates real roots of all polynomials jointly
    (univariate over QQ only).

    Examples
    ========

    >>> from sympy import intervals
    >>> from sympy.abc import x

    >>> intervals(x**2 - 3)
    [((-2, -1), 1), ((1, 2), 1)]
    >>> intervals(x**2 - 3, eps=1e-2)
    [((-26/15, -19/11), 1), ((19/11, 26/15), 1)]

    """
    if not hasattr(F, '__iter__'):
        # Single-polynomial case: delegate entirely to Poly.intervals.
        try:
            F = Poly(F)
        except GeneratorsNeeded:
            # Constant input has no roots, hence no intervals.
            return []

        return F.intervals(all=all, eps=eps, inf=inf, sup=sup, fast=fast, sqf=sqf)
    else:
        polys, opt = parallel_poly_from_expr(F, domain='QQ')

        if len(opt.gens) > 1:
            raise MultivariatePolynomialError

        # Work on the low-level dense representations.
        for i, poly in enumerate(polys):
            polys[i] = poly.rep.rep

        if eps is not None:
            eps = opt.domain.convert(eps)

            if eps <= 0:
                raise ValueError("'eps' must be a positive rational")

        if inf is not None:
            inf = opt.domain.convert(inf)
        if sup is not None:
            sup = opt.domain.convert(sup)

        intervals = dup_isolate_real_roots_list(polys, opt.domain,
            eps=eps, inf=inf, sup=sup, strict=strict, fast=fast)

        # Convert interval endpoints back from domain elements to SymPy.
        result = []

        for (s, t), indices in intervals:
            s, t = opt.domain.to_sympy(s), opt.domain.to_sympy(t)
            result.append(((s, t), indices))

        return result
def refine_root(f, s, t, eps=None, steps=None, fast=False, check_sqf=False):
    """
    Refine an isolating interval of a root to the given precision.

    Examples
    ========

    >>> from sympy import refine_root
    >>> from sympy.abc import x

    >>> refine_root(x**2 - 3, 1, 2, eps=1e-2)
    (19/11, 26/15)

    """
    try:
        polynomial = Poly(f)
    except GeneratorsNeeded:
        raise PolynomialError("can't refine a root of %s, not a polynomial" % f)
    else:
        return polynomial.refine_root(
            s, t, eps=eps, steps=steps, fast=fast, check_sqf=check_sqf)
def count_roots(f, inf=None, sup=None):
    """
    Return the number of roots of ``f`` in ``[inf, sup]`` interval.

    If one of ``inf`` or ``sup`` is complex, it will return the number of roots
    in the complex rectangle with corners at ``inf`` and ``sup``.

    Examples
    ========

    >>> from sympy import count_roots, I
    >>> from sympy.abc import x

    >>> count_roots(x**4 - 4, -3, 3)
    2
    >>> count_roots(x**4 - 4, 0, 1 + 3*I)
    1

    """
    try:
        polynomial = Poly(f, greedy=False)
    except GeneratorsNeeded:
        raise PolynomialError("can't count roots of %s, not a polynomial" % f)
    else:
        return polynomial.count_roots(inf=inf, sup=sup)
def real_roots(f, multiple=True):
    """
    Return a list of real roots with multiplicities of ``f``.

    Examples
    ========

    >>> from sympy import real_roots
    >>> from sympy.abc import x

    >>> real_roots(2*x**3 - 7*x**2 + 4*x + 4)
    [-1/2, 2, 2]

    """
    try:
        polynomial = Poly(f, greedy=False)
    except GeneratorsNeeded:
        raise PolynomialError("can't compute real roots of %s, not a polynomial" % f)
    else:
        return polynomial.real_roots(multiple=multiple)
def nroots(f, n=15, maxsteps=50, cleanup=True, error=False):
    """
    Compute numerical approximations of roots of ``f``.

    Examples
    ========

    >>> from sympy import nroots
    >>> from sympy.abc import x

    >>> nroots(x**2 - 3, n=15)
    [-1.73205080756888, 1.73205080756888]
    >>> nroots(x**2 - 3, n=30)
    [-1.73205080756887729352744634151, 1.73205080756887729352744634151]

    """
    try:
        polynomial = Poly(f, greedy=False)
    except GeneratorsNeeded:
        raise PolynomialError("can't compute numerical roots of %s, not a polynomial" % f)
    else:
        return polynomial.nroots(n=n, maxsteps=maxsteps, cleanup=cleanup, error=error)
def ground_roots(f, *gens, **args):
    """
    Compute roots of ``f`` by factorization in the ground domain.

    Examples
    ========

    >>> from sympy import ground_roots
    >>> from sympy.abc import x

    >>> ground_roots(x**6 - 4*x**4 + 4*x**3 - x**2)
    {0: 2, 1: 2}

    """
    options.allowed_flags(args, [])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:  # Py3-compatible exception binding
        raise ComputationFailed('ground_roots', 1, exc)

    return F.ground_roots()
def nth_power_roots_poly(f, n, *gens, **args):
    """
    Construct a polynomial with n-th powers of roots of ``f``.

    Examples
    ========

    >>> from sympy import nth_power_roots_poly, factor, roots
    >>> from sympy.abc import x

    >>> f = x**4 - x**2 + 1
    >>> g = factor(nth_power_roots_poly(f, 2))

    >>> g
    (x**2 - x + 1)**2

    >>> R_f = [ (r**2).expand() for r in roots(f) ]
    >>> R_g = roots(g).keys()

    >>> set(R_f) == set(R_g)
    True

    """
    options.allowed_flags(args, [])

    try:
        F, opt = poly_from_expr(f, *gens, **args)
    except PolificationFailed as exc:  # Py3-compatible exception binding
        raise ComputationFailed('nth_power_roots_poly', 1, exc)

    result = F.nth_power_roots_poly(n)

    if not opt.polys:
        return result.as_expr()
    else:
        return result
def cancel(f, *gens, **args):
    """
    Cancel common factors in a rational function ``f``.

    Examples
    ========

    >>> from sympy import cancel
    >>> from sympy.abc import x

    >>> cancel((2*x**2 - 2)/(x**2 - 2*x + 1))
    (2*x + 2)/(x - 1)

    """
    options.allowed_flags(args, ['polys'])

    f = sympify(f)
    given_as_pair = isinstance(f, (tuple, Tuple))

    # Obtain the numerator/denominator pair to cancel.
    if given_as_pair:
        p, q = f
    elif f.is_Number:
        return f
    else:
        p, q = f.as_numer_denom()

    try:
        (F, G), opt = parallel_poly_from_expr((p, q), *gens, **args)
    except PolificationFailed:
        # Nothing polynomial to cancel: return the input unchanged.
        if given_as_pair:
            return S.One, p, q
        return f

    c, P, Q = F.cancel(G)

    if not given_as_pair:
        return c*(P.as_expr()/Q.as_expr())

    if opt.polys:
        return c, P, Q
    return c, P.as_expr(), Q.as_expr()
def reduced(f, G, *gens, **args):
    """
    Reduces a polynomial ``f`` modulo a set of polynomials ``G``.

    Given a polynomial ``f`` and a set of polynomials ``G = (g_1, ..., g_n)``,
    computes a set of quotients ``q = (q_1, ..., q_n)`` and the remainder ``r``
    such that ``f = q_1*f_1 + ... + q_n*f_n + r``, where ``r`` vanishes or ``r``
    is a completely reduced polynomial with respect to ``G``.

    Examples
    ========

    >>> from sympy import reduced
    >>> from sympy.abc import x, y

    >>> reduced(2*x**4 + y**2 - x**2 + y**3, [x**3 - x, y**3 - y])
    ([2*x, 1], x**2 + y**2 + y)

    """
    options.allowed_flags(args, ['polys', 'auto'])

    try:
        polys, opt = parallel_poly_from_expr([f] + list(G), *gens, **args)
    except PolificationFailed as exc:  # Py3-compatible exception binding
        raise ComputationFailed('reduced', 0, exc)

    domain = opt.domain
    retract = False

    # Division needs a field; compute over the fraction field and
    # retract to the ring afterwards, if possible.
    if opt.auto and domain.has_Ring and not domain.has_Field:
        opt = opt.clone(dict(domain=domain.get_field()))
        retract = True

    for i, poly in enumerate(polys):
        poly = poly.set_domain(opt.domain).rep.to_dict()
        polys[i] = sdp_from_dict(poly, opt.order)

    level = len(opt.gens) - 1

    Q, r = sdp_div(polys[0], polys[1:], level, opt.order, opt.domain)

    Q = [Poly._from_dict(dict(q), opt) for q in Q]
    r = Poly._from_dict(dict(r), opt)

    if retract:
        try:
            _Q, _r = [q.to_ring() for q in Q], r.to_ring()
        except CoercionFailed:
            # Coefficients didn't land back in the ring; keep field results.
            pass
        else:
            Q, r = _Q, _r

    if not opt.polys:
        return [q.as_expr() for q in Q], r.as_expr()
    else:
        return Q, r
def groebner(F, *gens, **args):
    """
    Computes the reduced Groebner basis for a set of polynomials.

    Use the ``order`` argument to set the monomial ordering that will be
    used to compute the basis. Allowed orders are ``lex``, ``grlex`` and
    ``grevlex``. If no order is specified, it defaults to ``lex``.

    Examples
    ========

    >>> from sympy import groebner
    >>> from sympy.abc import x, y

    >>> F = [x*y - 2*y, 2*y**2 - x**2]

    >>> groebner(F, x, y, order='lex')
    GroebnerBasis([x**2 - 2*y**2, x*y - 2*y, y**3 - 2*y], x, y, domain='ZZ', order='lex')
    >>> groebner(F, x, y, order='grlex')
    GroebnerBasis([y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y], x, y, domain='ZZ', order='grlex')
    >>> groebner(F, x, y, order='grevlex')
    GroebnerBasis([y**3 - 2*y, x**2 - 2*y**2, x*y - 2*y], x, y, domain='ZZ', order='grevlex')

    By default, an improved implementation of the Buchberger algorithm is
    used. Optionally, an implementation of the F5B algorithm can be used.
    The algorithm can be set using ``method`` flag or with the :func:`setup`
    function from :mod:`sympy.polys.polyconfig`:

    >>> F = [x**2 - x - 1, (2*x - 1) * y - (x**10 - (1 - x)**10)]

    >>> groebner(F, x, y, method='buchberger')
    GroebnerBasis([x**2 - x - 1, y - 55], x, y, domain='ZZ', order='lex')
    >>> groebner(F, x, y, method='f5b')
    GroebnerBasis([x**2 - x - 1, y - 55], x, y, domain='ZZ', order='lex')

    References
    ==========

    1. [Buchberger01]_
    2. [Cox97]_

    """
    # All the work happens in the GroebnerBasis constructor.
    basis = GroebnerBasis(F, *gens, **args)
    return basis
def is_zero_dimensional(F, *gens, **args):
    """
    Checks if the ideal generated by a Groebner basis is zero-dimensional.

    The algorithm checks if the set of monomials not divisible by the
    leading monomial of any element of ``F`` is bounded.

    References
    ==========

    David A. Cox, John B. Little, Donal O'Shea. Ideals, Varieties and
    Algorithms, 3rd edition, p. 230

    """
    basis = GroebnerBasis(F, *gens, **args)
    return basis.is_zero_dimensional
class GroebnerBasis(Basic):
    """Represents a reduced Groebner basis. """

    __slots__ = ['_basis', '_options']

    def __new__(cls, F, *gens, **args):
        """Compute a reduced Groebner basis for a system of polynomials. """
        options.allowed_flags(args, ['polys', 'method'])

        try:
            polys, opt = parallel_poly_from_expr(F, *gens, **args)
        except PolificationFailed as exc:
            # ``except E as exc`` works on Python 2.6+ and 3.x (PEP 3110).
            raise ComputationFailed('groebner', len(F), exc)

        domain = opt.domain

        # Groebner bases are computed over a field; clear denominators
        # afterwards if the original domain was only a ring.
        if domain.has_assoc_Field:
            opt.domain = domain.get_field()
        else:
            raise DomainError("can't compute a Groebner basis over %s" % opt.domain)

        for i, poly in enumerate(polys):
            poly = poly.set_domain(opt.domain).rep.to_dict()
            polys[i] = sdp_from_dict(poly, opt.order)

        level = len(opt.gens) - 1

        G = sdp_groebner(polys, level, opt.order, opt.domain, method=opt.method)
        G = [Poly._from_dict(dict(g), opt) for g in G]

        if not domain.has_Field:
            G = [g.clear_denoms(convert=True)[1] for g in G]
            opt.domain = domain

        return cls._new(G, opt)

    @classmethod
    def _new(cls, basis, options):
        # Internal constructor: wrap an already-computed basis.
        obj = Basic.__new__(cls)

        obj._basis = tuple(basis)
        obj._options = options

        return obj

    @property
    def args(self):
        return (Tuple(*self._basis), Tuple(*self._options.gens))

    @property
    def exprs(self):
        # Basis elements as plain SymPy expressions.
        return [poly.as_expr() for poly in self._basis]

    @property
    def polys(self):
        # Basis elements as Poly instances.
        return list(self._basis)

    @property
    def gens(self):
        return self._options.gens

    @property
    def domain(self):
        return self._options.domain

    @property
    def order(self):
        return self._options.order

    def __len__(self):
        return len(self._basis)

    def __iter__(self):
        # The 'polys' option chooses Poly vs. expression output.
        if self._options.polys:
            return iter(self.polys)
        else:
            return iter(self.exprs)

    def __getitem__(self, item):
        if self._options.polys:
            basis = self.polys
        else:
            basis = self.exprs

        return basis[item]

    def __hash__(self):
        return hash((self._basis, tuple(self._options.items())))

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self._basis == other._basis and self._options == other._options
        elif iterable(other):
            # Compare against a plain list of either Polys or expressions.
            return self.polys == list(other) or self.exprs == list(other)
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

    @property
    def is_zero_dimensional(self):
        """
        Checks if the ideal generated by a Groebner basis is zero-dimensional.

        The algorithm checks if the set of monomials not divisible by the
        leading monomial of any element of ``F`` is bounded.

        References
        ==========

        David A. Cox, John B. Little, Donal O'Shea. Ideals, Varieties and
        Algorithms, 3rd edition, p. 230

        """
        def single_var(monomial):
            # True for monomials like x**k (exactly one non-zero exponent).
            return sum(map(bool, monomial)) == 1

        exponents = Monomial([0]*len(self.gens))
        order = self._options.order

        for poly in self.polys:
            monomial = poly.LM(order=order)

            if single_var(monomial):
                exponents *= monomial

        # If any element of the exponents vector is zero, then there's
        # a variable for which there's no degree bound and the ideal
        # generated by this Groebner basis isn't zero-dimensional.
        return all(exponents)

    def fglm(self, order):
        """
        Convert a Groebner basis from one ordering to another.

        The FGLM algorithm converts reduced Groebner bases of zero-dimensional
        ideals from one ordering to another. This method is often used when it
        is infeasible to compute a Groebner basis with respect to a particular
        ordering directly.

        Examples
        ========

        >>> from sympy.abc import x, y
        >>> from sympy import groebner

        >>> F = [x**2 - 3*y - x + 1, y**2 - 2*x + y - 1]
        >>> G = groebner(F, x, y, order='grlex')

        >>> list(G.fglm('lex'))
        [2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7]
        >>> list(groebner(F, x, y, order='lex'))
        [2*x - y**2 - y + 1, y**4 + 2*y**3 - 3*y**2 - 16*y + 7]

        References
        ==========

        J.C. Faugere, P. Gianni, D. Lazard, T. Mora (1994). Efficient
        Computation of Zero-dimensional Groebner Bases by Change of
        Ordering

        J.C. Faugere's lecture notes:
        http://www-salsa.lip6.fr/~jcf/Papers/2010_MPRI5e.pdf

        """
        opt = self._options

        src_order = opt.order
        dst_order = monomial_key(order)

        if src_order == dst_order:
            return self

        if not self.is_zero_dimensional:
            raise NotImplementedError("can't convert Groebner bases of ideals with positive dimension")

        polys = list(self._basis)
        domain = opt.domain

        opt = opt.clone(dict(
            domain=domain.get_field(),
            order=dst_order,
        ))

        for i, poly in enumerate(polys):
            poly = poly.set_domain(opt.domain).rep.to_dict()
            polys[i] = sdp_from_dict(poly, src_order)

        level = len(opt.gens) - 1

        G = matrix_fglm(polys, level, src_order, dst_order, opt.domain)
        G = [Poly._from_dict(dict(g), opt) for g in G]

        if not domain.has_Field:
            G = [g.clear_denoms(convert=True)[1] for g in G]
            opt.domain = domain

        return self._new(G, opt)

    def reduce(self, expr, auto=True):
        """
        Reduces a polynomial modulo a Groebner basis.

        Given a polynomial ``f`` and a set of polynomials ``G = (g_1, ..., g_n)``,
        computes a set of quotients ``q = (q_1, ..., q_n)`` and the remainder ``r``
        such that ``f = q_1*f_1 + ... + q_n*f_n + r``, where ``r`` vanishes or ``r``
        is a completely reduced polynomial with respect to ``G``.

        Examples
        ========

        >>> from sympy import groebner, expand
        >>> from sympy.abc import x, y

        >>> f = 2*x**4 - x**2 + y**3 + y**2
        >>> G = groebner([x**3 - x, y**3 - y])

        >>> G.reduce(f)
        ([2*x, 1], x**2 + y**2 + y)
        >>> Q, r = _

        >>> expand(sum(q*g for q, g in zip(Q, G)) + r)
        2*x**4 - x**2 + y**3 + y**2
        >>> _ == f
        True

        """
        poly = Poly._from_expr(expr, self._options)
        polys = [poly] + list(self._basis)

        opt = self._options
        domain = opt.domain

        retract = False

        # Division needs a field; retract to the ring afterwards if possible.
        if auto and domain.has_Ring and not domain.has_Field:
            opt = opt.clone(dict(domain=domain.get_field()))
            retract = True

        for i, poly in enumerate(polys):
            poly = poly.set_domain(opt.domain).rep.to_dict()
            polys[i] = sdp_from_dict(poly, opt.order)

        level = len(opt.gens) - 1

        Q, r = sdp_div(polys[0], polys[1:], level, opt.order, opt.domain)

        Q = [Poly._from_dict(dict(q), opt) for q in Q]
        r = Poly._from_dict(dict(r), opt)

        if retract:
            try:
                _Q, _r = [q.to_ring() for q in Q], r.to_ring()
            except CoercionFailed:
                pass
            else:
                Q, r = _Q, _r

        if not opt.polys:
            return [q.as_expr() for q in Q], r.as_expr()
        else:
            return Q, r

    def contains(self, poly):
        """
        Check if ``poly`` belongs the ideal generated by ``self``.

        Examples
        ========

        >>> from sympy import groebner
        >>> from sympy.abc import x, y

        >>> f = 2*x**3 + y**3 + 3*y

        >>> G = groebner([x**2 + y**2 - 1, x*y - 2])

        >>> G.contains(f)
        True
        >>> G.contains(f + 1)
        False

        """
        # ``poly`` is in the ideal iff its remainder modulo the basis is zero.
        return self.reduce(poly)[1] == 0
def poly(expr, *gens, **args):
    """
    Efficiently transform an expression into a polynomial.

    Examples
    ========

    >>> from sympy import poly
    >>> from sympy.abc import x

    >>> poly(x*(x**2 + x - 1)**2)
    Poly(x**5 + 2*x**4 - x**3 - 2*x**2 + x, x, domain='ZZ')

    """
    options.allowed_flags(args, [])

    def _poly(expr, opt):
        # Recursively convert ``expr`` to a Poly, building products and
        # sums at the Poly level to avoid a full symbolic expansion.
        terms, poly_terms = [], []

        for term in Add.make_args(expr):
            factors, poly_factors = [], []

            for factor in Mul.make_args(term):
                if factor.is_Add:
                    poly_factors.append(_poly(factor, opt))
                elif factor.is_Pow and factor.base.is_Add and factor.exp.is_Integer:
                    # Integer powers of sums: recurse on the base, then
                    # exponentiate on the Poly level.
                    poly_factors.append(_poly(factor.base, opt).pow(factor.exp))
                else:
                    factors.append(factor)

            if not poly_factors:
                terms.append(term)
            else:
                product = poly_factors[0]

                for factor in poly_factors[1:]:
                    product = product.mul(factor)

                if factors:
                    factor = Mul(*factors)

                    if factor.is_Number:
                        product = product.mul(factor)
                    else:
                        product = product.mul(Poly._from_expr(factor, opt))

                poly_terms.append(product)

        if not poly_terms:
            result = Poly._from_expr(expr, opt)
        else:
            result = poly_terms[0]

            for term in poly_terms[1:]:
                result = result.add(term)

            if terms:
                term = Add(*terms)

                if term.is_Number:
                    result = result.add(term)
                else:
                    result = result.add(Poly._from_expr(term, opt))

        # Reorder generators to match what the caller asked for.
        return result.reorder(*opt.get('gens', ()), **args)

    expr = sympify(expr)

    if expr.is_Poly:
        return Poly(expr, *gens, **args)

    # Avoid expanding the input; that is the whole point of this routine.
    if 'expand' not in args:
        args['expand'] = False

    opt = options.build_options(gens, args)

    return _poly(expr, opt)
|
srjoglekar246/sympy
|
sympy/polys/polytools.py
|
Python
|
bsd-3-clause
| 150,850
|
[
"Gaussian"
] |
a6dda0117b0cf532035bbeddef3cfaf97548478a642e7e086c8147c27785e462
|
# $HeadURL$
__RCSID__ = "$Id$"
import time
import select
import cStringIO
try:
from hashlib import md5
except:
from md5 import md5
from DIRAC.Core.Utilities.ReturnValues import S_ERROR, S_OK
from DIRAC.FrameworkSystem.Client.Logger import gLogger
from DIRAC.Core.Utilities import DEncode
class BaseTransport:
  """Base class for DISET network transports.

  Implements the framing protocol on top of a socket provided by a
  subclass (``self.oSocket``): each message is DEncode-encoded and sent
  as ``<length>:<payload>``; keep-alive packets are prefixed with the
  ``keepAliveMagic`` marker.  Subclasses are expected to create the
  socket and fill ``peerCredentials`` / ``remoteAddress``.
  """

  # Socket tunables shared by all transports
  bAllowReuseAddress = True
  iListenQueueSize = 5
  iReadTimeout = 600
  # Magic prefix that marks a keep-alive packet in the byte stream
  keepAliveMagic = "dka"

  def __init__( self, stServerAddress, bServerMode = False, **kwargs ):
    """
    :param stServerAddress: address tuple/string of the server side
    :param bServerMode: True when this transport is the server end
    :param kwargs: extra args; only 'keepAliveLapse' is interpreted here
    """
    self.bServerMode = bServerMode
    self.extraArgsDict = kwargs
    # Raw bytes received but not yet consumed by receiveData()
    self.byteStream = ""
    self.packetSize = 1048576 #1MiB
    self.stServerAddress = stServerAddress
    self.peerCredentials = {}
    self.remoteAddress = False
    self.appData = ""
    self.startedKeepAlives = set()
    # Base id for keep-alive packets sent from this end
    self.keepAliveId = md5( str( stServerAddress ) + str( bServerMode ) ).hexdigest()
    # Messages read during an idle receive, queued for the next receiveData()
    self.receivedMessages = []
    self.sentKeepAlives = 0
    self.waitingForKeepAlivePong = False
    self.__keepAliveLapse = 0
    self.oSocket = None
    if 'keepAliveLapse' in kwargs:
      try:
        # Enforce a minimum lapse of 150 seconds between keep-alives
        self.__keepAliveLapse = max( 150, int( kwargs[ 'keepAliveLapse' ] ) )
      except:
        pass
    self.__lastActionTimestamp = time.time()
    self.__lastServerRenewTimestamp = self.__lastActionTimestamp

  def __updateLastActionTimestamp( self ):
    # Record activity; used to decide when a keep-alive is due
    self.__lastActionTimestamp = time.time()

  def getLastActionTimestamp( self ):
    """Return the time of the last send/receive on this transport."""
    return self.__lastActionTimestamp

  def getKeepAliveLapse( self ):
    """Return the configured keep-alive interval in seconds (0 = disabled)."""
    return self.__keepAliveLapse

  def handshake( self ):
    """Protocol handshake hook; no-op here, overridden by secure transports."""
    return S_OK()

  def close( self ):
    """Close the underlying socket."""
    self.oSocket.close()

  def setAppData( self, appData ):
    """Attach arbitrary application data to this transport."""
    self.appData = appData

  def getAppData( self ):
    """Return the application data attached with setAppData()."""
    return self.appData

  def renewServerContext( self ):
    """Mark the server context as renewed now; subclasses add real work."""
    self.__lastServerRenewTimestamp = time.time()
    return S_OK()

  def latestServerRenewTime( self ):
    """Return the time of the last server context renewal."""
    return self.__lastServerRenewTimestamp

  def getConnectingCredentials( self ):
    """Return the credentials dict of the connecting peer."""
    return self.peerCredentials

  def setExtraCredentials( self, group ):
    """Store extra credentials (e.g. a group) for the peer."""
    self.peerCredentials[ 'extraCredentials' ] = group

  def serverMode( self ):
    """Return True when this is the server end of the connection."""
    return self.bServerMode

  def getRemoteAddress( self ):
    """Return the remote peer address (False if not connected yet)."""
    return self.remoteAddress

  def getLocalAddress( self ):
    """Return the local socket address."""
    return self.oSocket.getsockname()

  def getSocket( self ):
    """Return the underlying socket object."""
    return self.oSocket

  def _readReady( self ):
    """Block up to iReadTimeout seconds until the socket is readable.

    Returns True if data is available (or timeouts are disabled),
    False if the wait timed out.
    """
    if not self.iReadTimeout:
      return True
    inList, dummy, dummy = select.select( [ self.oSocket ], [], [], self.iReadTimeout )
    if self.oSocket in inList:
      return True
    return False

  def _read( self, bufSize = 4096, skipReadyCheck = False ):
    """Read up to bufSize bytes; returns S_OK(data) or S_ERROR."""
    try:
      if skipReadyCheck or self._readReady():
        data = self.oSocket.recv( bufSize )
        if not data:
          return S_ERROR( "Connection closed by peer" )
        else:
          return S_OK( data )
      else:
        return S_ERROR( "Connection seems stalled. Closing..." )
    except Exception, e:
      return S_ERROR( "Exception while reading from peer: %s" % str( e ) )

  def _write( self, buffer ):
    """Write buffer to the socket; S_OK value is the byte count sent."""
    return S_OK( self.oSocket.send( buffer ) )

  def sendData( self, uData, prefix = False ):
    """DEncode uData and send it framed as [prefix]<length>:<payload>.

    The payload is sent in packetSize chunks; partial sends are retried
    until each chunk is fully written.
    """
    self.__updateLastActionTimestamp()
    sCodedData = DEncode.encode( uData )
    if prefix:
      dataToSend = "%s%s:%s" % ( prefix, len( sCodedData ), sCodedData )
    else:
      dataToSend = "%s:%s" % ( len( sCodedData ), sCodedData )
    for index in range( 0, len( dataToSend ), self.packetSize ):
      bytesToSend = min( self.packetSize, len( dataToSend ) - index )
      packSentBytes = 0
      # send() may write fewer bytes than asked; loop until the chunk is out
      while packSentBytes < bytesToSend:
        try:
          result = self._write( dataToSend[ index + packSentBytes : index + bytesToSend ] )
          if not result[ 'OK' ]:
            return result
          sentBytes = result[ 'Value' ]
        except Exception, e:
          return S_ERROR( "Exception while sending data: %s" % e )
        if sentBytes == 0:
          return S_ERROR( "Connection closed by peer" )
        packSentBytes += sentBytes
    return S_OK()

  def receiveData( self, maxBufferSize = 0, blockAfterKeepAlive = True, idleReceive = False ):
    """Receive and decode one framed message.

    Returns the decoded data, or S_ERROR on failure.  Keep-alive packets
    found in the stream are handled transparently by __processKeepAlive.

    :param maxBufferSize: abort with S_ERROR if more bytes accumulate (0 = unlimited)
    :param blockAfterKeepAlive: keep reading for a real message after a keep-alive
    :param idleReceive: queue the message in receivedMessages and return S_OK()
                        instead of returning the data directly
    """
    from DIRAC.Core.Utilities import DEncode
    self.__updateLastActionTimestamp()
    # Serve a message queued by a previous idle receive first
    if self.receivedMessages:
      return self.receivedMessages.pop( 0 )
    #Buffer size can't be less than 0
    maxBufferSize = max( maxBufferSize, 0 )
    try:
      #Look either for message length of keep alive magic string
      iSeparatorPosition = self.byteStream.find( ":", 0, 10 )
      keepAliveMagicLen = len( BaseTransport.keepAliveMagic )
      isKeepAlive = self.byteStream.find( BaseTransport.keepAliveMagic, 0, keepAliveMagicLen ) == 0
      #While not found the message length or the ka, keep receiving
      while iSeparatorPosition == -1 and not isKeepAlive:
        retVal = self._read( 16384 )
        #If error return
        if not retVal[ 'OK' ]:
          return retVal
        #If closed return error
        if not retVal[ 'Value' ]:
          return S_ERROR( "Peer closed connection" )
        #New data!
        self.byteStream += retVal[ 'Value' ]
        #Look again for either message length of ka magic string
        iSeparatorPosition = self.byteStream.find( ":", 0, 10 )
        isKeepAlive = self.byteStream.find( BaseTransport.keepAliveMagic, 0, keepAliveMagicLen ) == 0
        #Over the limit?
        if maxBufferSize and len( self.byteStream ) > maxBufferSize and iSeparatorPosition == -1 :
          return S_ERROR( "Read limit exceeded (%s chars)" % maxBufferSize )
      #Keep alive magic!
      if isKeepAlive:
        gLogger.debug( "Received keep alive header" )
        #Remove the ka magic from the buffer and process the keep alive
        self.byteStream = self.byteStream[ keepAliveMagicLen: ]
        return self.__processKeepAlive( maxBufferSize, blockAfterKeepAlive )
      #From here it must be a real message!
      #Process the size and remove the msg length from the bytestream
      pkgSize = int( self.byteStream[ :iSeparatorPosition ] )
      pkgData = self.byteStream[ iSeparatorPosition + 1: ]
      readSize = len( pkgData )
      if readSize >= pkgSize:
        #If we already have all the data we need
        data = pkgData[ :pkgSize ]
        self.byteStream = pkgData[ pkgSize: ]
      else:
        #If we still need to read stuff
        pkgMem = cStringIO.StringIO()
        pkgMem.write( pkgData )
        #Receive while there's still data to be received
        while readSize < pkgSize:
          retVal = self._read( pkgSize - readSize, skipReadyCheck = True )
          if not retVal[ 'OK' ]:
            return retVal
          if not retVal[ 'Value' ]:
            return S_ERROR( "Peer closed connection" )
          rcvData = retVal[ 'Value' ]
          readSize += len( rcvData )
          pkgMem.write( rcvData )
          if maxBufferSize and readSize > maxBufferSize:
            return S_ERROR( "Read limit exceeded (%s chars)" % maxBufferSize )
        #Data is here! take it out from the bytestream, dencode and return
        if readSize == pkgSize:
          data = pkgMem.getvalue()
          self.byteStream = ""
        else: #readSize > pkgSize:
          # Trailing bytes belong to the next message; keep them buffered
          pkgMem.seek( 0, 0 )
          data = pkgMem.read( pkgSize )
          self.byteStream = pkgMem.read()
      try:
        data = DEncode.decode( data )[0]
      except Exception, e:
        return S_ERROR( "Could not decode received data: %s" % str( e ) )
      if idleReceive:
        self.receivedMessages.append( data )
        return S_OK()
      return data
    except Exception, e:
      gLogger.exception( "Network error while receiving data" )
      return S_ERROR( "Network error while receiving data: %s" % str( e ) )

  def __processKeepAlive( self, maxBufferSize, blockAfterKeepAlive = True ):
    """Consume the keep-alive payload that follows a keep-alive header.

    Answers PINGs with a PONG, clears the pending-PONG flag on a PONG,
    and (unless blockAfterKeepAlive is False) goes back to reading for a
    real message.
    """
    gLogger.debug( "Received Keep Alive" )
    #Next message down the stream will be the ka data
    result = self.receiveData( maxBufferSize, blockAfterKeepAlive = False )
    if not result[ 'OK' ]:
      gLogger.debug( "Error while receiving keep alive: %s" % result[ 'Message' ] )
      return result
    #Is it a valid ka?
    kaData = result[ 'Value' ]
    for reqField in ( 'id', 'kaping' ):
      if reqField not in kaData:
        errMsg = "Invalid keep alive, missing %s" % reqField
        gLogger.debug( errMsg )
        return S_ERROR( errMsg )
    gLogger.debug( "Received keep alive id %s" % kaData )
    #Need to check if it's one of the keep alives we sent or one started from the other side
    if kaData[ 'kaping' ]:
      #This is a keep alive PING. Let's send the PONG
      self.sendKeepAlive( responseId = kaData[ 'id' ] )
    else:
      #If it's a pong then we flag that we don't need to wait for a pong
      self.waitingForKeepAlivePong = False
    #No blockAfterKeepAlive means return without further read
    if not blockAfterKeepAlive:
      result = S_OK()
      result[ 'keepAlive' ] = True
      return result
    #Let's listen for the next message downstream
    return self.receiveData( maxBufferSize, blockAfterKeepAlive )

  def sendKeepAlive( self, responseId = None, now = False ):
    """Send a keep-alive PING, or a PONG when responseId is given.

    PINGs are rate-limited by the configured keep-alive lapse and
    suppressed while a PONG for a previous PING is still outstanding.
    """
    #If not responseId or not keepAliveLapse or not enough time has passed don't send keep alive
    if not responseId:
      if not self.__keepAliveLapse:
        return S_OK()
      if not now:
        now = time.time()
      if now - self.__lastActionTimestamp < self.__keepAliveLapse:
        return S_OK()
    self.__updateLastActionTimestamp()
    if responseId:
      self.waitingForKeepAlivePong = False
      kaData = S_OK( { 'id' : responseId, 'kaping' : False } )
    else:
      if self.waitingForKeepAlivePong:
        return S_OK()
      id = self.keepAliveId + str( self.sentKeepAlives )
      self.sentKeepAlives += 1
      kaData = S_OK( { 'id' : id, 'kaping' : True } )
      self.waitingForKeepAlivePong = True
    return self.sendData( kaData , prefix = BaseTransport.keepAliveMagic )

  def getFormattedCredentials( self ):
    """Return a "(host:port)[group:user]" string describing the peer."""
    peerCreds = self.getConnectingCredentials()
    address = self.getRemoteAddress()
    if peerCreds.has_key( 'username' ):
      peerId = "[%s:%s]" % ( peerCreds[ 'group' ], peerCreds[ 'username' ] )
    else:
      peerId = ""
    # Bracket the host when it looks like an IPv6 literal
    if address[0].find( ":" ) > -1:
      return "([%s]:%s)%s" % ( address[0], address[1], peerId )
    return "(%s:%s)%s" % ( address[0], address[1], peerId )
|
vmendez/DIRAC
|
Core/DISET/private/Transports/BaseTransport.py
|
Python
|
gpl-3.0
| 10,296
|
[
"DIRAC"
] |
5d77413e5cf6d2417892e1e7fa980edebfdf44e6117514fece5f52e2b83ca9e1
|
import numpy as np
from astropy.table import Table
from astropy.io import fits
import matplotlib.pyplot as plt
import matplotlib
import pickle
from matplotlib import cm
from numpy.random import randn
# Load the red-clump FITS table once at import time.
# NOTE(review): the path is hard-coded to a local machine.
# table path
path = "/Users/caojunzhi/Downloads/upload_20170330/red_clump_dr13.fits"
star = fits.open(path)
table = Table.read(path)

"""
There are 13 columns in the table:
1. 'APOGEEID' -- The name of the star
2. 'VISIT' -- The name of the visit file
3. BJD -- Barycentric JD
Inferred labels are from the Cannon. The spectra we use are from the first combined spectra
(There are two combined spectra for each star, which are obtained by two different methods)
: (1) global weighting, where each visit spectrum is weighted by its (S/N)2, and
(2) pixel-by-pixel weighting, where each pixel is weighted by its (S/N)2.
4. TEFF
5. LOGG
6. FEH
The abc parameters for each visit:
7. A -- parameter a
8. B -- parameter b
9. C -- parameter c
10. CHIINF -- chi-squared for the inferred flux from the cannon (a=0,b=1,c=0)
11. CHIMIX -- chi-squared for the mixed flux from the abc fit.
12. VBARY -- The barycentric Velocity(km/s) from the APOGEE team.
13. VSHIFT -- The velocity shift from the abc fit(km/s)
14. FIBER -- Fiber ID
15. SNR -- SNR of the visit
####
The covariance matrix of the abc fit is in HDU0 data, which is
a 3*3*N 3-d matrix. N is the number of visits.
###
"""

# Covariance matrix of the first visit's abc fit (HDU0 is 3x3xN).
# read covariance matrix from the abc fit:
un_cov = star[0].data[:,:,0]
#print(un_cov)

# read the velocity shift from the abc fit
v_shift = table["VSHIFT"]
#print(v_shift.shape)

########################
#Read table and plot to check.
class plot():
    """Diagnostic plots for the DR13 red-clump abc-fit table.

    ``read_table()`` must be called first: it loads the FITS table and
    caches its columns (plus the ``2b > a+c`` visit mask) as attributes
    that every plotting method reads.  Each ``plot_*`` method writes a
    PNG to a hard-coded output directory.

    NOTE(review): ``plot_RV_std_before_after_teff`` computes and stores
    ``self.fusion_new``, which the ``_logg`` and ``_feh`` variants reuse,
    so it must run before them.
    """

    def read_table(self):
        """Load the FITS table and cache its columns as attributes."""
        path = "/Users/caojunzhi/Downloads/upload_20170330/red_clump_dr13.fits"
        star = fits.open(path)
        table = Table.read(path)

        # read it: covariance matrices of the abc fits (HDU0)
        un_cov = star[0].data
        self.un_cov = un_cov

        a = table["A"]
        b = table["B"]
        c = table["C"]

        self.a = a
        self.b = b
        self.c = c

        # quality mask used by all plots: keep visits with 2b > a+c
        mask = 2*b>a+c
        self.mask = mask

        name = table["APOGEEID"]
        self.name = name

        SHIFT = table["VSHIFT"]
        self.shift = SHIFT

        VBARY = table["VBARY"]
        self.VBARY = VBARY

        teff = table["TEFF"]
        self.teff = teff

        logg = table["LOGG"]
        self.logg = logg

        feh = table["FEH"]
        self.feh = feh

        self.chi_inf = table["CHIINF"]
        self.chi_mix = table["CHIMIX"]
        self.BJD = table["BJD"]
        self.fiber = table["FIBER"]
        self.SNR =table["SNR"]

    def plot_teff_logg(self):
        """Scatter Teff vs logg, colored by RV shift (m/s)."""
        # only show visits with 2b>a+c
        mask = self.mask
        teff = self.teff[mask]
        logg = self.logg[mask]
        feh = self.feh[mask]
        # shift is in km/s; *1000 converts it to m/s for the colorbar
        shift = self.shift[mask]*1000

        a = self.a
        b = self.b
        c = self.c
        bac = (2*b-a-c)[mask]

        # NOTE(review): family 'normal' is not a real font family;
        # matplotlib will warn and fall back to its default.
        font = {'family': 'normal',
                'weight': 'bold',
                'size': 14}
        matplotlib.rc('font', **font)

        f, ax1 = plt.subplots(1,1)
        alpha = 0.3

        #ax1
        ax1.scatter(logg,teff, marker='x', c=shift,
                    vmin=np.min(shift), vmax=np.max(shift), alpha=alpha, cmap=cm.coolwarm)
        ax1.set_ylabel('Teff $K$', fontsize=20)
        ax1.set_xlabel('Logg ', fontsize=20)

        f.subplots_adjust(right=0.8)
        # second scatter only exists to feed the shared colorbar
        pl = ax1.scatter(logg,teff, marker='x', c=shift,
                         vmin=np.min(shift), vmax=np.max(shift), alpha=alpha, cmap=cm.coolwarm)
        cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
        cb = f.colorbar(pl, cax=cbar_ax)
        cb.set_label("RV shifts $m/s$", fontsize=20)
        f.suptitle("Teff vs Logg for red clumps in DR13", fontsize=30)

        # save them:
        fig = matplotlib.pyplot.gcf()
        # adjust the size based on the number of visit
        fig.set_size_inches(14.5, 8.5)
        save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "Teff_logg_rc" +".png"
        fig.savefig(save_path, dpi=500)
        plt.close()

    def plot_teff_feh(self):
        """Scatter Teff vs [Fe/H], colored by RV shift (m/s)."""
        # only show visits with 2b>a+c
        mask = self.mask
        teff = self.teff[mask]
        logg = self.logg[mask]
        feh = self.feh[mask]
        shift = self.shift[mask] * 1000

        a = self.a
        b = self.b
        c = self.c
        bac = (2*b-a-c)[mask]

        font = {'family': 'normal',
                'weight': 'bold',
                'size': 14}
        matplotlib.rc('font', **font)

        f, ax1 = plt.subplots(1,1)
        alpha = 0.3

        #ax1
        ax1.scatter(feh,teff, marker='x', c=shift,
                    vmin=np.min(shift), vmax=np.max(shift), alpha=alpha, cmap=cm.coolwarm)
        ax1.set_ylabel('Teff $K$', fontsize=20)
        ax1.set_xlabel('FeH ', fontsize=20)

        f.subplots_adjust(right=0.8)
        pl = ax1.scatter(feh,teff, marker='x', c=shift,
                         vmin=np.min(shift), vmax=np.max(shift), alpha=alpha, cmap=cm.coolwarm)
        cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
        cb = f.colorbar(pl, cax=cbar_ax)
        cb.set_label("RV shifts $m/s$", fontsize=20)
        f.suptitle("Teff vs FeH for red clumps in DR13", fontsize=30)

        # save them:
        fig = matplotlib.pyplot.gcf()
        # adjust the size based on the number of visit
        fig.set_size_inches(14.5, 8.5)
        save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "Teff_feh_rc" +".png"
        fig.savefig(save_path, dpi=500)
        plt.close()

    def plot_teff_logg_bac(self):
        """Scatter Teff vs logg, colored by the 2b-a-c convexity value."""
        # only show visits with 2b>a+c
        mask = self.mask
        teff = self.teff[mask]
        logg = self.logg[mask]
        feh = self.feh[mask]
        shift = self.shift[mask]

        a = self.a
        b = self.b
        c = self.c
        bac = (2*b-a-c)[mask]

        font = {'family': 'normal',
                'weight': 'bold',
                'size': 14}
        matplotlib.rc('font', **font)

        f, ax1 = plt.subplots(1,1)
        alpha = 0.3

        #ax1
        # fixed color range for 2b-a-c
        low = 0
        up = 3
        ax1.scatter(logg,teff, marker='x', c=bac,
                    vmin=low, vmax=up, alpha=alpha, cmap=cm.coolwarm)
        ax1.set_ylabel('Teff $K$', fontsize=20)
        ax1.set_xlabel('Logg ', fontsize=20)

        f.subplots_adjust(right=0.8)
        pl = ax1.scatter(logg,teff, marker='x', c=bac,
                         vmin=low, vmax=up, alpha=alpha, cmap=cm.coolwarm)
        cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
        cb = f.colorbar(pl, cax=cbar_ax)
        cb.set_label("2b-a-c", fontsize=20)
        f.suptitle("Teff vs Logg for red clumps in DR13", fontsize=30)

        # save them:
        fig = matplotlib.pyplot.gcf()
        # adjust the size based on the number of visit
        fig.set_size_inches(14.5, 8.5)
        save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "Teff_logg_rc_2bac" +".png"
        fig.savefig(save_path, dpi=500)
        plt.close()

    def plot_teff_feh_bac(self):
        """Scatter Teff vs [Fe/H], colored by the 2b-a-c convexity value."""
        # only show visits with 2b>a+c
        mask = self.mask
        teff = self.teff[mask]
        logg = self.logg[mask]
        feh = self.feh[mask]
        shift = self.shift[mask]

        a = self.a
        b = self.b
        c = self.c
        bac = (2*b-a-c)[mask]

        low = 0
        up = 3

        font = {'family': 'normal',
                'weight': 'bold',
                'size': 14}
        matplotlib.rc('font', **font)

        f, ax1 = plt.subplots(1,1)
        alpha = 0.3

        #ax1
        ax1.scatter(feh,teff, marker='x', c=bac,
                    vmin=low, vmax=up, alpha=alpha, cmap=cm.coolwarm)
        ax1.set_ylabel('Teff $K$', fontsize=20)
        ax1.set_xlabel('FeH ', fontsize=20)

        f.subplots_adjust(right=0.8)
        pl = ax1.scatter(feh,teff, marker='x', c=bac,
                         vmin=low, vmax=up, alpha=alpha, cmap=cm.coolwarm)
        cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
        cb = f.colorbar(pl, cax=cbar_ax)
        cb.set_label("2b-a-c", fontsize=20)
        f.suptitle("Teff vs FeH for red clumps in DR13", fontsize=30)

        # save them:
        fig = matplotlib.pyplot.gcf()
        # adjust the size based on the number of visit
        fig.set_size_inches(14.5, 8.5)
        save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "Teff_feh_rc_2bac" +".png"
        fig.savefig(save_path, dpi=500)
        plt.close()

    def plot_shift_bjd(self):
        """Scatter RV shift vs BJD, colored by [Fe/H]."""
        mask = self.mask
        shift =self.shift[mask]
        BJD = self.BJD[mask]
        feh = self.feh[mask]

        font = {'family': 'normal',
                'weight': 'bold',
                'size': 14}
        matplotlib.rc('font', **font)

        f, ax1 = plt.subplots(1,1)
        alpha = 0.3

        #ax1
        ax1.scatter(BJD,shift, marker='x', c=feh,
                    vmin=np.min(feh), vmax=np.max(feh), alpha=alpha, cmap=cm.coolwarm)
        ax1.set_xlabel('BJD', fontsize=20)
        ax1.set_ylabel('RV shift $km/s$ ', fontsize=20)

        f.subplots_adjust(right=0.8)
        pl = ax1.scatter(BJD,shift, marker='x', c=feh,
                         vmin=np.min(feh), vmax=np.max(feh), alpha=alpha, cmap=cm.coolwarm)
        cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
        cb = f.colorbar(pl, cax=cbar_ax)
        cb.set_label("Fe/H", fontsize=20)
        f.suptitle("RV shift vs BJD for red clumps in DR13", fontsize=30)

        # save them:
        fig = matplotlib.pyplot.gcf()
        # adjust the size based on the number of visit
        fig.set_size_inches(14.5, 8.5)
        save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "RV_shift_vs_BJD_rc" +".png"
        fig.savefig(save_path, dpi=500)
        plt.close()

    def plot_rv_fiber(self):
        """Scatter RV shift (from a,b,c) vs fiber ID, colored by SNR."""
        mask = self.mask
        a = self.a[mask]
        b = self.b[mask]
        c = self.c[mask]
        fiber = self.fiber[mask]
        SNR = self.SNR[mask]

        portion = (c+a)/(a+b+c)
        # RV shift in m/s from the asymmetry of the abc weights;
        # 4144.68 is the velocity scale of one pixel step
        # (assumption inferred from usage -- TODO confirm).
        RV = (c - a) / (a + b + c) * 4144.68

        font = {'family': 'normal',
                'weight': 'bold',
                'size': 14}
        matplotlib.rc('font', **font)

        f, ax1 = plt.subplots(1,1)
        alpha = 0.3

        #ax1
        ax1.scatter(fiber,RV, marker='x', c=SNR,
                    vmin=np.min(SNR), vmax=np.max(SNR), alpha=alpha, cmap=cm.coolwarm)
        ax1.set_xlabel('FiberID', fontsize=20)
        ax1.set_ylabel('RV shift $m/s$', fontsize=20)

        f.subplots_adjust(right=0.8)
        pl = ax1.scatter(fiber,RV, marker='x', c=SNR,
                         vmin=np.min(SNR), vmax=np.max(SNR), alpha=alpha, cmap=cm.coolwarm)
        cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
        cb = f.colorbar(pl, cax=cbar_ax)
        cb.set_label("SNR", fontsize=20)
        f.suptitle("RV shifts vs FiberID for the red clumps in DR13", fontsize=30)

        # save them:
        fig = matplotlib.pyplot.gcf()
        # adjust the size based on the number of visit
        fig.set_size_inches(14.5, 8.5)
        save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "RV_shift_vs_Fiber_rc" +".png"
        fig.savefig(save_path, dpi=500)
        plt.close()

    def plot_ac_fiber(self):
        """Scatter (c+a)/(a+b+c) vs fiber ID, colored by RV shift."""
        mask = self.mask
        a = self.a[mask]
        b = self.b[mask]
        c = self.c[mask]
        fiber = self.fiber[mask]

        portion = (c+a)/(a+b+c)
        RV = (c - a) / (a + b + c) * 4144.68

        font = {'family': 'normal',
                'weight': 'bold',
                'size': 14}
        matplotlib.rc('font', **font)

        f, ax1 = plt.subplots(1,1)
        alpha = 0.3

        #ax1
        ax1.scatter(fiber,portion, marker='x', c=RV,
                    vmin=np.min(RV), vmax=np.max(RV), alpha=alpha, cmap=cm.coolwarm)
        ax1.set_xlabel('FiberID', fontsize=20)
        ax1.set_ylabel('$(c+a)/(a+b+c)$ ', fontsize=20)

        # clip the y range to [-1, 1]
        axes = plt.gca()
        axes.set_ylim([-1,1])

        f.subplots_adjust(right=0.8)
        pl = ax1.scatter(fiber,portion, marker='x', c=RV,
                         vmin=np.min(RV), vmax=np.max(RV), alpha=alpha, cmap=cm.coolwarm)
        cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
        cb = f.colorbar(pl, cax=cbar_ax)
        cb.set_label("RV shifts $m/s$", fontsize=20)
        f.suptitle("$(c+a)/(a+b+c)$ vs FiberID for the red clumps in DR13", fontsize=30)

        # save them:
        fig = matplotlib.pyplot.gcf()
        # adjust the size based on the number of visit
        fig.set_size_inches(14.5, 8.5)
        save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "ac_vs_Fiber_rc" +".png"
        fig.savefig(save_path, dpi=500)
        plt.close()

    def plot_delta_chi_SNR(self):
        """Scatter (chi_inf - chi_mix) vs SNR, colored by RV shift."""
        mask = self.mask
        delta_chi = (self.chi_inf-self.chi_mix)[mask]
        SNR = self.SNR[mask]
        RV = self.shift[mask]

        font = {'family': 'normal',
                'weight': 'bold',
                'size': 14}
        matplotlib.rc('font', **font)

        f, ax1 = plt.subplots(1,1)
        alpha = 0.3

        #ax1
        ax1.scatter(SNR,delta_chi, marker='x', c=RV,
                    vmin=np.min(RV), vmax=np.max(RV), alpha=alpha, cmap=cm.coolwarm)
        ax1.set_xlabel('SNR', fontsize=20)
        ax1.set_ylabel('Delta chi squared ', fontsize=20)

        f.subplots_adjust(right=0.8)
        pl = ax1.scatter(SNR,delta_chi, marker='x', c=RV,
                         vmin=np.min(RV), vmax=np.max(RV), alpha=alpha, cmap=cm.coolwarm)
        cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
        cb = f.colorbar(pl, cax=cbar_ax)
        cb.set_label("RV shifts $m/s$", fontsize=20)
        f.suptitle("Delta chi squared vs SNR for the red clumps in DR13", fontsize=30)

        # save them:
        fig = matplotlib.pyplot.gcf()
        # adjust the size based on the number of visit
        fig.set_size_inches(14.5, 8.5)
        save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "dchi_vs_SNR_rc" +".png"
        fig.savefig(save_path, dpi=500)
        plt.close()

    def histogram_shift_abc(self):
        """2x2 histograms of RV shift and the a, b, c parameters, with RMS labels."""
        a = self.a
        b = self.b
        c = self.c

        RV = (c-a)/(a+b+c)*4144.68

        # add a mask: only show results with 2b>a+c
        mask = 2*b>a+c

        a = a[mask]
        b = b[mask]
        c = c[mask]
        RV = RV[mask]

        font = {'weight': 'bold', 'size': 15}
        matplotlib.rc('font', **font)

        f, ((ax1, ax2), (ax3, ax4)) = \
            plt.subplots(2, 2)

        colors = ["cyan",'b', 'g', 'r']
        name = ["RV","a", "b", "c"]

        # histogram of rv
        #ax1
        # RMS values ignore NaNs in the sums (np.nansum) but len() still
        # counts the NaN entries.
        rms_RV = (np.nansum(RV*RV)/len(RV))**0.5
        rms_a = (np.nansum(a * a) / len(a)) ** 0.5
        rms_b = (np.nansum(b*b) / len(b)) ** 0.5
        rms_c = (np.nansum(c * c) / len(c)) ** 0.5

        ax1.hist(RV, bins=40, color=colors[0], label="%s RMS = %.2f $m/s$"%(name[0],rms_RV))
        #ax1.set_title('Histogram of Radial velocity shifts', fontsize=30)
        ax1.set_xlabel('values of radial velocity shifts $m/s$', fontsize=15)
        ax1.set_ylabel('Number', fontsize=15)
        ax1.legend(prop={'size': 15})

        # add vertical grey line
        # ax1.plot((wl[index], wl[index]), (0.5, 1 + 0.5 * N), 'k-', linewidth=1.5)

        # histogram of a
        #ax2
        ax2.hist(a, bins=40, color=colors[1], label="%s RMS = %.2f"%(name[1],rms_a))
        #ax2.set_title('Histogram of parameter a', fontsize=30)
        ax2.set_xlabel('values of parameter a', fontsize=15)
        ax2.set_ylabel('Number', fontsize=15)
        ax2.legend(prop={'size': 15})

        # add vertical grey line
        # ax1.plot((wl[index], wl[index]), (0.5, 1 + 0.5 * N), 'k-', linewidth=1.5)

        # histogram of b
        #ax3
        ax3.hist(b, bins=40, color=colors[2], label="%s RMS = %.2f"%(name[2],rms_b))
        ax3.legend(prop={'size': 15})
        #ax3.set_title('Histogram of parameter b', fontsize=30)
        ax3.set_xlabel("values of parameter b", fontsize=15)
        ax3.set_ylabel('Number', fontsize=15)

        # add vertical grey line
        # ax1.plot((wl[index], wl[index]), (0.5, 1 + 0.5 * N), 'k-', linewidth=1.5)

        # histogram of c
        #ax4
        ax4.hist(c, bins=40, color=colors[3], label="%s RMS = %.2f"%(name[3],rms_c))
        ax4.legend(prop={'size': 15})
        #ax4.set_title('Histogram of parameter c', fontsize=30)
        ax4.set_xlabel("values of parameter c", fontsize=15)
        ax4.set_ylabel('Number', fontsize=15)

        # add vertical grey line
        # ax1.plot((wl[index], wl[index]), (0.5, 1 + 0.5 * N), 'k-', linewidth=1.5)

        f.suptitle("Histogram of RV shifts, a, b and c for the red clumps in DR13",fontsize=25)
        # NOTE(review): 'f.legends' is a bare attribute access, not a call --
        # it has no effect; probably f.legend() was intended.
        f.legends
        #f.suptitle("Histogram of RV shifts, a, b and c by using the absorption lines")

        # save them:
        fig = matplotlib.pyplot.gcf()
        # adjust the size based on the number of visit
        fig.set_size_inches(14.5, 8.5)
        save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "histogram_rv_shift_rc" + ".png"
        fig.savefig(save_path, dpi=500)
        plt.close()

    # RV before after
    def plot_RV_std_before_after_teff(self):
        """Per-star std of RVs before vs after the shift correction,
        colored by Teff.  Also builds and caches self.fusion_new
        (rows: name, std_old, std_new, teff, logg, feh) for the
        _logg and _feh variants."""
        mask = self.mask
        shift =self.shift[mask]
        VBARY = self.VBARY[mask]
        teff = self.teff[mask]
        logg = self.logg[mask]
        feh = self.feh[mask]

        # From the average (c+a)/(a+b+c)
        # Do put a mask here
        mask = self.mask

        # add points with the same fiberid together
        name = self.name[mask]
        # one entry per unique star
        target = list(set(name))
        VBARY = self.VBARY[mask]
        shift =self.shift[mask]
        #SNR = self.SNR[mask]

        fusion_new = []
        # name+std_old and std_new + Teff logg feh
        for i in range(0,len(target)):
            print("Doing %.2f %%"%(i/len(target)*100))
            index = np.where(name == target[i])
            index = np.array(index)
            index = index.ravel()

            # scatter of the star's visit RVs before / after adding the shift
            std_old_i = np.std(VBARY[index])
            std_new_i = np.std(VBARY[index]+shift[index])
            teff_i = np.nanmedian(teff[index])
            logg_i = np.nanmedian(logg[index])
            feh_i = np.nanmedian(feh[index])

            fusion_new.append([target[i],std_old_i,std_new_i,teff_i,logg_i,feh_i])

        fusion_new = np.array(fusion_new)
        self.fusion_new = fusion_new

        # portion+fiber+rv
        # name = fusion_new[:, 0]
        std_old = np.array(fusion_new[:,1],dtype=np.float32).ravel()
        std_new = np.array(fusion_new[:,2],dtype=np.float32).ravel()
        # use int
        teff = np.array(fusion_new[:,3],dtype=np.float16).ravel()

        font = {'family': 'normal',
                'weight': 'bold',
                'size': 14}
        matplotlib.rc('font', **font)

        f, ax1 = plt.subplots(1,1)
        alpha = 0.3

        #ax1
        ax1.scatter(std_old,std_new, marker='x', c=teff,
                    vmin=np.min(teff), vmax=np.max(teff), alpha=alpha, cmap=cm.coolwarm)
        # y = x reference line
        ax1.plot(std_old,std_old,"k",alpha=alpha,linewidth=0.3)
        ax1.set_xlabel('Std of RVs before the correction $km/s$', fontsize=20)
        ax1.set_ylabel('Std of RVs after the correction $km/s$', fontsize=20)

        f.subplots_adjust(right=0.8)
        pl = ax1.scatter(std_old,std_new, marker='x', c=teff,
                         vmin=np.min(teff), vmax=np.max(teff), alpha=alpha, cmap=cm.coolwarm)
        cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
        cb = f.colorbar(pl, cax=cbar_ax)
        cb.set_label("Teff $K$", fontsize=20)
        f.suptitle("Std of RVs before vs after the correction for red clumps in DR13", fontsize=30)

        # save them:
        fig = matplotlib.pyplot.gcf()
        # adjust the size based on the number of visit
        fig.set_size_inches(14.5, 8.5)
        save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "RV_std_before_after_teff" +".png"
        fig.savefig(save_path, dpi=500)
        plt.close()

    def plot_RV_std_before_after_logg(self):
        """Same before/after std plot, colored by logg.
        Requires self.fusion_new from plot_RV_std_before_after_teff()."""
        mask = self.mask
        shift =self.shift[mask]
        VBARY = self.VBARY[mask]
        teff = self.teff[mask]
        logg = self.logg[mask]
        feh = self.feh[mask]

        fusion_new =self.fusion_new

        # name = fusion_new[:, 0]
        std_old = np.array(fusion_new[:,1],dtype=np.float32).ravel()
        std_new = np.array(fusion_new[:,2],dtype=np.float32).ravel()
        logg = np.array(fusion_new[:,4],dtype=np.float16).ravel()

        font = {'family': 'normal',
                'weight': 'bold',
                'size': 14}
        matplotlib.rc('font', **font)

        f, ax1 = plt.subplots(1,1)
        alpha = 0.3

        #ax1
        ax1.scatter(std_old,std_new, marker='x', c=logg,
                    vmin=np.min(logg), vmax=np.max(logg), alpha=alpha, cmap=cm.coolwarm)
        ax1.plot(std_old,std_old, "k", alpha=alpha, linewidth=0.3)
        ax1.set_xlabel('Std of RVs before the correction $km/s$', fontsize=20)
        ax1.set_ylabel('Sts of RVs after the correction $km/s$', fontsize=20)

        f.subplots_adjust(right=0.8)
        pl = ax1.scatter(std_old,std_new, marker='x', c=logg,
                         vmin=np.min(logg), vmax=np.max(logg), alpha=alpha, cmap=cm.coolwarm)
        cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
        cb = f.colorbar(pl, cax=cbar_ax)
        cb.set_label("logg", fontsize=20)
        f.suptitle("Std of RVs before vs after the correction for red clumps in DR13", fontsize=30)

        # save them:
        fig = matplotlib.pyplot.gcf()
        # adjust the size based on the number of visit
        fig.set_size_inches(14.5, 8.5)
        save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "RV_std_before_after_logg" +".png"
        fig.savefig(save_path, dpi=500)
        plt.close()

    def plot_RV_std_before_after_feh(self):
        """Same before/after std plot, colored by [Fe/H].
        Requires self.fusion_new from plot_RV_std_before_after_teff()."""
        mask = self.mask
        shift =self.shift[mask]
        VBARY = self.VBARY[mask]
        teff = self.teff[mask]
        logg = self.logg[mask]
        feh = self.feh[mask]

        fusion_new =self.fusion_new

        # name = fusion_new[:, 0]
        std_old = np.array(fusion_new[:,1],dtype=np.float32).ravel()
        std_new = np.array(fusion_new[:,2],dtype=np.float32).ravel()
        feh = np.array(fusion_new[:,5],dtype=np.float16).ravel()

        font = {'family': 'normal',
                'weight': 'bold',
                'size': 14}
        matplotlib.rc('font', **font)

        f, ax1 = plt.subplots(1,1)
        alpha = 0.3

        #ax1
        ax1.scatter(std_old,std_new, marker='x', c=feh,
                    vmin=np.min(feh), vmax=np.max(feh), alpha=alpha, cmap=cm.coolwarm)
        ax1.plot(std_old,std_old, "k", alpha=alpha, linewidth=0.3)
        ax1.set_xlabel('Std of RVs before the correction $km/s$', fontsize=20)
        ax1.set_ylabel('Std of RVs after the correction $km/s$', fontsize=20)

        f.subplots_adjust(right=0.8)
        pl = ax1.scatter(std_old,std_new, marker='x', c=feh,
                         vmin=np.min(feh), vmax=np.max(feh), alpha=alpha, cmap=cm.coolwarm)
        cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7])
        cb = f.colorbar(pl, cax=cbar_ax)
        cb.set_label("FeH", fontsize=20)
        f.suptitle("Std of RVs before vs after the correction for red clumps in DR13", fontsize=30)

        # save them:
        fig = matplotlib.pyplot.gcf()
        # adjust the size based on the number of visit
        fig.set_size_inches(14.5, 8.5)
        save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "RV_std_before_after_feh" +".png"
        fig.savefig(save_path, dpi=500)
        plt.close()
# Driver: load the table once, then generate the figures.
model = plot()
model.read_table()

"""
model.plot_teff_logg()
model.plot_teff_feh()
model.plot_teff_logg_bac()
model.plot_teff_feh_bac()
model.plot_rv_fiber()
model.plot_ac_fiber()
"""

#VBARY vs
# NOTE: the teff variant must run first -- it builds self.fusion_new,
# which the logg and feh variants read.
model.plot_RV_std_before_after_teff()
model.plot_RV_std_before_after_logg()
model.plot_RV_std_before_after_feh()
|
peraktong/Cannon-Experiment
|
DR13/0330_read_table_rc.py
|
Python
|
mit
| 24,195
|
[
"VisIt"
] |
082925b61b69a4b078e790d72816e40eb7d42bdd210378d0b9092538ecb5dfc4
|
"""
Local Laplacian, see e.g. Aubry et al 2011, "Fast and Robust Pyramid-based Image Processing".
"""
from __future__ import print_function
# TODO: This allows you to use "true" div (vs floordiv) in Python2 for the / operator;
# unfortunately it appears to also replace the overloads we've carefully added for Halide.
# Figure out if it's possible to allow this to leave our Halide stuff unaffected.
#
# from __future__ import division
import halide as hl
import numpy as np
from scipy.misc import imread, imsave
import os.path
# Halide scalar type shorthands used when building the pipeline below.
int_t = hl.Int(32)      # 32-bit signed integer
float_t = hl.Float(32)  # 32-bit float
def get_local_laplacian(input, levels, alpha, beta, J=8):
    """Build the Halide local-Laplacian tone-mapping pipeline.

    Parameters
    ----------
    input : hl.ImageParam
        3-channel uint16 input image.
    levels : hl.Param(int)
        Number of intensity levels in the processed pyramid.
    alpha, beta : hl.Param(float)
        Detail remapping strength and tone scaling.
    J : int
        Pyramid depth.

    Returns
    -------
    hl.Func producing the uint16 output, with a CPU or GPU schedule applied
    depending on the environment target.
    """
    # Single-element lists so the nested helpers can mutate the counters
    # (no `nonlocal` in Python 2); they give each generated Func a unique name.
    downsample_counter=[0]
    upsample_counter=[0]
    x = hl.Var('x')
    y = hl.Var('y')

    def downsample(f):
        # 3-channel separable [1 3 3 1]/8 downsample.
        # (`c` is defined after these helpers; the closure binds it at call time.)
        downx, downy = hl.Func('downx%d'%downsample_counter[0]), hl.Func('downy%d'%downsample_counter[0])
        downsample_counter[0] += 1
        downx[x,y,c] = (f[2*x-1,y,c] + 3.0*(f[2*x,y,c]+f[2*x+1,y,c]) + f[2*x+2,y,c])/8.0
        downy[x,y,c] = (downx[x,2*y-1,c] + 3.0*(downx[x,2*y,c]+downx[x,2*y+1,c]) + downx[x,2*y+2,c])/8.0
        return downy

    def upsample(f):
        # 3-channel separable linear-interpolation upsample.
        upx, upy = hl.Func('upx%d'%upsample_counter[0]), hl.Func('upy%d'%upsample_counter[0])
        upsample_counter[0] += 1
        upx[x,y,c] = 0.25 * f[(x//2) - 1 + 2*(x%2),y,c] + 0.75 * f[x//2,y,c]
        upy[x,y,c] = 0.25 * upx[x, (y//2) - 1 + 2*(y%2),c] + 0.75 * upx[x,y//2,c]
        return upy

    def downsample2D(f):
        # Single-channel variant of downsample().
        downx, downy = hl.Func('downx%d'%downsample_counter[0]), hl.Func('downy%d'%downsample_counter[0])
        downsample_counter[0] += 1
        downx[x,y] = (f[2*x-1,y] + 3.0*(f[2*x,y]+f[2*x+1,y]) + f[2*x+2,y])/8.0
        downy[x,y] = (downx[x,2*y-1] + 3.0*(downx[x,2*y]+downx[x,2*y+1]) + downx[x,2*y+2])/8.0
        return downy

    def upsample2D(f):
        # Single-channel variant of upsample().
        upx, upy = hl.Func('upx%d'%upsample_counter[0]), hl.Func('upy%d'%upsample_counter[0])
        upsample_counter[0] += 1
        upx[x,y] = 0.25 * f[(x//2) - 1 + 2*(x%2),y] + 0.75 * f[x//2,y]
        upy[x,y] = 0.25 * upx[x, (y//2) - 1 + 2*(y%2)] + 0.75 * upx[x,y//2]
        return upy

    # THE ALGORITHM

    # loop variables
    c = hl.Var('c')
    k = hl.Var('k')

    # Make the remapping function as a lookup table.
    remap = hl.Func('remap')
    fx = hl.cast(float_t, x/256.0)
    #remap[x] = alpha*fx*exp(-fx*fx/2.0)
    remap[x] = alpha*fx*hl.exp(-fx*fx/2.0)

    # Convert to floating point
    floating = hl.Func('floating')
    floating[x,y,c] = hl.cast(float_t, input[x,y,c]) / 65535.0

    # Set a boundary condition
    clamped = hl.Func('clamped')
    clamped[x,y,c] = floating[hl.clamp(x, 0, input.width()-1), hl.clamp(y, 0, input.height()-1), c]

    # Get the luminance channel
    gray = hl.Func('gray')
    gray[x,y] = 0.299*clamped[x,y,0] + 0.587*clamped[x,y,1] + 0.114*clamped[x,y,2]

    # Make the processed Gaussian pyramid.
    gPyramid = [hl.Func('gPyramid%d'%i) for i in range(J)]
    # Do a lookup into a lut with 256 entries per intensity level
    level = k / (levels - 1)
    idx = gray[x,y]*hl.cast(float_t, levels-1)*256.0
    idx = hl.clamp(hl.cast(int_t, idx), 0, (levels-1)*256)
    gPyramid[0][x,y,k] = beta*(gray[x, y] - level) + level + remap[idx - 256*k]
    for j in range(1,J):
        gPyramid[j][x,y,k] = downsample(gPyramid[j-1])[x,y,k]

    # Get its laplacian pyramid
    lPyramid = [hl.Func('lPyramid%d'%i) for i in range(J)]
    lPyramid[J-1] = gPyramid[J-1]
    for j in range(J-1)[::-1]:
        lPyramid[j][x,y,k] = gPyramid[j][x,y,k] - upsample(gPyramid[j+1])[x,y,k]

    # Make the Gaussian pyramid of the input
    inGPyramid = [hl.Func('inGPyramid%d'%i) for i in range(J)]
    inGPyramid[0] = gray
    for j in range(1,J):
        inGPyramid[j][x,y] = downsample2D(inGPyramid[j-1])[x,y]

    # Make the laplacian pyramid of the output
    outLPyramid = [hl.Func('outLPyramid%d'%i) for i in range(J)]
    for j in range(J):
        # Split input pyramid value into integer and floating parts
        level = inGPyramid[j][x,y]*hl.cast(float_t, levels-1)
        li = hl.clamp(hl.cast(int_t, level), 0, levels-2)
        lf = level - hl.cast(float_t, li)
        # Linearly interpolate between the nearest processed pyramid levels
        outLPyramid[j][x,y] = (1.0-lf)*lPyramid[j][x,y,li] + lf*lPyramid[j][x,y,li+1]

    # Make the Gaussian pyramid of the output
    outGPyramid = [hl.Func('outGPyramid%d'%i) for i in range(J)]
    outGPyramid[J-1] = outLPyramid[J-1]
    for j in range(J-1)[::-1]:
        outGPyramid[j][x,y] = upsample2D(outGPyramid[j+1])[x,y] + outLPyramid[j][x,y]

    # Reintroduce color (Connelly: use eps to avoid scaling up noise w/ apollo3.png input)
    color = hl.Func('color')
    eps = 0.01
    color[x,y,c] = outGPyramid[0][x,y] * (clamped[x,y,c] + eps) / (gray[x,y] + eps)

    output = hl.Func('local_laplacian')
    # Convert back to 16-bit
    output[x,y,c] = hl.cast(hl.UInt(16), hl.clamp(color[x,y,c], 0.0, 1.0) * 65535.0)

    # THE SCHEDULE
    remap.compute_root()
    target = hl.get_target_from_environment()
    if target.has_gpu_feature():
        # GPU Schedule
        print ("Compiling for GPU")
        xi, yi = hl.Var("xi"), hl.Var("yi")
        # NOTE(review): GPU_Default is not defined or imported anywhere in this
        # file, so the GPU path raises NameError as written; this first
        # gpu_tile call also omits the xi/yi tile Vars the later calls pass.
        # Confirm the intended Halide API before enabling the GPU path.
        output.compute_root().gpu_tile(x, y, 32, 32, GPU_Default)
        for j in range(J):
            blockw = 32
            blockh = 16
            if j > 3:
                blockw = 2
                blockh = 2
            if j > 0:
                inGPyramid[j].compute_root().gpu_tile(x, y, xi, yi, blockw, blockh, GPU_Default)
            if j > 0:
                gPyramid[j].compute_root().reorder(k, x, y).gpu_tile(x, y, xi, yi, blockw, blockh, GPU_Default)
            outGPyramid[j].compute_root().gpu_tile(x, y, xi, yi, blockw, blockh, GPU_Default)
    else:
        # CPU schedule
        print ("Compiling for CPU")
        output.parallel(y, 4).vectorize(x, 4);
        gray.compute_root().parallel(y, 4).vectorize(x, 4);
        for j in range(4):
            if j > 0:
                inGPyramid[j].compute_root().parallel(y, 4).vectorize(x, 4)
            if j > 0:
                gPyramid[j].compute_root().parallel(y, 4).vectorize(x, 4)
            outGPyramid[j].compute_root().parallel(y).vectorize(x, 4)
        for j in range(4,J):
            inGPyramid[j].compute_root().parallel(y)
            gPyramid[j].compute_root().parallel(k)
            outGPyramid[j].compute_root().parallel(y)
    return output
def generate_compiled_file(local_laplacian):
    """Ahead-of-time compile the pipeline to a "local_laplacian" object file.

    NOTE(review): `ArgumentsVector` and `Argument` are not defined or imported
    anywhere in this file, so this function raises NameError as written --
    presumably they should come from the halide bindings (e.g. hl.Argument);
    confirm against the installed Halide Python API before use.
    """
    # Need to copy the process executable from the C++ apps/local_laplacian folder to run this.
    # (after making it of course)
    arguments = ArgumentsVector()
    arguments.append(Argument('levels', False, int_t))
    arguments.append(Argument('alpha', False, float_t))
    arguments.append(Argument('beta', False, float_t))
    arguments.append(Argument('input', True, hl.UInt(16)))
    target = hl.get_target_from_environment()
    local_laplacian.compile_to_file("local_laplacian", arguments, "local_laplacian", target)
    print("Generated compiled file for local_laplacian function.")
    return
def get_input_data():
    """Load the bundled test RGB image and widen it to uint16.

    8-bit channel values are shifted left by 8, so the data lies in
    [0, 65280] and is stored Fortran-ordered for the Halide buffer.
    """
    here = os.path.dirname(__file__)
    image_path = os.path.join(here, "../../apps/images/rgb.png")
    assert os.path.exists(image_path), \
        "Could not find %s" % image_path
    rgb_data = imread(image_path)
    #print("rgb_data", type(rgb_data), rgb_data.shape, rgb_data.dtype)
    # Promote 8-bit channels to 16-bit before taking the Fortran-ordered copy.
    widened = rgb_data.astype(np.uint16) << 8
    input_data = np.copy(widened, order="F")
    # input data is in range [0, 256*256]
    #print("input_data", type(input_data), input_data.shape, input_data.dtype)
    return input_data
def filter_test_image(local_laplacian, input):
    """JIT-compile the pipeline and run it on the bundled test image.

    Writes local_laplacian_input.png and local_laplacian.png to the
    current working directory.
    """
    local_laplacian.compile_jit()
    # preparing input and output memory buffers (numpy ndarrays)
    input_data = get_input_data()
    input_image = hl.Buffer(input_data)
    # Bind the concrete image to the pipeline's ImageParam before realizing.
    input.set(input_image)
    output_data = np.empty(input_data.shape, dtype=input_data.dtype, order="F")
    output_image = hl.Buffer(output_data)
    if False:
        print("input_image", input_image)
        print("output_image", output_image)
    # do the actual computation
    local_laplacian.realize(output_image)
    # save results
    input_path = "local_laplacian_input.png"
    output_path = "local_laplacian.png"
    imsave(input_path, input_data)
    imsave(output_path, output_data)
    print("\nlocal_laplacian realized on output_image.")
    print("Result saved at '", output_path,
          "' ( input data copy at '", input_path, "' ).", sep="")
    return
def main():
    """Build the pipeline with default parameters, then JIT-run (or AOT-compile) it."""
    input = hl.ImageParam(hl.UInt(16), 3, 'input')
    # number of intensity levels
    levels = hl.Param(int_t, 'levels', 8)
    # Parameters controlling the filter strength.
    alpha = hl.Param(float_t, 'alpha', 1.0/7.0)
    beta = hl.Param(float_t, 'beta', 1.0)
    pipeline = get_local_laplacian(input, levels, alpha, beta)

    # Set to False to run the jit immediately and get instant gratification.
    generate = False
    if generate:
        generate_compiled_file(pipeline)
    else:
        filter_test_image(pipeline, input)
    return


if __name__ == '__main__':
    main()
|
Trass3r/Halide
|
python_bindings/apps/local_laplacian.py
|
Python
|
mit
| 9,109
|
[
"Gaussian"
] |
39f865aa981d8f559491396cd2960e96cbb2177d23da80fc2e516dd1c0f14c1a
|
"""
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from scipy import stats

from sklearn.linear_model import ARDRegression, LinearRegression

###############################################################################
# Generating simulated data with Gaussian weights

# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
    w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise

###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)

# Ordinary least squares, for comparison.
ols = LinearRegression()
ols.fit(X, y)

###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="ARD estimate")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)

plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
# Mark the truly relevant features at a fixed height on the histogram.
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
         'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)

plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
|
RPGOne/Skynet
|
scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/examples/linear_model/plot_ard.py
|
Python
|
bsd-3-clause
| 2,622
|
[
"Gaussian"
] |
7cbccaf6fd3f03821b409d0e968543731b398ae9387e8a8169bb5d2b9111815d
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import os
import numpy as np
from psi4 import core
from .. import empirical_dispersion
def fisapt_compute_energy(self):
    """Computes the FSAPT energy. FISAPT::compute_energy

    Drives the full FISAPT workflow on the core.FISAPT object: setup
    (localization, partition, one-electron integrals), monomer SCF, the
    SAPT0 components (elst/exch/ind, plus disp when F-SAPT is off), and the
    fragment-partitioned F-SAPT0 analysis when FISAPT_DO_FSAPT is set.
    The call order below is significant -- each step consumes results of
    the previous ones.
    """
    # => Header <=
    self.print_header()
    # => Zero-th Order Wavefunction <=
    core.timer_on("FISAPT: Setup")
    self.localize()
    self.partition()
    self.overlap()
    self.kinetic()
    self.nuclear()
    self.coulomb()
    core.timer_off("FISAPT: Setup")
    core.timer_on("FISAPT: Monomer SCF")
    self.scf()
    core.timer_off("FISAPT: Monomer SCF")
    self.freeze_core()
    self.unify()
    core.timer_on("FISAPT: Subsys E")
    self.dHF()
    core.timer_off("FISAPT: Subsys E")
    # => SAPT0 <=
    core.timer_on("FISAPT:SAPT:elst")
    self.elst()
    core.timer_off("FISAPT:SAPT:elst")
    core.timer_on("FISAPT:SAPT:exch")
    self.exch()
    core.timer_off("FISAPT:SAPT:exch")
    core.timer_on("FISAPT:SAPT:ind")
    self.ind()
    core.timer_off("FISAPT:SAPT:ind")
    if not core.get_option("FISAPT", "FISAPT_DO_FSAPT"):
        core.timer_on("FISAPT:SAPT:disp")
        self.disp(self.matrices(), self.vectors(), True)  # Expensive, only do if needed # untested translation of below
        # self.disp(matrices_, vectors_, true) # Expensive, only do if needed
        core.timer_off("FISAPT:SAPT:disp")
    # => F-SAPT0 <=
    if core.get_option("FISAPT", "FISAPT_DO_FSAPT"):
        core.timer_on("FISAPT:FSAPT:loc")
        self.flocalize()
        core.timer_off("FISAPT:FSAPT:loc")
        core.timer_on("FISAPT:FSAPT:elst")
        self.felst()
        core.timer_off("FISAPT:FSAPT:elst")
        core.timer_on("FISAPT:FSAPT:exch")
        self.fexch()
        core.timer_off("FISAPT:FSAPT:exch")
        core.timer_on("FISAPT:FSAPT:ind")
        self.find()
        core.timer_off("FISAPT:FSAPT:ind")
        if core.get_option("FISAPT", "FISAPT_DO_FSAPT_DISP"):
            core.timer_on("FISAPT:FSAPT:disp")
            self.fdisp()
            core.timer_off("FISAPT:FSAPT:disp")
        #else:
        #    # Build Empirical Dispersion
        #    dashD = empirical_dispersion.EmpiricalDispersion(name_hint='SAPT0-D3M')
        #    dashD.print_out()
        #    # Compute -D
        #    Edisp = dashD.compute_energy(core.get_active_molecule())
        #    core.set_variable('{} DISPERSION CORRECTION ENERGY'.format(dashD.fctldash), Edisp)  # Printing
        #    text = []
        #    text.append("   => {}: Empirical Dispersion <=".format(dashD.fctldash.upper()))
        #    text.append(" ")
        #    text.append(dashD.description)
        #    text.append(dashD.dashlevel_citation.rstrip())
        #    text.append("\n    Empirical Dispersion Energy [Eh] = {:24.16f}\n".format(Edisp))
        #    text.append('\n')
        #    core.print_out('\n'.join(text))
        # Write the F-SAPT analysis files to disk.
        self.fdrop()
    # => Scalar-Field Analysis <=
    if core.get_option("FISAPT", "FISAPT_DO_PLOT"):
        core.timer_on("FISAPT:FSAPT:cubeplot")
        self.plot()
        core.timer_off("FISAPT:FSAPT:cubeplot")
    # => Summary <=
    self.print_trailer()
def fisapt_fdrop(self):
    """Drop output files from FSAPT calculation. FISAPT::fdrop

    Writes geom.xyz plus the fragment charge vectors and per-component SAPT
    matrices to FISAPT_FSAPT_FILEPATH, and -- when SSAPT0_SCALE is set -- a
    scaled (sSAPT0) copy to FISAPT_FSSAPT_FILEPATH, for post-processing.
    """
    core.print_out(" ==> F-SAPT Output <==\n\n")
    filepath = core.get_option("FISAPT", "FISAPT_FSAPT_FILEPATH")
    os.makedirs(filepath, exist_ok=True)
    core.print_out(" F-SAPT Data Filepath = {}\n\n".format(filepath))
    geomfile = filepath + os.sep + 'geom.xyz'
    xyz = self.molecule().to_string(dtype='xyz', units='Angstrom')
    with open(geomfile, 'w') as fh:
        fh.write(xyz)
    vectors = self.vectors()
    matrices = self.matrices()
    # Rename to the short names the F-SAPT post-processing scripts expect.
    matrices["Qocc0A"].name = "QA"
    matrices["Qocc0B"].name = "QB"
    matrices["Elst_AB"].name = "Elst"
    matrices["Exch_AB"].name = "Exch"
    matrices["IndAB_AB"].name = "IndAB"
    matrices["IndBA_AB"].name = "IndBA"
    _drop(vectors["ZA"], filepath)
    _drop(vectors["ZB"], filepath)
    _drop(matrices["Qocc0A"], filepath)
    _drop(matrices["Qocc0B"], filepath)
    _drop(matrices["Elst_AB"], filepath)
    _drop(matrices["Exch_AB"], filepath)
    _drop(matrices["IndAB_AB"], filepath)
    _drop(matrices["IndBA_AB"], filepath)
    if core.get_option("FISAPT", "FISAPT_DO_FSAPT_DISP"):
        matrices["Disp_AB"].name = "Disp"
        _drop(matrices["Disp_AB"], filepath)
    # Optionally also drop the scaled (sSAPT0) induction/dispersion variants.
    if core.get_option("FISAPT", "SSAPT0_SCALE"):
        ssapt_filepath = core.get_option("FISAPT", "FISAPT_FSSAPT_FILEPATH")
        os.makedirs(ssapt_filepath, exist_ok=True)
        core.print_out(" sF-SAPT Data Filepath = {}\n\n".format(ssapt_filepath))
        geomfile = ssapt_filepath + os.sep + 'geom.xyz'
        with open(geomfile, 'w') as fh:
            fh.write(xyz)
        matrices["sIndAB_AB"].name = "IndAB"
        matrices["sIndBA_AB"].name = "IndBA"
        _drop(vectors["ZA"], ssapt_filepath)
        _drop(vectors["ZB"], ssapt_filepath)
        _drop(matrices["Qocc0A"], ssapt_filepath)
        _drop(matrices["Qocc0B"], ssapt_filepath)
        _drop(matrices["Elst_AB"], ssapt_filepath)
        _drop(matrices["Exch_AB"], ssapt_filepath)
        _drop(matrices["sIndAB_AB"], ssapt_filepath)
        _drop(matrices["sIndBA_AB"], ssapt_filepath)
        if core.get_option("FISAPT", "FISAPT_DO_FSAPT_DISP"):
            matrices["sDisp_AB"].name = "Disp"
            _drop(matrices["sDisp_AB"], ssapt_filepath)
def fisapt_plot(self):
    """Filesystem wrapper for FISAPT::plot.

    Prepares the plot directory, drops the geometry next to the (future)
    cube files, then hands off to the C++ raw_plot.
    """
    target_dir = core.get_option("FISAPT", "FISAPT_PLOT_FILEPATH")
    os.makedirs(target_dir, exist_ok=True)
    geom_path = target_dir + os.sep + 'geom.xyz'
    with open(geom_path, 'w') as handle:
        handle.write(self.molecule().to_string(dtype='xyz', units='Angstrom'))
    self.raw_plot(target_dir)
def _drop(array, filepath):
    """Helper to drop array to disk. FISAPT::drop

    Parameters
    ----------
    array : psi4.core.Matrix or psi4.core.Vector
        Matrix or vector to be written to disk in plain text.
    filepath : str
        Full or partial file path. `array` will be written
        to <filepath>/<array.name>.dat.

    Returns
    -------
    None

    Notes
    -----
    Equivalent to https://github.com/psi4/psi4archive/blob/master/psi4/src/psi4/fisapt/fisapt.cc#L4389-L4420
    """
    target = filepath + os.sep + array.name + '.dat'
    with open(target, 'wb') as out:
        np.savetxt(out, array.to_array(), fmt="%24.16E", delimiter=' ', newline='\n')
# Attach the Python-side drivers to the C++ core.FISAPT class so they are
# callable as instance methods.
core.FISAPT.compute_energy = fisapt_compute_energy
core.FISAPT.fdrop = fisapt_fdrop
core.FISAPT.plot = fisapt_plot
|
jgonthier/psi4
|
psi4/driver/procrouting/sapt/fisapt_proc.py
|
Python
|
lgpl-3.0
| 7,495
|
[
"Psi4"
] |
c1834a188b4b5276306b7f3942488db3954099eab6d6db7b0ae154a9c3e1c0c4
|
#!/usr/bin/env python
# -*- coding: Latin-1 -*-
##
def syntax():
    # Print usage.  (Python-2 print statement; this script targets Python 2.)
    print "syntax: python runner.py Win32|x64|all"
##
r"""runner.py - run some tests on the exiv2 build"""
##
import sys
import os.path
##
def Q(path):
    """Wrap *path* in double quotes for use on a shell command line."""
    return '"%s"' % path
##
##
def System(cmd):
    # Run *cmd* through the shell.  Flush stdout first so our output
    # interleaves correctly with the child process's output.
    # print "System ",cmd
    sys.stdout.flush()
    os.system(cmd)
##
def exe(path,option):
    """exe - handle exiv2.exe file

    Runs smoke tests (-V and -pt over the test images) when the walked
    file is exiv2.exe; any other file is ignored.
    """
    if os.path.basename(path)=='exiv2.exe':
        # print "testing ",path
        testimages=os.path.realpath('testimages')
        tif=os.path.join(testimages,'test.tiff')
        png=os.path.join(testimages,'test.png')
        jpg=os.path.join(testimages,'test.jpg')
        System(path + " -V")
        # NOTE(review): '2>NUL' is concatenated directly after the quoted
        # image path with no separating space -- looks like a missing space
        # before the redirect; confirm the intended command line.
        System(path + " -pt "+Q(tif) + '2>NUL | grep Original')
        System(path + " -pt "+Q(png) + '2>NUL | grep Original')
        System(path + " -pt "+Q(jpg) + '2>NUL | grep Original')
        System(path + " -pt "+Q(jpg) )
##
##
def dll(path,option):
    """dll - handle a .dll file

    Runs the dependency walker (depends32/64.exe) over known exiv2 binaries.
    """
    # BUG FIX: the original tuple started with the single string
    # 'exiv2.exe,exiv2.dll' (a quote/comma was missing), so exiv2.exe and
    # exiv2.dll were never matched.
    if os.path.basename(path) in ('exiv2.exe','exiv2.dll','exiv2d.dll','libexpat.dll','zlib1.dll','zlib1d.dll'):
        # print "testing ",path
        # Pick the 32- or 64-bit dependency walker to match the build.
        bits=32 if path.find('Win32')>=0 else 64
        depends='tools/bin/depends%d.exe' % (bits)
        depends=os.path.realpath( depends )
        System(depends + ' -q ' + path + ' | sort')
##
##
def visit(myData, directoryName, filesInDirectory): # called for each dir
    """visit - called by os.path.walk

    Appends every non-directory entry to the module-global `paths` list
    (initialized by runner() before the walk starts).
    """
    # print "in visitor",directoryName, "myData = ",myData
    # print "filesInDirectory => ",filesInDirectory
    for filename in filesInDirectory: # do non-dir files here
        pathname = os.path.join(directoryName, filename)
        if not os.path.isdir(pathname):
            global paths
            paths.append(pathname)
##
##
def handle(paths,handlers):
    """Dispatch each path (in sorted order) to the handler registered for its
    lower-cased extension.

    NOTE(review): passes the module-global `option` (set in __main__) to each
    handler rather than taking it as a parameter -- confirm before reusing
    this outside the script entry point.
    """
    for path in sorted(paths):
        ext=os.path.splitext(path)[1].lower()
        # `in` works on both Python 2 and 3 (dict.has_key was Python-2 only).
        if ext in handlers:
            handlers[ext](path,option)
##
##
def runner(option):
    """runner -option == None, means both x64 and Win32

    Walks <script dir>/bin[/<option>] collecting file paths, then runs the
    exe smoke tests and the dependency walker over them.
    """
    if option in set(['x64','Win32',None]):
        directory = os.path.abspath(os.path.dirname(sys.argv[0]))
        directory = os.path.join(directory,"bin")
        if option:
            directory = os.path.join(directory,option)
        global paths
        paths=[]
        # NOTE: os.path.walk is Python-2 only (removed in Python 3; os.walk).
        os.path.walk(directory, visit, None)
        # Fixed order: .exe files are handled twice -- once as executables,
        # once through the dependency walker.
        handle(paths,{ '.exe' : exe } )
        handle(paths,{ '.dll' : dll } )
        handle(paths,{ '.exe' : dll } )
    else:
        syntax()
##
##
if __name__ == '__main__':
    argc = len(sys.argv)
    syntaxError = argc < 2
    if not syntaxError:
        option='all'
        if argc>1:
            option=sys.argv[1].lower()
        # Map command-line spellings to the build-directory name
        # (None means "run every platform").
        options = { 'x64'   : 'x64'
                  , 'x86'   : 'Win32'
                  , 'win32' : 'Win32'
                  , 'all'   : None
                  , 'both'  : None
                  }
        # NOTE: dict.has_key is Python-2 only, consistent with the prints above.
        syntaxError = not options.has_key(option)
        if not syntaxError:
            runner(options[option])
    if syntaxError:
        syntax()

# That's all Folks!
##
|
limbolily/exiv2
|
msvc2005/runner.py
|
Python
|
gpl-2.0
| 3,296
|
[
"VisIt"
] |
5db5c2687b0158a4368736b3beaf297abcb1f01ad7b44a29623b6320a30ea5f5
|
"""Utility commands to cycle active objects in pymol.
Adds cycler types to pymol:
pdbdircycler - Cycles through all pdbs in a given directory.
pdbdircyclerlite - Cycles through all pdbs in a given directory, loading one object at a time.
pdblistfilecycler - Cycles through all pdbs listed in a file.
pdblistfilecyclerlite - Cycles through all pdbs listed in a file, loading one object at a time.
objcycler - Cycles though all objects.
Adds cycler commands:
set_cycler_command - Sets command run on each cycler iteration. Use to init object representation in lite cyclers.
"""
import logging
logger = logging.getLogger("Cycler")
from pymol import cmd,viewing
import os,re
from glob import glob
from os import path
# ashworth
# minimal general classes to support convenient "list mode" behavior in pymol (left/right arrows cycle through list)
# the 'Lite' classes use the LoadDeleteCycler instead of the EnableCycler, in order that only a single pdb from the list is loaded into memory at any given time. These 'Lite' versions are preferable for large numbers of pdbs that would exceed system memory if loaded all at once.
####################################################################################################
# relates paths to object names in a way that matches the result of cmd.load
def objname(objpath):
    """Return the pymol object name that cmd.load would assign to *objpath*."""
    base = path.basename(objpath)
    return re.sub(r'(\.pdb|\.pdb.gz)$', '', base)
# base class cycler for enable/disable behavior, with all objects (pdbs) preloaded
class EnableCycler(object):
    """Cycler that preloads all pdbs and toggles which single object is enabled."""

    def __init__(self):
        # Index into choices() of the currently enabled object.
        self.current_index = 0
        # When True, zoom the camera to each newly enabled object.
        self.auto_zoom = False
        # Optional pymol command executed after each cycle step.
        self.onload_command = None

    def iter(self, by=1):
        """Advance `by` steps (negative for backwards), wrapping around."""
        #enabled = cmd.get_names('objects',enabled_only=1)[0]
        choices = self.choices()
        l = len(choices)
        assert self.current_index < l
        next_object = (self.current_index + by) % l
        cmd.disable(objname(choices[self.current_index]))
        self.current_index = next_object
        cmd.enable(objname(choices[self.current_index]))
        if self.auto_zoom:
            cmd.zoom(objname(choices[self.current_index]))
        if self.onload_command:
            # NOTE(review): uses the root logger (logging.debug) although the
            # module defines a named `logger` -- confirm which is intended.
            logging.debug("onload_command: %s", self.onload_command)
            cmd.do(self.onload_command)
        cmd.replace_wizard('message', choices[self.current_index])

    def choices(self):
        """Return the ordered list of object paths/names to cycle through."""
        raise NotImplementedError("EnableCycler.choices")
# base class cycler for load/delete behavior (to be employed when there are too many pdbs to hold in memory all at once)
class LoadDeleteCycler(object):
    """Cycler that keeps only a single pdb loaded at a time.

    On each step the loaded object is deleted and the next file in
    choices() is loaded, so memory use stays constant regardless of how
    many pdbs are in the list.
    """

    def __init__(self):
        # When True, zoom the camera after each load.
        self.auto_zoom = False
        # Optional pymol command executed after each load.
        self.onload_command = None

    def iter(self, by=1):
        """Advance `by` entries (negative for backwards), wrapping around."""
        # Exactly one object is loaded at any time; locate it in the list.
        loaded = cmd.get_names('objects')[0]
        choices = self.choices()
        l = len(choices)
        next_file = 0
        for i in range(l):
            if objname(choices[i]) == loaded:
                next_file = choices[(i + by) % l]
                break
        cmd.delete('all')
        if not os.path.exists(next_file):
            raise ValueError("Can not locate file: %s" % next_file)
        cmd.load(next_file)
        if self.auto_zoom:
            cmd.zoom()
        if self.onload_command:
            logging.debug("onload_command: %s", self.onload_command)
            cmd.do(self.onload_command)
        cmd.replace_wizard('message', next_file)

    def choices(self):
        """Return the ordered list of pdb file paths to cycle through."""
        # BUG FIX: the original message said "EnableCycler.choices" (copy-paste).
        raise NotImplementedError("LoadDeleteCycler.choices")
####################################################################################################
# cycler over all pdbs in directory
class PDBDirCycler(EnableCycler):
    """Enable/disable cycler over every *.pdb* file in a directory (all preloaded)."""

    def __init__(self, target_dir='.'):
        super(PDBDirCycler, self).__init__()
        self.pdbs = glob(path.join(target_dir, "*.pdb*"))
        self.loadpdbs()

    def loadpdbs(self):
        """Load every pdb, leaving only the first one enabled."""
        for pdb in self.pdbs:
            cmd.load(pdb)
            cmd.disable(objname(pdb))
        cmd.enable(objname(self.pdbs[0]))

    def choices(self):
        return self.pdbs
class PDBDirCyclerLite(LoadDeleteCycler):
    """Load/delete cycler over every pdb file in a directory (one loaded at a time)."""

    def __init__(self, target_dir='.'):
        super(PDBDirCyclerLite, self).__init__()
        # BUG FIX: store full paths, not bare filenames.  The original kept
        # os.listdir() basenames, so cmd.load()/os.path.exists() in iter()
        # failed whenever target_dir was not the current directory.
        self.pdbs = [os.path.join(target_dir, f)
                     for f in os.listdir(target_dir) if re.search('.pdb.*$', f)]
        cmd.load(self.pdbs[0])

    def choices(self):
        return self.pdbs
####################################################################################################
# cycler over pdbs in list file
class PDBListFileCycler(EnableCycler):
    """Enable/disable cycler over the pdb paths listed (one per line) in a file."""

    def __init__(self, list_file):
        super(PDBListFileCycler, self).__init__()
        # Use a context manager so the list file is closed deterministically
        # (the original left the open file object to the garbage collector).
        with open(list_file) as fh:
            self.pdbs = [l.strip() for l in fh]
        self.loadpdbs()

    def loadpdbs(self):
        """Load every listed pdb, leaving only the first one enabled."""
        for pdb in self.pdbs:
            cmd.load(pdb)
            cmd.disable(objname(pdb))
        cmd.enable(objname(self.pdbs[0]))

    def choices(self):
        return self.pdbs
class PDBListFileCyclerLite(LoadDeleteCycler):
    """Load/delete cycler over the pdb paths listed (one per line) in a file."""

    def __init__(self, list_file):
        super(PDBListFileCyclerLite, self).__init__()
        # Close the list file deterministically (the original relied on the GC).
        with open(list_file) as fh:
            self.pdbs = [l.strip() for l in fh]
        cmd.load(self.pdbs[0])

    def choices(self):
        return self.pdbs
####################################################################################################
# cycler over all PyMOL objects (including new ones)
class ObjectCycler(EnableCycler):
    """Enable/disable cycler over whatever objects are loaded in pymol.

    choices() re-queries pymol on each step, so objects loaded after
    construction are picked up automatically.
    """

    def __init__(self):
        super(ObjectCycler, self).__init__()
        cmd.disable('all')
        cmd.enable(cmd.get_names('objects')[0])

    def choices(self):
        return cmd.get_names('objects')
####################################################################################################
def prev_pdb():
    """Key-binding target: step the active cycler backwards."""
    viewing.cycler.iter(-1)

def next_pdb():
    """Key-binding target: step the active cycler forwards."""
    viewing.cycler.iter(1)

# The active cycler is stashed on the pymol `viewing` module so the arrow-key
# callbacks can reach it without module-level globals here.
def spawnPDBDirCycler(target_dir='.', lite=False):
    """Install a directory cycler (lite = one pdb loaded at a time) on the arrow keys."""
    if lite: viewing.cycler = PDBDirCyclerLite(target_dir)
    else: viewing.cycler = PDBDirCycler(target_dir)
    cmd.set_key('left', prev_pdb)
    cmd.set_key('right', next_pdb)

def spawnPDBListFileCycler(list_file, lite=False):
    """Install a list-file cycler (lite = one pdb loaded at a time) on the arrow keys."""
    if lite: viewing.cycler = PDBListFileCyclerLite(list_file)
    else: viewing.cycler = PDBListFileCycler(list_file)
    cmd.set_key('left', prev_pdb)
    cmd.set_key('right', next_pdb)

def spawnObjectCycler():
    """Install a cycler over currently loaded objects on the arrow keys."""
    viewing.cycler = ObjectCycler()
    cmd.set_key('left', prev_pdb)
    cmd.set_key('right', next_pdb)
def setCyclerOnloadCommand(command_string):
    """Set the command run after each cycle step (`set_cycler_command`)."""
    # Strip one layer of surrounding quotes the pymol command parser may keep.
    if command_string[0] == '"' or command_string[0] == "'":
        command_string = command_string[1:-1]
    if viewing.cycler:
        # Consistency fix: use the module logger, not the root logger,
        # matching `logger = logging.getLogger("Cycler")` at the top of file.
        logger.debug("Setting cycler onload_command: %s", command_string)
        viewing.cycler.onload_command = command_string
# Register the console commands.  BUG FIX: the original dir-cycler lambdas
# ignored their `dir` argument and always cycled the current directory.
cmd.extend( 'pdbdircycler', lambda dir='.': spawnPDBDirCycler(dir,False) )
cmd.extend( 'pdbdircyclerlite', lambda dir='.': spawnPDBDirCycler(dir,True) )
cmd.extend( 'pdblistfilecycler', lambda file: spawnPDBListFileCycler(file,False) )
cmd.extend( 'pdblistfilecyclerlite', lambda file: spawnPDBListFileCycler(file,True) )
cmd.extend( 'objcycler', spawnObjectCycler )
cmd.extend( 'set_cycler_command', setCyclerOnloadCommand )
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
weitzner/Dotfiles
|
pymol_scripts/Cycler.py
|
Python
|
mit
| 7,155
|
[
"PyMOL"
] |
00d4acf223b135dca86c0d0671aafd350ea6f5d194f71c6bb19b4212abec3707
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
# how should this go?
# should have pylada-parsed version of all VASP data.
# instead I have my own script that parses OUTCAR and
# draws plots
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import sys
from . import plotbs
from run_input import cations, anions
def main():
    """Plot NLEP band-structure fits against S. Lany's GW reference data.

    Lays out one subplot per cation/anion pair and overlays the reference
    (black) and fitted (red) band structures via plotbs.real_main.
    """
    # this is really just a custom trigger to plot S. Lany's data with "MgOx" naming
    four_char_names = False
    # NOTE(review): plot_dft is assigned but never read (only the
    # commented-out blocks below reference it); plotting_dft is the flag
    # actually used.  Confirm which was intended.
    plot_dft = False
    if (len(sys.argv) < 3):
        print("plot_fig <dir> <base OUTCAR name> [-fcn]")
        print("-fcn triggers 4 character names")
        sys.exit()
    total_dir = sys.argv[1]
    name = sys.argv[2]
    # support for some special data sets
    plotting_dft = False
    if (len(sys.argv) > 3):
        four_char_names = sys.argv[3] == "-fcn"
        plotting_dft = sys.argv[3] == "-dft"

    def map_ion(ion):
        # Map one-character ion symbols to Lany's two-character file naming.
        if (not four_char_names):
            return ion
        elif (len(ion) != 1):
            return ion
        elif ion == "O":
            return "Ox"
        elif ion == "S":
            return "Su"
        else:
            return ion
    ncat = len(cations)
    nan = len(anions)
    basedir = "/Users/pgraf/work/cid/pylada/nlep/"
    subdir1 = "/nlep_materials_from_slany"
    name1 = "OUTCAR_gw"
    # if (four_char_names):
    #     subdir = "/nlep_materials_from_slany/slany_2-6/"
    #     name = "OUTCAR-single"
    # else:
    #     subdir = "/opt/from_redmesa/all_2-6_leastsq/best_fit/"
    #     name = "OUTCAR"
    # if (plot_dft):
    #     subdir = "/nlep_materials_from_slany"
    #     name = "OUTCAR_pbe"
    ifig = 1
    fig = plt.figure(1)
    for icat in range(0, ncat):
        cat = cations[icat]
        for ian in range(0, nan):
            an = anions[ian]
            # Band indices depend on which semicore states the cation brings.
            vb_idx_nlep = 3
            bend_idx_nlep = 7
            if (cat in ["Ga", "In", "Zn", "Cd"]):
                vb_idx_nlep = 8
                bend_idx_nlep = 12
            vb_idx_gw = vb_idx_nlep
            bend_idx_gw = bend_idx_nlep
            if (cat == "Mg"):
                vb_idx_gw = 6
                bend_idx_gw = 12
            if (plotting_dft):
                vb_idx_nlep = 6
                bend_idx_nlep = 12
            an1 = an
            cat1 = cat
            an = map_ion(an)  # for Stephans 4 character naming
            cat = map_ion(cat)
            # Build a proc1.py-style argv and hand it to the plotting backend.
            args = "proc1.py --skipln --bend=%d %s/%s/%s%s_%s --matchvbm=%d,%d --bend=%d %s/%s%s_%s" % (
                bend_idx_gw, basedir, subdir1, cat1, an1, name1, vb_idx_gw, vb_idx_nlep, bend_idx_nlep, total_dir, cat, an, name)
            print(args)
            args = args.split()
            # plt.subplot(ncat, nan, ifig)
            ax = fig.add_subplot(ncat, nan, ifig)
            fig = plotbs.real_main(args, fig, ax, ifig)
            tit = "%s%s" % (cat, an)
            # NOTE(review): the /3. label placement assumes a 3x3 grid --
            # confirm against len(cations)/len(anions) in run_input.
            fig.text(.2 + ian / 3., 0.9 - icat / 3., tit)
            fig.canvas.set_window_title("black = %s, red = %s,%s" % (name1, total_dir, name))
            ifig += 1
    plt.show()


if __name__ == '__main__':
    main()
|
pylada/pylada-light
|
src/pylada/vasp/nlep/plotfit.py
|
Python
|
gpl-3.0
| 4,199
|
[
"CRYSTAL",
"VASP"
] |
e2ff38d8482c79a708c38e4603668efd7cec844ee832a3e3e88da592de7f43c4
|
import random
import requests
from helga import log
logger = log.getLogger(__name__)
class XKCDClient(object):
    """Minimal client for the public xkcd JSON API."""

    BASE = 'https://xkcd.com'
    EXT = 'info.0.json'

    def __init__(self):
        self._sess = requests.Session()

    def _request(self, comic_number=None):
        """GET <BASE>/<comic_number>/info.0.json (latest when number is None).

        Returns the decoded JSON dict, or None on an HTTP error.
        """
        url_args = [str(a) for a in (self.BASE, comic_number, self.EXT) if a is not None]
        url = '/'.join(url_args)
        logger.debug('Requesting comic at %s', url)
        try:
            resp = self._sess.get(url)
            resp.raise_for_status()
            return resp.json()
        except requests.exceptions.HTTPError:
            logger.exception("Got bad response from %s", url)
            return None

    def fetch_latest(self):
        """Return metadata for the most recent comic."""
        return self._request()

    def fetch_number(self, number):
        """Return metadata for comic `number`."""
        # XXX: hacking around the fact that 404 comic is not real.
        if number == 404:
            return {'img': 'http://i.imgur.com/utzTCyo.png', 'safe_title': "That's the joke.", 'alt': 'Visit the page for yourself', 'num': number}
        return self._request(number)

    def fetch_random(self, latest=None):
        """Return metadata for a uniformly random existing comic."""
        if latest is None:
            latest = self.fetch_latest() or {'num': 1830}
        # BUG FIX: random.randint is inclusive at both ends, so the original
        # randint(1, num + 1) could request a comic that does not exist yet.
        random_selection = random.randint(1, latest.get('num'))
        return self._request(random_selection)
|
crlane/helga-xkcd
|
helga_xkcd/client.py
|
Python
|
mit
| 1,322
|
[
"VisIt"
] |
9c8d6771d5ee091d28eeb47c2dd6a97babd2de2a2701b70692f3f802aa87cc4e
|
import subprocess
import tempfile
from pathlib import Path
import numpy as np
from pysisyphus.config import get_cmd
from pysisyphus.constants import BOHR2ANG
from pysisyphus.helpers_pure import interpolate_colors
TPL_BASE = """
{orient}
function _setModelState() {{
select;
Spacefill 0.0;
frank off;
font frank 16.0 SansSerif Plain;
select *;
set fontScaling false;
background white
frank off
set showhydrogens True;
}}
_setModelState;
"""
CUBE_TPL = (
"load {cube_fn}"
+ TPL_BASE
+ """
isosurface cutoff {isoval} sign {colors} "{cube_fn}"
write image pngt "{png_fn}"
"""
)
def call_jmol(spt_str, show=False):
    """Run a Jmol script and return its (stdout, stderr).

    Parameters
    ----------
    spt_str : str
        Jmol script source; written to a temporary .spt file.
    show : bool
        When True, drop the "-n" (no-display) flag so the Jmol GUI opens.
    """
    with tempfile.NamedTemporaryFile("w", suffix=".spt") as spt_handle:
        spt_handle.write(spt_str)
        spt_handle.flush()
        jmol_cmd = [get_cmd("jmol"), "-n", spt_handle.name]
        if show:
            del jmol_cmd[1]
        proc = subprocess.Popen(
            jmol_cmd,
            universal_newlines=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        # BUG FIX: the original called proc.wait() before reading the pipes,
        # which can deadlock if Jmol fills an OS pipe buffer; communicate()
        # drains both streams while waiting for the process to exit.
        stdout, stderr = proc.communicate()
    return stdout, stderr
def render_cdd_cube(cdd_cube, isoval=0.002, orient=""):
png_fn = Path(cdd_cube).with_suffix(".png")
spt = CUBE_TPL.format(
orient=orient,
cube_fn=cdd_cube,
isoval=isoval,
colors="red blue",
png_fn=png_fn,
)
with open("jmol.spt", "w") as handle:
handle.write(spt)
stdout, stderr = call_jmol(spt)
return png_fn
def render_geom_and_charges(geom, point_charges):
    """Open an interactive Jmol session showing `geom` plus colored point charges.

    Parameters
    ----------
    geom
        Geometry object providing as_xyz() (pysisyphus Geometry).
    point_charges : np.ndarray
        Rows of (x, y, z, charge) with coordinates in Bohr.
    """
    # Work on a copy; convert only the coordinate columns to Angstrom.
    point_charges = point_charges.copy()
    point_charges[:, :3] *= BOHR2ANG
    charges = point_charges[:, -1]
    # Color anchors for the charge colormap.
    cr = np.array((255, 0, 0))  # red
    cw = np.array((255, 255, 255))  # white
    cb = np.array((0, 0, 255))  # blue
    chrg_min = charges.min()
    chrg_max = charges.max()
    print(
        f"charges:\n{np.array2string(point_charges, precision=4)}\n"
        f"min(charges): {chrg_min: .4f}\nmax(charges): {chrg_max: .4f}\n"
    )
    # Negative charges fade toward blue, positive toward red; an all-same-sign
    # set degenerates to white on the missing side.
    c1 = cb if chrg_min < 0.0 else cw
    c2 = cr if chrg_max > 0.0 else cw
    rgb_colors, _ = interpolate_colors(charges, c1, c2)
    # Dump geometry to temporary file
    with tempfile.NamedTemporaryFile("w", suffix=".xyz") as tmp_xyz:
        tmp_xyz.write(geom.as_xyz())
        tmp_xyz.flush()
        spt = f"load {tmp_xyz.name};\n"
        # One colored sphere isosurface per point charge.
        for i, ((x, y, z, pc), (r, g, b)) in enumerate(zip(point_charges, rgb_colors)):
            id_ = f"chrg{i}"
            spt += (
                f"isosurface {id_} center {{{x} {y} {z}}} sphere 0.25;\n"
                f"color ${id_} [{r} {g} {b}];\n"
            )
        print(f"SPT:\n\n{spt}")
        call_jmol(spt, show=True)
if __name__ == "__main__":
cdd = "/scratch/turbontos/11_bz_pure/image_000.005.S_004_CDD.cub"
render_cdd_cube(cdd)
|
eljost/pysisyphus
|
pysisyphus/wrapper/jmol.py
|
Python
|
gpl-3.0
| 2,886
|
[
"Jmol"
] |
f685a7a9c8e127f71467abeb5d645c3c2a7f74a34db3f9330a2200b165d911ef
|
'''
Main file for displaying depth/color/skeleton information and extracting features
'''
import os
import optparse
from time import time
import cPickle as pickle
import numpy as np
import scipy.misc as sm
import scipy.ndimage as nd
import skimage
from skimage import feature, color
from pyKinectTools.utils.Utils import createDirectory
from pyKinectTools.utils.VideoViewer import VideoViewer
from pyKinectTools.utils.DepthUtils import world2depth, depthIm2XYZ
from pyKinectTools.utils.MultiCameraUtils import multiCameraTimeline, formatFileString
from pyKinectTools.utils.FeatureUtils import saveFeatures, loadFeatures, learnICADict, learnNMFDict, displayComponents
from pyKinectTools.algs.HistogramOfOpticalFlow import getFlow, hof, splitIm
from pyKinectTools.algs.BackgroundSubtraction import AdaptiveMixtureOfGaussians, fill_image, extract_people
from pyKinectTools.algs.FeatureExtraction import calculateBasicPose, computeUserFeatures, computeFeaturesWithSkels
# Shared viewer instance used for all image windows in this script.
vv = VideoViewer()

''' 3D visualization '''
# Disabled by default: Mayavi is only needed for the (also disabled) 3D view.
if 0:
    from mayavi import mlab
    figure = mlab.figure(1, bgcolor=(0,0,0), fgcolor=(1,1,1))
    mlab.clf()
    figure.scene.disable_render = True

''' Debugging '''
from IPython import embed
import cProfile
# Silence numpy warnings (divide-by-zero etc.) during playback.
np.seterr(all='ignore')

''' Keyboard keys (using Video Viewer)'''
# Key codes returned by VideoViewer.waitKey().
# NOTE(review): keys_frame_left/right reuse the arrow-key codes, so the
# "frame step" branches below shadow the rewind/fast-forward ones -- confirm.
keys_ESC = 27
keys_left_arrow = 314
keys_right_arrow = 316
keys_down_arrow = 317
keys_space = 32
keys_i = 105
keys_help = 104
keys_frame_left = 314
keys_frame_right = 316

''' Using OpenCV
keys_ESC = 1048603
keys_right_arrow = 1113939
keys_left_arrow = 1113937
keys_down_arrow = 1113940
keys_space = 1048608
keys_i = 1048681
keys_help = 1048680
keys_frame_left = 1048673
keys_frame_right = 1048691
'''
# -------------------------MAIN------------------------------------------
# @profile
def main(get_depth, get_color, get_skeleton, get_mask, calculate_features, visualize, save_anonomized, device):
    """Play back recorded depth/color/skeleton data and extract features.

    Walks a depth/<day>/<hour>/<minute>/device_<n>/ directory tree, loading
    one frame at a time, maintaining an adaptive background model, optionally
    extracting per-user features, and showing interactive playback windows.
    Keyboard input (see the keys_* constants) controls speed/seeking.

    NOTE(review): the original file's indentation was lost; the nesting below
    was reconstructed from the loop/break semantics -- verify against the
    upstream source.
    """
    dev = device
    ret = 0
    backgroundTemplates = np.empty([1,1,1])
    backgroundModel = None
    backgroundCount = 20
    bgPercentage = .05
    prevDepthIm = None

    # Day-level directories (numeric names); hidden entries are skipped.
    day_dirs = os.listdir('depth/')
    day_dirs = [x for x in day_dirs if x[0]!='.']
    day_dirs.sort(key=lambda x: int(x))

    hour_index = 0
    minute_index=0
    allFeatures = []
    coms = []
    orns = []
    play_speed = 1
    new_date_entered = False
    framerate = 0
    frame_prev = 0
    frame_prev_time = time()

    day_index = 0
    while day_index < len(day_dirs):
        # Jump to a user-entered date (set via the spacebar handler below).
        if new_date_entered:
            try:
                day_index = day_dirs.index(day_new)
            except:
                print "Day not found"
                day_index = 0
        dayDir = day_dirs[day_index]
        hour_dirs = os.listdir('depth/'+dayDir)
        hour_dirs = [x for x in hour_dirs if x[0]!='.']
        hour_dirs.sort(key=lambda x: int(x))

        '''Hours'''
        ''' Check for new Hours index '''
        if not new_date_entered:
            # Forward playback starts at the first hour, rewind at the last.
            if play_speed >= 0 and ret != keys_frame_left:
                hour_index = 0
            else:
                hour_index = len(hour_dirs)-1
        else:
            try:
                hour_index = hour_dirs.index(hour_new)
            except:
                print "Hour was not found"
                hour_index = 0

        while hour_index < len(hour_dirs):
            hourDir = hour_dirs[hour_index]
            minute_dirs = os.listdir('depth/'+dayDir+'/'+hourDir)
            minute_dirs = [x for x in minute_dirs if x[0]!='.']
            minute_dirs.sort(key=lambda x: int(x))

            '''Minutes'''
            ''' Check for new minute index '''
            if not new_date_entered:
                if play_speed >= 0 and ret != keys_frame_left:
                    minute_index = 0
                else:
                    minute_index = len(minute_dirs)-1
            else:
                try:
                    minute_index = minute_dirs.index(minute_new)
                except:
                    print "Minute was not found"
                    minute_index = 0

            ''' Loop through this minute '''
            while minute_index < len(minute_dirs):
                minute_dir = minute_dirs[minute_index]
                # Prevent from reading hidden files
                # NOTE(review): these 'continue's do not advance minute_index,
                # so a hidden/missing directory would loop forever -- confirm.
                if minute_dir[0] == '.':
                    continue
                depth_files = []
                skelFiles = []
                # For each available device:
                devices = os.listdir('depth/'+dayDir+'/'+hourDir+'/'+minute_dir)
                devices = [x for x in devices if x[0]!='.' and x.find('tmp')<0]
                devices.sort()
                deviceID = "device_{0:d}".format(dev+1)
                if not os.path.isdir('depth/'+dayDir+'/'+hourDir+'/'+minute_dir+'/'+deviceID):
                    continue

                ''' Sort files '''
                # Filenames encode second/millisecond; sort chronologically.
                if get_depth:
                    depthTmp = os.listdir('depth/'+dayDir+'/'+hourDir+'/'+minute_dir+'/'+deviceID)
                    tmpSort = [int(x.split('_')[-3])*100 + int(formatFileString(x.split('_')[-2])) for x in depthTmp]
                    depthTmp = np.array(depthTmp)[np.argsort(tmpSort)].tolist()
                    depth_files.append([x for x in depthTmp if x.find('.png')>=0])
                if get_skeleton:
                    skelTmp = os.listdir('skel/'+dayDir+'/'+hourDir+'/'+minute_dir+'/'+deviceID)
                    tmpSort = [int(x.split('_')[-4])*100 + int(formatFileString(x.split('_')[-3])) for x in skelTmp]
                    skelTmp = np.array(skelTmp)[np.argsort(tmpSort)].tolist()
                    skelFiles.append([x for x in skelTmp if x.find('.dat')>=0])

                if len(depth_files) == 0:
                    continue
                if play_speed >= 0 and ret != keys_frame_left:
                    frame_id = 0
                else:
                    frame_id = len(depth_files[dev])-1

                while frame_id < len(depth_files[0]):
                # while frame_id < len(depth_files[dev]):
                    depthFile = depth_files[0][frame_id]
                    # try:
                    if 1:
                        ''' Load Depth '''
                        if get_depth:
                            depthIm = sm.imread('depth/'+dayDir+'/'+hourDir+'/'+minute_dir+'/'+deviceID+'/'+depthFile)
                            depthIm = np.array(depthIm, dtype=np.uint16)
                        ''' Load Color '''
                        if get_color:
                            colorFile = 'color_'+depthFile[6:-4]+'.jpg'
                            colorIm = sm.imread('color/'+dayDir+'/'+hourDir+'/'+minute_dir+'/'+deviceID+'/'+colorFile)
                            # colorIm_g = colorIm.mean(-1, dtype=np.uint8)
                            colorIm_g = skimage.img_as_ubyte(skimage.color.rgb2gray(colorIm))
                            # colorIm_lab = skimage.color.rgb2lab(colorIm).astype(np.uint8)
                        ''' Load Skeleton Data '''
                        if get_skeleton:
                            skelFile = 'skel_'+depthFile[6:-4]+'_.dat'
                            if os.path.isfile('skel/'+dayDir+'/'+hourDir+'/'+minute_dir+'/'+deviceID+'/'+skelFile):
                                with open('skel/'+dayDir+'/'+hourDir+'/'+minute_dir+'/'+deviceID+'/'+skelFile, 'rb') as inFile:
                                    users = pickle.load(inFile)
                            else:
                                # NOTE(review): on a missing file, 'users' from
                                # the previous frame is silently reused.
                                print "No user file:", skelFile
                            coms = [users[x]['com'] for x in users.keys() if users[x]['com'][2] > 0.0]
                            jointCount = 0
                            for i in users.keys():
                                user = users[i]

                        timestamp = depthFile[:-4].split('_')[1:] # Day, hour, minute, second, millisecond, Frame number in this second
                        # Clamp depth to 5 m and fill holes before modelling.
                        depthIm = np.minimum(depthIm.astype(np.float), 5000)
                        fill_image(depthIm)

                        '''Background model'''
                        if backgroundModel is None:
                            # First frame: initialize the MoG model, then
                            # retry this frame (continue without advancing).
                            bgSubtraction = AdaptiveMixtureOfGaussians(depthIm, maxGaussians=3, learningRate=0.01, decayRate=0.02, variance=300**2)
                            backgroundModel = bgSubtraction.getModel()
                            if get_color:
                                prevColorIm = colorIm_g.copy()
                            continue
                        else:
                            bgSubtraction.update(depthIm)
                            backgroundModel = bgSubtraction.getModel()
                        foregroundMask = bgSubtraction.get_foreground(thresh=50)

                        ''' Find people '''
                        if get_skeleton:
                            ret = plotUsers(depthIm, users, device=deviceID, vis=True)
                        if get_mask:
                            foregroundMask, userBoundingBoxes, userLabels = extract_people(depthIm, foregroundMask, minPersonPixThresh=1500, gradientFilter=True, gradThresh=100)

                        ''' Calculate user features '''
                        if calculate_features and get_color:
                            ''' Color Optical Flow '''
                            flow = getFlow(prevColorIm, colorIm_g)
                            prevColorIm = colorIm_g.copy()
                            userCount = len(userBoundingBoxes)
                            for i in xrange(userCount):
                                userBox = userBoundingBoxes[i]
                                userMask = foregroundMask==i+1
                                allFeatures.append(computeUserFeatures(colorIm, depthIm, flow, userBox, time=timestamp, mask=userMask, windowSize=[96,72], visualise=False))
                        ''' Or get CoM + orientation '''
                        if get_mask and not calculate_features:
                            userCount = len(userBoundingBoxes)
                            for i in xrange(userCount):
                                userBox = userBoundingBoxes[i]
                                userMask = foregroundMask==i+1
                                com, ornBasis = calculateBasicPose(depthIm, userMask)
                                coms.append(com)
                                orns.append(ornBasis[1])
                                allFeatures.append({'com':com, "orn":ornBasis, 'time':timestamp})

                        ''' Visualization '''
                        if visualize:
                            tmpSecond = depthFile.split("_")[-3]
                            # NOTE(review): zero-padding is gated on len == 0,
                            # which never pads; len == 1 was likely intended.
                            if len(tmpSecond) == 0:
                                tmpSecond = '0'+tmpSecond
                            if get_depth:
                                vv.imshow("Depth", depthIm/6000.)
                                vv.putText("Depth", "Day "+dayDir+" Time "+hourDir+":"+minute_dir+":"+tmpSecond, (5,220), size=15)
                                vv.putText("Depth", "Play speed: "+str(play_speed)+"x", (5,15), size=15)
                                vv.putText("Depth", str(int(framerate))+" fps", (275,15), size=15)
                            if get_color:
                                vv.putText(colorIm, "Day "+dayDir+" Time "+hourDir+":"+minute_dir+" Dev#"+str(dev), (10,220))
                                vv.imshow("I_orig", colorIm)
                                if get_mask:
                                    # vv.imshow("I", colorIm*foregroundMask[:,:,np.newaxis])
                                    vv.imshow("I_masked", colorIm + (255-colorIm)*(((foregroundMask)[:,:,np.newaxis])))
                            if get_mask:
                                vv.imshow("Mask", foregroundMask.astype(np.float)/float(foregroundMask.max()))
                                # vv.imshow("BG Model", backgroundModel.astype(np.float)/float(backgroundModel.max()))

                            ''' Multi-camera map '''
                            if 0 and len(coms) > 0:
                                mapRez = [200,200]
                                mapIm = np.zeros(mapRez)
                                coms_np = np.array(coms)
                                xs = np.minimum(np.maximum(mapRez[0]+((coms_np[:,2]+500)/3000.*mapRez[0]).astype(np.int), 0),mapRez[0]-1)
                                ys = np.minimum(np.maximum(((coms_np[:,0]+500)/1500.*mapRez[0]).astype(np.int), 0), mapRez[1]-1)
                                mapIm[xs, ys] = 255
                                vv.imshow("Map", mapIm)
                                # scatter(coms_np[:,0], -coms_np[:,2])

                            '''3D Vis'''
                            if 0:
                                # figure = mlab.figure(1, fgcolor=(1,1,1), bgcolor=(0,0,0))
                                # from pyKinectTools.utils.DepthUtils import *
                                pts = depthIm2XYZ(depthIm).astype(np.int)
                                interval = 25
                                figure.scene.disable_render = True
                                mlab.clf()
                                # ss = mlab.points3d(-pts[::interval,0], pts[::interval,1], pts[::interval,2], colormap='Blues', vmin=1000., vmax=5000., mode='2dvertex')
                                ss = mlab.points3d(pts[::interval,0], pts[::interval,1], pts[::interval,2], 5.-(np.minimum(pts[::interval,2], 5000)/float((-pts[:,2]).max()))/1000., scale_factor=25., colormap='Blues')#, mode='2dvertex')
                                # , scale_factor=25.
                                mlab.view(azimuth=0, elevation=0, distance=3000., focalpoint=(0,0,0), figure=figure)#, reset_roll=False)
                                # mlab.roll(90)
                                currentView = mlab.view()
                                figure.scene.disable_render = False
                                mlab.draw()
                                # mlab.show()
                                # ss = mlab.points3d(pts[::interval,0], pts[::interval,1], pts[::interval,2], color=col, scale_factor=5)
                                # ss = mlab.points3d(pts[:,0], pts[:,1], pts[:,2], color=(1,1,1), scale_factor=5)
                                # ss = mlab.points3d(pts[:,0], pts[:,1], pts[:,2])

                        ''' Playback control: Look at keyboard input '''
                        ret = vv.waitKey()
                        if frame_id - frame_prev > 0:
                            framerate = (frame_id - frame_prev) / (time() - frame_prev_time)
                            frame_prev = frame_id
                            frame_prev_time = time()
                        new_date_entered = False
                        if ret > 0:
                            # player_controls(ret)
                            # print "Ret is",ret
                            if ret == keys_ESC:
                                break
                            elif ret == keys_space:
                                # Prompt for a new date/time on the terminal,
                                # then unwind the loops to seek to it.
                                print "Enter the following into the command line"
                                tmp = raw_input("Enter date: ")
                                day_new = tmp
                                tmp = raw_input("Enter hour: ")
                                hour_new = tmp
                                tmp = raw_input("Enter minute: ")
                                minute_new = tmp
                                print "New date:", day_new, hour_new, minute_new
                                new_date_entered = True
                                break
                            elif ret == keys_down_arrow:
                                play_speed = 0
                            elif ret == keys_left_arrow:
                                play_speed -= 1
                            elif ret == keys_right_arrow:
                                play_speed += 1
                            elif ret == keys_i:
                                embed()
                            elif ret == keys_frame_left:
                                frame_id -= 1
                            elif ret == keys_frame_right:
                                frame_id += 1
                            elif ret == keys_help:
                                display_help()
                        frame_id += play_speed

                        if save_anonomized and get_mask:
                            # Write the color frame with people blanked out.
                            save_dir = 'color_masked/'+dayDir+'/'+hourDir+'/'+minute_dir+'/'+devices[dev]+'/'
                            createDirectory(save_dir)
                            sm.imsave(save_dir+'colorM_'+depthFile[6:-4]+'.jpg', colorIm*(1-foregroundMask))
                    # except:
                    #     print "Erroneous frame"
                    #     if visualize:
                    #         vv.imshow("D", depthIm.astype(np.float)/5000)
                    #         ret = vv.waitKey(10)

                    # End seconds
                    if ret == keys_ESC or new_date_entered:
                        break
                    if frame_id >= len(depth_files[0]):
                        minute_index += 1
                    elif frame_id < 0:
                        minute_index -= 1
                        break

                # End hours
                if ret == keys_ESC or new_date_entered:
                    break
                if minute_index >= len(minute_dirs):
                    hour_index += 1
                elif minute_index < 0:
                    hour_index -= 1
                    break

            # End days
            if ret == keys_ESC:
                break
            if new_date_entered:
                break
            if hour_index >= len(hour_dirs):
                day_index += 1
            elif hour_index < 0:
                day_index -= 1
                if day_index < 0:
                    day_index = 0

        if ret == keys_ESC or day_index > len(day_dirs):
            break

    # Persist everything extracted during this session, then drop to IPython.
    np.save("/media/Data/r40_cX_", allFeatures)
    embed()
# Disabled snippet: transform saved centers-of-mass from camera 1 into the
# reference frame via a fixed 4x4 homogeneous matrix T.
if 0:
    coms1 = np.load('../../ICU_Dec2012_r40_c1_coms_partial.npy')
    T = np.array([-0.8531195226064485, -0.08215320378328564, 0.5152066878990207, 761.2299809410998, 0.3177589268248827, 0.7014041249433673, 0.6380137286418792, 1427.5420972165339, -0.4137829679564377, 0.7080134918351199, -0.5722766383564786, -3399.696025885259, 0.0, 0.0, 0.0, 1.0])
    T = T.reshape([4,4])
    # Apply T to each (x, y, z, 1) homogeneous point.
    coms12 = np.array([np.dot(np.asarray(T), np.array([x[0], x[1], x[2], 1])) for x in coms1])
def display_help():
    """Print the interactive playback key bindings to stdout.

    Uses single-argument print() calls, which behave identically under
    Python 2 (parenthesized expression) and Python 3 (function call),
    unlike the original Python-2-only print statements.
    """
    print("")
    print("Playback commands: enter these in the image viewer")
    print("--------------------")
    print("h    help menu")
    print("i    interrupt with debugger")
    print("a    previous frame")
    print("s    next frame")
    print("spacebar    pick new time/date [enter in terminal]")
    print("left arrow key    rewind faster")
    print("right arrow key    fast forward faster")
    print("down arrow key    pause")
    print("escape key    exit")
if __name__=="__main__":
    # Command-line flags select which data streams to load and what to do.
    parser = optparse.OptionParser()
    parser.add_option('-s', '--skel', dest='skel', action="store_true", default=False, help='Enable skeleton')
    parser.add_option('-d', '--depth', dest='depth', action="store_true", default=False, help='Enable depth images')
    parser.add_option('-c', '--color', dest='color', action="store_true", default=False, help='Enable color images')
    parser.add_option('-m', '--mask', dest='mask', action="store_true", default=False, help='Enable enternal mask')
    parser.add_option('-a', '--anonomize', dest='save', action="store_true", default=False, help='Save anonomized RGB image')
    parser.add_option('-f', '--calcFeatures', dest='bgSubtraction', action="store_true", default=False, help='Enable feature extraction')
    parser.add_option('-v', '--visualize', dest='viz', action="store_true", default=False, help='Enable visualization')
    parser.add_option('-i', '--dev', dest='dev', type='int', default=0, help='Device number')
    (opt, args) = parser.parse_args()

    # Feature extraction and anonymization both require the person mask.
    if opt.bgSubtraction or opt.save:
        opt.mask = True
    if opt.viz:
        display_help()

    if len(args) > 0:
        print "Wrong input argument"
    elif opt.depth==False and opt.color==False and opt.skel==False:
        print "You must supply the program with some arguments."
    else:
        main(get_depth=opt.depth, get_skeleton=opt.skel, get_color=opt.color, get_mask=opt.mask, calculate_features=opt.bgSubtraction, visualize=opt.viz, save_anonomized=opt.save, device=opt.dev)
'''Profiling'''
# cProfile.runctx('main()', globals(), locals(), filename="ShowSkeletons.profile")

# Disabled: stack per-user HOG features/images for dictionary learning.
if 0:
    hogIms = np.vstack([allFeatures[i]['hogIm'] for i in range(len(allFeatures))])
    hogs = np.vstack([allFeatures[i]['hog'] for i in range(len(allFeatures))])
|
colincsl/pyKinectTools
|
pyKinectTools/scripts/ExtractAndVisualizeVideo.py
|
Python
|
bsd-2-clause
| 15,899
|
[
"Mayavi"
] |
d021dd5c8d6764612bd2aa5bd12fe49cb9964f363f31bc15943690eb66b665f4
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2018 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from __future__ import absolute_import
import re
# Case-insensitive regexes used to interpret user keyword values.
# NOTE(review): the '$' anchor sits *inside* the group on the numeric
# alternative only (e.g. '1$'), so 'yes'/'true'/'on' match as prefixes while
# '1' must be the entire string (keeping '10' from matching) -- presumably
# intentional; confirm before tightening.
yes = re.compile(r'^(yes|true|on|1$)', re.IGNORECASE)
no = re.compile(r'^(no|false|off|0$)', re.IGNORECASE)
# Derivative-level specifiers: 0 = energy, 1 = gradient, 2 = hessian.
der0th = re.compile(r'^(0|none|energy)', re.IGNORECASE)
der1st = re.compile(r'^(1|first|gradient)', re.IGNORECASE)
der2nd = re.compile(r'^(2|second|hessian)', re.IGNORECASE)
|
amjames/psi4
|
psi4/driver/p4util/p4regex.py
|
Python
|
lgpl-3.0
| 1,246
|
[
"Psi4"
] |
b2d1d109ac69bf16f08cf50775719c7cc342876903bab6f04fe751b6290c40c3
|
"""
Original code by @philopon
https://gist.github.com/philopon/a75a33919d9ae41dbed5bc6a39f5ede2
"""
import sys
import os
import requests
import subprocess
import shutil
from logging import getLogger, StreamHandler, INFO
# Module-level logger; a bare StreamHandler reports progress to stderr.
logger = getLogger(__name__)
logger.addHandler(StreamHandler())
logger.setLevel(INFO)

# Conda channels that are always appended before installing.
default_channels = [
    "conda-forge",
    "omnia",
]

# Packages that are always installed (openmm imports as "simtk").
default_packages = [
    "rdkit",
    "openmm",
    "pdbfixer",
]
def install(
    chunk_size=4096,
    file_name="Miniconda3-latest-Linux-x86_64.sh",
    url_base="https://repo.continuum.io/miniconda/",
    conda_path=os.path.expanduser(os.path.join("~", "miniconda")),
    add_python_path=True,
    # default channels are "conda-forge" and "omnia"
    additional_channels=None,
    # default packages are "rdkit", "openmm" and "pdbfixer"
    additional_packages=None,
):
    """Install conda packages on Google Colab.

    For GPU/CPU notebook::

        import conda_installer
        conda_installer.install()

    If you want to add other packages, use the ``additional_channels`` and
    ``additional_packages`` arguments (the original docstring named these
    ``additional_conda_channels``/``additional_conda_packages``, which do not
    exist)::

        import conda_installer
        conda_installer.install(
            additional_packages=["mdtraj", "networkx"]
        )

        # add channel
        conda_installer.install(
            additional_channels=["dglteam"],
            additional_packages=["dgl-cuda10.1"]
        )

    Parameters
    ----------
    chunk_size: int
        Download chunk size in bytes.
    file_name: str
        Miniconda installer file name.
    url_base: str
        Base URL the installer is fetched from.
    conda_path: str
        Directory miniconda is installed into.
    add_python_path: bool
        Whether to append the miniconda site-packages to ``sys.path``.
    additional_channels: list of str or None
        Extra conda channels (added to the defaults).
    additional_packages: list of str or None
        Extra conda packages (added to the defaults).
    """
    # Sentinel defaults instead of mutable [] defaults (same behavior for all
    # callers, but avoids the shared-mutable-default pitfall).
    if additional_channels is None:
        additional_channels = []
    if additional_packages is None:
        additional_packages = []

    python_path = os.path.join(
        conda_path,
        "lib",
        "python{0}.{1}".format(*sys.version_info),
        "site-packages",
    )
    if add_python_path and python_path not in sys.path:
        logger.info("add {} to PYTHONPATH".format(python_path))
        sys.path.append(python_path)

    # Skip everything if all requested packages are already present.
    is_installed = []
    packages = list(set(default_packages + additional_packages))
    for package in packages:
        # openmm installs its python bindings under the "simtk" namespace.
        package = "simtk" if package == "openmm" else package
        is_installed.append(os.path.isdir(os.path.join(python_path, package)))
    if all(is_installed):
        logger.info("all packages are already installed")
        return

    url = url_base + file_name
    python_version = "{0}.{1}.{2}".format(*sys.version_info)
    logger.info("python version: {}".format(python_version))

    # Remove any previous miniconda installation (directory or stray file).
    if os.path.isdir(conda_path):
        logger.warning("remove current miniconda")
        shutil.rmtree(conda_path)
    elif os.path.isfile(conda_path):
        logger.warning("remove {}".format(conda_path))
        os.remove(conda_path)

    # Stream the installer to disk in chunks to bound memory use.
    logger.info('fetching installer from {}'.format(url))
    res = requests.get(url, stream=True)
    res.raise_for_status()
    with open(file_name, 'wb') as f:
        for chunk in res.iter_content(chunk_size):
            f.write(chunk)
    logger.info('done')

    logger.info('installing miniconda to {}'.format(conda_path))
    subprocess.check_call(["bash", file_name, "-b", "-p", conda_path])
    logger.info('done')

    logger.info("installing rdkit, openmm, pdbfixer")
    channels = list(set(default_channels + additional_channels))
    for channel in channels:
        subprocess.check_call([
            os.path.join(conda_path, "bin", "conda"), "config", "--append",
            "channels", channel
        ])
        logger.info("added {} to channels".format(channel))
    # Pin the current interpreter version so the conda env matches Colab's.
    subprocess.check_call([
        os.path.join(conda_path, "bin", "conda"),
        "install",
        "--yes",
        "python=={}".format(python_version),
        *packages,
    ])
    logger.info("done")
    logger.info("conda packages installation finished!")
if __name__ == "__main__":
    # Run the full install with default channels/packages when invoked directly.
    install()
|
lilleswing/deepchem
|
scripts/colab_install.py
|
Python
|
mit
| 3,490
|
[
"MDTraj",
"OpenMM",
"RDKit"
] |
46f9f4c92184131452b5d1294a1d24a03d4295baf062bae10115bf8a2870002b
|
from numpy import arange, meshgrid, empty
from abc import ABCMeta, abstractmethod
from pyproj import Proj
class ModelGrid(metaclass=ABCMeta):
    """Creates a spatial grid

    Abstract base class: concrete grids must implement ``to_netCDF`` and
    ``_convert_to_CS``.
    """

    def __init__(self):
        self.var_name = ''  # name of the modelled variable

    @abstractmethod  # flags method that MUST be implemented by all subclasses
    def to_netCDF(self, filename):
        # Serialize the grid's data and metadata to a netCDF file.
        pass

    @property
    @abstractmethod
    def _convert_to_CS(self):
        # Convert the grid vectors into a coordinate system.
        # NOTE(review): declared as a property here, but SeNorgeGrid overrides
        # it as a plain method -- confirm which form is intended.
        pass

    def __repr__(self):
        return f"{self.__class__.__name__}, Abstract base class for model grids."
class SeNorgeGrid(ModelGrid):
    """1x1 km grid covering the Norwegian seNorge model domain (UTM zone 33N)."""

    def __init__(self, var_name: str):
        # BUG FIX: the original called super(ModelGrid, self).__init__(), which
        # resolves *past* ModelGrid and skips its initializer entirely.
        # Zero-argument super() runs ModelGrid.__init__ as intended.
        super().__init__()
        self.var_name = var_name
        # lower left corner in m
        self.LowerLeftEast = -75000
        self.LowerLeftNorth = 6450000
        # upper right corner in m
        self.UpperRightEast = 1120000
        self.UpperRightNorth = 8000000
        # interval
        self.dx = 1000
        self.dy = 1000
        # Easting/northing axis vectors and an empty value array (x-by-y).
        self.x = arange(self.LowerLeftEast, self.UpperRightEast, self.dx)
        self.y = arange(self.LowerLeftNorth, self.UpperRightNorth, self.dy)
        self.number_of_cells = len(self.x) * len(self.y)
        self.values = empty(shape=(len(self.x), len(self.y)), dtype=float)

    def _convert_to_CS(self):
        # Converts vector into coordinate system: build 2-D easting/northing
        # grids and project them to lon/lat (WGS84, UTM zone 33).
        self.xgrid, self.ygrid = meshgrid(self.x, self.y)
        self.p = Proj('+proj=utm +zone=33 +ellps=WGS84 +datum=WGS84 +units=m +no_defs')
        self.lon, self.lat = self.p(self.xgrid, self.ygrid, inverse=True)

    def __repr__(self):
        return f"{self.__class__.__name__}({self.var_name}: #cells: x({len(self.x)}) by y({len(self.y)}) = {self.number_of_cells}," \
               f" resolution: {self.dx} by {self.dy} m)"

    def to_netCDF(self, filename):
        """
        Saves the data in netCDF format

        :return: a netCDF file containing the data and metadata of the grid
        """
        pass

    def from_netCDF(self, netcdf_file):
        """
        Import data and metadata from a netCDF file

        :param netcdf_file: NetCDF file to load
        """
        pass

    def from_BIL(self, bil_file):
        """
        Import grid data from a BIL file.

        :param bil_file: Binary data file
        """
        pass

    def from_ndarray(self, arr):
        """
        :param arr: numpy array of shape (self.y, self.x)
        :return:
        """
        # NOTE(review): the docstring says (y, x) but self.values is allocated
        # as (x, y) in __init__ -- confirm the intended orientation.
        self.values = arr
if __name__ == "__main__":
    # Smoke test: build a grid and show its repr.
    sg = SeNorgeGrid('Temperature')
    print(sg)
|
kmunve/APS
|
aps/aps_io/grid_data.py
|
Python
|
mit
| 2,564
|
[
"NetCDF"
] |
ccbd5393a47c83af3b9e2ab97ce5f54575d4a3852e63c1d7e7156e27e0d0a06b
|
# Copyright 2000-2002 Brad Chapman.
# Copyright 2004-2005 by M de Hoon.
# Copyright 2007-2010 by Peter Cock.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Provides objects to represent biological sequences with alphabets.
See also U{http://biopython.org/wiki/Seq} and the chapter in our tutorial:
- U{http://biopython.org/DIST/docs/tutorial/Tutorial.html}
- U{http://biopython.org/DIST/docs/tutorial/Tutorial.pdf}
"""
__docformat__ ="epytext en" #Don't just use plain text in epydoc API pages!
import string #for maketrans only
import array
import sys
from Bio import Alphabet
from Bio.Alphabet import IUPAC
from Bio.SeqRecord import SeqRecord
from Bio.Data.IUPACData import ambiguous_dna_complement, ambiguous_rna_complement
from Bio.Data import CodonTable
def _maketrans(complement_mapping):
"""Makes a python string translation table (PRIVATE).
Arguments:
- complement_mapping - a dictionary such as ambiguous_dna_complement
and ambiguous_rna_complement from Data.IUPACData.
Returns a translation table (a string of length 256) for use with the
python string's translate method to use in a (reverse) complement.
Compatible with lower case and upper case sequences.
For internal use only.
"""
before = ''.join(complement_mapping.keys())
after = ''.join(complement_mapping.values())
before = before + before.lower()
after = after + after.lower()
if sys.version_info[0] == 3 :
return str.maketrans(before, after)
else:
return string.maketrans(before, after)
# Pre-built translation tables used by (reverse_)complement for DNA and RNA,
# including ambiguous IUPAC codes.
_dna_complement_table = _maketrans(ambiguous_dna_complement)
_rna_complement_table = _maketrans(ambiguous_rna_complement)
class Seq(object):
"""A read-only sequence object (essentially a string with an alphabet).
Like normal python strings, our basic sequence object is immutable.
This prevents you from doing my_seq[5] = "A" for example, but does allow
Seq objects to be used as dictionary keys.
The Seq object provides a number of string like methods (such as count,
find, split and strip), which are alphabet aware where appropriate.
In addition to the string like sequence, the Seq object has an alphabet
property. This is an instance of an Alphabet class from Bio.Alphabet,
for example generic DNA, or IUPAC DNA. This describes the type of molecule
(e.g. RNA, DNA, protein) and may also indicate the expected symbols
(letters).
The Seq object also provides some biological methods, such as complement,
reverse_complement, transcribe, back_transcribe and translate (which are
not applicable to sequences with a protein alphabet).
"""
    def __init__(self, data, alphabet = Alphabet.generic_alphabet):
        """Create a Seq object.

        Arguments:
         - seq - Sequence, required (string)
         - alphabet - Optional argument, an Alphabet object from Bio.Alphabet

        You will typically use Bio.SeqIO to read in sequences from files as
        SeqRecord objects, whose sequence will be exposed as a Seq object via
        the seq property.

        However, will often want to create your own Seq objects directly:

        >>> from Bio.Seq import Seq
        >>> from Bio.Alphabet import IUPAC
        >>> my_seq = Seq("MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF",
        ...              IUPAC.protein)
        >>> my_seq
        Seq('MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF', IUPACProtein())
        >>> print my_seq
        MKQHKAMIVALIVICITAVVAALVTRKDLCEVHIRTGQTEVAVF
        >>> my_seq.alphabet
        IUPACProtein()
        """
        # Enforce string storage
        # (a plain string, not another Seq -- keeps the object immutable and
        # hashable; note basestring makes this Python 2 specific).
        if not isinstance(data, basestring):
            raise TypeError("The sequence data given to a Seq object should "
                            "be a string (not another Seq object etc)")
        self._data = data
        self.alphabet = alphabet  # Seq API requirement
    # A data property is/was a Seq API requirement
    # Note this is read only since the Seq object is meant to be imutable
    @property
    def data(self) :
        """Sequence as a string (DEPRECATED).

        This is a read only property provided for backwards compatility with
        older versions of Biopython (as is the tostring() method). We now
        encourage you to use str(my_seq) instead of my_seq.data or the method
        my_seq.tostring().

        In recent releases of Biopython it was possible to change a Seq object
        by updating its data property, but this triggered a deprecation warning.
        Now the data property is read only, since Seq objects are meant to be
        immutable:

        >>> from Bio.Seq import Seq
        >>> from Bio.Alphabet import generic_dna
        >>> my_seq = Seq("ACGT", generic_dna)
        >>> str(my_seq) == my_seq.tostring() == "ACGT"
        True
        >>> my_seq.data = "AAAA"
        Traceback (most recent call last):
        ...
        AttributeError: can't set attribute
        """
        # Warn on every access so downstream code migrates to str(my_seq).
        import warnings
        import Bio
        warnings.warn("Accessing the .data attribute is deprecated. Please "
                      "use str(my_seq) or my_seq.tostring() instead of "
                      "my_seq.data.", Bio.BiopythonDeprecationWarning)
        return str(self)
def __repr__(self):
"""Returns a (truncated) representation of the sequence for debugging."""
if len(self) > 60:
#Shows the last three letters as it is often useful to see if there
#is a stop codon at the end of a sequence.
#Note total length is 54+3+3=60
return "%s('%s...%s', %s)" % (self.__class__.__name__,
str(self)[:54], str(self)[-3:],
repr(self.alphabet))
else:
return "%s(%s, %s)" % (self.__class__.__name__,
repr(self._data),
repr(self.alphabet))
    def __str__(self):
        """Returns the full sequence as a python string, use str(my_seq).

        Note that Biopython 1.44 and earlier would give a truncated
        version of repr(my_seq) for str(my_seq). If you are writing code
        which need to be backwards compatible with old Biopython, you
        should continue to use my_seq.tostring() rather than str(my_seq).
        """
        # The underlying storage is already a plain string.
        return self._data
    def __hash__(self):
        """Hash for comparison.

        See the __cmp__ documentation - we plan to change this!
        """
        # Must stay consistent with __cmp__, which also uses object identity.
        return id(self) #Currently use object identity for equality testing
    def __cmp__(self, other):
        """Compare the sequence to another sequence or a string (README).

        Historically comparing Seq objects has done Python object comparison.
        After considerable discussion (keeping in mind constraints of the
        Python language, hashes and dictionary support) a future release of
        Biopython will change this to use simple string comparison. The plan is
        that comparing incompatible alphabets (e.g. DNA to RNA) will trigger a
        warning.

        This version of Biopython still does Python object comparison, but with
        a warning about this future change. During this transition period,
        please just do explicit comparisons:

        >>> seq1 = Seq("ACGT")
        >>> seq2 = Seq("ACGT")
        >>> id(seq1) == id(seq2)
        False
        >>> str(seq1) == str(seq2)
        True

        Note - This method indirectly supports ==, < , etc.
        """
        # Only warn when comparing to another sequence object; comparison to
        # anything else silently falls through to identity comparison.
        if hasattr(other, "alphabet"):
            #other should be a Seq or a MutableSeq
            import warnings
            warnings.warn("In future comparing Seq objects will use string "
                          "comparison (not object comparison). Incompatible "
                          "alphabets will trigger a warning (not an exception). "
                          "In the interim please use id(seq1)==id(seq2) or "
                          "str(seq1)==str(seq2) to make your code explicit "
                          "and to avoid this warning.", FutureWarning)
        # Python 2 only: cmp() and the __cmp__ protocol were removed in py3.
        return cmp(id(self), id(other))
    def __len__(self):
        """Returns the length of the sequence, use len(my_seq)."""
        return len(self._data) # Seq API requirement
def __getitem__(self, index) : # Seq API requirement
"""Returns a subsequence of single letter, use my_seq[index]."""
#Note since Python 2.0, __getslice__ is deprecated
#and __getitem__ is used instead.
#See http://docs.python.org/ref/sequence-methods.html
if isinstance(index, int):
#Return a single letter as a string
return self._data[index]
else:
#Return the (sub)sequence as another Seq object
return Seq(self._data[index], self.alphabet)
    def __add__(self, other):
        """Add another sequence or string to this sequence.

        If adding a string to a Seq, the alphabet is preserved:

        >>> from Bio.Seq import Seq
        >>> from Bio.Alphabet import generic_protein
        >>> Seq("MELKI", generic_protein) + "LV"
        Seq('MELKILV', ProteinAlphabet())

        When adding two Seq (like) objects, the alphabets are important.
        Consider this example:

        >>> from Bio.Seq import Seq
        >>> from Bio.Alphabet.IUPAC import unambiguous_dna, ambiguous_dna
        >>> unamb_dna_seq = Seq("ACGT", unambiguous_dna)
        >>> ambig_dna_seq = Seq("ACRGT", ambiguous_dna)
        >>> unamb_dna_seq
        Seq('ACGT', IUPACUnambiguousDNA())
        >>> ambig_dna_seq
        Seq('ACRGT', IUPACAmbiguousDNA())

        If we add the ambiguous and unambiguous IUPAC DNA alphabets, we get
        the more general ambiguous IUPAC DNA alphabet:

        >>> unamb_dna_seq + ambig_dna_seq
        Seq('ACGTACRGT', IUPACAmbiguousDNA())

        However, if the default generic alphabet is included, the result is
        a generic alphabet:

        >>> Seq("") + ambig_dna_seq
        Seq('ACRGT', Alphabet())

        You can't add RNA and DNA sequences:

        >>> from Bio.Alphabet import generic_dna, generic_rna
        >>> Seq("ACGT", generic_dna) + Seq("ACGU", generic_rna)
        Traceback (most recent call last):
        ...
        TypeError: Incompatable alphabets DNAAlphabet() and RNAAlphabet()

        You can't add nucleotide and protein sequences:

        >>> from Bio.Alphabet import generic_dna, generic_protein
        >>> Seq("ACGT", generic_dna) + Seq("MELKI", generic_protein)
        Traceback (most recent call last):
        ...
        TypeError: Incompatable alphabets DNAAlphabet() and ProteinAlphabet()
        """
        if hasattr(other, "alphabet"):
            #other should be a Seq or a MutableSeq
            if not Alphabet._check_type_compatible([self.alphabet,
                                                    other.alphabet]):
                raise TypeError("Incompatable alphabets %s and %s" \
                                % (repr(self.alphabet), repr(other.alphabet)))
            #They should be the same sequence type (or one of them is generic)
            a = Alphabet._consensus_alphabet([self.alphabet, other.alphabet])
            # self.__class__ keeps subclasses (e.g. UnknownSeq) working.
            return self.__class__(str(self) + str(other), a)
        elif isinstance(other, basestring):
            #other is a plain string - use the current alphabet
            return self.__class__(str(self) + other, self.alphabet)
        elif isinstance(other, SeqRecord):
            #Get the SeqRecord's __radd__ to handle this
            return NotImplemented
        else :
            raise TypeError
    def __radd__(self, other):
        """Adding a sequence on the left.

        If adding a string to a Seq, the alphabet is preserved:

        >>> from Bio.Seq import Seq
        >>> from Bio.Alphabet import generic_protein
        >>> "LV" + Seq("MELKI", generic_protein)
        Seq('LVMELKI', ProteinAlphabet())

        Adding two Seq (like) objects is handled via the __add__ method.
        """
        if hasattr(other, "alphabet"):
            #other should be a Seq or a MutableSeq
            if not Alphabet._check_type_compatible([self.alphabet,
                                                    other.alphabet]):
                raise TypeError("Incompatable alphabets %s and %s" \
                                % (repr(self.alphabet), repr(other.alphabet)))
            #They should be the same sequence type (or one of them is generic)
            a = Alphabet._consensus_alphabet([self.alphabet, other.alphabet])
            return self.__class__(str(other) + str(self), a)
        elif isinstance(other, basestring):
            #other is a plain string - use the current alphabet
            return self.__class__(other + str(self), self.alphabet)
        else:
            raise TypeError
def tostring(self): # Seq API requirement
    """Return the full sequence as a plain python string (semi-obsolete).

    Although not formally deprecated, you are now encouraged to write
    str(my_seq) rather than my_seq.tostring().
    """
    #TODO - Fix all places elsewhere in Biopython using this method,
    #then start deprecation process?
    #import warnings
    #warnings.warn("This method is obsolete; please use str(my_seq) "
    #              "instead of my_seq.tostring().",
    #              PendingDeprecationWarning)
    return str(self)
def tomutable(self): # Needed? Or use a function?
    """Returns the full sequence as a MutableSeq object.

    >>> from Bio.Seq import Seq
    >>> from Bio.Alphabet import IUPAC
    >>> my_seq = Seq("MKQHKAMIVALIVICITAVVAAL",
    ...              IUPAC.protein)
    >>> my_seq
    Seq('MKQHKAMIVALIVICITAVVAAL', IUPACProtein())
    >>> my_seq.tomutable()
    MutableSeq('MKQHKAMIVALIVICITAVVAAL', IUPACProtein())

    Note that the alphabet is preserved.
    """
    # A new MutableSeq is built from our string form; the original
    # (immutable) Seq is left untouched.
    return MutableSeq(str(self), self.alphabet)
def _get_seq_str_and_check_alphabet(self, other_sequence):
    """Convert string/Seq/MutableSeq to a string, checking the alphabet (PRIVATE).

    For a string argument, returns the string itself.

    For a Seq or MutableSeq argument, first checks that its alphabet is
    compatible with this sequence's alphabet (raising a TypeError if
    not), then returns it as a plain string.
    """
    try:
        alpha = other_sequence.alphabet
    except AttributeError:
        #No alphabet attribute - assume other_sequence is a string:
        return other_sequence
    #It is a Seq or MutableSeq - check the alphabets are compatible:
    if not Alphabet._check_type_compatible([self.alphabet, alpha]):
        raise TypeError("Incompatable alphabets %s and %s" \
            % (repr(self.alphabet), repr(alpha)))
    #Compatible - hand back the sequence as a string:
    return str(other_sequence)
def count(self, sub, start=0, end=sys.maxint):
    """Non-overlapping count method, like that of a python string.

    This behaves like the python string method of the same name,
    which does a non-overlapping count!

    Returns an integer, the number of occurrences of substring
    argument sub in the (sub)sequence given by [start:end].
    Optional arguments start and end are interpreted as in slice
    notation.

    Arguments:
     - sub - a string or another Seq object to look for
     - start - optional integer, slice start
     - end - optional integer, slice end

    e.g.

    >>> from Bio.Seq import Seq
    >>> my_seq = Seq("AAAATGA")
    >>> print my_seq.count("A")
    5
    >>> print my_seq.count("ATG")
    1
    >>> print my_seq.count(Seq("AT"))
    1
    >>> print my_seq.count("AT", 2, -1)
    1

    HOWEVER, please note because python strings and Seq objects (and
    MutableSeq objects) do a non-overlapping search, this may not give
    the answer you expect:

    >>> "AAAA".count("AA")
    2
    >>> print Seq("AAAA").count("AA")
    2

    An overlapping search would give the answer as three!
    """
    #If the argument has an alphabet, check it is compatible with ours,
    #and get the argument as a plain string:
    sub_str = self._get_seq_str_and_check_alphabet(sub)
    return str(self).count(sub_str, start, end)
def __contains__(self, char):
    """Implement the 'in' keyword, like a python string.

    e.g.

    >>> from Bio.Seq import Seq
    >>> from Bio.Alphabet import generic_dna, generic_rna, generic_protein
    >>> my_dna = Seq("ATATGAAATTTGAAAA", generic_dna)
    >>> "AAA" in my_dna
    True
    >>> Seq("AAA") in my_dna
    True
    >>> Seq("AAA", generic_dna) in my_dna
    True

    Like other Seq methods, this will raise a type error if another Seq
    (or Seq like) object with an incompatible alphabet is used:

    >>> Seq("AAA", generic_rna) in my_dna
    Traceback (most recent call last):
       ...
    TypeError: Incompatable alphabets DNAAlphabet() and RNAAlphabet()
    >>> Seq("AAA", generic_protein) in my_dna
    Traceback (most recent call last):
       ...
    TypeError: Incompatable alphabets DNAAlphabet() and ProteinAlphabet()
    """
    #Check the alphabet (if any) and get the query as a plain string:
    query = self._get_seq_str_and_check_alphabet(char)
    return query in str(self)
def find(self, sub, start=0, end=sys.maxint):
    """Find method, like that of a python string.

    This behaves like the python string method of the same name.

    Returns an integer, the index of the first occurrence of substring
    argument sub in the (sub)sequence given by [start:end].

    Arguments:
     - sub - a string or another Seq object to look for
     - start - optional integer, slice start
     - end - optional integer, slice end

    Returns -1 if the subsequence is NOT found.

    e.g. Locating the first typical start codon, AUG, in an RNA sequence:

    >>> from Bio.Seq import Seq
    >>> my_rna = Seq("GUCAUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAGUUG")
    >>> my_rna.find("AUG")
    3
    """
    #Check the alphabet (if any) and get the query as a plain string:
    query = self._get_seq_str_and_check_alphabet(sub)
    return str(self).find(query, start, end)
def rfind(self, sub, start=0, end=sys.maxint):
    """Find from right method, like that of a python string.

    This behaves like the python string method of the same name.

    Returns an integer, the index of the last (right most) occurrence of
    substring argument sub in the (sub)sequence given by [start:end].

    Arguments:
     - sub - a string or another Seq object to look for
     - start - optional integer, slice start
     - end - optional integer, slice end

    Returns -1 if the subsequence is NOT found.

    e.g. Locating the last typical start codon, AUG, in an RNA sequence:

    >>> from Bio.Seq import Seq
    >>> my_rna = Seq("GUCAUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAGUUG")
    >>> my_rna.rfind("AUG")
    15
    """
    #Check the alphabet (if any) and get the query as a plain string:
    query = self._get_seq_str_and_check_alphabet(sub)
    return str(self).rfind(query, start, end)
def startswith(self, prefix, start=0, end=sys.maxint):
    """Does the Seq start with the given prefix?  Returns True/False.

    This behaves like the python string method of the same name.

    Return True if the sequence starts with the specified prefix
    (a string or another Seq object), False otherwise.
    With optional start, test sequence beginning at that position.
    With optional end, stop comparing sequence at that position.
    prefix can also be a tuple of strings to try.  e.g.

    >>> from Bio.Seq import Seq
    >>> my_rna = Seq("GUCAUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAGUUG")
    >>> my_rna.startswith("GUC")
    True
    >>> my_rna.startswith("AUG")
    False
    >>> my_rna.startswith("AUG", 3)
    True
    >>> my_rna.startswith(("UCC","UCA","UCG"),1)
    True
    """
    if isinstance(prefix, tuple):
        #Validate every alphabet BEFORE doing any comparisons, so an
        #incompatible entry raises even if an earlier entry matches:
        strings = [self._get_seq_str_and_check_alphabet(p) \
                   for p in prefix]
        return any(str(self).startswith(s, start, end) \
                   for s in strings)
    #Single prefix - check its alphabet (if any) and compare:
    prefix_str = self._get_seq_str_and_check_alphabet(prefix)
    return str(self).startswith(prefix_str, start, end)
def endswith(self, suffix, start=0, end=sys.maxint):
    """Does the Seq end with the given suffix?  Returns True/False.

    This behaves like the python string method of the same name.

    Return True if the sequence ends with the specified suffix
    (a string or another Seq object), False otherwise.
    With optional start, test sequence beginning at that position.
    With optional end, stop comparing sequence at that position.
    suffix can also be a tuple of strings to try.  e.g.

    >>> from Bio.Seq import Seq
    >>> my_rna = Seq("GUCAUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAGUUG")
    >>> my_rna.endswith("UUG")
    True
    >>> my_rna.endswith("AUG")
    False
    >>> my_rna.endswith("AUG", 0, 18)
    True
    >>> my_rna.endswith(("UCC","UCA","UUG"))
    True
    """
    if isinstance(suffix, tuple):
        #Validate every alphabet BEFORE doing any comparisons, so an
        #incompatible entry raises even if an earlier entry matches:
        strings = [self._get_seq_str_and_check_alphabet(s) \
                   for s in suffix]
        return any(str(self).endswith(s, start, end) \
                   for s in strings)
    #Single suffix - check its alphabet (if any) and compare:
    suffix_str = self._get_seq_str_and_check_alphabet(suffix)
    return str(self).endswith(suffix_str, start, end)
def split(self, sep=None, maxsplit=-1):
    """Split method, like that of a python string.

    This behaves like the python string method of the same name.

    Return a list of the 'words' in the string (as Seq objects),
    using sep as the delimiter string.  If maxsplit is given, at
    most maxsplit splits are done.  If maxsplit is omitted, all
    splits are made.

    Following the python string method, sep will by default be any
    white space (tabs, spaces, newlines) but this is unlikely to
    apply to biological sequences.

    e.g.

    >>> from Bio.Seq import Seq
    >>> my_rna = Seq("GUCAUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAGUUG")
    >>> my_aa = my_rna.translate()
    >>> my_aa
    Seq('VMAIVMGR*KGAR*L', HasStopCodon(ExtendedIUPACProtein(), '*'))
    >>> my_aa.split("*")
    [Seq('VMAIVMGR', HasStopCodon(ExtendedIUPACProtein(), '*')), Seq('KGAR', HasStopCodon(ExtendedIUPACProtein(), '*')), Seq('L', HasStopCodon(ExtendedIUPACProtein(), '*'))]
    >>> my_aa.split("*",1)
    [Seq('VMAIVMGR', HasStopCodon(ExtendedIUPACProtein(), '*')), Seq('KGAR*L', HasStopCodon(ExtendedIUPACProtein(), '*'))]

    See also the rsplit method:

    >>> my_aa.rsplit("*",1)
    [Seq('VMAIVMGR*KGAR', HasStopCodon(ExtendedIUPACProtein(), '*')), Seq('L', HasStopCodon(ExtendedIUPACProtein(), '*'))]
    """
    #Check the separator's alphabet (if any) and get it as a string:
    sep_str = self._get_seq_str_and_check_alphabet(sep)
    #TODO - If the sep is the defined stop symbol, or gap char,
    #should we adjust the alphabet?
    #Each fragment keeps the parent sequence's alphabet:
    return [Seq(chunk, self.alphabet) \
            for chunk in str(self).split(sep_str, maxsplit)]
def rsplit(self, sep=None, maxsplit=-1):
    """Right split method, like that of a python string.

    This behaves like the python string method of the same name.

    Return a list of the 'words' in the string (as Seq objects),
    using sep as the delimiter string.  If maxsplit is given, at
    most maxsplit splits are done COUNTING FROM THE RIGHT.
    If maxsplit is omitted, all splits are made.

    Following the python string method, sep will by default be any
    white space (tabs, spaces, newlines) but this is unlikely to
    apply to biological sequences.

    e.g. print my_seq.rsplit("*",1)

    See also the split method.
    """
    #Check the separator's alphabet (if any) and get it as a string:
    sep_str = self._get_seq_str_and_check_alphabet(sep)
    #Each fragment keeps the parent sequence's alphabet:
    return [Seq(chunk, self.alphabet) \
            for chunk in str(self).rsplit(sep_str, maxsplit)]
def strip(self, chars=None):
    """Returns a new Seq object with leading and trailing ends stripped.

    This behaves like the python string method of the same name.

    Optional argument chars defines which characters to remove.  If
    omitted or None (default) then as for the python string method,
    this defaults to removing any white space.

    e.g. print my_seq.strip("-")

    See also the lstrip and rstrip methods.
    """
    #Check the alphabet of chars (if any) and get it as a string:
    chars_str = self._get_seq_str_and_check_alphabet(chars)
    return Seq(str(self).strip(chars_str), self.alphabet)
def lstrip(self, chars=None):
    """Returns a new Seq object with leading (left) end stripped.

    This behaves like the python string method of the same name.

    Optional argument chars defines which characters to remove.  If
    omitted or None (default) then as for the python string method,
    this defaults to removing any white space.

    e.g. print my_seq.lstrip("-")

    See also the strip and rstrip methods.
    """
    #Check the alphabet of chars (if any) and get it as a string:
    chars_str = self._get_seq_str_and_check_alphabet(chars)
    return Seq(str(self).lstrip(chars_str), self.alphabet)
def rstrip(self, chars=None):
    """Returns a new Seq object with trailing (right) end stripped.

    This behaves like the python string method of the same name.

    Optional argument chars defines which characters to remove.  If
    omitted or None (default) then as for the python string method,
    this defaults to removing any white space.

    e.g. Removing a nucleotide sequence's polyadenylation (poly-A tail):

    >>> from Bio.Alphabet import IUPAC
    >>> from Bio.Seq import Seq
    >>> my_seq = Seq("CGGTACGCTTATGTCACGTAGAAAAAA", IUPAC.unambiguous_dna)
    >>> my_seq
    Seq('CGGTACGCTTATGTCACGTAGAAAAAA', IUPACUnambiguousDNA())
    >>> my_seq.rstrip("A")
    Seq('CGGTACGCTTATGTCACGTAG', IUPACUnambiguousDNA())

    See also the strip and lstrip methods.
    """
    #Check the alphabet of chars (if any) and get it as a string:
    chars_str = self._get_seq_str_and_check_alphabet(chars)
    return Seq(str(self).rstrip(chars_str), self.alphabet)
def upper(self):
    """Returns an upper case copy of the sequence.

    >>> from Bio.Alphabet import HasStopCodon, generic_protein
    >>> from Bio.Seq import Seq
    >>> my_seq = Seq("VHLTPeeK*", HasStopCodon(generic_protein))
    >>> my_seq
    Seq('VHLTPeeK*', HasStopCodon(ProteinAlphabet(), '*'))
    >>> my_seq.lower()
    Seq('vhltpeek*', HasStopCodon(ProteinAlphabet(), '*'))
    >>> my_seq.upper()
    Seq('VHLTPEEK*', HasStopCodon(ProteinAlphabet(), '*'))

    This will adjust the alphabet if required.  See also the lower method.
    """
    # The alphabet's _upper() method returns an upper case version of
    # the alphabet, matching the upper cased sequence letters.
    return Seq(str(self).upper(), self.alphabet._upper())
def lower(self):
    """Returns a lower case copy of the sequence.

    This will adjust the alphabet if required.  Note that the IUPAC alphabets
    are upper case only, and thus a generic alphabet must be substituted.

    >>> from Bio.Alphabet import Gapped, generic_dna
    >>> from Bio.Alphabet import IUPAC
    >>> from Bio.Seq import Seq
    >>> my_seq = Seq("CGGTACGCTTATGTCACGTAG*AAAAAA", Gapped(IUPAC.unambiguous_dna, "*"))
    >>> my_seq
    Seq('CGGTACGCTTATGTCACGTAG*AAAAAA', Gapped(IUPACUnambiguousDNA(), '*'))
    >>> my_seq.lower()
    Seq('cggtacgcttatgtcacgtag*aaaaaa', Gapped(DNAAlphabet(), '*'))

    See also the upper method.
    """
    # The alphabet's _lower() method returns a lower case (or generic)
    # version of the alphabet, matching the lower cased sequence letters.
    return Seq(str(self).lower(), self.alphabet._lower())
def complement(self):
    """Returns the complement sequence. New Seq object.

    >>> from Bio.Seq import Seq
    >>> from Bio.Alphabet import IUPAC
    >>> my_dna = Seq("CCCCCGATAG", IUPAC.unambiguous_dna)
    >>> my_dna
    Seq('CCCCCGATAG', IUPACUnambiguousDNA())
    >>> my_dna.complement()
    Seq('GGGGGCTATC', IUPACUnambiguousDNA())

    You can of course used mixed case sequences,

    >>> from Bio.Seq import Seq
    >>> from Bio.Alphabet import generic_dna
    >>> my_dna = Seq("CCCCCgatA-GD", generic_dna)
    >>> my_dna
    Seq('CCCCCgatA-GD', DNAAlphabet())
    >>> my_dna.complement()
    Seq('GGGGGctaT-CH', DNAAlphabet())

    Note in the above example, ambiguous character D denotes
    G, A or T so its complement is H (for C, T or A).

    Trying to complement a protein sequence raises an exception.

    >>> my_protein = Seq("MAIVMGR", IUPAC.protein)
    >>> my_protein.complement()
    Traceback (most recent call last):
       ...
    ValueError: Proteins do not have complements!
    """
    base = Alphabet._get_base_alphabet(self.alphabet)
    if isinstance(base, Alphabet.ProteinAlphabet):
        raise ValueError("Proteins do not have complements!")
    # Pick the DNA or RNA complement translation table.  When the
    # alphabet does not say which, sniff the letters: U/u implies RNA,
    # otherwise assume DNA; both present is ambiguous and rejected.
    if isinstance(base, Alphabet.DNAAlphabet):
        ttable = _dna_complement_table
    elif isinstance(base, Alphabet.RNAAlphabet):
        ttable = _rna_complement_table
    elif ('U' in self._data or 'u' in self._data) \
    and ('T' in self._data or 't' in self._data):
        #TODO - Handle this cleanly?
        raise ValueError("Mixed RNA/DNA found")
    elif 'U' in self._data or 'u' in self._data:
        ttable = _rna_complement_table
    else:
        ttable = _dna_complement_table
    # str.translate with a 256-char table does the per-letter mapping.
    #Much faster on really long sequences than the previous loop based one.
    #thx to Michael Palmer, University of Waterloo
    return Seq(str(self).translate(ttable), self.alphabet)
def reverse_complement(self):
    """Returns the reverse complement sequence. New Seq object.

    >>> from Bio.Seq import Seq
    >>> from Bio.Alphabet import IUPAC
    >>> my_dna = Seq("CCCCCGATAGNR", IUPAC.ambiguous_dna)
    >>> my_dna
    Seq('CCCCCGATAGNR', IUPACAmbiguousDNA())
    >>> my_dna.reverse_complement()
    Seq('YNCTATCGGGGG', IUPACAmbiguousDNA())

    Note in the above example, since R = G or A, its complement
    is Y (which denotes C or T).

    You can of course used mixed case sequences,

    >>> from Bio.Seq import Seq
    >>> from Bio.Alphabet import generic_dna
    >>> my_dna = Seq("CCCCCgatA-G", generic_dna)
    >>> my_dna
    Seq('CCCCCgatA-G', DNAAlphabet())
    >>> my_dna.reverse_complement()
    Seq('C-TatcGGGGG', DNAAlphabet())

    Trying to complement a protein sequence raises an exception:

    >>> my_protein = Seq("MAIVMGR", IUPAC.protein)
    >>> my_protein.reverse_complement()
    Traceback (most recent call last):
       ...
    ValueError: Proteins do not have complements!
    """
    # The complement method does the alphabet checks and letter mapping;
    #Use -1 stride/step to reverse the complement
    return self.complement()[::-1]
def transcribe(self):
    """Returns the RNA sequence from a DNA sequence. New Seq object.

    >>> from Bio.Seq import Seq
    >>> from Bio.Alphabet import IUPAC
    >>> coding_dna = Seq("ATGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG",
    ...                  IUPAC.unambiguous_dna)
    >>> coding_dna
    Seq('ATGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG', IUPACUnambiguousDNA())
    >>> coding_dna.transcribe()
    Seq('AUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAG', IUPACUnambiguousRNA())

    Trying to transcribe a protein or RNA sequence raises an exception:

    >>> my_protein = Seq("MAIVMGR", IUPAC.protein)
    >>> my_protein.transcribe()
    Traceback (most recent call last):
       ...
    ValueError: Proteins cannot be transcribed!
    """
    base = Alphabet._get_base_alphabet(self.alphabet)
    if isinstance(base, Alphabet.ProteinAlphabet):
        raise ValueError("Proteins cannot be transcribed!")
    if isinstance(base, Alphabet.RNAAlphabet):
        raise ValueError("RNA cannot be transcribed!")
    #Keep an exact IUPAC RNA alphabet where we know the DNA alphabet,
    #otherwise fall back on the generic RNA alphabet:
    if self.alphabet == IUPAC.unambiguous_dna:
        rna_alpha = IUPAC.unambiguous_rna
    elif self.alphabet == IUPAC.ambiguous_dna:
        rna_alpha = IUPAC.ambiguous_rna
    else:
        rna_alpha = Alphabet.generic_rna
    #Transcription is just T -> U (in both upper and lower case):
    return Seq(str(self).replace('T','U').replace('t','u'), rna_alpha)
def back_transcribe(self):
    """Returns the DNA sequence from an RNA sequence. New Seq object.

    >>> from Bio.Seq import Seq
    >>> from Bio.Alphabet import IUPAC
    >>> messenger_rna = Seq("AUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAG",
    ...                     IUPAC.unambiguous_rna)
    >>> messenger_rna
    Seq('AUGGCCAUUGUAAUGGGCCGCUGAAAGGGUGCCCGAUAG', IUPACUnambiguousRNA())
    >>> messenger_rna.back_transcribe()
    Seq('ATGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG', IUPACUnambiguousDNA())

    Trying to back-transcribe a protein or DNA sequence raises an
    exception:

    >>> my_protein = Seq("MAIVMGR", IUPAC.protein)
    >>> my_protein.back_transcribe()
    Traceback (most recent call last):
       ...
    ValueError: Proteins cannot be back transcribed!
    """
    base = Alphabet._get_base_alphabet(self.alphabet)
    if isinstance(base, Alphabet.ProteinAlphabet):
        raise ValueError("Proteins cannot be back transcribed!")
    if isinstance(base, Alphabet.DNAAlphabet):
        raise ValueError("DNA cannot be back transcribed!")
    #Keep an exact IUPAC DNA alphabet where we know the RNA alphabet,
    #otherwise fall back on the generic DNA alphabet:
    if self.alphabet == IUPAC.unambiguous_rna:
        dna_alpha = IUPAC.unambiguous_dna
    elif self.alphabet == IUPAC.ambiguous_rna:
        dna_alpha = IUPAC.ambiguous_dna
    else:
        dna_alpha = Alphabet.generic_dna
    #Back-transcription is just U -> T (in both upper and lower case):
    return Seq(str(self).replace("U", "T").replace("u", "t"), dna_alpha)
def translate(self, table="Standard", stop_symbol="*", to_stop=False,
              cds=False):
    """Turns a nucleotide sequence into a protein sequence. New Seq object.

    This method will translate DNA or RNA sequences, and those with a
    nucleotide or generic alphabet.  Trying to translate a protein
    sequence raises an exception.

    Arguments:
     - table - Which codon table to use?  This can be either a name
               (string), an NCBI identifier (integer), or a CodonTable
               object (useful for non-standard genetic codes).  This
               defaults to the "Standard" table.
     - stop_symbol - Single character string, what to use for terminators.
                     This defaults to the asterisk, "*".
     - to_stop - Boolean, defaults to False meaning do a full translation
                 continuing on past any stop codons (translated as the
                 specified stop_symbol).  If True, translation is
                 terminated at the first in frame stop codon (and the
                 stop_symbol is not appended to the returned protein
                 sequence).
     - cds - Boolean, indicates this is a complete CDS.  If True,
             this checks the sequence starts with a valid alternative start
             codon (which will be translated as methionine, M), that the
             sequence length is a multiple of three, and that there is a
             single in frame stop codon at the end (this will be excluded
             from the protein sequence, regardless of the to_stop option).
             If these tests fail, an exception is raised.

    e.g. Using the standard table:

    >>> coding_dna = Seq("GTGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG")
    >>> coding_dna.translate()
    Seq('VAIVMGR*KGAR*', HasStopCodon(ExtendedIUPACProtein(), '*'))
    >>> coding_dna.translate(stop_symbol="@")
    Seq('VAIVMGR@KGAR@', HasStopCodon(ExtendedIUPACProtein(), '@'))
    >>> coding_dna.translate(to_stop=True)
    Seq('VAIVMGR', ExtendedIUPACProtein())

    Now using NCBI table 2, where TGA is not a stop codon:

    >>> coding_dna.translate(table=2)
    Seq('VAIVMGRWKGAR*', HasStopCodon(ExtendedIUPACProtein(), '*'))
    >>> coding_dna.translate(table=2, to_stop=True)
    Seq('VAIVMGRWKGAR', ExtendedIUPACProtein())

    In fact, GTG is an alternative start codon under NCBI table 2, meaning
    this sequence could be a complete CDS:

    >>> coding_dna.translate(table=2, cds=True)
    Seq('MAIVMGRWKGAR', ExtendedIUPACProtein())

    It isn't a valid CDS under NCBI table 1, due to both the start codon and
    also the in frame stop codons:

    >>> coding_dna.translate(table=1, cds=True)
    Traceback (most recent call last):
        ...
    TranslationError: First codon 'GTG' is not a start codon

    If the sequence has no in-frame stop codon, then the to_stop argument
    has no effect:

    >>> coding_dna2 = Seq("TTGGCCATTGTAATGGGCCGC")
    >>> coding_dna2.translate()
    Seq('LAIVMGR', ExtendedIUPACProtein())
    >>> coding_dna2.translate(to_stop=True)
    Seq('LAIVMGR', ExtendedIUPACProtein())

    NOTE - Ambiguous codons like "TAN" or "NNN" could be an amino acid
    or a stop codon.  These are translated as "X".  Any invalid codon
    (e.g. "TA?" or "T-A") will throw a TranslationError.

    NOTE - Does NOT support gapped sequences.

    NOTE - This does NOT behave like the python string's translate
    method.  For that use str(my_seq).translate(...) instead.
    """
    # Guard against callers mistaking this for str.translate, which
    # takes a 256 character mapping table:
    if isinstance(table, str) and len(table)==256:
        raise ValueError("The Seq object translate method DOES NOT take " \
                         + "a 256 character string mapping table like " \
                         + "the python string object's translate method. " \
                         + "Use str(my_seq).translate(...) instead.")
    if isinstance(Alphabet._get_base_alphabet(self.alphabet),
                  Alphabet.ProteinAlphabet):
        raise ValueError("Proteins cannot be translated!")
    # Work out which CodonTable object to use.  The try/except/else
    # dispatches on the type of the table argument:
    # - int(table) succeeds -> NCBI table ID (else branch below)
    # - int(table) raises ValueError -> a table name string
    # - int(table) raises TypeError/AttributeError -> a CodonTable object
    try:
        table_id = int(table)
    except ValueError:
        # Not a number - assume it is a table name:
        if self.alphabet==IUPAC.unambiguous_dna:
            #Will use standard IUPAC protein alphabet, no need for X
            codon_table = CodonTable.unambiguous_dna_by_name[table]
        elif self.alphabet==IUPAC.unambiguous_rna:
            #Will use standard IUPAC protein alphabet, no need for X
            codon_table = CodonTable.unambiguous_rna_by_name[table]
        else:
            #This will use the extended IUPAC protein alphabet with X etc.
            #The same table can be used for RNA or DNA (we use this for
            #translating strings).
            codon_table = CodonTable.ambiguous_generic_by_name[table]
    except (AttributeError, TypeError):
        # Not a string or a number - assume it is a CodonTable object:
        if isinstance(table, CodonTable.CodonTable):
            codon_table = table
        else:
            raise ValueError('Bad table argument')
    else:
        # int(table) worked - assume it is an NCBI table ID:
        if self.alphabet==IUPAC.unambiguous_dna:
            #Will use standard IUPAC protein alphabet, no need for X
            codon_table = CodonTable.unambiguous_dna_by_id[table_id]
        elif self.alphabet==IUPAC.unambiguous_rna:
            #Will use standard IUPAC protein alphabet, no need for X
            codon_table = CodonTable.unambiguous_rna_by_id[table_id]
        else:
            #This will use the extended IUPAC protein alphabet with X etc.
            #The same table can be used for RNA or DNA (we use this for
            #translating strings).
            codon_table = CodonTable.ambiguous_generic_by_id[table_id]
    # The string-based helper does the actual codon-by-codon work:
    protein = _translate_str(str(self), codon_table, \
                             stop_symbol, to_stop, cds)
    # Only wrap the protein alphabet in HasStopCodon when a stop symbol
    # actually appears in the translation:
    if stop_symbol in protein:
        alphabet = Alphabet.HasStopCodon(codon_table.protein_alphabet,
                                         stop_symbol = stop_symbol)
    else:
        alphabet = codon_table.protein_alphabet
    return Seq(protein, alphabet)
def ungap(self, gap=None):
    """Return a copy of the sequence without the gap character(s).

    The gap character can be specified in two ways - either as an explicit
    argument, or via the sequence's alphabet.  For example:

    >>> from Bio.Seq import Seq
    >>> from Bio.Alphabet import generic_dna
    >>> my_dna = Seq("-ATA--TGAAAT-TTGAAAA", generic_dna)
    >>> my_dna
    Seq('-ATA--TGAAAT-TTGAAAA', DNAAlphabet())
    >>> my_dna.ungap("-")
    Seq('ATATGAAATTTGAAAA', DNAAlphabet())

    If the gap character is not given as an argument, it will be taken from
    the sequence's alphabet (if defined).  Notice that the returned sequence's
    alphabet is adjusted since it no longer requires a gapped alphabet:

    >>> from Bio.Seq import Seq
    >>> from Bio.Alphabet import IUPAC, Gapped, HasStopCodon
    >>> my_pro = Seq("MVVLE=AD*", HasStopCodon(Gapped(IUPAC.protein, "=")))
    >>> my_pro
    Seq('MVVLE=AD*', HasStopCodon(Gapped(IUPACProtein(), '='), '*'))
    >>> my_pro.ungap()
    Seq('MVVLEAD*', HasStopCodon(IUPACProtein(), '*'))

    Or, with a simpler gapped DNA example:

    >>> from Bio.Seq import Seq
    >>> from Bio.Alphabet import IUPAC, Gapped
    >>> my_seq = Seq("CGGGTAG=AAAAAA", Gapped(IUPAC.unambiguous_dna, "="))
    >>> my_seq
    Seq('CGGGTAG=AAAAAA', Gapped(IUPACUnambiguousDNA(), '='))
    >>> my_seq.ungap()
    Seq('CGGGTAGAAAAAA', IUPACUnambiguousDNA())

    As long as it is consistent with the alphabet, although it is redundant,
    you can still supply the gap character as an argument to this method:

    >>> my_seq
    Seq('CGGGTAG=AAAAAA', Gapped(IUPACUnambiguousDNA(), '='))
    >>> my_seq.ungap("=")
    Seq('CGGGTAGAAAAAA', IUPACUnambiguousDNA())

    However, if the gap character given as the argument disagrees with that
    declared in the alphabet, an exception is raised:

    >>> my_seq
    Seq('CGGGTAG=AAAAAA', Gapped(IUPACUnambiguousDNA(), '='))
    >>> my_seq.ungap("-")
    Traceback (most recent call last):
       ...
    ValueError: Gap '-' does not match '=' from alphabet

    Finally, if a gap character is not supplied, and the alphabet does not
    define one, an exception is raised:

    >>> from Bio.Seq import Seq
    >>> from Bio.Alphabet import generic_dna
    >>> my_dna = Seq("ATA--TGAAAT-TTGAAAA", generic_dna)
    >>> my_dna
    Seq('ATA--TGAAAT-TTGAAAA', DNAAlphabet())
    >>> my_dna.ungap()
    Traceback (most recent call last):
       ...
    ValueError: Gap character not given and not defined in alphabet
    """
    if hasattr(self.alphabet, "gap_char"):
        # The alphabet declares a gap character; any explicit gap
        # argument must agree with it.
        if not gap:
            gap = self.alphabet.gap_char
        elif gap != self.alphabet.gap_char:
            raise ValueError("Gap %s does not match %s from alphabet" \
                % (repr(gap), repr(self.alphabet.gap_char)))
        # Strip the Gapped(...) wrapper from the returned alphabet:
        alpha = Alphabet._ungap(self.alphabet)
    elif not gap:
        raise ValueError("Gap character not given and not defined in alphabet")
    else:
        # Alphabet declares no gap character - keep it unchanged for
        # the returned sequence.
        alpha = self.alphabet #modify!
    if len(gap)!=1 or not isinstance(gap, str):
        raise ValueError("Unexpected gap character, %s" % repr(gap))
    return Seq(str(self).replace(gap, ""), alpha)
class UnknownSeq(Seq):
"""A read-only sequence object of known length but unknown contents.
If you have an unknown sequence, you can represent this with a normal
Seq object, for example:
>>> my_seq = Seq("N"*5)
>>> my_seq
Seq('NNNNN', Alphabet())
>>> len(my_seq)
5
>>> print my_seq
NNNNN
However, this is rather wasteful of memory (especially for large
sequences), which is where this class is most usefull:
>>> unk_five = UnknownSeq(5)
>>> unk_five
UnknownSeq(5, alphabet = Alphabet(), character = '?')
>>> len(unk_five)
5
>>> print(unk_five)
?????
You can add unknown sequence together, provided their alphabets and
characters are compatible, and get another memory saving UnknownSeq:
>>> unk_four = UnknownSeq(4)
>>> unk_four
UnknownSeq(4, alphabet = Alphabet(), character = '?')
>>> unk_four + unk_five
UnknownSeq(9, alphabet = Alphabet(), character = '?')
If the alphabet or characters don't match up, the addition gives an
ordinary Seq object:
>>> unk_nnnn = UnknownSeq(4, character = "N")
>>> unk_nnnn
UnknownSeq(4, alphabet = Alphabet(), character = 'N')
>>> unk_nnnn + unk_four
Seq('NNNN????', Alphabet())
Combining with a real Seq gives a new Seq object:
>>> known_seq = Seq("ACGT")
>>> unk_four + known_seq
Seq('????ACGT', Alphabet())
>>> known_seq + unk_four
Seq('ACGT????', Alphabet())
"""
def __init__(self, length, alphabet = Alphabet.generic_alphabet, character = None):
    """Create a new UnknownSeq object.

    If character is omitted, it is determined from the alphabet: "N" for
    nucleotides, "X" for proteins, and "?" otherwise.
    """
    self._length = int(length)
    if self._length < 0:
        #TODO - Block zero length UnknownSeq?  You can just use a Seq!
        raise ValueError("Length must not be negative.")
    self.alphabet = alphabet
    if character:
        #An explicit character was supplied - must be a single letter:
        if len(character) != 1:
            raise ValueError("character argument should be a single letter string.")
        self._character = character
        return
    #No character given - pick one based on the base alphabet:
    base = Alphabet._get_base_alphabet(alphabet)
    #TODO? Check the case of the letters in the alphabet?
    #We may have to use "n" instead of "N" etc.
    if isinstance(base, Alphabet.NucleotideAlphabet):
        self._character = "N"
    elif isinstance(base, Alphabet.ProteinAlphabet):
        self._character = "X"
    else:
        self._character = "?"
def __len__(self):
    """Returns the stated length of the unknown sequence."""
    # The length is stored explicitly; no string is ever materialised.
    return self._length
def __str__(self):
    """Returns the unknown sequence as full string of the given length."""
    # This is the one place the full string is actually built.
    return self._character * self._length
def __repr__(self):
    """Returns an unambiguous representation, e.g. UnknownSeq(5, alphabet = Alphabet(), character = '?')."""
    return "UnknownSeq(%i, alphabet = %s, character = %s)" \
           % (self._length, repr(self.alphabet), repr(self._character))
def __add__(self, other):
    """Add another sequence or string to this sequence.

    Adding two UnknownSeq objects returns another UnknownSeq object
    provided the character is the same and the alphabets are compatible.

    >>> from Bio.Seq import UnknownSeq
    >>> from Bio.Alphabet import generic_protein
    >>> UnknownSeq(10, generic_protein) + UnknownSeq(5, generic_protein)
    UnknownSeq(15, alphabet = ProteinAlphabet(), character = 'X')

    If the characters differ, an UnknownSeq object cannot be used, so a
    Seq object is returned:

    >>> from Bio.Seq import UnknownSeq
    >>> from Bio.Alphabet import generic_protein
    >>> UnknownSeq(10, generic_protein) + UnknownSeq(5, generic_protein,
    ...                                              character="x")
    Seq('XXXXXXXXXXxxxxx', ProteinAlphabet())

    If adding a string to an UnknownSeq, a new Seq is returned with the
    same alphabet:

    >>> from Bio.Seq import UnknownSeq
    >>> from Bio.Alphabet import generic_protein
    >>> UnknownSeq(5, generic_protein) + "LV"
    Seq('XXXXXLV', ProteinAlphabet())
    """
    if isinstance(other, UnknownSeq) \
    and other._character == self._character:
        #Two unknown sequences with the same character - the sum is
        #still unknown, so stay memory efficient:
        #TODO - Check the alphabets match
        return UnknownSeq(len(self)+len(other),
                          self.alphabet, self._character)
    #Anything else (different character, a Seq, a plain string, ...)
    #is offloaded to the Seq base class, giving an ordinary Seq:
    return Seq(str(self), self.alphabet) + other
def __radd__(self, other):
    """Add this sequence on the right of a string (returns a Seq object)."""
    #If other is an UnknownSeq, then __add__ would be called.
    #Offload to the base class...
    return other + Seq(str(self), self.alphabet)
def __getitem__(self, index):
    """Get a subsequence from the UnknownSeq object.

    >>> unk = UnknownSeq(8, character="N")
    >>> print unk[:]
    NNNNNNNN
    >>> print unk[5:3]
    <BLANKLINE>
    >>> print unk[1:-1]
    NNNNNN
    >>> print unk[1:-1:2]
    NNN
    """
    if isinstance(index, int):
        # Single letter - fall back on the full string.
        #TODO - Check the bounds without wasting memory
        return str(self)[index]
    # A slice - only the resulting LENGTH matters, since every letter
    # is the same; compute it without building the string when we can.
    old_length = self._length
    step = index.step
    if step is None or step == 1:
        #This calculates the length you'd get from ("N"*old_length)[index]
        # by clamping start/stop to [0, old_length] exactly as python
        # slice semantics do (negative values count from the end):
        start = index.start
        end = index.stop
        if start is None:
            start = 0
        elif start < 0:
            start = max(0, old_length + start)
        elif start > old_length:
            start = old_length
        if end is None:
            end = old_length
        elif end < 0:
            end = max(0, old_length + end)
        elif end > old_length:
            end = old_length
        new_length = max(0, end-start)
    elif step == 0:
        # Match the error a real string slice would raise.
        raise ValueError("slice step cannot be zero")
    else:
        # Extended slice (step != 1) - cheat by slicing a dummy string.
        #TODO - handle step efficiently
        new_length = len(("X"*old_length)[index])
    #assert new_length == len(("X"*old_length)[index]), \
    #        (index, start, end, step, old_length,
    #         new_length, len(("X"*old_length)[index]))
    return UnknownSeq(new_length, self.alphabet, self._character)
def count(self, sub, start=0, end=sys.maxint):
    """Non-overlapping count method, like that of a python string.

    This behaves like the python string (and Seq object) method of the
    same name, which does a non-overlapping count!

    Returns an integer, the number of occurrences of substring
    argument sub in the (sub)sequence given by [start:end].
    Optional arguments start and end are interpreted as in slice
    notation.

    Arguments:
     - sub - a string or another Seq object to look for
     - start - optional integer, slice start
     - end - optional integer, slice end

    >>> "NNNN".count("N")
    4
    >>> Seq("NNNN").count("N")
    4
    >>> UnknownSeq(4, character="N").count("N")
    4
    >>> UnknownSeq(4, character="N").count("A")
    0
    >>> UnknownSeq(4, character="N").count("AA")
    0

    HOWEVER, please note because python strings and Seq objects (and
    MutableSeq objects) do a non-overlapping search, this may not give
    the answer you expect:

    >>> UnknownSeq(4, character="N").count("NN")
    2
    >>> UnknownSeq(4, character="N").count("NNN")
    1
    """
    sub_str = self._get_seq_str_and_check_alphabet(sub)
    if len(sub_str) == 1:
        # Single letter query: either it is our character (every
        # position matches) or it cannot occur at all.
        if str(sub_str) == self._character:
            if start==0 and end >= self._length:
                return self._length
            else:
                # Sub-range requested - let the string slice handle
                # the start/end clamping.
                #This could be done more cleverly...
                return str(self).count(sub_str, start, end)
        else:
            return 0
    else:
        # Multi-letter query: can only match if it is made entirely of
        # our character; non-overlapping count is then length // size.
        if set(sub_str) == set(self._character):
            if start==0 and end >= self._length:
                return self._length // len(sub_str)
            else:
                #This could be done more cleverly...
                return str(self).count(sub_str, start, end)
        else:
            return 0
def complement(self):
"""The complement of an unknown nucleotide equals itself.
>>> my_nuc = UnknownSeq(8)
>>> my_nuc
UnknownSeq(8, alphabet = Alphabet(), character = '?')
>>> print my_nuc
????????
>>> my_nuc.complement()
UnknownSeq(8, alphabet = Alphabet(), character = '?')
>>> print my_nuc.complement()
????????
"""
if isinstance(Alphabet._get_base_alphabet(self.alphabet),
Alphabet.ProteinAlphabet):
raise ValueError("Proteins do not have complements!")
return self
def reverse_complement(self):
"""The reverse complement of an unknown nucleotide equals itself.
>>> my_nuc = UnknownSeq(10)
>>> my_nuc
UnknownSeq(10, alphabet = Alphabet(), character = '?')
>>> print my_nuc
??????????
>>> my_nuc.reverse_complement()
UnknownSeq(10, alphabet = Alphabet(), character = '?')
>>> print my_nuc.reverse_complement()
??????????
"""
if isinstance(Alphabet._get_base_alphabet(self.alphabet),
Alphabet.ProteinAlphabet):
raise ValueError("Proteins do not have complements!")
return self
def transcribe(self):
"""Returns unknown RNA sequence from an unknown DNA sequence.
>>> my_dna = UnknownSeq(10, character="N")
>>> my_dna
UnknownSeq(10, alphabet = Alphabet(), character = 'N')
>>> print my_dna
NNNNNNNNNN
>>> my_rna = my_dna.transcribe()
>>> my_rna
UnknownSeq(10, alphabet = RNAAlphabet(), character = 'N')
>>> print my_rna
NNNNNNNNNN
"""
#Offload the alphabet stuff
s = Seq(self._character, self.alphabet).transcribe()
return UnknownSeq(self._length, s.alphabet, self._character)
def back_transcribe(self):
"""Returns unknown DNA sequence from an unknown RNA sequence.
>>> my_rna = UnknownSeq(20, character="N")
>>> my_rna
UnknownSeq(20, alphabet = Alphabet(), character = 'N')
>>> print my_rna
NNNNNNNNNNNNNNNNNNNN
>>> my_dna = my_rna.back_transcribe()
>>> my_dna
UnknownSeq(20, alphabet = DNAAlphabet(), character = 'N')
>>> print my_dna
NNNNNNNNNNNNNNNNNNNN
"""
#Offload the alphabet stuff
s = Seq(self._character, self.alphabet).back_transcribe()
return UnknownSeq(self._length, s.alphabet, self._character)
def upper(self):
"""Returns an upper case copy of the sequence.
>>> from Bio.Alphabet import generic_dna
>>> from Bio.Seq import UnknownSeq
>>> my_seq = UnknownSeq(20, generic_dna, character="n")
>>> my_seq
UnknownSeq(20, alphabet = DNAAlphabet(), character = 'n')
>>> print my_seq
nnnnnnnnnnnnnnnnnnnn
>>> my_seq.upper()
UnknownSeq(20, alphabet = DNAAlphabet(), character = 'N')
>>> print my_seq.upper()
NNNNNNNNNNNNNNNNNNNN
This will adjust the alphabet if required. See also the lower method.
"""
return UnknownSeq(self._length, self.alphabet._upper(), self._character.upper())
def lower(self):
"""Returns a lower case copy of the sequence.
This will adjust the alphabet if required:
>>> from Bio.Alphabet import IUPAC
>>> from Bio.Seq import UnknownSeq
>>> my_seq = UnknownSeq(20, IUPAC.extended_protein)
>>> my_seq
UnknownSeq(20, alphabet = ExtendedIUPACProtein(), character = 'X')
>>> print my_seq
XXXXXXXXXXXXXXXXXXXX
>>> my_seq.lower()
UnknownSeq(20, alphabet = ProteinAlphabet(), character = 'x')
>>> print my_seq.lower()
xxxxxxxxxxxxxxxxxxxx
See also the upper method.
"""
return UnknownSeq(self._length, self.alphabet._lower(), self._character.lower())
def translate(self, **kwargs):
"""Translate an unknown nucleotide sequence into an unknown protein.
e.g.
>>> my_seq = UnknownSeq(11, character="N")
>>> print my_seq
NNNNNNNNNNN
>>> my_protein = my_seq.translate()
>>> my_protein
UnknownSeq(3, alphabet = ProteinAlphabet(), character = 'X')
>>> print my_protein
XXX
In comparison, using a normal Seq object:
>>> my_seq = Seq("NNNNNNNNNNN")
>>> print my_seq
NNNNNNNNNNN
>>> my_protein = my_seq.translate()
>>> my_protein
Seq('XXX', ExtendedIUPACProtein())
>>> print my_protein
XXX
"""
if isinstance(Alphabet._get_base_alphabet(self.alphabet),
Alphabet.ProteinAlphabet):
raise ValueError("Proteins cannot be translated!")
return UnknownSeq(self._length//3, Alphabet.generic_protein, "X")
def ungap(self, gap=None):
"""Return a copy of the sequence without the gap character(s).
The gap character can be specified in two ways - either as an explicit
argument, or via the sequence's alphabet. For example:
>>> from Bio.Seq import UnknownSeq
>>> from Bio.Alphabet import Gapped, generic_dna
>>> my_dna = UnknownSeq(20, Gapped(generic_dna,"-"))
>>> my_dna
UnknownSeq(20, alphabet = Gapped(DNAAlphabet(), '-'), character = 'N')
>>> my_dna.ungap()
UnknownSeq(20, alphabet = DNAAlphabet(), character = 'N')
>>> my_dna.ungap("-")
UnknownSeq(20, alphabet = DNAAlphabet(), character = 'N')
If the UnknownSeq is using the gap character, then an empty Seq is
returned:
>>> my_gap = UnknownSeq(20, Gapped(generic_dna,"-"), character="-")
>>> my_gap
UnknownSeq(20, alphabet = Gapped(DNAAlphabet(), '-'), character = '-')
>>> my_gap.ungap()
Seq('', DNAAlphabet())
>>> my_gap.ungap("-")
Seq('', DNAAlphabet())
Notice that the returned sequence's alphabet is adjusted to remove any
explicit gap character declaration.
"""
#Offload the alphabet stuff
s = Seq(self._character, self.alphabet).ungap()
if s :
return UnknownSeq(self._length, s.alphabet, self._character)
else :
return Seq("", s.alphabet)
class MutableSeq(object):
    """An editable sequence object (with an alphabet).
    Unlike normal python strings and our basic sequence object (the Seq class)
    which are immutable, the MutableSeq lets you edit the sequence in place.
    However, this means you cannot use a MutableSeq object as a dictionary key.
    >>> from Bio.Seq import MutableSeq
    >>> from Bio.Alphabet import generic_dna
    >>> my_seq = MutableSeq("ACTCGTCGTCG", generic_dna)
    >>> my_seq
    MutableSeq('ACTCGTCGTCG', DNAAlphabet())
    >>> my_seq[5]
    'T'
    >>> my_seq[5] = "A"
    >>> my_seq
    MutableSeq('ACTCGACGTCG', DNAAlphabet())
    >>> my_seq[5]
    'A'
    >>> my_seq[5:8] = "NNN"
    >>> my_seq
    MutableSeq('ACTCGNNNTCG', DNAAlphabet())
    >>> len(my_seq)
    11
    Note that the MutableSeq object does not support as many string-like
    or biological methods as the Seq object.
    """
    def __init__(self, data, alphabet = Alphabet.generic_alphabet):
        """Create a MutableSeq from a string or an existing array.
        The letters are stored in an array.array so they can be edited in
        place; the type code is "u" (unicode) on Python 3, "c" on Python 2.
        """
        if sys.version_info[0] == 3:
            self.array_indicator = "u"
        else:
            self.array_indicator = "c"
        if isinstance(data, str): #TODO - What about unicode?
            self.data = array.array(self.array_indicator, data)
        else:
            self.data = data # assumes the input is an array
        self.alphabet = alphabet
    def __repr__(self):
        """Returns a (truncated) representation of the sequence for debugging."""
        if len(self) > 60:
            #Shows the last three letters as it is often useful to see if there
            #is a stop codon at the end of a sequence.
            #Note total length is 54+3+3=60
            return "%s('%s...%s', %s)" % (self.__class__.__name__,
                                          str(self[:54]), str(self[-3:]),
                                          repr(self.alphabet))
        else:
            return "%s('%s', %s)" % (self.__class__.__name__,
                                     str(self),
                                     repr(self.alphabet))
    def __str__(self):
        """Returns the full sequence as a python string.
        Note that Biopython 1.44 and earlier would give a truncated
        version of repr(my_seq) for str(my_seq). If you are writing code
        which needs to be backwards compatible with old Biopython, you
        should continue to use my_seq.tostring() rather than str(my_seq).
        """
        #See test_GAQueens.py for an historic usage of a non-string alphabet!
        return "".join(self.data)
    def __cmp__(self, other):
        """Compare the sequence to another sequence or a string (README).
        Currently if compared to another sequence the alphabets must be
        compatible. Comparing DNA to RNA, or Nucleotide to Protein will raise
        an exception. Otherwise only the sequence itself is compared, not the
        precise alphabet.
        A future release of Biopython will change this (and the Seq object etc)
        to use simple string comparison. The plan is that comparing sequences
        with incompatible alphabets (e.g. DNA to RNA) will trigger a warning
        but not an exception.
        During this transition period, please just do explicit comparisons:
        >>> seq1 = MutableSeq("ACGT")
        >>> seq2 = MutableSeq("ACGT")
        >>> id(seq1) == id(seq2)
        False
        >>> str(seq1) == str(seq2)
        True
        This method indirectly supports ==, < , etc.
        Note: __cmp__ (and the cmp built-in) only exist on Python 2.
        """
        if hasattr(other, "alphabet"):
            #other should be a Seq or a MutableSeq
            import warnings
            warnings.warn("In future comparing incompatible alphabets will "
                          "only trigger a warning (not an exception). In "
                          "the interim please use id(seq1)==id(seq2) or "
                          "str(seq1)==str(seq2) to make your code explicit "
                          "and to avoid this warning.", FutureWarning)
            if not Alphabet._check_type_compatible([self.alphabet,
                                                    other.alphabet]):
                raise TypeError("Incompatable alphabets %s and %s" \
                                % (repr(self.alphabet), repr(other.alphabet)))
            #They should be the same sequence type (or one of them is generic)
            if isinstance(other, MutableSeq):
                #See test_GAQueens.py for an historic usage of a non-string
                #alphabet!  Comparing the arrays supports this.
                return cmp(self.data, other.data)
            else:
                return cmp(str(self), str(other))
        elif isinstance(other, basestring):
            return cmp(str(self), other)
        else:
            raise TypeError
    #Length is simply that of the underlying array of letters.
    def __len__(self): return len(self.data)
    def __getitem__(self, index):
        #Note since Python 2.0, __getslice__ is deprecated
        #and __getitem__ is used instead.
        #See http://docs.python.org/ref/sequence-methods.html
        if isinstance(index, int):
            #Return a single letter as a string
            return self.data[index]
        else:
            #Return the (sub)sequence as another Seq object
            return MutableSeq(self.data[index], self.alphabet)
    def __setitem__(self, index, value):
        #Note since Python 2.0, __setslice__ is deprecated
        #and __setitem__ is used instead.
        #See http://docs.python.org/ref/sequence-methods.html
        if isinstance(index, int):
            #Replacing a single letter with a new string
            self.data[index] = value
        else:
            #Replacing a sub-sequence
            if isinstance(value, MutableSeq):
                self.data[index] = value.data
            elif isinstance(value, type(self.data)):
                self.data[index] = value
            else:
                self.data[index] = array.array(self.array_indicator,
                                               str(value))
    def __delitem__(self, index):
        #Note since Python 2.0, __delslice__ is deprecated
        #and __delitem__ is used instead.
        #See http://docs.python.org/ref/sequence-methods.html
        #Could be deleting a single letter, or a slice
        del self.data[index]
    def __add__(self, other):
        """Add another sequence or string to this sequence.
        Returns a new MutableSeq object."""
        if hasattr(other, "alphabet"):
            #other should be a Seq or a MutableSeq
            if not Alphabet._check_type_compatible([self.alphabet,
                                                    other.alphabet]):
                raise TypeError("Incompatable alphabets %s and %s" \
                                % (repr(self.alphabet), repr(other.alphabet)))
            #They should be the same sequence type (or one of them is generic)
            a = Alphabet._consensus_alphabet([self.alphabet, other.alphabet])
            if isinstance(other, MutableSeq):
                #See test_GAQueens.py for an historic usage of a non-string
                #alphabet!  Adding the arrays should support this.
                return self.__class__(self.data + other.data, a)
            else:
                return self.__class__(str(self) + str(other), a)
        elif isinstance(other, basestring):
            #other is a plain string - use the current alphabet
            return self.__class__(str(self) + str(other), self.alphabet)
        else:
            raise TypeError
    def __radd__(self, other):
        #Supports e.g. "string" + MutableSeq(...) by prepending other.
        if hasattr(other, "alphabet"):
            #other should be a Seq or a MutableSeq
            if not Alphabet._check_type_compatible([self.alphabet,
                                                    other.alphabet]):
                raise TypeError("Incompatable alphabets %s and %s" \
                                % (repr(self.alphabet), repr(other.alphabet)))
            #They should be the same sequence type (or one of them is generic)
            a = Alphabet._consensus_alphabet([self.alphabet, other.alphabet])
            if isinstance(other, MutableSeq):
                #See test_GAQueens.py for an historic usage of a non-string
                #alphabet!  Adding the arrays should support this.
                return self.__class__(other.data + self.data, a)
            else:
                return self.__class__(str(other) + str(self), a)
        elif isinstance(other, basestring):
            #other is a plain string - use the current alphabet
            return self.__class__(str(other) + str(self), self.alphabet)
        else:
            raise TypeError
    def append(self, c):
        """Append a single letter to the end of the sequence (in place)."""
        self.data.append(c)
    def insert(self, i, c):
        """Insert a single letter at position i (in place)."""
        self.data.insert(i, c)
    def pop(self, i = (-1)):
        """Remove and return the letter at position i (default the last)."""
        c = self.data[i]
        del self.data[i]
        return c
    def remove(self, item):
        """Remove the first occurrence of the given letter (in place).
        Raises ValueError if the letter is not present.
        """
        for i in range(len(self.data)):
            if self.data[i] == item:
                del self.data[i]
                return
        raise ValueError("MutableSeq.remove(x): x not in list")
    def count(self, sub, start=0, end=sys.maxint):
        """Non-overlapping count method, like that of a python string.
        This behaves like the python string method of the same name,
        which does a non-overlapping count!
        Returns an integer, the number of occurrences of substring
        argument sub in the (sub)sequence given by [start:end].
        Optional arguments start and end are interpreted as in slice
        notation.
        Arguments:
        - sub - a string or another Seq object to look for
        - start - optional integer, slice start
        - end - optional integer, slice end
        e.g.
        >>> from Bio.Seq import MutableSeq
        >>> my_mseq = MutableSeq("AAAATGA")
        >>> print my_mseq.count("A")
        5
        >>> print my_mseq.count("ATG")
        1
        >>> print my_mseq.count(Seq("AT"))
        1
        >>> print my_mseq.count("AT", 2, -1)
        1
        HOWEVER, please note because that python strings, Seq objects and
        MutableSeq objects do a non-overlapping search, this may not give
        the answer you expect:
        >>> "AAAA".count("AA")
        2
        >>> print MutableSeq("AAAA").count("AA")
        2
        An overlapping search would give the answer as three!
        """
        try:
            #TODO - Should we check the alphabet?
            search = sub.tostring()
        except AttributeError:
            search = sub
        if not isinstance(search, basestring):
            raise TypeError("expected a string, Seq or MutableSeq")
        if len(search) == 1:
            #Try and be efficient and work directly from the array.
            count = 0
            for c in self.data[start:end]:
                if c == search: count += 1
            return count
        else:
            #TODO - Can we do this more efficiently?
            return self.tostring().count(search, start, end)
    def index(self, item):
        """Return the first position of the given letter.
        Raises ValueError if the letter is not present.
        """
        for i in range(len(self.data)):
            if self.data[i] == item:
                return i
        raise ValueError("MutableSeq.index(x): x not in list")
    def reverse(self):
        """Modify the mutable sequence to reverse itself.
        No return value.
        """
        self.data.reverse()
    def complement(self):
        """Modify the mutable sequence to take on its complement.
        Trying to complement a protein sequence raises an exception.
        No return value.
        """
        if isinstance(Alphabet._get_base_alphabet(self.alphabet),
                      Alphabet.ProteinAlphabet):
            raise ValueError("Proteins do not have complements!")
        if self.alphabet in (IUPAC.ambiguous_dna, IUPAC.unambiguous_dna):
            d = ambiguous_dna_complement
        elif self.alphabet in (IUPAC.ambiguous_rna, IUPAC.unambiguous_rna):
            d = ambiguous_rna_complement
        elif 'U' in self.data and 'T' in self.data:
            #TODO - Handle this cleanly?
            raise ValueError("Mixed RNA/DNA found")
        elif 'U' in self.data:
            d = ambiguous_rna_complement
        else:
            d = ambiguous_dna_complement
        #NOTE(review): this updates the shared module-level complement dict
        #in place (adding lower case entries) rather than a private copy.
        #iteritems and list-returning map are Python 2 idioms.
        c = dict([(x.lower(), y.lower()) for x,y in d.iteritems()])
        d.update(c)
        self.data = map(lambda c: d[c], self.data)
        self.data = array.array(self.array_indicator, self.data)
    def reverse_complement(self):
        """Modify the mutable sequence to take on its reverse complement.
        Trying to reverse complement a protein sequence raises an exception.
        No return value.
        """
        self.complement()
        self.data.reverse()
    ## Sorting a sequence makes no sense.
    # def sort(self, *args): self.data.sort(*args)
    def extend(self, other):
        """Append multiple letters (from a MutableSeq, string or other
        iterable of letters) to the end of the sequence (in place)."""
        if isinstance(other, MutableSeq):
            for c in other.data:
                self.data.append(c)
        else:
            for c in other:
                self.data.append(c)
    def tostring(self):
        """Returns the full sequence as a python string (semi-obsolete).
        Although not formally deprecated, you are now encouraged to use
        str(my_seq) instead of my_seq.tostring().
        Because str(my_seq) will give you the full sequence as a python string,
        there is often no need to make an explicit conversion.  For example,
        print "ID={%s}, sequence={%s}" % (my_name, my_seq)
        On Biopython 1.44 or older you would have to have done this:
        print "ID={%s}, sequence={%s}" % (my_name, my_seq.tostring())
        """
        return "".join(self.data)
    def toseq(self):
        """Returns the full sequence as a new immutable Seq object.
        >>> from Bio.Seq import Seq
        >>> from Bio.Alphabet import IUPAC
        >>> my_mseq = MutableSeq("MKQHKAMIVALIVICITAVVAAL",
        ...                      IUPAC.protein)
        >>> my_mseq
        MutableSeq('MKQHKAMIVALIVICITAVVAAL', IUPACProtein())
        >>> my_mseq.toseq()
        Seq('MKQHKAMIVALIVICITAVVAAL', IUPACProtein())
        Note that the alphabet is preserved.
        """
        return Seq("".join(self.data), self.alphabet)
# The transcribe, backward_transcribe, and translate functions are
# user-friendly versions of the corresponding functions in Bio.Transcribe
# and Bio.Translate. The functions work both on Seq objects, and on strings.
def transcribe(dna):
    """Transcribes a DNA sequence into RNA.
    If given a string, returns a new string object.
    Given a Seq or MutableSeq, returns a new Seq object with an RNA alphabet.
    Trying to transcribe a protein or RNA sequence raises an exception.
    e.g.
    >>> transcribe("ACTGN")
    'ACUGN'
    """
    if isinstance(dna, Seq):
        return dna.transcribe()
    if isinstance(dna, MutableSeq):
        #Go via an immutable Seq so the alphabet is handled for us.
        return dna.toseq().transcribe()
    #Plain string: simple character replacement, both cases.
    return dna.replace('T','U').replace('t','u')
def back_transcribe(rna):
    """Back-transcribes an RNA sequence into DNA.
    If given a string, returns a new string object.
    Given a Seq or MutableSeq, returns a new Seq object with a DNA alphabet.
    Trying to back-transcribe a protein or DNA sequence raises an exception.
    e.g.
    >>> back_transcribe("ACUGN")
    'ACTGN'
    """
    #Docstring fix: the result has a DNA alphabet (the old text wrongly
    #said RNA, copied from the transcribe function above).
    if isinstance(rna, Seq):
        return rna.back_transcribe()
    elif isinstance(rna, MutableSeq):
        #Go via an immutable Seq so the alphabet is handled for us.
        return rna.toseq().back_transcribe()
    else:
        #Plain string: simple character replacement, both cases.
        return rna.replace('U','T').replace('u','t')
def _translate_str(sequence, table, stop_symbol="*", to_stop=False,
                   cds=False, pos_stop="X"):
    """Helper function to translate a nucleotide string (PRIVATE).
    Arguments:
    - sequence - a string
    - table - a CodonTable object (NOT a table name or id number)
    - stop_symbol - a single character string, what to use for terminators.
    - to_stop - boolean, should translation terminate at the first
                in frame stop codon?  If there is no in-frame stop codon
                then translation continues to the end.
    - pos_stop - a single character string for a possible stop codon
                 (e.g. TAN or NNN)
    - cds - Boolean, indicates this is a complete CDS.  If True, this
            checks the sequence starts with a valid alternative start
            codon (which will be translated as methionine, M), that the
            sequence length is a multiple of three, and that there is a
            single in frame stop codon at the end (this will be excluded
            from the protein sequence, regardless of the to_stop option).
            If these tests fail, an exception is raised.
    Returns a string.
    e.g.
    >>> from Bio.Data import CodonTable
    >>> table = CodonTable.ambiguous_dna_by_id[1]
    >>> _translate_str("AAA", table)
    'K'
    >>> _translate_str("TAR", table)
    '*'
    >>> _translate_str("TAN", table)
    'X'
    >>> _translate_str("TAN", table, pos_stop="@")
    '@'
    >>> _translate_str("TA?", table)
    Traceback (most recent call last):
       ...
    TranslationError: Codon 'TA?' is invalid
    >>> _translate_str("ATGCCCTAG", table, cds=True)
    'MP'
    >>> _translate_str("AAACCCTAG", table, cds=True)
    Traceback (most recent call last):
       ...
    TranslationError: First codon 'AAA' is not a start codon
    >>> _translate_str("ATGCCCTAGCCCTAG", table, cds=True)
    Traceback (most recent call last):
       ...
    TranslationError: Extra in frame stop codon found.
    """
    #Work in upper case throughout; codon tables use upper case codons.
    sequence = sequence.upper()
    amino_acids = []
    forward_table = table.forward_table
    stop_codons = table.stop_codons
    if table.nucleotide_alphabet.letters is not None:
        valid_letters = set(table.nucleotide_alphabet.letters.upper())
    else:
        #Assume the worst case, ambiguous DNA or RNA:
        valid_letters = set(IUPAC.ambiguous_dna.letters.upper() + \
                            IUPAC.ambiguous_rna.letters.upper())
    if cds:
        #Validate the CDS structure: start codon, length, final stop codon.
        if str(sequence[:3]).upper() not in table.start_codons:
            raise CodonTable.TranslationError(\
                "First codon '%s' is not a start codon" % sequence[:3])
        if len(sequence) % 3 != 0:
            raise CodonTable.TranslationError(\
                "Sequence length %i is not a multiple of three" % len(sequence))
        if str(sequence[-3:]).upper() not in stop_codons:
            raise CodonTable.TranslationError(\
                "Final codon '%s' is not a stop codon" % sequence[-3:])
        #Don't translate the stop symbol, and manually translate the M
        #(alternative start codons are translated as methionine).
        sequence = sequence[3:-3]
        amino_acids = ["M"]
    #Translate codon by codon; any trailing partial codon is ignored.
    #(xrange is Python 2 only.)
    n = len(sequence)
    for i in xrange(0,n-n%3,3):
        codon = sequence[i:i+3]
        try:
            amino_acids.append(forward_table[codon])
        except (KeyError, CodonTable.TranslationError):
            #Todo? Treat "---" as a special case (gapped translation)
            if codon in table.stop_codons:
                if cds:
                    raise CodonTable.TranslationError(\
                        "Extra in frame stop codon found.")
                if to_stop : break
                amino_acids.append(stop_symbol)
            elif valid_letters.issuperset(set(codon)):
                #Possible stop codon (e.g. NNN or TAN)
                amino_acids.append(pos_stop)
            else:
                raise CodonTable.TranslationError(\
                    "Codon '%s' is invalid" % codon)
    return "".join(amino_acids)
def translate(sequence, table="Standard", stop_symbol="*", to_stop=False,
              cds=False):
    """Translate a nucleotide sequence into amino acids.
    If given a string, returns a new string object. Given a Seq or
    MutableSeq, returns a Seq object with a protein alphabet.
    Arguments:
    - table - Which codon table to use?  This can be either a name (string),
              an NCBI identifier (integer), or a CodonTable object (useful
              for non-standard genetic codes).  Defaults to the "Standard"
              table.
    - stop_symbol - Single character string, what to use for any
                    terminators, defaults to the asterisk, "*".
    - to_stop - Boolean, defaults to False meaning do a full
                translation continuing on past any stop codons
                (translated as the specified stop_symbol).  If
                True, translation is terminated at the first in
                frame stop codon (and the stop_symbol is not
                appended to the returned protein sequence).
    - cds - Boolean, indicates this is a complete CDS.  If True, this
            checks the sequence starts with a valid alternative start
            codon (which will be translated as methionine, M), that the
            sequence length is a multiple of three, and that there is a
            single in frame stop codon at the end (this will be excluded
            from the protein sequence, regardless of the to_stop option).
            If these tests fail, an exception is raised.
    A simple string example using the default (standard) genetic code:
    >>> coding_dna = "GTGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG"
    >>> translate(coding_dna)
    'VAIVMGR*KGAR*'
    >>> translate(coding_dna, stop_symbol="@")
    'VAIVMGR@KGAR@'
    >>> translate(coding_dna, to_stop=True)
    'VAIVMGR'
    Now using NCBI table 2, where TGA is not a stop codon:
    >>> translate(coding_dna, table=2)
    'VAIVMGRWKGAR*'
    >>> translate(coding_dna, table=2, to_stop=True)
    'VAIVMGRWKGAR'
    In fact this example uses an alternative start codon valid under NCBI table 2,
    GTG, which means this example is a complete valid CDS which when translated
    should really start with methionine (not valine):
    >>> translate(coding_dna, table=2, cds=True)
    'MAIVMGRWKGAR'
    Note that if the sequence has no in-frame stop codon, then the to_stop
    argument has no effect:
    >>> coding_dna2 = "GTGGCCATTGTAATGGGCCGC"
    >>> translate(coding_dna2)
    'VAIVMGR'
    >>> translate(coding_dna2, to_stop=True)
    'VAIVMGR'
    NOTE - Ambiguous codons like "TAN" or "NNN" could be an amino acid
    or a stop codon.  These are translated as "X".  Any invalid codon
    (e.g. "TA?" or "T-A") will throw a TranslationError.
    NOTE - Does NOT support gapped sequences.
    It will however translate either DNA or RNA.
    """
    if isinstance(sequence, Seq):
        return sequence.translate(table, stop_symbol, to_stop, cds)
    elif isinstance(sequence, MutableSeq):
        #Return a Seq object
        return sequence.toseq().translate(table, stop_symbol, to_stop, cds)
    else:
        #Assume it's a string, return a string
        #Resolve the table argument: an NCBI id (int or numeric string),
        #a table name (string, raising ValueError from int()), or an
        #actual CodonTable object (raising TypeError from int()).
        try:
            codon_table = CodonTable.ambiguous_generic_by_id[int(table)]
        except ValueError:
            codon_table = CodonTable.ambiguous_generic_by_name[table]
        except (AttributeError, TypeError):
            if isinstance(table, CodonTable.CodonTable):
                codon_table = table
            else:
                raise ValueError('Bad table argument')
        return _translate_str(sequence, codon_table, stop_symbol, to_stop, cds)
def reverse_complement(sequence):
    """Returns the reverse complement sequence of a nucleotide string.
    If given a string, returns a new string object.
    Given a Seq or a MutableSeq, returns a new Seq object with the same alphabet.
    Supports unambiguous and ambiguous nucleotide sequences.
    e.g.
    >>> reverse_complement("ACTG-NH")
    'DN-CAGT'
    """
    if isinstance(sequence, Seq):
        #Return a Seq
        return sequence.reverse_complement()
    if isinstance(sequence, MutableSeq):
        #Return a Seq
        #Don't use the MutableSeq reverse_complement method as it is 'in place'.
        return sequence.toseq().reverse_complement()
    #Assume it's a plain string.  For speed, complement it directly with a
    #translation table and reverse the result by slicing, rather than going
    #via a temporary Seq object (over five times slower on short sequences).
    has_u = 'U' in sequence or 'u' in sequence
    has_t = 'T' in sequence or 't' in sequence
    if has_u and has_t:
        raise ValueError("Mixed RNA/DNA found")
    if has_u:
        ttable = _rna_complement_table
    else:
        ttable = _dna_complement_table
    return sequence.translate(ttable)[::-1]
def _test():
    """Run the Bio.Seq module's doctests (PRIVATE)."""
    if sys.version_info[0:2] == (3,1):
        #doctest on Python 3.1 mishandles this module, see the bug report.
        print("Not running Bio.Seq doctest on Python 3.1")
        print("See http://bugs.python.org/issue7490")
    else:
        #Parenthesized single-argument print works on Python 2 and 3 alike
        #(the old print statements were a syntax error on Python 3).
        print("Running doctests...")
        import doctest
        doctest.testmod(optionflags=doctest.IGNORE_EXCEPTION_DETAIL)
        print("Done")
#When run directly as a script, execute the module's doctest suite.
if __name__ == "__main__":
    _test()
|
BlogomaticProject/Blogomatic
|
opt/blog-o-matic/usr/lib/python/Bio/Seq.py
|
Python
|
gpl-2.0
| 84,964
|
[
"Biopython"
] |
9f18fdd3ac6e3ccd8f9d47963851fb2f31310b9ca95f029943de47670488b00f
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, absolute_import
"""
This module provides conversion between the Atomic Simulation Environment
Atoms object and pymatgen Structure objects.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 8, 2012"
from pymatgen.core.structure import Structure
try:
from ase import Atoms
ase_loaded = True
except ImportError:
ase_loaded = False
class AseAtomsAdaptor(object):
    """
    Adaptor serves as a bridge between ASE Atoms and pymatgen structure.
    """

    @staticmethod
    def get_atoms(structure):
        """
        Returns ASE Atoms object from pymatgen structure.
        Args:
            structure: pymatgen.core.structure.Structure
        Returns:
            ASE Atoms object
        """
        if not structure.is_ordered:
            raise ValueError("ASE Atoms only supports ordered structures")
        symbols = [str(site.specie.symbol) for site in structure]
        positions = [site.coords for site in structure]
        cell = structure.lattice.matrix
        return Atoms(symbols=symbols, positions=positions, pbc=True, cell=cell)

    @staticmethod
    def get_structure(atoms, cls=None):
        """
        Returns pymatgen structure from ASE Atoms.
        Args:
            atoms: ASE Atoms object
            cls: The Structure class to instantiate (defaults to pymatgen
                Structure)
        Returns:
            Equivalent pymatgen.core.structure.Structure
        """
        symbols = atoms.get_chemical_symbols()
        positions = atoms.get_positions()
        lattice = atoms.get_cell()
        #Resolved unresolved git merge conflict: keep the newer branch that
        #accepts an optional Structure subclass via ``cls`` (backward
        #compatible - omitting cls gives the old behavior).
        cls = Structure if cls is None else cls
        return cls(lattice, symbols, positions,
                   coords_are_cartesian=True)
|
Bismarrck/pymatgen
|
pymatgen/io/ase.py
|
Python
|
mit
| 2,331
|
[
"ASE",
"pymatgen"
] |
d3af72441eb567cac4ac8a947e36aa27190420c60f194de59d92e9df4f625395
|
import numpy
from chainer import cuda
from chainer import initializer
# Original code forked from MIT licensed keras project
# https://github.com/fchollet/keras/blob/master/keras/initializations.py
class Normal(initializer.Initializer):
    """Initializes array with a normal distribution.
    Every element of the array is drawn independently from a Gaussian
    distribution with mean 0 and standard deviation ``scale``.
    Args:
        scale(float): Standard deviation of Gaussian distribution.
    """
    def __init__(self, scale=0.05):
        self.scale = scale
    def __call__(self, array):
        #Use numpy or cupy depending on where the array lives.
        xp = cuda.get_array_module(array)
        sample = xp.random.normal(loc=0.0, scale=self.scale, size=array.shape)
        array[...] = sample
class GlorotNormal(initializer.Initializer):
    """Initializes array with scaled Gaussian distribution.
    Each element of the array is drawn independently from a Gaussian
    distribution with mean 0 and standard deviation
    :math:`scale \\times \\sqrt{\\frac{2}{fan_{in} + fan_{out}}}`,
    where :math:`fan_{in}` and :math:`fan_{out}` are the number of
    input and output units, respectively.
    Reference: Glorot & Bengio, AISTATS 2010
    Args:
        scale (float): A constant that determines the scale
            of the standard deviation.
    """
    def __init__(self, scale=1.0):
        self.scale = scale
    def __call__(self, array):
        #Glorot/Xavier scaling uses the sum of both fans.
        fans = initializer.get_fans(array.shape)
        stddev = self.scale * numpy.sqrt(2. / sum(fans))
        return Normal(stddev)(array)
class HeNormal(initializer.Initializer):
    """Initializes array with scaled Gaussian distribution.
    Each element of the array is initialized by the value drawn
    independently from Gaussian distribution whose mean is 0,
    and standard deviation is
    :math:`scale \\times \\sqrt{\\frac{2}{fan_{in}}}`,
    where :math:`fan_{in}` is the number of input units.
    Reference: He et al., http://arxiv.org/abs/1502.01852
    Args:
        scale (float): A constant that determines the scale
            of the standard deviation.
    """
    def __init__(self, scale=1.0):
        self.scale = scale
    def __call__(self, array):
        #He initialization only uses fan_in; drop the unused fan_out local.
        fan_in, _ = initializer.get_fans(array.shape)
        s = self.scale * numpy.sqrt(2. / fan_in)
        return Normal(s)(array)
|
benob/chainer
|
chainer/initializers/normal.py
|
Python
|
mit
| 2,423
|
[
"Gaussian"
] |
ab94ec3b159761a6f9776b788730d77ff276e826e5dba5db9a6c6080c4ed7403
|
#!/usr/bin/env python
"""
SYNOPSIS
python dataset.py [-h,--help] [-v,--verbose] [-d,--directory DIRECTORY]
DESCRIPTION
Assert the existence of the GeoLife dataset within the specified DIRECTORY.
If the dataset is not present, this script will first see if a ZIP archive
is within that directory and will unpack it. If no ZIP archive exists, it
will download the GeoLife dataset, unpack it, and confirm the unpacking
resulted in PLX files now existing somewhere under the specified DIRECTORY.
This script can be used as a stand-alone script or imported into another
Python script as a module.
ARGUMENTS
-h, --help show this help message and exit
-v, --verbose verbose output
-d DIRECTORY, --directory DIRECTORY
directory where GeoLife dataset is stored
AUTHOR
Doug McGeehan <doug.mcgeehan@mst.edu>
LICENSE
This script is placed under the MIT License. Please refer to LICENSE file
in the parent directory for more details.
The GeoLife GPS Trajectory dataset is placed under the Microsoft Research
License Agreement that is described in its user guide that is included in
its ZIP archive.
"""
import argparse
import requests
from datetime import datetime
import os
import sys
import glob
import zipfile
import logging
logger = logging.getLogger("geolife.dataset")
# Direct link to the GeoLife ZIP archive.
# Valid as of 11 July, 2016.
GEOLIFE_ZIP_ARCHIVE_URL="https://download.microsoft.com/download/F/4/8/F4894AA5-FDBC-481E-9285-D5F8C4C4F039/Geolife%20Trajectories%201.3.zip"
# If the above URL is no longer valid, navigate to this page and manually
# download the dataset.
GEOLIFE_DOWNLOAD_PAGE="https://www.microsoft.com/en-us/download/details.aspx?id=52367"
def verify(directory="."):
    """
    Verify the GeoLife dataset exists in this directory, and if not, make it
    so. Return the dataset's root directory.
    """
    # Fast path: PLT files are already unpacked somewhere below `directory`.
    try:
        return find_geolife_root(directory)
    except PLXNotFound:
        logger.warning("GeoLife PLX files not found in '{0}'. Checking for ZIP"
            " archive.".format(directory)
        )

    # No unpacked files: locate a local ZIP archive, downloading one if needed.
    archives = glob.glob(os.path.join(directory, "*.zip"))
    if archives:
        geolife_zip = archives[0]
        logger.info("GeoLife ZIP archive found at '{0}'".format(
            geolife_zip
        ))
    else:
        logger.warning("No GeoLife ZIP archive. Proceeding with download.")
        geolife_zip = download(url=GEOLIFE_ZIP_ARCHIVE_URL)

    unpack(archive=geolife_zip, to=directory)

    # The unpacked archive must now contain PLT files; otherwise bail out.
    try:
        return find_geolife_root(directory)
    except Exception:
        logger.error(
            "Unpacking the ZIP at '{zip}' did not result in PLX files."
            " Perhaps '{zip}' is not a ZIP archive of the GeoLife files.\n"
            "Please visit '{geolife_page}' and manually download the"
            " GeoLife dataset. Make sure to place the ZIP archive in the"
            " directory '{abs_path}' and try executing this script"
            " again.".format(
                zip=geolife_zip,
                geolife_page=GEOLIFE_DOWNLOAD_PAGE,
                abs_path=os.path.abspath(directory)
            ))
        sys.exit(1)
def find_geolife_root(directory_to_search, just_downloaded=False):
    """
    Walk down tree until a PLT file is encountered. If none is found, raise
    an exception. Returns the grandparent directory of the (last) directory
    found to contain a .plt file.
    """
    plt_directory = None
    for current_dir, _subdirs, filenames in os.walk(directory_to_search):
        if any(name.lower().endswith(".plt") for name in filenames):
            plt_directory = current_dir
    if plt_directory is None:
        raise PLXNotFound
    # PLT files live two levels below the dataset root, hence the double dirname.
    geolife_root = os.path.abspath(
        os.path.dirname(os.path.dirname(plt_directory))
    )
    logger.info("GeoLife dataset found within '{0}'".format(geolife_root))
    return geolife_root
def download(url):
    """
    Download the GeoLife dataset from Microsoft Research.

    Saves the archive to ./geolife.zip and returns that path. Exits the
    process with status 1 if the download fails (e.g. stale URL).
    """
    logger.info("Downloading from '{0}'. Please be patient.".format(url))
    logger.info(
        "After this run, downloading shouldn't have to be performed again"
    )
    save_to = os.path.join(".", "geolife.zip")
    downloader = requests.get(url, stream=True)
    try:
        progress_downloader(downloader, save_to=save_to)
    except ImportError:
        # You don't have progressbar2 installed, so you won't get a pretty
        # progress bar to tell you how far along you are in the download.
        # You can install it like so:
        #   $ sudo pip install progressbar2
        content_length = downloader.headers.get('content-length')
        if content_length is not None:
            # Guard: the original crashed with TypeError when the server
            # sent no Content-Length header; only report size when known.
            size_in_MB = int(content_length) / 1e6
            logger.warning(
                "File size to download: {0:.2f} MB. This may take some time."
                " Go have a coffee.".format(
                    size_in_MB
                ))
        with open(save_to, "wb") as f:
            for chunk in downloader.iter_content(chunk_size=4098):
                if chunk:
                    f.write(chunk)
                    f.flush()
    except Exception:
        logger.error(
            "It appears the download url '{url}' is no longer valid. Please"
            " visit '{geolife_page}' and manually download the GeoLife dataset"
            " from there. Make sure to place the ZIP archive in the directory"
            " '{abs_path}' and try executing this script again.".format(
                url=url, geolife_page=GEOLIFE_DOWNLOAD_PAGE,
                abs_path=os.path.abspath(save_to)
            ))
        sys.exit(1)
    logger.info("Download complete!")
    return save_to
def progress_downloader(downloader, save_to):
    """
    Stream the response body to *save_to* while rendering a progress bar.
    Raises ImportError when progressbar2 is not installed (the caller falls
    back to a silent download).
    e.g.
    71% |################# | Elapsed Time: 0:00:45 | ETA: 0:00:15 683.9 KiB/s
    """
    # Imported lazily so the missing-dependency case surfaces as ImportError.
    from progressbar import (ProgressBar, Percentage, Bar, Timer, ETA,
                             AdaptiveTransferSpeed)

    total_size = int(downloader.headers.get('content-length'))
    widgets = [
        Percentage(),
        ' ', Bar(),
        ' ', Timer(),
        ' | ', ETA(),
        ' ', AdaptiveTransferSpeed(),
    ]
    bar = ProgressBar(widgets=widgets, max_value=total_size)
    bar.start()

    bytes_written = 0
    with open(save_to, "wb") as output:
        for chunk in downloader.iter_content(chunk_size=4098):
            if not chunk:
                continue
            output.write(chunk)
            output.flush()
            bytes_written += len(chunk)
            bar.update(bytes_written)
    bar.finish()
def unpack(archive, to):
    """
    Unpack the zip archive into the directory *to*.
    """
    logger.info(
        "Unpacking ZIP archive '{0}' to '{1}'. Please be patient.".format(
            archive, to
        ))
    logger.info(
        "After this run, unpacking shouldn't have to be performed again"
    )
    # Context manager guarantees the archive handle is closed even if
    # extractall raises (the original leaked the handle on error).
    with zipfile.ZipFile(archive, 'r') as unzipper:
        unzipper.extractall(to)
    logger.info("Unpacking complete!")
class PLXNotFound(IOError):
    """Raised when no GeoLife .plt files can be found under a directory."""
    # Inheriting IOError's constructor directly is equivalent to the
    # original's forwarding __init__.
def setup_logger(args):
    """Attach a DEBUG file handler and a console handler to the module logger.

    Console verbosity follows args.verbose; the file always logs DEBUG.
    """
    logger.setLevel(logging.DEBUG)

    # File handler: everything, with timestamps.
    file_handler = logging.FileHandler('geolife.dataset.log')
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    ))

    # Console handler: terse, level depends on --verbose.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.DEBUG if args.verbose else logging.INFO)
    console_handler.setFormatter(logging.Formatter(
        '%(levelname)s - %(message)s'
    ))

    logger.addHandler(file_handler)
    logger.addHandler(console_handler)
if __name__ == '__main__':
    try:
        start_time = datetime.now()
        parser = argparse.ArgumentParser(
            description="Verify, unpack, or download the GeoLife GPS trajectory"
                        " dataset for further processing."
        )
        parser.add_argument('-v', '--verbose', action='store_true',
                            default=False, help='verbose output')
        parser.add_argument('-d', '--directory', dest='directory',
                            default=".",
                            help="directory where GeoLife dataset is stored")
        args = parser.parse_args()
        setup_logger(args)
        logger.debug(start_time)

        verify(args.directory)

        finish_time = datetime.now()
        logger.debug(finish_time)
        logger.debug('Execution time: {time}'.format(
            time=(finish_time - start_time)
        ))
        sys.exit(0)
    # `except X as e` works on Python 2.6+ and 3; the original `except X, e`
    # is Python-2-only syntax. A bare `raise` preserves the traceback,
    # unlike re-raising the caught instance.
    except KeyboardInterrupt:  # Ctrl-C
        raise
    except SystemExit:  # sys.exit()
        raise
    except Exception:
        logger.exception("Something happened and I don't know what to do D:")
        sys.exit(1)
|
DrDougPhD/geolife2one
|
data/dataset.py
|
Python
|
mit
| 9,564
|
[
"VisIt"
] |
3c32737a0f65bd40310b67bf27eff8fec80598f922f12fd56db643c8afcf7a1d
|
#!/usr/bin/python
####
# 02/2006 Will Holcomb <wholcomb@gmail.com>
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# 7/26/07 Slightly modified by Brian Schneider
# in order to support unicode files ( multipart_encode function )
"""
Usage:
Enables the use of multipart/form-data for posting forms
Inspirations:
Upload files in python:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
urllib2_file:
Fabien Seisen: <fabien@seisen.org>
Example:
import MultipartPostHandler, urllib2, cookielib
cookies = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies),
MultipartPostHandler.MultipartPostHandler)
params = { "username" : "bob", "password" : "riviera",
"file" : open("filename", "rb") }
opener.open("http://wwww.bobsite.com/upload/", params)
Further Example:
The main function of this file is a sample which downloads a page and
then uploads it to the W3C validator.
"""
import os
import sys
import six
import tempfile
import mimetypes
from os import SEEK_END
if six.PY3:
import io
import urllib.parse
import urllib.request
from email.generator import _make_boundary as choose_boundary
else:
import cStringIO as io
from six.moves import urllib
from mimetools import choose_boundary
# Controls how sequences are uncoded. If true, elements
# may be given multiple values byassigning a sequence.
doseq = 1
class PostHandler(urllib.request.BaseHandler):
    """urllib handler that form-encodes dict/sequence POST bodies."""

    # Run just before the default HTTP handler.
    handler_order = urllib.request.HTTPHandler.handler_order - 10

    def http_request(self, request):
        # Request objects expose the body as get_data() (py2) or .data (py3).
        try:
            body = request.get_data()
        except AttributeError:
            body = request.data
        if body is not None and type(body) != str:
            # Non-string payloads get urlencoded; `doseq` lets a value be a
            # sequence, producing repeated keys.
            body = urllib.parse.urlencode(body, doseq).encode("utf-8")
            try:
                request.add_data(body)
            except AttributeError:
                request.data = body
        return request

    https_request = http_request
class MultipartPostHandler(urllib.request.BaseHandler):
    """urllib handler that encodes POST bodies containing file objects as
    multipart/form-data; plain values are urlencoded as usual.

    A dict value with a ``read`` attribute is treated as a file upload;
    everything else is sent as an ordinary form field.
    """

    # needs to run first
    handler_order = urllib.request.HTTPHandler.handler_order - 10

    def http_request(self, request):
        # Request objects expose the body as get_data() (py2) or .data (py3).
        try:
            data = request.get_data()
        except AttributeError:
            data = request.data
        if data is not None and type(data) != str:
            v_files = []
            v_vars = []
            try:
                # Split the mapping into uploads (file-likes) and form fields.
                for(key, value) in list(data.items()):
                    if hasattr(value, 'read'):
                        v_files.append((key, value))
                    else:
                        v_vars.append((key, value))
            except TypeError:
                # Re-raise the original error (the original raised a fresh,
                # message-less TypeError and lost the context).
                raise
            if len(v_files) == 0:
                # No uploads: ordinary urlencoded form body.
                data = urllib.parse.urlencode(v_vars, doseq)
            else:
                boundary, data = self.multipart_encode(v_vars, v_files)
                contenttype = 'multipart/form-data; boundary=%s' % boundary
                if (
                    request.has_header('Content-Type') and
                    request.get_header('Content-Type').find(
                        'multipart/form-data') != 0
                ):
                    six.print_(
                        "Replacing %s with %s" % (
                            request.get_header('content-type'),
                            'multipart/form-data'
                        )
                    )
                request.add_unredirected_header('Content-Type', contenttype)
            try:
                request.add_data(data)
            except AttributeError:
                request.data = data
        return request

    def multipart_encode(self, v_vars, files, boundary=None, buf=None):
        """Return (boundary, body) encoding fields and files as multipart."""
        if six.PY3:
            if boundary is None:
                boundary = choose_boundary()
            if buf is None:
                buf = io.BytesIO()
            for(key, value) in v_vars:
                buf.write(b'--' + boundary.encode("utf-8") + b'\r\n')
                buf.write(
                    b'Content-Disposition: form-data; name="' +
                    key.encode("utf-8") +
                    b'"'
                )
                buf.write(b'\r\n\r\n' + value.encode("utf-8") + b'\r\n')
            for(key, fd) in files:
                try:
                    filename = fd.name.split('/')[-1]
                except AttributeError:
                    # Spoof a file name if the object doesn't have one.
                    # This is designed to catch when the user submits
                    # a StringIO object
                    filename = 'temp.pdf'
                # BUG FIX: the fallback must be a str here -- the original
                # used b'application/octet-stream', and bytes have no
                # .encode(), so unknown mime types crashed below.
                contenttype = mimetypes.guess_type(filename)[0] or \
                    'application/octet-stream'
                buf.write(b'--' + boundary.encode("utf-8") + b'\r\n')
                buf.write(
                    b'Content-Disposition: form-data; ' +
                    b'name="' + key.encode("utf-8") + b'"; ' +
                    b'filename="' + filename.encode("utf-8") + b'"\r\n'
                )
                buf.write(
                    b'Content-Type: ' +
                    contenttype.encode("utf-8") +
                    b'\r\n'
                )
                # Rewind the upload so the whole file is sent.
                fd.seek(0)
                buf.write(
                    b'\r\n' + fd.read() + b'\r\n'
                )
            buf.write(b'--')
            buf.write(boundary.encode("utf-8"))
            buf.write(b'--\r\n\r\n')
            buf = buf.getvalue()
            return boundary, buf
        else:
            if boundary is None:
                boundary = choose_boundary()
            if buf is None:
                buf = io.StringIO()
            for(key, value) in v_vars:
                buf.write('--%s\r\n' % boundary)
                buf.write('Content-Disposition: form-data; name="%s"' % key)
                buf.write('\r\n\r\n' + value + '\r\n')
            for(key, fd) in files:
                try:
                    filename = fd.name.split('/')[-1]
                except AttributeError:
                    # Spoof a file name if the object doesn't have one.
                    # This is designed to catch when the user submits
                    # a StringIO object
                    filename = 'temp.pdf'
                contenttype = mimetypes.guess_type(filename)[0] or \
                    'application/octet-stream'
                buf.write('--%s\r\n' % boundary)
                buf.write('Content-Disposition: form-data; \
name="%s"; filename="%s"\r\n' % (key, filename))
                buf.write('Content-Type: %s\r\n' % contenttype)
                # buffer += 'Content-Length: %s\r\n' % file_size
                fd.seek(0)
                buf.write('\r\n' + fd.read() + '\r\n')
            buf.write('--' + boundary + '--\r\n\r\n')
            buf = buf.getvalue()
            return boundary, buf

    https_request = http_request
def getsize(o_file):
    """
    Return the size of *o_file* in bytes by seeking to the end,
    restoring the original position afterwards.
    """
    startpos = o_file.tell()
    # Seek straight to the end (the original's extra seek(0) was redundant).
    o_file.seek(0, SEEK_END)
    size = o_file.tell()
    o_file.seek(startpos)
    return size
def main():
    """Fetch each URL given on the command line (default: google.com)
    through the multipart-aware opener, staging the body in a temp file."""
    opener = urllib.request.build_opener(MultipartPostHandler)

    def validateFile(url):
        # mkstemp returns an open OS-level fd; the caller must close it.
        fd, path = tempfile.mkstemp(suffix=".html")
        try:
            os.write(fd, opener.open(url).read())
        finally:
            # The original leaked the descriptor and left the file behind
            # on error; always close and unlink.
            os.close(fd)
            os.remove(path)

    if len(sys.argv[1:]) > 0:
        for arg in sys.argv[1:]:
            validateFile(arg)
    else:
        validateFile("http://www.google.com")
|
datadesk/python-documentcloud
|
documentcloud/MultipartPostHandler.py
|
Python
|
mit
| 8,051
|
[
"Brian"
] |
d1932bf7956f66d28e0bdf6eca444ad0dc9fc9b319f7ab68e42cf990c48012c7
|
"""
CIS is an open source command-line tool for easy collocation, visualization, analysis, and
comparison of diverse gridded and ungridded datasets used in the atmospheric sciences.
.. note ::
The CIS documentation has detailed usage information, including a :doc:`user guide <../index>`
for new users.
As a commmand line tool, CIS has not been designed with a python API in mind. There are however some utility functions
that may provide a useful start for those who wish to use CIS as a python library. The functions in this module provide
the main way to load your data. They can be easily import using, for example: `from cis import read_data`.
The :func:`read_data` function is a simple way to read a single gridded or ungridded data object (e.g. a NetCDF
variable) from one or more files. CIS will determine the best way to interperet the datafile by comparing the file
signature with the built-in data reading plugins and any user defined plugins. Specifying a particular ``product``
allows the user to override this automatic detection.
The :func:`read_data_list` function is very similar to :func:`read_data` except that it allows the user to specify
more than one variable name. This function returns a list of data objects, either all of which will be gridded, or all
ungridded, but not a mix. For ungridded data lists it is assumed that all objects share the same coordinates.
"""
__author__ = "David Michel, Daniel Wallis, Duncan Watson-Parris, Richard Wilkinson, Ian Bush, Matt Kendall, John Holt"
__version__ = "1.4.1"
__status__ = "Dev"
__website__ = "http://www.cistools.net/"
__all__ = ['read_data', 'read_data_list']
def read_data(filenames, variable, product=None):
    """
    Read a specific variable from a list of files
    Files can be either gridded or ungridded but not a mix of both.
    First tries to read data as gridded, if that fails, tries as ungridded.

    :param filenames: The filenames of the files to read. This can be either a single filename as a string, a comma
     separated list, or a :class:`list` of string filenames. Filenames can include directories which will be expanded to
     include all files in that directory, or wildcards such as ``*`` or ``?``.
    :type filenames: string or list
    :param str variable: The variable to read from the files
    :param str product: The name of the data reading plugin to use to read the data (e.g. ``Cloud_CCI``).
    :return: The specified data as either a :class:`GriddedData` or :class:`UngriddedData` object.
    :raises ValueError: if more than one variable matches ``variable``.
    """
    matches = read_data_list(filenames, variable, product)
    if len(matches) > 1:
        raise ValueError("More than one {} variable found".format(variable))
    return matches[0]
def read_data_list(filenames, variables, product=None, aliases=None):
    """
    Read multiple data objects from a list of files. Files can be either gridded or ungridded but not a mix of both.

    :param filenames: The filenames of the files to read. This can be either a single filename as a string, a comma
     separated list, or a :class:`list` of string filenames. Filenames can include directories which will be expanded to
     include all files in that directory, or wildcards such as ``*`` or ``?``.
    :type filenames: string or list
    :param variables: One or more variables to read from the files
    :type variables: string or list
    :param str product: The name of the data reading plugin to use to read the data (e.g. ``Cloud_CCI``).
    :param aliases: List of aliases to put on each variable's data object as an alternative means of identifying them.
    :type aliases: string or list
    :return: A list of the data read out (either a :class:`GriddedDataList` or :class:`UngriddedDataList` depending on
     the type of data contained in the files)
    :raises IOError: if the file list cannot be expanded or matches no files.
    """
    from cis.data_io.data_reader import DataReader, expand_filelist
    try:
        file_set = expand_filelist(filenames)
    except ValueError as e:
        raise IOError(e)
    if len(file_set) == 0:
        raise IOError("No files found which match: {}".format(filenames))
    # Reuse the expanded set rather than expanding the pattern a second time
    # (the original recomputed expand_filelist here).
    return DataReader().read_data_list(file_set, variables, product, aliases)
|
zak-k/cis
|
cis/__init__.py
|
Python
|
gpl-3.0
| 4,185
|
[
"NetCDF"
] |
c3584127df5896f578d6e3f0eeb95125de1ca2f923d3f8f174575187173ff37e
|
""" BHMM: A toolkit for Bayesian hidden Markov model analysis of single-molecule trajectories.
This project provides tools for estimating the number of metastable states, rate
constants between the states, equilibrium populations, distributions
characterizing the states, and distributions of these quantities from
single-molecule data. This data could be FRET data, single-molecule pulling
data, or any data where one or more observables are recorded as a function of
time. A Hidden Markov Model (HMM) is used to interpret the observed dynamics,
and a distribution of models that fit the data is sampled using Bayesian
inference techniques and Markov chain Monte Carlo (MCMC), allowing for both the
characterization of uncertainties in the model and modeling of the expected
information gain by new experiments.
"""
from __future__ import print_function
import os
from os.path import relpath, join
import numpy
import versioneer
from Cython.Build import cythonize
from setuptools import setup, Extension, find_packages
DOCLINES = __doc__.split("\n")
########################
CLASSIFIERS = """\
Development Status :: 3 - Alpha
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)
Programming Language :: Python
Topic :: Scientific/Engineering :: Bio-Informatics
Topic :: Scientific/Engineering :: Chemistry
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
################################################################################
# USEFUL SUBROUTINES
################################################################################
def find_package_data(data_root, package_root):
    """Collect every file under *data_root*, as paths relative to *package_root*."""
    collected = []
    for directory, _subdirs, filenames in os.walk(data_root):
        collected.extend(
            relpath(join(directory, name), package_root) for name in filenames
        )
    return collected
################################################################################
# SETUP
################################################################################
# Cython/C extension modules: each pairs a .pyx wrapper with a hand-written
# C implementation and compiles against the NumPy headers.
# NOTE(review): the '/bhmm/...' include_dirs below are absolute paths; the
# kmeans entry uses a relative './bhmm/...' path -- the leading '/' looks
# like a typo, TODO confirm against the build.
extensions = [Extension('bhmm.hidden.impl_c.hidden',
                        sources = ['./bhmm/hidden/impl_c/hidden.pyx',
                                   './bhmm/hidden/impl_c/_hidden.c'],
                        include_dirs = ['/bhmm/hidden/impl_c/',numpy.get_include()]),
              Extension('bhmm.output_models.impl_c.discrete',
                        sources = ['./bhmm/output_models/impl_c/discrete.pyx',
                                   './bhmm/output_models/impl_c/_discrete.c'],
                        include_dirs = ['/bhmm/output_models/impl_c/',numpy.get_include()]),
              Extension('bhmm.output_models.impl_c.gaussian',
                        sources = ['./bhmm/output_models/impl_c/gaussian.pyx',
                                   './bhmm/output_models/impl_c/_gaussian.c'],
                        include_dirs = ['/bhmm/output_models/impl_c/',numpy.get_include()]),
              Extension('bhmm._external.clustering.kmeans_clustering',
                        sources=['./bhmm/_external/clustering/src/clustering.c',
                                 './bhmm/_external/clustering/src/kmeans.c'],
                        include_dirs=['./bhmm/_external/clustering/include',
                                      numpy.get_include()],
                        extra_compile_args=['-std=c99']),
              ]
# Package metadata and build configuration; version/cmdclass come from
# versioneer so releases track the VCS tag.
setup(
    name='bhmm',
    author='John Chodera and Frank Noe',
    author_email='john.chodera@choderalab.org',
    description=DOCLINES[0],
    long_description="\n".join(DOCLINES[2:]),
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    license='LGPL',
    url='https://github.com/bhmm/bhmm',
    platforms=['Linux', 'Mac OS-X', 'Unix', 'Windows'],
    classifiers=CLASSIFIERS.splitlines(),
    package_dir={'bhmm': 'bhmm'},
    packages=find_packages(),
    package_data={'bhmm': find_package_data('examples', 'bhmm') + find_package_data('bhmm/tests/data', 'bhmm')},  # NOTE: examples installs to bhmm.egg/examples/, NOT bhmm.egg/bhmm/examples/. You need to do utils.get_data_filename("../examples/*/setup/").
    zip_safe=False,
    install_requires=[
        'cython',
        'numpy',
        'scipy',
        'msmtools',
        'six',
    ],
    # Cythonize the .pyx sources declared above at build time.
    ext_modules=cythonize(extensions)
)
|
marscher/bhmm
|
setup.py
|
Python
|
lgpl-3.0
| 4,392
|
[
"Gaussian"
] |
b9df924bd3c0c6f2f7ef944014e6f4af1161f9f0e00e60d81e64806eb839221a
|
from urllib import quote_plus
from flask import jsonify, make_response, request, abort, render_template
from app import app
from app.urlshortener import URLShortener
from app.urlshortener.url import decodeURLPath, encodeURL, isValidScheme
from app.urlshortener.name import removeControlCharacters, isValidName
backend = URLShortener()
def getDefaultName():
    """Return the next auto-generated short name, seeded from the optional
    'default_name' form field."""
    seed = request.form.get('default_name', '')
    return backend.getNextName(seed)
def bad_request(reason, error_code=0, code=400):
    """Build a JSON error response with the given HTTP status code."""
    payload = jsonify({'error': reason, 'error_code': error_code})
    return make_response(payload, code)
@app.route('/<name>', methods = ['GET'])
def visit(name):
    """Resolve a short name, record the visit, and return the mapping."""
    target = backend.visit(name)
    if target is None:
        abort(404)
    return jsonify({
        'short-url': 'http://lyli.fi/%s' % name,
        'url': target
    })
@app.route('/', methods = ['POST'])
def new():
    """Create a new short URL from a JSON payload {url, [name]}.

    Returns 201 with the mapping on success, otherwise a JSON error with
    an application-level error_code.
    """
    if not request.json:
        return bad_request('Data must be sent in json format')

    url = request.json.get('url', '')
    try:
        url = encodeURL(url)
    except Exception:
        # Was a bare `except:` -- that also swallowed SystemExit and
        # KeyboardInterrupt; Exception is the widest we should catch here.
        return bad_request('Could not encode URL', 1)

    # Only generate a default name when the client did not supply one, so
    # the backend's name counter is not advanced needlessly on every request.
    name = request.json.get('name')
    if name is None:
        name = getDefaultName()
    name = decodeURLPath(name)
    name = removeControlCharacters(name)

    if url == '':
        return bad_request('No URL', 2)
    elif not isValidScheme(url):
        return bad_request('Illegal scheme', 3, 403)
    elif not isValidName(name):
        return bad_request('Illegal name', 4, 403)
    elif not backend.shorten(url, name):
        return bad_request('Name is already in use', 5, 403)

    return make_response(jsonify({
        'short-url': 'http://lyli.fi/%s' % quote_plus(name.encode('utf-8')),
        'url': url
    }), 201)
@app.route('/', methods = ['GET'])
def index():
    """Serve the landing page."""
    return render_template('index.html')
@app.errorhandler(404)
def notfound(error):
    """JSON body for 404 responses."""
    return make_response(jsonify({'error': 'Not Found'}), 404)
@app.errorhandler(500)
def eotfnund(error):
    # NOTE(review): the name looks like a scrambled "notfound"; kept
    # unchanged since the function name is this handler's identifier.
    """JSON body for 500 responses."""
    return make_response(jsonify({'error': 'Internal Server Error'}), 500)
|
ollpu/lyli-api
|
app/views.py
|
Python
|
mit
| 2,076
|
[
"VisIt"
] |
c557b1e7044d9c60ee5c7ec892bd8344023dd8b3044e1de04a97c2da9f145b37
|
from __future__ import annotations
import math
from scitbx import matrix
from scitbx.math import r3_rotation_axis_and_angle_from_matrix
def difference_rotation_matrix_axis_angle(crystal_a, crystal_b, target_angle=0):
    """Find the symmetry-equivalent setting of crystal_b closest in
    misorientation angle to target_angle relative to crystal_a.

    Returns (best_R_ab, best_axis, best_angle, best_cb_op): the rotation
    matrix taking the U of crystal_a onto that of crystal_b (after applying
    the change-of-basis op to crystal_b), the rotation axis, the signed
    angle in degrees, and the change-of-basis operator itself.
    """
    from cctbx import sgtbx

    # assert crystal_a.get_space_group() == crystal_b.get_space_group()
    space_group = crystal_b.get_space_group()
    best_R_ab = None
    best_cb_op = None
    best_axis = None
    best_angle = 1e8
    # iterate over space group ops to find smallest differences
    for i_op, op in enumerate(space_group.build_derived_laue_group().all_ops()):
        # Only proper rotations without a translational part relate
        # physically equivalent orientations.
        if op.r().determinant() < 0:
            continue
        elif not op.t().is_zero():
            continue
        cb_op = sgtbx.change_of_basis_op(op.inverse())
        crystal_b_sym = crystal_b.change_basis(cb_op)
        U_a = matrix.sqr(crystal_a.get_U())
        U_b = matrix.sqr(crystal_b_sym.get_U())
        assert U_a.is_r3_rotation_matrix()
        assert U_b.is_r3_rotation_matrix()
        # the rotation matrix to transform from U_a to U_b
        R_ab = U_b * U_a.transpose()
        axis_angle = r3_rotation_axis_and_angle_from_matrix(R_ab)
        axis = axis_angle.axis
        angle = axis_angle.angle() * 180.0 / math.pi
        # A rotation of +angle about axis equals -angle about -axis: try
        # both signs and keep whichever lands nearest target_angle.
        for sign in (+1, -1):
            if abs(sign * angle - target_angle) < abs(best_angle - target_angle):
                best_angle = sign * angle
                best_axis = tuple(sign * a for a in axis)
                best_R_ab = R_ab if sign > 0 else R_ab.inverse()
                best_cb_op = cb_op if sign > 0 else cb_op.inverse()
    return best_R_ab, best_axis, best_angle, best_cb_op
def rotation_matrix_differences(
    crystal_models, miller_indices=None, comparison="pairwise"
):
    """Format a report of misorientations between crystal models.

    :param crystal_models: sequence of crystal models to compare
    :param miller_indices: optional hkl tuples; for each, the angle between
        the corresponding reciprocal-lattice vectors of the two models is
        also reported
    :param comparison: "pairwise" compares every pair (i, j>i); "sequential"
        compares only consecutive models
    :return: the report as a single newline-joined string
    """
    assert comparison in ("pairwise", "sequential"), comparison
    output = []
    for i, cm_i in enumerate(crystal_models):
        for j in range(i + 1, len(crystal_models)):
            if comparison == "sequential" and j > i + 1:
                break
            R_ij, axis, angle, cb_op = difference_rotation_matrix_axis_angle(
                cm_i, crystal_models[j]
            )
            output.append(f"Change of basis op: {cb_op}")
            output.append(
                "Rotation matrix to transform crystal %i to crystal %i:"
                % (i + 1, j + 1)
            )
            output.append(R_ij.mathematica_form(format="%.3f", one_row_per_line=True))
            output.append(
                f"Rotation of {angle:.3f} degrees about axis ({axis[0]:.3f}, {axis[1]:.3f}, {axis[2]:.3f})"
            )
            if miller_indices is not None:
                # Hoisted out of the hkl loop: none of this depends on hkl
                # (the original re-applied change_basis and re-sliced the
                # A matrices for every reflection).
                cm_j = crystal_models[j].change_basis(cb_op)
                A_i = cm_i.get_A()
                A_j = cm_j.get_A()
                a_star_i = matrix.col(A_i[0:3])
                b_star_i = matrix.col(A_i[3:6])
                c_star_i = matrix.col(A_i[6:9])
                a_star_j = matrix.col(A_j[0:3])
                b_star_j = matrix.col(A_j[3:6])
                c_star_j = matrix.col(A_j[6:9])
                for hkl in miller_indices:
                    v_i = hkl[0] * a_star_i + hkl[1] * b_star_i + hkl[2] * c_star_i
                    v_j = hkl[0] * a_star_j + hkl[1] * b_star_j + hkl[2] * c_star_j
                    output.append(
                        f"({hkl[0]},{hkl[1]},{hkl[2]}): %.2f deg"
                        % v_i.angle(v_j, deg=True)
                    )
            output.append("")
    return "\n".join(output)
|
dials/dials
|
algorithms/indexing/compare_orientation_matrices.py
|
Python
|
bsd-3-clause
| 3,522
|
[
"CRYSTAL"
] |
a199a15e1da4f192b077a09892b03d8e06cccaf54aaa18a87937964c0e6d8bfb
|
import unittest
import tempfile
import shutil
import os
from contextlib import closing
from traits.testing.api import UnittestTools
from tvtk.api import tvtk
from mayavi.core.api import NullEngine
from simphony.cuds.particles import Particles
from simphony.cuds.mesh import Mesh
from simphony.cuds.lattice import make_cubic_lattice
from simphony.io.h5_cuds import H5CUDS
from simphony.io.h5_lattice import H5Lattice
from simphony.io.h5_mesh import H5Mesh
from simphony.io.h5_particles import H5Particles
from simphony_mayavi.sources.api import CUDSFileSource
from simphony_mayavi.cuds.api import VTKParticles, VTKLattice, VTKMesh
class TestLatticeSource(unittest.TestCase, UnittestTools):
    """Tests for CUDSFileSource: initialisation from a CUDS file, dataset
    switching, engine integration, and save/restore of visualizations."""

    def setUp(self):
        # Fresh temp dir holding a CUDS file with one mesh, two particle
        # containers and one cubic lattice.
        self.temp_dir = tempfile.mkdtemp()
        self.maxDiff = None
        self.filename = os.path.join(self.temp_dir, 'test.cuds')
        with closing(H5CUDS.open(self.filename, mode='w')) as handle:
            handle.add_dataset(Mesh(name='mesh1'))
            handle.add_dataset(Particles(name='particles1'))
            handle.add_dataset(Particles(name='particles3'))
            handle.add_dataset(make_cubic_lattice(
                'lattice0', 0.2, (5, 10, 15), (0.0, 0.0, 0.0)))

    def tearDown(self):
        shutil.rmtree(self.temp_dir)

    def test_initialization(self):
        # initialize() should discover every dataset in the file and pick
        # one of them as the current dataset.
        source = CUDSFileSource()
        source.initialize(self.filename)
        self.assertItemsEqual(
            source.datasets, ['mesh1', 'particles1', 'particles3', 'lattice0'])
        self.assertIn(source.dataset, source.datasets)

    def test_update(self):
        source = CUDSFileSource()
        source.initialize(self.filename)
        # after initialize we need to call update to get the data loaded.
        with self.assertTraitChanges(source, 'data_changed'):
            source.update()
        self.assertIsInstance(source.cuds, H5Particles)
        self.assertIsInstance(source._vtk_cuds, VTKParticles)
        self.assertIsInstance(source.outputs[0], tvtk.PolyData)

    def test_dataset_change(self):
        # Switching datasets should fire data_changed and swap both the
        # CUDS container type and the VTK output type accordingly.
        source = CUDSFileSource()
        source.initialize(self.filename)
        with self.assertTraitChanges(source, 'data_changed'):
            source.dataset = 'lattice0'
        self.assertIsInstance(source.cuds, H5Lattice)
        self.assertIsInstance(source._vtk_cuds, VTKLattice)
        self.assertIsInstance(source.outputs[0], tvtk.ImageData)
        with self.assertTraitChanges(source, 'data_changed'):
            source.dataset = 'mesh1'
        self.assertIsInstance(source.cuds, H5Mesh)
        self.assertIsInstance(source._vtk_cuds, VTKMesh)
        self.assertIsInstance(source.outputs[0], tvtk.UnstructuredGrid)

    def test_source_name(self):
        # given
        source = CUDSFileSource()
        source.initialize(self.filename)

        # when
        source.update()

        # then
        self.assertEqual(source.name, 'CUDS File: particles1 (CUDS Particles)')

        # when
        source.dataset = 'mesh1'

        # then
        self.assertEqual(source.name, 'CUDS File: mesh1 (CUDS Mesh)')

    def test_add_to_engine(self):
        source = CUDSFileSource()
        source.initialize(self.filename)
        engine = NullEngine()
        # When the source is added to an engine it should load the dataset.
        with self.assertTraitChanges(source, 'data_changed'):
            engine.add_source(source)
        self.assertIsInstance(source.cuds, H5Particles)
        self.assertIsInstance(source._vtk_cuds, VTKParticles)
        self.assertIsInstance(source.outputs[0], tvtk.PolyData)

    def test_save_load_visualization(self):
        # set up visualization
        source = CUDSFileSource()
        source.initialize(self.filename)
        source.dataset = "mesh1"
        engine = NullEngine()
        engine.add_source(source)

        # save the visualization
        saved_viz_file = os.path.join(self.temp_dir, 'test_saved_viz.mv2')
        engine.save_visualization(saved_viz_file)
        engine.stop()

        # restore the visualization
        engine.load_visualization(saved_viz_file)
        source_in_scene = engine.current_scene.children[0]

        # check
        self.assertItemsEqual(
            source_in_scene.datasets,
            ['mesh1', 'particles1', 'particles3', 'lattice0'])
        self.assertIn(source_in_scene.dataset,
                      source_in_scene.datasets)
        # should allow changing dataset as usual
        with self.assertTraitChanges(source_in_scene, "data_changed"):
            source_in_scene.dataset = "lattice0"

    def test_error_restore_visualization_file_changed(self):
        ''' Test if the data is restored anyway for unloadable file'''
        # set up visualization
        source = CUDSFileSource()
        source.initialize(self.filename)
        engine = NullEngine()
        engine.add_source(source)

        # save the visualization
        saved_viz_file = os.path.join(self.temp_dir, 'test_saved_viz.mv2')
        engine.save_visualization(saved_viz_file)
        engine.stop()

        # now remove the file
        # the file handler to self.filename should be closed
        os.remove(self.filename)

        # restore the visualization
        engine.load_visualization(saved_viz_file)
        source_in_scene = engine.current_scene.children[0]

        # check that the data is restored anyway
        self.assertIsNotNone(source_in_scene.data)

        # but datasets and cuds are empty
        self.assertEqual(source_in_scene.datasets, [])
        self.assertIsNone(source_in_scene._cuds)
|
simphony/simphony-mayavi
|
simphony_mayavi/sources/tests/test_cuds_file_source.py
|
Python
|
bsd-2-clause
| 5,557
|
[
"Mayavi"
] |
bc2c89e394708e913f012b648ba6239cfa0a8b35346b787d15ad4a3492f6ed8d
|
# $HeadURL$
__RCSID__ = "$Id$"
import sys
import os
import getopt
import types
import DIRAC
from DIRAC import gLogger
from DIRAC import S_OK, S_ERROR
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
from DIRAC.ConfigurationSystem.private.Refresher import gRefresher
from DIRAC.ConfigurationSystem.Client.PathFinder import getServiceSection, getAgentSection, getExecutorSection
from DIRAC.Core.Utilities.Devloader import Devloader
class LocalConfiguration:
"""
Main class to interface with Configuration of a running DIRAC Component.
For most cases this is handled via
- DIRAC.Core.Base.Script class for scripts
- dirac-agent for agents
- dirac-service for services
"""
  def __init__( self, defaultSectionPath = "" ):
    """Create an empty local configuration.

    :param defaultSectionPath: base CS section used to resolve relative option paths
    """
    self.currentSectionPath = defaultSectionPath
    self.mandatoryEntryList = []       # option paths that must exist after parsing
    self.optionalEntryList = []        # ( optionPath, defaultValue ) pairs
    self.commandOptionList = []        # registered ( short, long, help, callback ) switches
    self.unprocessedSwitches = []
    self.additionalCFGFiles = []       # extra .cfg files queued via addCFGFile
    self.parsedOptionList = []
    self.commandArgList = []
    self.cliAdditionalCFGFiles = []    # .cfg files passed on the command line
    self.__registerBasicOptions()
    self.isParsed = False
    self.componentName = "Unknown"
    self.componentType = False
    self.loggingSection = "/DIRAC"
    self.initialized = False
    self.__usageMessage = False        # text shown by showHelp when set
    self.__debugMode = 0               # incremented per -d flag
  def disableParsingCommandLine( self ):
    # Mark the command line as already parsed so lazy accessors skip parsing.
    self.isParsed = True
def __getAbsolutePath( self, optionPath ):
if optionPath[0] == "/":
return optionPath
else:
return "%s/%s" % ( self.currentSectionPath, optionPath )
def addMandatoryEntry( self, optionPath ):
"""
Define a mandatory Configuration data option for the parsing of the command line
"""
self.mandatoryEntryList.append( optionPath )
def addDefaultEntry( self, optionPath, value ):
"""
Define a default value for a Configuration data option
"""
if optionPath[0] == "/":
if not gConfigurationData.extractOptionFromCFG( optionPath ):
self.__setOptionValue( optionPath, value )
else:
self.optionalEntryList.append( ( optionPath,
str( value ) ) )
def addCFGFile( self, filePath ):
"""
Load additional .cfg file to be parsed
"""
self.additionalCFGFiles.append( filePath )
def setUsageMessage( self, usageMsg ):
"""
Define message to be display by the showHelp method
"""
self.__usageMessage = usageMsg
def __setOptionValue( self, optionPath, value ):
gConfigurationData.setOptionInCFG( self.__getAbsolutePath( optionPath ),
str( value ) )
def __registerBasicOptions( self ):
self.registerCmdOpt( "o:", "option=", "Option=value to add",
self.__setOptionByCmd )
self.registerCmdOpt( "s:", "section=", "Set base section for relative parsed options",
self.__setSectionByCmd )
self.registerCmdOpt( "c:", "cert=", "Use server certificate to connect to Core Services",
self.__setUseCertByCmd )
self.registerCmdOpt( "d", "debug", "Set debug mode (-dd is extra debug)",
self.__setDebugMode )
devLoader = Devloader()
if devLoader.enabled:
self.registerCmdOpt( "", "autoreload", "Automatically restart if there's any change in the module",
self.__setAutoreload )
self.registerCmdOpt( "", "license", "Show DIRAC's LICENSE",
self.showLicense )
self.registerCmdOpt( "h", "help", "Shows this help",
self.showHelp )
def registerCmdOpt( self, shortOption, longOption, helpString, function = False ):
"""
Register a new command line option
"""
shortOption = shortOption.strip()
longOption = longOption.strip()
if not shortOption and not longOption:
raise Exception( "No short or long options defined" )
for optTuple in self.commandOptionList:
if shortOption and optTuple[0] == shortOption:
raise Exception( "Short switch %s is already defined!" % shortOption )
if longOption and optTuple[1] == longOption:
raise Exception( "Long switch %s is already defined!" % longOption )
self.commandOptionList.append( ( shortOption, longOption, helpString, function ) )
def getExtraCLICFGFiles( self ):
"""
Retrieve list of parsed .cfg files
"""
if not self.isParsed:
self.__parseCommandLine()
return self.cliAdditionalCFGFiles
def getPositionalArguments( self ):
"""
Retrieve list of command line positional arguments
"""
if not self.isParsed:
self.__parseCommandLine()
return self.commandArgList
def getUnprocessedSwitches( self ):
"""
Retrieve list of command line switches without a callback function
"""
if not self.isParsed:
self.__parseCommandLine()
return self.unprocessedSwitches
def __checkMandatoryOptions( self ):
try:
isMandatoryMissing = False
for optionPath in self.mandatoryEntryList:
optionPath = self.__getAbsolutePath( optionPath )
if not gConfigurationData.extractOptionFromCFG( optionPath ):
gLogger.fatal( "Missing mandatory local configuration option", optionPath )
isMandatoryMissing = True
if isMandatoryMissing:
return S_ERROR()
return S_OK()
except Exception, e:
gLogger.exception()
return S_ERROR( str( e ) )
#TODO: Initialize if not previously initialized
def initialize( self, componentName ):
"""
Make sure DIRAC is properly initialized
"""
if self.initialized:
return S_OK()
self.initialized = True
#Set that the command line has already been parsed
self.isParsed = True
if not self.componentType:
self.setConfigurationForScript( componentName )
try:
retVal = self.__addUserDataToConfiguration()
self.__initLogger( self.componentName, self.loggingSection )
if not retVal[ 'OK' ]:
return retVal
retVal = self.__checkMandatoryOptions()
if not retVal[ 'OK' ]:
return retVal
except Exception, e:
gLogger.exception()
return S_ERROR( str( e ) )
return S_OK()
def __initLogger( self, componentName, logSection ):
gLogger.initialize( componentName, logSection )
if self.__debugMode == 1:
gLogger.setLevel( "VERBOSE" )
elif self.__debugMode == 2:
gLogger.setLevel( "VERBOSE" )
gLogger.showHeaders( True )
elif self.__debugMode >= 3:
gLogger.setLevel( "DEBUG" )
gLogger.showHeaders( True )
def loadUserData( self ):
"""
This is the magic method that reads the command line and processes it
It is used by the Script Base class and the dirac-service and dirac-agent scripts
Before being called:
- any additional switches to be processed
- mandatory and default configuration configuration options
must be defined.
"""
if self.initialized:
return S_OK()
self.initialized = True
try:
retVal = self.__addUserDataToConfiguration()
for optionTuple in self.optionalEntryList:
optionPath = self.__getAbsolutePath( optionTuple[0] )
if not gConfigurationData.extractOptionFromCFG( optionPath ):
gConfigurationData.setOptionInCFG( optionPath, optionTuple[1] )
self.__initLogger( self.componentName, self.loggingSection )
if not retVal[ 'OK' ]:
return retVal
retVal = self.__checkMandatoryOptions()
if not retVal[ 'OK' ]:
return retVal
except Exception, e:
gLogger.exception()
return S_ERROR( str( e ) )
return S_OK()
def __parseCommandLine( self ):
gLogger.debug( "Parsing command line" )
shortOption = ""
longOptionList = []
for optionTuple in self.commandOptionList:
if shortOption.find( optionTuple[0] ) < 0:
shortOption += "%s" % optionTuple[0]
else:
if optionTuple[0]:
gLogger.error( "Short option -%s has been already defined" % optionTuple[0] )
if not optionTuple[1] in longOptionList:
longOptionList.append( "%s" % optionTuple[1] )
else:
if optionTuple[1]:
gLogger.error( "Long option --%s has been already defined" % optionTuple[1] )
try:
opts, args = getopt.gnu_getopt( sys.argv[1:], shortOption, longOptionList )
except getopt.GetoptError, x:
# x = option "-k" not recognized
# print help information and exit
gLogger.fatal( "Error when parsing command line arguments: %s" % str( x ) )
self.showHelp()
sys.exit( 2 )
for o, v in opts:
if o in ( '-h', '--help' ):
self.showHelp()
sys.exit(2)
self.cliAdditionalCFGFiles = [ arg for arg in args if arg[-4:] == ".cfg" ]
self.commandArgList = [ arg for arg in args if not arg[-4:] == ".cfg" ]
self.parsedOptionList = opts
self.isParsed = True
def __loadCFGFiles( self ):
"""
Load ~/.dirac.cfg
Load cfg files specified in addCFGFile calls
Load cfg files with come from the command line
"""
errorsList = []
if 'DIRACSYSCONFIG' in os.environ:
gConfigurationData.loadFile( os.environ[ 'DIRACSYSCONFIG' ] )
gConfigurationData.loadFile( os.path.expanduser( "~/.dirac.cfg" ) )
for fileName in self.additionalCFGFiles:
gLogger.debug( "Loading file %s" % fileName )
retVal = gConfigurationData.loadFile( fileName )
if not retVal[ 'OK' ]:
gLogger.debug( "Could not load file %s: %s" % ( fileName, retVal[ 'Message' ] ) )
errorsList.append( retVal[ 'Message' ] )
for fileName in self.cliAdditionalCFGFiles:
gLogger.debug( "Loading file %s" % fileName )
retVal = gConfigurationData.loadFile( fileName )
if not retVal[ 'OK' ]:
gLogger.debug( "Could not load file %s: %s" % ( fileName, retVal[ 'Message' ] ) )
errorsList.append( retVal[ 'Message' ] )
return errorsList
def __addUserDataToConfiguration( self ):
if not self.isParsed:
self.__parseCommandLine()
errorsList = self.__loadCFGFiles()
if gConfigurationData.getServers():
retVal = self.syncRemoteConfiguration()
if not retVal[ 'OK' ]:
return retVal
else:
gLogger.warn( "Running without remote configuration" )
try:
if self.componentType == "service":
self.__setDefaultSection( getServiceSection( self.componentName ) )
elif self.componentType == "agent":
self.__setDefaultSection( getAgentSection( self.componentName ) )
elif self.componentType == "executor":
self.__setDefaultSection( getExecutorSection( self.componentName ) )
elif self.componentType == "web":
self.__setDefaultSection( "/%s" % self.componentName )
elif self.componentType == "script":
if self.componentName and self.componentName[0] == "/":
self.__setDefaultSection( self.componentName )
self.componentName = self.componentName[1:]
else:
self.__setDefaultSection( "/Scripts/%s" % self.componentName )
else:
self.__setDefaultSection( "/" )
except Exception, e:
errorsList.append( str( e ) )
self.unprocessedSwitches = []
for optionName, optionValue in self.parsedOptionList:
optionName = optionName.lstrip( "-" )
for definedOptionTuple in self.commandOptionList:
if optionName == definedOptionTuple[0].replace( ":", "" ) or \
optionName == definedOptionTuple[1].replace( "=", "" ):
if definedOptionTuple[3]:
retVal = definedOptionTuple[3]( optionValue )
if type( retVal ) != types.DictType:
errorsList.append( "Callback for switch '%s' does not return S_OK or S_ERROR" % optionName )
elif not retVal[ 'OK' ]:
errorsList.append( retVal[ 'Message' ] )
else:
self.unprocessedSwitches.append( ( optionName, optionValue ) )
if len( errorsList ) > 0:
return S_ERROR( "\n%s" % "\n".join( errorsList ) )
return S_OK()
def disableCS( self ):
"""
Do not contact Configuration Server upon initialization
"""
gRefresher.disable()
def enableCS( self ):
"""
Force the connection the Configuration Server
"""
gRefresher.enable()
return S_OK()
def isCSEnabled( self ):
"""
Retrieve current status of the connection to Configuration Server
"""
return gRefresher.isEnabled()
def syncRemoteConfiguration( self, strict = False ):
"""
Force a Resync with Configuration Server
Under normal conditions this is triggered by an access to any configuration data
"""
if self.componentName == "Configuration/Server" :
if gConfigurationData.isMaster():
gLogger.info( "Starting Master Configuration Server" )
gRefresher.disable()
return S_OK()
retDict = gRefresher.forceRefresh()
if not retDict['OK']:
gLogger.error( "Can't update from any server", retDict[ 'Message' ] )
if strict:
return retDict
return S_OK()
def __setDefaultSection( self, sectionPath ):
self.currentSectionPath = sectionPath
self.loggingSection = self.currentSectionPath
def setConfigurationForServer( self, serviceName ):
"""
Declare this is a DIRAC service
"""
self.componentName = serviceName
self.componentType = "service"
def setConfigurationForAgent( self, agentName ):
"""
Declare this is a DIRAC agent
"""
self.componentName = agentName
self.componentType = "agent"
def setConfigurationForExecutor( self, executorName ):
"""
Declare this is a DIRAC agent
"""
self.componentName = executorName
self.componentType = "executor"
def setConfigurationForWeb( self, webName ):
"""
Declare this is a DIRAC agent
"""
self.componentName = webName
self.componentType = "web"
def setConfigurationForScript( self, scriptName ):
"""
Declare this is a DIRAC script
"""
self.componentName = scriptName
self.componentType = "script"
def __setSectionByCmd( self, value ):
if value[0] != "/":
return S_ERROR( "%s is not a valid section. It should start with '/'" % value )
self.currentSectionPath = value
return S_OK()
def __setOptionByCmd( self, value ):
valueList = value.split( "=" )
if len( valueList ) < 2:
# FIXME: in the method above an exception is raised, check consitency
return S_ERROR( "-o expects a option=value argument.\nFor example %s -o Port=1234" % sys.argv[0] )
self.__setOptionValue( valueList[0] , "=".join( valueList[1:] ) )
return S_OK()
def __setUseCertByCmd( self, value ):
useCert = "no"
if value.lower() in ( "y", "yes", "true" ):
useCert = "yes"
self.__setOptionValue( "/DIRAC/Security/UseServerCertificate", useCert )
return S_OK()
def __setDebugMode( self, dummy = False ):
self.__debugMode += 1
return S_OK()
def __setAutoreload( self, filepath = False ):
devLoader = Devloader()
devLoader.bootstrap()
if filepath:
devLoader.watchFile( filepath )
gLogger.notice( "Devloader started" )
return S_OK()
def getDebugMode( self ):
return self.__debugMode
def showLicense( self, dummy = False ):
"""
Print license
"""
lpath = os.path.join( DIRAC.rootPath, "DIRAC", "LICENSE" )
sys.stdout.write( " - DIRAC is GPLv3 licensed\n\n" )
try:
with open( lpath ) as fd:
sys.stdout.write( fd.read() )
except IOError:
sys.stdout.write( "Can't find GPLv3 license at %s. Somebody stole it!\n" % lpath )
sys.stdout.write( "Please check out http://www.gnu.org/licenses/gpl-3.0.html for more info\n" )
DIRAC.exit(0)
def showHelp( self, dummy = False ):
"""
Printout help message including a Usage message if defined via setUsageMessage method
"""
if self.__usageMessage:
gLogger.notice( '\n'+self.__usageMessage.lstrip() )
else:
gLogger.notice( "\nUsage:" )
gLogger.notice( "\n %s (<options>|<cfgFile>)*" % os.path.basename( sys.argv[0] ) )
if dummy:
gLogger.notice( dummy )
gLogger.notice( "\nGeneral options:" )
iLastOpt = 0
for iPos in range( len( self.commandOptionList ) ):
optionTuple = self.commandOptionList[ iPos ]
if optionTuple[0].endswith( ':' ):
line = "\n -%s --%s : %s" % ( optionTuple[0][:-1].ljust( 2 ),
(optionTuple[1][:-1] + ' <value> ').ljust( 22 ),
optionTuple[2] )
gLogger.notice( line )
else:
gLogger.notice( "\n -%s --%s : %s" % ( optionTuple[0].ljust( 2 ), optionTuple[1].ljust( 22 ), optionTuple[2] ) )
iLastOpt = iPos
if optionTuple[0] == 'h':
#Last general opt is always help
break
if iLastOpt + 1 < len( self.commandOptionList ):
gLogger.notice( " \nOptions:" )
for iPos in range( iLastOpt + 1, len( self.commandOptionList ) ):
optionTuple = self.commandOptionList[ iPos ]
if optionTuple[0].endswith( ':' ):
line = "\n -%s --%s : %s" % ( optionTuple[0][:-1].ljust( 2 ),
(optionTuple[1][:-1] + ' <value> ').ljust( 22 ),
optionTuple[2] )
gLogger.notice( line )
else:
gLogger.notice( "\n -%s --%s : %s" % ( optionTuple[0].ljust( 2 ), optionTuple[1].ljust( 22 ), optionTuple[2] ) )
gLogger.notice( "\n" )
DIRAC.exit( 0 )
def deleteOption( self, optionPath ):
"""
Remove a Configuration Option from the local Configuration
"""
gConfigurationData.deleteOptionInCFG( optionPath )
|
avedaee/DIRAC
|
ConfigurationSystem/Client/LocalConfiguration.py
|
Python
|
gpl-3.0
| 17,793
|
[
"DIRAC"
] |
251ffc9c0851f7e1bce635a8a685fd90abe5584ed7e25d7c424933aed9eaec53
|
# $HeadURL: $
""" ElementInspectorAgent
This agent inspect Resources, and evaluates policies that apply.
"""
import datetime
import math
import Queue
from DIRAC import S_ERROR, S_OK
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Utilities.ThreadPool import ThreadPool
from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
from DIRAC.ResourceStatusSystem.PolicySystem.PEP import PEP
from DIRAC.ResourceStatusSystem.Utilities import Utils
# Resolve the VO-specific ResourceManagementClient implementation ( voimport
# walks the extension search order, falling back to the vanilla DIRAC one ).
ResourceManagementClient = getattr(Utils.voimport( 'DIRAC.ResourceStatusSystem.Client.ResourceManagementClient' ),'ResourceManagementClient')
__RCSID__ = '$Id: $'
# Name under which this agent is registered in the ResourceStatus system
AGENT_NAME = 'ResourceStatus/ElementInspectorAgent'
class ElementInspectorAgent( AgentModule ):
  """ ElementInspectorAgent

  The ElementInspector agent is a generic agent used to check the elements
  of one of the elementTypes ( e.g. Site, Resource, Node ).

  This Agent takes care of the Elements. In order to do so, it gathers
  the eligible ones and then evaluates their statuses with the PEP.
  """

  # Max number of worker threads by default
  __maxNumberOfThreads = 15

  # Inspection freqs, defaults, the lower, the higher priority to be checked.
  # Error state usually means there is a glitch somewhere, so it has the highest
  # priority.
  # Values are minutes that must elapse since LastCheckTime before an element
  # in the given status becomes eligible again.
  __checkingFreqs = {
    'Active'   : 20,
    'Degraded' : 20,
    'Probing'  : 20,
    'Banned'   : 15,
    'Unknown'  : 10,
    'Error'    : 5
  }

  def __init__( self, *args, **kwargs ):
    """ c'tor
    """
    AgentModule.__init__( self, *args, **kwargs )

    # ElementType, to be defined among Site, Resource or Node
    self.elementType = ''
    # Queue of eligible elements, refilled on every cycle by execute()
    self.elementsToBeChecked = None
    self.threadPool = None
    self.rsClient = None
    # Clients handed to the PEP so policies can query the RSS databases
    self.clients = {}

  def initialize( self ):
    """ Standard initialize.
    """
    maxNumberOfThreads = self.am_getOption( 'maxNumberOfThreads', self.__maxNumberOfThreads )
    self.threadPool = ThreadPool( maxNumberOfThreads, maxNumberOfThreads )
    self.elementType = self.am_getOption( 'elementType', self.elementType )
    self.rsClient = ResourceStatusClient()
    self.clients[ 'ResourceStatusClient' ] = self.rsClient
    self.clients[ 'ResourceManagementClient' ] = ResourceManagementClient()
    # elementType has no sensible default; it must come from the agent options
    if not self.elementType:
      return S_ERROR( 'Missing elementType' )
    return S_OK()

  def execute( self ):
    """ execute

    This is the main method of the agent. It gets the elements from the Database
    which are eligible to be re-checked, calculates how many threads should be
    started and spawns them. Each thread will get an element from the queue until
    it is empty. At the end, the method will join the queue such that the agent
    will not terminate a cycle until all elements have been processed.
    """
    # Gets elements to be checked ( returns a Queue )
    elementsToBeChecked = self.getElementsToBeChecked()
    if not elementsToBeChecked[ 'OK' ]:
      self.log.error( elementsToBeChecked[ 'Message' ] )
      return elementsToBeChecked
    self.elementsToBeChecked = elementsToBeChecked[ 'Value' ]

    queueSize = self.elementsToBeChecked.qsize()
    pollingTime = self.am_getPollingTime()

    # Assigns number of threads on the fly such that we exhaust the PollingTime
    # without having to spawn too many threads. We assume 10 seconds per element
    # to be processed ( actually, it takes something like 1 sec per element ):
    # numberOfThreads = elements * 10(s/element) / pollingTime
    numberOfThreads = int( math.ceil( queueSize * 10. / pollingTime ) )

    self.log.info( 'Needed %d threads to process %d elements' % ( numberOfThreads, queueSize ) )

    for _x in xrange( numberOfThreads ):
      jobUp = self.threadPool.generateJobAndQueueIt( self._execute )
      if not jobUp[ 'OK' ]:
        self.log.error( jobUp[ 'Message' ] )

    self.log.info( 'blocking until all elements have been processed' )
    # block until all tasks are done
    self.elementsToBeChecked.join()
    self.log.info( 'done')

    return S_OK()

  def getElementsToBeChecked( self ):
    """ getElementsToBeChecked

    This method gets all the rows in the <self.elementType>Status table, and then
    discards entries with TokenOwner != rs_svc. On top of that, there are check
    frequencies that are applied: depending on the current status of the element,
    they will be checked more or less often.
    """
    toBeChecked = Queue.Queue()

    # We get all the elements, then we filter.
    elements = self.rsClient.selectStatusElement( self.elementType, 'Status' )
    if not elements[ 'OK' ]:
      return elements

    utcnow = datetime.datetime.utcnow().replace( microsecond = 0 )

    # filter elements by Type
    for element in elements[ 'Value' ]:
      # Maybe an overkill, but this way I have NEVER again to worry about order
      # of elements returned by mySQL on tuples
      elemDict = dict( zip( elements[ 'Columns' ], element ) )

      # This if-clause skips all the elements that are should not be checked yet
      # NOTE(review): assumes LastCheckTime is returned as a datetime — confirm
      # the DB client's row conversion.
      timeToNextCheck = self.__checkingFreqs[ elemDict[ 'Status' ] ]
      if utcnow <= elemDict[ 'LastCheckTime' ] + datetime.timedelta( minutes = timeToNextCheck ):
        continue

      # We skip the elements with token different than "rs_svc"
      if elemDict[ 'TokenOwner' ] != 'rs_svc':
        self.log.verbose( 'Skipping %s ( %s ) with token %s' % ( elemDict[ 'Name' ],
                                                                 elemDict[ 'StatusType' ],
                                                                 elemDict[ 'TokenOwner' ]
                                                                 ))
        continue

      # We are not checking if the item is already on the queue or not. It may
      # be there, but in any case, it is not a big problem.
      # The PEP expects lower-camelCase keys, so keys are down-cased here.
      lowerElementDict = { 'element' : self.elementType }
      for key, value in elemDict.items():
        lowerElementDict[ key[0].lower() + key[1:] ] = value

      # We add lowerElementDict to the queue
      toBeChecked.put( lowerElementDict )
      self.log.verbose( '%s # "%s" # "%s" # %s # %s' % ( elemDict[ 'Name' ],
                                                         elemDict[ 'ElementType' ],
                                                         elemDict[ 'StatusType' ],
                                                         elemDict[ 'Status' ],
                                                         elemDict[ 'LastCheckTime' ]) )
    return S_OK( toBeChecked )

  # Private methods ............................................................

  def _execute( self ):
    """
    Method run by the thread pool. It enters a loop until there are no elements
    on the queue. On each iteration, it evaluates the policies for such element
    and enforces the necessary actions. If there are no more elements in the
    queue, the loop is finished.
    """
    pep = PEP( clients = self.clients )

    while True:
      try:
        element = self.elementsToBeChecked.get_nowait()
      except Queue.Empty:
        return S_OK()

      self.log.verbose( '%s ( %s / %s ) being processed' % ( element[ 'name' ],
                                                             element[ 'status' ],
                                                             element[ 'statusType' ] ) )

      resEnforce = pep.enforce( element )
      if not resEnforce[ 'OK' ]:
        self.log.error( resEnforce[ 'Message' ] )
        # task_done must be called even on failure or the queue join() hangs
        self.elementsToBeChecked.task_done()
        continue

      resEnforce = resEnforce[ 'Value' ]

      oldStatus = resEnforce[ 'decissionParams' ][ 'status' ]
      statusType = resEnforce[ 'decissionParams' ][ 'statusType' ]
      newStatus = resEnforce[ 'policyCombinedResult' ][ 'Status' ]
      reason = resEnforce[ 'policyCombinedResult' ][ 'Reason' ]

      if oldStatus != newStatus:
        self.log.info( '%s (%s) is now %s ( %s ), before %s' % ( element[ 'name' ],
                                                                 statusType,
                                                                 newStatus,
                                                                 reason,
                                                                 oldStatus ) )

      # Used together with join !
      self.elementsToBeChecked.task_done()
#...............................................................................
#EOF
|
calancha/DIRAC
|
ResourceStatusSystem/Agent/ElementInspectorAgent.py
|
Python
|
gpl-3.0
| 9,022
|
[
"DIRAC"
] |
d64e57dcc6570e8e7276cdad010fce350396945b27ac4ee4c5cac71a01d31535
|
# Transliteration table used by unidecode for Unicode code points
# U+AD00 .. U+ADFF (Hangul syllables, revised-romanisation style).
# The tuple is indexed by the low byte of the code point.
data = (
    'gwan', # 0x00
    'gwanj', # 0x01
    'gwanh', # 0x02
    'gwad', # 0x03
    'gwal', # 0x04
    'gwalg', # 0x05
    'gwalm', # 0x06
    'gwalb', # 0x07
    'gwals', # 0x08
    'gwalt', # 0x09
    'gwalp', # 0x0a
    'gwalh', # 0x0b
    'gwam', # 0x0c
    'gwab', # 0x0d
    'gwabs', # 0x0e
    'gwas', # 0x0f
    'gwass', # 0x10
    'gwang', # 0x11
    'gwaj', # 0x12
    'gwac', # 0x13
    'gwak', # 0x14
    'gwat', # 0x15
    'gwap', # 0x16
    'gwah', # 0x17
    'gwae', # 0x18
    'gwaeg', # 0x19
    'gwaegg', # 0x1a
    'gwaegs', # 0x1b
    'gwaen', # 0x1c
    'gwaenj', # 0x1d
    'gwaenh', # 0x1e
    'gwaed', # 0x1f
    'gwael', # 0x20
    'gwaelg', # 0x21
    'gwaelm', # 0x22
    'gwaelb', # 0x23
    'gwaels', # 0x24
    'gwaelt', # 0x25
    'gwaelp', # 0x26
    'gwaelh', # 0x27
    'gwaem', # 0x28
    'gwaeb', # 0x29
    'gwaebs', # 0x2a
    'gwaes', # 0x2b
    'gwaess', # 0x2c
    'gwaeng', # 0x2d
    'gwaej', # 0x2e
    'gwaec', # 0x2f
    'gwaek', # 0x30
    'gwaet', # 0x31
    'gwaep', # 0x32
    'gwaeh', # 0x33
    'goe', # 0x34
    'goeg', # 0x35
    'goegg', # 0x36
    'goegs', # 0x37
    'goen', # 0x38
    'goenj', # 0x39
    'goenh', # 0x3a
    'goed', # 0x3b
    'goel', # 0x3c
    'goelg', # 0x3d
    'goelm', # 0x3e
    'goelb', # 0x3f
    'goels', # 0x40
    'goelt', # 0x41
    'goelp', # 0x42
    'goelh', # 0x43
    'goem', # 0x44
    'goeb', # 0x45
    'goebs', # 0x46
    'goes', # 0x47
    'goess', # 0x48
    'goeng', # 0x49
    'goej', # 0x4a
    'goec', # 0x4b
    'goek', # 0x4c
    'goet', # 0x4d
    'goep', # 0x4e
    'goeh', # 0x4f
    'gyo', # 0x50
    'gyog', # 0x51
    'gyogg', # 0x52
    'gyogs', # 0x53
    'gyon', # 0x54
    'gyonj', # 0x55
    'gyonh', # 0x56
    'gyod', # 0x57
    'gyol', # 0x58
    'gyolg', # 0x59
    'gyolm', # 0x5a
    'gyolb', # 0x5b
    'gyols', # 0x5c
    'gyolt', # 0x5d
    'gyolp', # 0x5e
    'gyolh', # 0x5f
    'gyom', # 0x60
    'gyob', # 0x61
    'gyobs', # 0x62
    'gyos', # 0x63
    'gyoss', # 0x64
    'gyong', # 0x65
    'gyoj', # 0x66
    'gyoc', # 0x67
    'gyok', # 0x68
    'gyot', # 0x69
    'gyop', # 0x6a
    'gyoh', # 0x6b
    'gu', # 0x6c
    'gug', # 0x6d
    'gugg', # 0x6e
    'gugs', # 0x6f
    'gun', # 0x70
    'gunj', # 0x71
    'gunh', # 0x72
    'gud', # 0x73
    'gul', # 0x74
    'gulg', # 0x75
    'gulm', # 0x76
    'gulb', # 0x77
    'guls', # 0x78
    'gult', # 0x79
    'gulp', # 0x7a
    'gulh', # 0x7b
    'gum', # 0x7c
    'gub', # 0x7d
    'gubs', # 0x7e
    'gus', # 0x7f
    'guss', # 0x80
    'gung', # 0x81
    'guj', # 0x82
    'guc', # 0x83
    'guk', # 0x84
    'gut', # 0x85
    'gup', # 0x86
    'guh', # 0x87
    'gweo', # 0x88
    'gweog', # 0x89
    'gweogg', # 0x8a
    'gweogs', # 0x8b
    'gweon', # 0x8c
    'gweonj', # 0x8d
    'gweonh', # 0x8e
    'gweod', # 0x8f
    'gweol', # 0x90
    'gweolg', # 0x91
    'gweolm', # 0x92
    'gweolb', # 0x93
    'gweols', # 0x94
    'gweolt', # 0x95
    'gweolp', # 0x96
    'gweolh', # 0x97
    'gweom', # 0x98
    'gweob', # 0x99
    'gweobs', # 0x9a
    'gweos', # 0x9b
    'gweoss', # 0x9c
    'gweong', # 0x9d
    'gweoj', # 0x9e
    'gweoc', # 0x9f
    'gweok', # 0xa0
    'gweot', # 0xa1
    'gweop', # 0xa2
    'gweoh', # 0xa3
    'gwe', # 0xa4
    'gweg', # 0xa5
    'gwegg', # 0xa6
    'gwegs', # 0xa7
    'gwen', # 0xa8
    'gwenj', # 0xa9
    'gwenh', # 0xaa
    'gwed', # 0xab
    'gwel', # 0xac
    'gwelg', # 0xad
    'gwelm', # 0xae
    'gwelb', # 0xaf
    'gwels', # 0xb0
    'gwelt', # 0xb1
    'gwelp', # 0xb2
    'gwelh', # 0xb3
    'gwem', # 0xb4
    'gweb', # 0xb5
    'gwebs', # 0xb6
    'gwes', # 0xb7
    'gwess', # 0xb8
    'gweng', # 0xb9
    'gwej', # 0xba
    'gwec', # 0xbb
    'gwek', # 0xbc
    'gwet', # 0xbd
    'gwep', # 0xbe
    'gweh', # 0xbf
    'gwi', # 0xc0
    'gwig', # 0xc1
    'gwigg', # 0xc2
    'gwigs', # 0xc3
    'gwin', # 0xc4
    'gwinj', # 0xc5
    'gwinh', # 0xc6
    'gwid', # 0xc7
    'gwil', # 0xc8
    'gwilg', # 0xc9
    'gwilm', # 0xca
    'gwilb', # 0xcb
    'gwils', # 0xcc
    'gwilt', # 0xcd
    'gwilp', # 0xce
    'gwilh', # 0xcf
    'gwim', # 0xd0
    'gwib', # 0xd1
    'gwibs', # 0xd2
    'gwis', # 0xd3
    'gwiss', # 0xd4
    'gwing', # 0xd5
    'gwij', # 0xd6
    'gwic', # 0xd7
    'gwik', # 0xd8
    'gwit', # 0xd9
    'gwip', # 0xda
    'gwih', # 0xdb
    'gyu', # 0xdc
    'gyug', # 0xdd
    'gyugg', # 0xde
    'gyugs', # 0xdf
    'gyun', # 0xe0
    'gyunj', # 0xe1
    'gyunh', # 0xe2
    'gyud', # 0xe3
    'gyul', # 0xe4
    'gyulg', # 0xe5
    'gyulm', # 0xe6
    'gyulb', # 0xe7
    'gyuls', # 0xe8
    'gyult', # 0xe9
    'gyulp', # 0xea
    'gyulh', # 0xeb
    'gyum', # 0xec
    'gyub', # 0xed
    'gyubs', # 0xee
    'gyus', # 0xef
    'gyuss', # 0xf0
    'gyung', # 0xf1
    'gyuj', # 0xf2
    'gyuc', # 0xf3
    'gyuk', # 0xf4
    'gyut', # 0xf5
    'gyup', # 0xf6
    'gyuh', # 0xf7
    'geu', # 0xf8
    'geug', # 0xf9
    'geugg', # 0xfa
    'geugs', # 0xfb
    'geun', # 0xfc
    'geunj', # 0xfd
    'geunh', # 0xfe
    'geud', # 0xff
)
|
gquirozbogner/contentbox-master
|
third_party/unidecode/x0ad.py
|
Python
|
apache-2.0
| 5,024
|
[
"GULP"
] |
b742dee26ff377a1330cf35325e25b6f2a3f7abdb63baca2f1edd2e4a27754d3
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import TestCase
from zoo.orca.automl.model.base_keras_model import KerasBaseModel, KerasModelBuilder
import numpy as np
import tensorflow as tf
import pytest
def get_data():
    """Build a small synthetic linear dataset (y = 2x + 5) for the tests.

    Returns a dict with float32 arrays: 1000 training points under keys
    'x'/'y' and 400 validation points under 'val_x'/'val_y'.
    """
    def _linear(slope, intercept, size):
        xs = np.arange(0, 10, 10 / size, dtype=np.float32)
        return xs, slope * xs + intercept

    train_x, train_y = _linear(2, 5, 1000)
    val_x, val_y = _linear(2, 5, 400)
    return {'x': train_x, 'y': train_y, 'val_x': val_x, 'val_y': val_y}
def model_creator_keras(config):
    """Return a compiled single-Dense-layer tf.keras model (loss/metric: mse)."""
    net = tf.keras.models.Sequential([tf.keras.layers.Dense(1)])
    net.compile(loss="mse", optimizer='sgd', metrics=["mse"])
    return net
def model_creator_multiple_metrics(config):
    """Return a compiled single-Dense-layer tf.keras model with two metrics (mse, mae)."""
    net = tf.keras.models.Sequential([tf.keras.layers.Dense(1)])
    net.compile(loss="mse", optimizer='sgd', metrics=["mse", "mae"])
    return net
class TestBaseKerasModel(TestCase):
    """Tests for KerasModelBuilder/KerasBaseModel fit_eval behaviour."""

    # Shared synthetic y = 2x + 5 dataset, built once at class-definition time.
    data = get_data()

    def test_fit_evaluate(self):
        # fit_eval with an explicit metric must report that metric.
        modelBuilder_keras = KerasModelBuilder(model_creator_keras)
        model = modelBuilder_keras.build(config={
            "lr": 1e-2,
            "batch_size": 32,
        })
        val_result = model.fit_eval(data=(self.data["x"], self.data["y"]),
                                    validation_data=(self.data["val_x"], self.data["val_y"]),
                                    metric="mse",
                                    epochs=20)
        assert val_result.get("mse")

    def test_fit_eval_default_metric(self):
        # Without an explicit metric, the model's single compiled metric is used,
        # under the history name keras assigns to it.
        modelBuilder_keras = KerasModelBuilder(model_creator_keras)
        model = modelBuilder_keras.build(config={
            "lr": 1e-2,
            "batch_size": 32,
        })
        val_result = model.fit_eval(data=(self.data["x"], self.data["y"]),
                                    validation_data=(self.data["val_x"], self.data["val_y"]),
                                    epochs=20)
        hist_metric_name = tf.keras.metrics.get("mse").__name__
        assert val_result.get(hist_metric_name)

    def test_multiple_metrics_default(self):
        # Several compiled metrics and no explicit choice is ambiguous -> error.
        modelBuilder_keras = KerasModelBuilder(model_creator_multiple_metrics)
        model = modelBuilder_keras.build(config={
            "lr": 1e-2,
            "batch_size": 32,
        })
        with pytest.raises(ValueError):
            model.fit_eval(data=(self.data["x"], self.data["y"]),
                           validation_data=(self.data["val_x"], self.data["val_y"]),
                           epochs=20)

    def test_uncompiled_model(self):
        # A model_creator that forgets to compile must be rejected.
        def model_creator(config):
            """Returns a tf.keras model"""
            model = tf.keras.models.Sequential([
                tf.keras.layers.Dense(1)
            ])
            return model

        modelBuilder_keras = KerasModelBuilder(model_creator)
        with pytest.raises(ValueError):
            model = modelBuilder_keras.build(config={
                "lr": 1e-2,
                "batch_size": 32,
            })
            model.fit_eval(data=(self.data["x"], self.data["y"]),
                           validation_data=(self.data["val_x"], self.data["val_y"]),
                           metric="mse",
                           epochs=20)

    def test_unaligned_metric_value(self):
        # Asking for a metric the model was not compiled with must raise.
        modelBuilder_keras = KerasModelBuilder(model_creator_keras)
        model = modelBuilder_keras.build(config={
            "lr": 1e-2,
            "batch_size": 32,
        })
        with pytest.raises(ValueError):
            model.fit_eval(data=(self.data["x"], self.data["y"]),
                           validation_data=(self.data["val_x"], self.data["val_y"]),
                           metric='mae',
                           epochs=20)
if __name__ == "__main__":
    # Allow running this test module directly with `python <file>`.
    pytest.main([__file__])
|
intel-analytics/analytics-zoo
|
pyzoo/test/zoo/orca/automl/model/test_base_keras_model.py
|
Python
|
apache-2.0
| 4,523
|
[
"ORCA"
] |
bb1609a23e613fcb52458a02cb804037841ecc0620c354b6fe8b5680f20a78d4
|
import json
from pathlib import Path
import logging
import click
import tornado.ioloop
import tornado.web
import tornado.websocket
from tornado.log import enable_pretty_logging
import pyqrcode
from .export import data2usd_ascii, data2usdz, data2gltf, data2obj
from . import __version__
# Route tornado's standard loggers to stderr at INFO, with pretty formatting.
handler = logging.StreamHandler() # FileHandler(log_file_filename)
enable_pretty_logging()
for i in ["tornado.access","tornado.application","tornado.general"]:
    logger = logging.getLogger(i)
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
# NOTE: after this loop `logger` stays bound to the last tornado logger and is
# reused for module-level log messages further below.
class MainHandler(tornado.web.RequestHandler):
    """ Renders the index file """
    def get(self):
        # Return the current plot payload as JSON.
        return self.write(json.dumps(DATA)+"\n")
    def post(self):
        # Replace the global plot payload with the posted JSON body and tell
        # every connected viewer to reload it.
        global DATA
        DATA = json.loads(self.request.body)
        self.broadcastRefresh()
        return self.write("OK\n")
    def broadcastRefresh(self):
        # Notify all websocket clients that new data is available.
        broadcast({'key': 'reload_data'})
    def check_xsrf_cookie(self):
        """Bypass xsrf cookie checks when token-authenticated"""
        # TODO check whether we really are already authenticated - else
        # this opens some security problems!
        print("check_xsrf_cookie")
        return
# Cached externally reachable base URL (scheme://host[:port]/path), built lazily.
_external_base_url = None
# URL path prefix the app is mounted under (relevant behind reverse proxies).
_base_path = '/'
# Optional auth token appended to generated URLs; None disables it.
_token = None
# Cached externally visible IP, resolved lazily via get_ip().
_IP = None
def external_url(client_url, file='index.html'):
    """Build a URL for *file* that other devices (e.g. phones) can reach.

    If the requesting client already used a non-localhost hostname, that host
    is reused; otherwise a base URL is built from this machine's IP and cached.
    """
    global _external_base_url, _IP, _token
    from urllib.parse import urlparse, urljoin
    o = urlparse(client_url)
    tok = f'?token={_token}' if _token is not None else ''
    if o.hostname not in ["localhost","127.0.0.1","0.0.0.0",]:
        # client is using a host that is probably better
        # than what we would get out of get_ip, so use that
        # this is also important in Reverse Proxy-Settings, eg. on binderhub
        return urljoin(client_url, file+tok)
    if _external_base_url is None:
        # NOTE(review): get_ip() is expected to be defined elsewhere in this
        # module — confirm before relying on this fallback path.
        if _IP is None:
            _IP = get_ip()
        port = f':{_PORT}' if _PORT is not None else ''
        _external_base_url = f'http://{_IP}{port}{_base_path}'
    return _external_base_url+file+tok
class QRHandler(tornado.web.RequestHandler):
    """Renders QR Codes"""
    def get(self):
        # TODO check these arguments - they could be malicious!
        client_url = self.get_argument('location')
        file = self.get_argument('file', 'index.html')
        print(client_url)
        url = external_url(client_url, file=file)
        # 'qr' carries the raw QR module matrix; the client renders it itself.
        result = { 'qr': pyqrcode.QRCode(url).code, 'url': url }
        return self.write(json.dumps(result)+"\n")
class DataHandler(tornado.web.RequestHandler):
    """Serves the current plot data as JSON."""
    def get(self):
        return self.write(json.dumps(DATA)+"\n")
class StatusHandler(tornado.web.RequestHandler):
    """Serves the server status (client counts + data metadata) as JSON."""
    def get(self):
        # Expose the server version so clients can check compatibility.
        self.set_header("X-PlotAR-Version", __version__)
        return self.write(json.dumps(status())+"\n")
from . import export
class USDHandler(tornado.web.RequestHandler):
    """Renders the USDZ format used on iOS"""
    def get(self):
        if self.request.path.endswith(".usda"):
            # Plain-text USD (ascii) variant, mainly useful for debugging.
            result, _assets = data2usd_ascii(DATA)
            self.write(result)
            self.set_header('Content-Type', 'text/plain')
            return
        result = data2usdz(DATA)
        self.write(result)
        self.set_header('Content-Type', 'model/vnd.usd+zip')
class GLTFHandler(tornado.web.RequestHandler):
    """Renders the GLTF format used on Android"""
    def get(self):
        result = data2gltf(DATA)
        self.write(result)
        self.set_header('Content-Type', 'model/gltf+json')
class OBJHandler(tornado.web.RequestHandler):
    """Renders the Wavefront OBJ format."""
    def get(self):
        result = data2obj(DATA)
        self.set_header('Content-Type', 'text/plain')
        self.write(result)
def defaultData():
    """Build the demo payload: 100 gaussian 3-D points in 4 random colour groups."""
    import numpy as np
    from .client import plotar
    points = np.random.normal(size=(100, 3))
    colours = np.random.randint(4, size=100)
    return plotar(points, colours, return_data=True, host=None, name='Gaussian Sample', push_data=False)
# The list of currently connected websocket clients
CLIENTS = []
# The plot payload currently being served; starts out as a demo dataset.
DATA = {}
try:
    DATA = defaultData()
except Exception as e:
    # Fixed: the old call passed `e` as a %-format argument without a
    # placeholder in the message, which makes the logging module choke; the
    # message also wrongly mentioned data.json. `logger` here is the one left
    # bound by the logger-setup loop above.
    logger.info("Could not create default data: %s", e)
_PORT = None
def broadcast(message):
    """Send *message* (a str, or anything JSON-serialisable) to every client."""
    payload = message if isinstance(message, str) else json.dumps(message)
    print(f"Sending message: {payload}")
    for idx, client in enumerate(CLIENTS):
        print(f"Sending to client {idx}")
        client.write_message(payload)
def broadcast_status():
    """Push the current status summary to all connected websocket clients."""
    broadcast(status())
def status():
    """Summarise connected-client roles and attach the dataset's metadata."""
    devices = sum(int(c.is_device) for c in CLIENTS)
    controllers = sum(int(c.is_controller) for c in CLIENTS)
    focused = sum(int(c.has_focus) for c in CLIENTS)
    counts = {'numDevices': devices, 'numControllers': controllers,
              'numFocus': focused}
    metadata = DATA.get('metadata', {}) if DATA is not None else {}
    return {'status': counts, 'metadata': metadata}
class PlotARWebSocketHandler(tornado.websocket.WebSocketHandler):
    """Websocket hub ("/ws"): all data sent to the server is JSON.

    Clients announce their role via 'device'/'controller'/'focus' flags;
    any other JSON message is broadcast verbatim to every connected client.
    """
    def open(self):
        print("Client connecting /ws")
        # Role flags start cleared until the client announces itself.
        self.is_device = False
        self.is_controller = False
        self.has_focus = False
        CLIENTS.append(self)
    def on_message(self, message):
        print(f"got message: {message}")
        try:
            body = json.loads(message)
        except ValueError:
            # Narrowed from a bare `except:` so system-exiting exceptions
            # are not swallowed (json.JSONDecodeError is a ValueError).
            return self.write_message('Format error')
        sendStatus = False
        if 'focus' in body:
            self.has_focus = bool(body['focus'])
            sendStatus = True
        if 'controller' in body:
            self.is_controller = bool(body['controller'])
            sendStatus = True
        if 'device' in body:
            self.is_device = bool(body['device'])
            sendStatus = True
        if sendStatus:
            # Role changes only update the status; they are not relayed.
            broadcast_status()
            return
        if 'shutdown' in body and body['shutdown']:
            # NOTE(review): tornado's Application has no stop() method;
            # this probably should stop the IOLoop instead -- confirm.
            _app.stop()
        broadcast(body)
    def on_close(self):
        CLIENTS.remove(self)
        broadcast_status()
def html(x):
    """Return the path (as str) of *x* inside the bundled html/ directory."""
    return str(Path(__file__).parent / 'html' / x)
# URL routing table: (path regex, handler[, kwargs for static handlers]).
_mappings = [
    (r"/", MainHandler),
    # JSON endpoints for data, status and QR codes.
    (r"/data.json", DataHandler),
    (r"/status.json", StatusHandler),
    (r"/qr.json", QRHandler),
    # 3-D model exports in the formats the various viewers expect.
    (r"/data.usdz", USDHandler),
    (r"/data.usda", USDHandler),
    (r"/data.gltf", GLTFHandler),
    (r"/data.obj", OBJHandler),
    (r"/ws", PlotARWebSocketHandler),
    # Static assets shipped in the package's html/ directory.
    (r"/index.html(.*)", tornado.web.StaticFileHandler, {"path": html('index.html')}),
    # (r"/model.html(.*)", tornado.web.StaticFileHandler, {"path": html('model.html')}),
    (r"/keyboard.html(.*)", tornado.web.StaticFileHandler, {"path": html('keyboard.html')}),
    (r"/js/(.*)", tornado.web.StaticFileHandler, {"path": html('js')}),
    (r"/textures/(.*)", tornado.web.StaticFileHandler, {"path": html('textures')})
]
#_app = tornado.web.Application(_mappings)
# Command-line entry point; click turns the options into keyword arguments.
@click.command()
@click.option('-p', '--port', default=2908, help="Port to listen on")
# @click.option('-h', '--host', default=, help="format: gltf usdz usda obj. Default is to take extension of out or else gltf")
@click.option('-d', '--data', default=None, type=click.File(), help="Data.json file to open initially")
@click.option('--debug/--no-debug', default=False, help="Start Server in Debug mode (autoreload)")
def start_server(port=2908, data=None, debug=False):
    # Thin wrapper so the server can also be started programmatically
    # via _start_server() without going through click.
    _start_server(port=port, data=data, debug=debug)
def _start_server(port=2908, data=None, debug=False):
    """Create the tornado application and run it (blocks in the IOLoop).

    port  -- TCP port to listen on.
    data  -- optional open file handle of a data.json to serve initially.
    debug -- enable tornado debug mode (autoreload, verbose tracebacks).
    """
    print(f"Welcome to PlotAR server on port {port}")
    global _PORT, _app, DATA
    if data:
        # Removed a redundant second `global DATA` declaration here;
        # DATA is already declared global above.
        DATA = json.load(data)
    _app = tornado.web.Application(_mappings, debug=debug)
    _PORT = port
    _app.listen(port)
    # Blocks until the IOLoop is stopped.  (A stray debug `print("hello")`
    # that ran only after shutdown was removed.)
    tornado.ioloop.IOLoop.instance().start()
def get_ip():
    """Get the publicly visible IP address.

    Connecting a UDP socket does not send any packet, but it makes the OS
    choose the outgoing interface; we then read that interface's address.
    Falls back to 127.0.0.1 when no route is available.
    """
    import socket
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # doesn't even have to be reachable
        # see https://stackoverflow.com/a/28950776
        s.connect(('10.255.255.255', 1))
        IP = s.getsockname()[0]
    except OSError:
        # Narrowed from a bare `except:`; socket errors are OSError.
        IP = '127.0.0.1'
    finally:
        s.close()
    return IP
if __name__ == '__main__':
    # click parses sys.argv itself when the command is invoked bare.
    start_server()
|
thomann/plotVR
|
plotAR-py/plotar/server.py
|
Python
|
agpl-3.0
| 8,251
|
[
"Gaussian"
] |
a0cfdde547d534a8c3b3bd95d88485c026d33445a93fe315d999f414d4e80898
|
import os
import glob
import subprocess
from gpaw.test.big.agts import AGTSQueue
from gpaw.test.big.niflheim import NiflheimCluster
def cmd(c):
    """Run shell command *c*; raise AssertionError if it exits non-zero."""
    x = os.system(c)
    # An explicit raise (unlike a bare `assert`) still fires when Python
    # runs with optimizations (-O), which strips assert statements.
    if x != 0:
        raise AssertionError(c)
# Everything below runs on the Niflheim login node: check out fresh
# sources, build GPAW on both cluster architectures, fetch the PAW
# setups and ASE, then collect and run the AGTS test jobs.
os.chdir('agts')
cmd('svn checkout https://svn.fysik.dtu.dk/projects/gpaw/trunk gpaw')
# Build GPAW on the 'thul' (xeon) frontend via ssh.
cmd("""echo "\
cd agts/gpaw&& \
source /home/camp/modulefiles.sh&& \
module load NUMPY&& \
module load open64/4.2.3-0 && \
module load openmpi/1.3.3-1.el5.fys.open64.4.2.3 && \
module load hdf5/1.8.6-5.el5.fys.open64.4.2.3.openmpi.1.3.3 && \
python setup.py --remove-default-flags --customize=\
doc/install/Linux/Niflheim/el5-xeon-open64-acml-4.4.0-acml-4.4.0-hdf-SL-2.0.1.py \
build_ext" | ssh thul bash""")
# Build GPAW on the 'fjorm' (opteron) frontend via ssh.
cmd("""echo "\
cd agts/gpaw&& \
source /home/camp/modulefiles.sh&& \
module load NUMPY&& \
module load open64/4.2.3-0 && \
python setup.py --remove-default-flags --customize=\
doc/install/Linux/Niflheim/el5-opteron-open64-acml-4.4.0-acml-4.4.0-hdf-SL-2.0.1.py \
build_ext" | ssh fjorm bash""")
# Download the latest PAW setups next to the fresh checkout.
cmd("""wget --no-check-certificate --quiet \
http://wiki.fysik.dtu.dk/gpaw-files/gpaw-setups-latest.tar.gz && \
tar xzf gpaw-setups-latest.tar.gz && \
rm gpaw-setups-latest.tar.gz && \
mv gpaw-setups-[0-9]* gpaw/gpaw-setups""")
cmd('svn checkout https://svn.fysik.dtu.dk/projects/ase/trunk ase')
# Collect all AGTS jobs and run them on the cluster.
queue = AGTSQueue()
queue.collect()
cluster = NiflheimCluster('~/agts/ase', '~/agts/gpaw/gpaw-setups')
#queue.jobs = [job for job in queue.jobs if job.script == 'testsuite.agts.py']
nfailed = queue.run(cluster)
# Publish files created by the jobs to the web directory.
queue.copy_created_files('/home/camp2/jensj/WWW/gpaw-files')
# Disabled post-run analysis/reporting step.
if 0:
    # Analysis:
    import matplotlib
    matplotlib.use('Agg')
    from gpaw.test.big.analysis import analyse
    user = os.environ['USER']
    # NOTE(review): `niflheim` is not defined anywhere in this script (only
    # NiflheimCluster is imported); enabling this block would raise
    # NameError at `niflheim.revision` -- confirm the intended object.
    analyse(queue,
            '../analysis/analyse.pickle', # file keeping history
            '../analysis', # Where to dump figures
            rev=niflheim.revision,
            #mailto='gpaw-developers@listserv.fysik.dtu.dk',
            mailto='jensj@fysik.dtu.dk',
            mailserver='servfys.fysik.dtu.dk',
            attachment='status.log')
|
ajylee/gpaw-rtxs
|
tools/niflheim-agts.py
|
Python
|
gpl-3.0
| 2,077
|
[
"ASE",
"GPAW"
] |
e45e8bd6ef940543c435c046953ead1f47eddab55f98ab5a3688e7075031c900
|
import numpy as np
from math import pi, log
import pylab
from scipy import fft, ifft
from scipy.optimize import curve_fit
# Module-level demo signal used by the ad-hoc tests at the bottom of the
# file: three sines of different frequencies plus Gaussian noise.
i = 10000
x = np.linspace(0, 3.5 * pi, i)
y = (0.3*np.sin(x) + np.sin(1.3 * x) + 0.9 * np.sin(4.2 * x) + 0.06 *
     np.random.randn(i))
def _datacheck_peakdetect(x_axis, y_axis):
if x_axis is None:
x_axis = range(len(y_axis))
if len(y_axis) != len(x_axis):
raise (ValueError,
'Input vectors y_axis and x_axis must have same length')
#needs to be a numpy array
y_axis = np.array(y_axis)
x_axis = np.array(x_axis)
return x_axis, y_axis
def _peakdetect_parabole_fitter(raw_peaks, x_axis, y_axis, points):
"""
Performs the actual parabole fitting for the peakdetect_parabole function.
keyword arguments:
raw_peaks -- A list of either the maximium or the minimum peaks, as given
by the peakdetect_zero_crossing function, with index used as x-axis
x_axis -- A numpy list of all the x values
y_axis -- A numpy list of all the y values
points -- How many points around the peak should be used during curve
fitting, must be odd.
return -- A list giving all the peaks and the fitted waveform, format:
[[x, y, [fitted_x, fitted_y]]]
"""
func = lambda x, k, tau, m: k * ((x - tau) ** 2) + m
fitted_peaks = []
for peak in raw_peaks:
index = peak[0]
x_data = x_axis[index - points // 2: index + points // 2 + 1]
y_data = y_axis[index - points // 2: index + points // 2 + 1]
# get a first approximation of tau (peak position in time)
tau = x_axis[index]
# get a first approximation of peak amplitude
m = peak[1]
# build list of approximations
# k = -m as first approximation?
p0 = (-m, tau, m)
popt, pcov = curve_fit(func, x_data, y_data, p0)
# retrieve tau and m i.e x and y value of peak
x, y = popt[1:3]
# create a high resolution data set for the fitted waveform
x2 = np.linspace(x_data[0], x_data[-1], points * 10)
y2 = func(x2, *popt)
fitted_peaks.append([x, y, [x2, y2]])
return fitted_peaks
def peakdetect(y_axis, x_axis = None, lookahead = 300, delta=0):
"""
Converted from/based on a MATLAB script at:
http://billauer.co.il/peakdet.html
function for detecting local maximas and minmias in a signal.
Discovers peaks by searching for values which are surrounded by lower
or larger values for maximas and minimas respectively
keyword arguments:
y_axis -- A list containg the signal over which to find peaks
x_axis -- (optional) A x-axis whose values correspond to the y_axis list
and is used in the return to specify the postion of the peaks. If
omitted an index of the y_axis is used. (default: None)
lookahead -- (optional) distance to look ahead from a peak candidate to
determine if it is the actual peak (default: 200)
'(sample / period) / f' where '4 >= f >= 1.25' might be a good value
delta -- (optional) this specifies a minimum difference between a peak and
the following points, before a peak may be considered a peak. Useful
to hinder the function from picking up false peaks towards to end of
the signal. To work well delta should be set to delta >= RMSnoise * 5.
(default: 0)
delta function causes a 20% decrease in speed, when omitted
Correctly used it can double the speed of the function
return -- two lists [max_peaks, min_peaks] containing the positive and
negative peaks respectively. Each cell of the lists contains a tupple
of: (position, peak_value)
to get the average peak value do: np.mean(max_peaks, 0)[1] on the
results to unpack one of the lists into x, y coordinates do:
x, y = zip(*tab)
"""
max_peaks = []
min_peaks = []
dump = [] #Used to pop the first hit which almost always is false
# check input data
x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis)
# store data length for later use
length = len(y_axis)
#perform some checks
if lookahead < 1:
raise ValueError, "Lookahead must be '1' or above in value"
if not (np.isscalar(delta) and delta >= 0):
raise ValueError, "delta must be a positive number"
#maxima and minima candidates are temporarily stored in
#mx and mn respectively
mn, mx = np.Inf, -np.Inf
#Only detect peak if there is 'lookahead' amount of points after it
for index, (x, y) in enumerate(zip(x_axis[:-lookahead],
y_axis[:-lookahead])):
if y > mx:
mx = y
mxpos = x
if y < mn:
mn = y
mnpos = x
####look for max####
if y < mx-delta and mx != np.Inf:
#Maxima peak candidate found
#look ahead in signal to ensure that this is a peak and not jitter
if y_axis[index:index+lookahead].max() < mx:
max_peaks.append([mxpos, mx])
dump.append(True)
#set algorithm to only find minima now
mx = np.Inf
mn = np.Inf
if index+lookahead >= length:
#end is within lookahead no more peaks can be found
break
continue
#else: #slows shit down this does
# mx = ahead
# mxpos = x_axis[np.where(y_axis[index:index+lookahead]==mx)]
####look for min####
if y > mn+delta and mn != -np.Inf:
#Minima peak candidate found
#look ahead in signal to ensure that this is a peak and not jitter
if y_axis[index:index+lookahead].min() > mn:
min_peaks.append([mnpos, mn])
dump.append(False)
#set algorithm to only find maxima now
mn = -np.Inf
mx = -np.Inf
if index+lookahead >= length:
#end is within lookahead no more peaks can be found
break
#else: #slows shit down this does
# mn = ahead
# mnpos = x_axis[np.where(y_axis[index:index+lookahead]==mn)]
#Remove the false hit on the first value of the y_axis
try:
if dump[0]:
max_peaks.pop(0)
else:
min_peaks.pop(0)
del dump
except IndexError:
#no peaks were found, should the function return empty lists?
pass
return [max_peaks, min_peaks]
def peakdetect_fft(y_axis, x_axis, pad_len = 5):
    """
    Performs a FFT calculation on the data and zero-pads the results to
    increase the time domain resolution after performing the inverse fft and
    send the data to the 'peakdetect' function for peak
    detection.

    Omitting the x_axis is forbidden as it would make the resulting x_axis
    value silly if it was returned as the index 50.234 or similar.

    Will find at least 1 less peak then the 'peakdetect_zero_crossing'
    function, but should result in a more precise value of the peak as
    resolution has been increased. Some peaks are lost in an attempt to
    minimize spectral leakage by calculating the fft between two zero
    crossings for n amount of signal periods.

    keyword arguments:
    y_axis -- A list containg the signal over which to find peaks
    x_axis -- A x-axis whose values correspond to the y_axis list and is used
        in the return to specify the postion of the peaks.
    pad_len -- (optional) By how many times the time resolution should be
        increased by, e.g. 1 doubles the resolution. The amount is rounded up
        to the nearest 2 ** n amount (default: 5)

    return -- two lists [max_peaks, min_peaks] containing the positive and
        negative peaks respectively. Each cell of the lists contains a tupple
        of: (position, peak_value)
    """
    # check input data
    x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis)
    zero_indices = zero_crossings(y_axis, window = 11)
    #select a n amount of periods
    # (picks an even number of half-periods so whole periods are used)
    last_indice = - 1 - (1 - len(zero_indices) & 1)
    # Calculate the fft between the first and last zero crossing
    # this method could be ignored if the begining and the end of the signal
    # are discardable as any errors induced from not using whole periods
    # should mainly manifest in the beginning and the end of the signal, but
    # not in the rest of the signal
    fft_data = fft(y_axis[zero_indices[0]:zero_indices[last_indice]])
    # zero-pad the spectrum in the middle (between the two frequency halves)
    padd = lambda x, c: x[:len(x) // 2] + [0] * c + x[len(x) // 2:]
    n = lambda x: int(log(x)/log(2)) + 1
    # padds to 2**n amount of samples
    fft_padded = padd(list(fft_data), 2 **
                n(len(fft_data) * pad_len) - len(fft_data))

    # There is amplitude decrease directly proportional to the sample increase
    sf = len(fft_padded) / float(len(fft_data))
    # There might be a leakage giving the result an imaginary component
    # Return only the real component
    y_axis_ifft = ifft(fft_padded).real * sf #(pad_len + 1)
    x_axis_ifft = np.linspace(
                x_axis[zero_indices[0]], x_axis[zero_indices[last_indice]],
                len(y_axis_ifft))
    # get the peaks to the interpolated waveform
    max_peaks, min_peaks = peakdetect(y_axis_ifft, x_axis_ifft, 500,
                                    delta = abs(np.diff(y_axis).max() * 2))
    #max_peaks, min_peaks = peakdetect_zero_crossing(y_axis_ifft, x_axis_ifft)

    # store one 20th of a period as waveform data
    # NOTE(review): this relies on Python 2 integer division; under
    # Python 3 `/` yields a float and the slice arithmetic below breaks.
    data_len = int(np.diff(zero_indices).mean()) / 10
    data_len += 1 - data_len & 1

    fitted_wave = []
    for peaks in [max_peaks, min_peaks]:
        peak_fit_tmp = []
        index = 0
        for peak in peaks:
            # locate the peak in the interpolated axis, then take a
            # data_len-wide window around it as the fitted waveform
            index = np.where(x_axis_ifft[index:]==peak[0])[0][0] + index
            x_fit_lim = x_axis_ifft[index - data_len // 2:
                                    index + data_len // 2 + 1]
            y_fit_lim = y_axis_ifft[index - data_len // 2:
                                    index + data_len // 2 + 1]

            peak_fit_tmp.append([x_fit_lim, y_fit_lim])
        fitted_wave.append(peak_fit_tmp)

    #pylab.plot(range(len(fft_data)), fft_data)
    #pylab.show()

    # NOTE(review): pylab.hold was removed in matplotlib >= 3.0; these
    # debug plots would fail on a modern matplotlib.
    pylab.plot(x_axis, y_axis)
    pylab.hold(True)
    pylab.plot(x_axis_ifft, y_axis_ifft)
    #for max_p in max_peaks:
    #    pylab.plot(max_p[0], max_p[1], 'xr')
    pylab.show()
    return [max_peaks, min_peaks]
def peakdetect_parabole(y_axis, x_axis, points = 9):
    """
    Function for detecting local maximas and minmias in a signal.
    Discovers peaks by fitting the model function: y = k (x - tau) ** 2 + m
    to the peaks. The amount of points used in the fitting is set by the
    points argument.

    Omitting the x_axis is forbidden as it would make the resulting x_axis
    value silly if it was returned as index 50.234 or similar.

    will find the same amount of peaks as the 'peakdetect_zero_crossing'
    function, but might result in a more precise value of the peak.

    keyword arguments:
    y_axis -- A list containg the signal over which to find peaks
    x_axis -- A x-axis whose values correspond to the y_axis list and is used
        in the return to specify the postion of the peaks.
    points -- (optional) How many points around the peak should be used during
        curve fitting, must be odd (default: 9)

    return -- two lists [max_peaks, min_peaks] containing the positive and
        negative peaks respectively. Each cell of the lists contains a list
        of: (position, peak_value)
    """
    # check input data
    x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis)
    # make the points argument odd
    points += 1 - points % 2
    #points += 1 - int(points) & 1 slower when int conversion needed

    # get raw peaks
    max_raw, min_raw = peakdetect_zero_crossing(y_axis)

    # define output variable
    max_peaks = []
    min_peaks = []

    max_ = _peakdetect_parabole_fitter(max_raw, x_axis, y_axis, points)
    min_ = _peakdetect_parabole_fitter(min_raw, x_axis, y_axis, points)

    # NOTE(review): this module targets Python 2 where map returns a list;
    # on Python 3 these would be lazy iterators and need list() wrapping.
    max_peaks = map(lambda x: [x[0], x[1]], max_)
    max_fitted = map(lambda x: x[-1], max_)
    min_peaks = map(lambda x: [x[0], x[1]], min_)
    min_fitted = map(lambda x: x[-1], min_)

    #pylab.plot(x_axis, y_axis)
    #pylab.hold(True)
    #for max_p, max_f in zip(max_peaks, max_fitted):
    #    pylab.plot(max_p[0], max_p[1], 'x')
    #    pylab.plot(max_f[0], max_f[1], 'o', markersize = 2)
    #for min_p, min_f in zip(min_peaks, min_fitted):
    #    pylab.plot(min_p[0], min_p[1], 'x')
    #    pylab.plot(min_f[0], min_f[1], 'o', markersize = 2)
    #pylab.show()

    return [max_peaks, min_peaks]
def peakdetect_sine(y_axis, x_axis, points = 9, lock_frequency = False):
    """
    Function for detecting local maximas and minmias in a signal.
    Discovers peaks by fitting the model function:
    y = A * sin(2 * pi * f * x - tau) to the peaks. The amount of points used
    in the fitting is set by the points argument.

    Omitting the x_axis is forbidden as it would make the resulting x_axis
    value silly if it was returned as index 50.234 or similar.

    will find the same amount of peaks as the 'peakdetect_zero_crossing'
    function, but might result in a more precise value of the peak.

    The function might have some problems if the sine wave has a
    non-negligible total angle i.e. a k*x component, as this messes with the
    internal offset calculation of the peaks, might be fixed by fitting a
    k * x + m function to the peaks for offset calculation.

    keyword arguments:
    y_axis -- A list containg the signal over which to find peaks
    x_axis -- A x-axis whose values correspond to the y_axis list and is used
        in the return to specify the postion of the peaks.
    points -- (optional) How many points around the peak should be used during
        curve fitting, must be odd (default: 9)
    lock_frequency -- (optional) Specifies if the frequency argument of the
        model function should be locked to the value calculated from the raw
        peaks or if optimization process may tinker with it. (default: False)

    return -- two lists [max_peaks, min_peaks] containing the positive and
        negative peaks respectively. Each cell of the lists contains a tupple
        of: (position, peak_value)
    """
    # check input data
    x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis)
    # make the points argument odd
    points += 1 - points % 2
    #points += 1 - int(points) & 1 slower when int conversion needed

    # get raw peaks
    max_raw, min_raw = peakdetect_zero_crossing(y_axis)

    # define output variable
    max_peaks = []
    min_peaks = []

    # get global offset
    offset = np.mean([np.mean(max_raw, 0)[1], np.mean(min_raw, 0)[1]])
    # fitting a k * x + m function to the peaks might be better
    #offset_func = lambda x, k, m: k * x + m

    # calculate an approximate frequenzy of the signal
    Hz = []
    for raw in [max_raw, min_raw]:
        if len(raw) > 1:
            # NOTE(review): zip(*raw)[0] relies on Python 2 where zip
            # returns a list; Python 3 would need list(zip(*raw))[0].
            peak_pos = [x_axis[index] for index in zip(*raw)[0]]
            Hz.append(np.mean(np.diff(peak_pos)))
    # the mean peak spacing (in x units) inverted gives the frequency
    Hz = 1 / np.mean(Hz)

    # model function
    # if cosine is used then tau could equal the x position of the peak
    # if sine were to be used then tau would be the first zero crossing
    if lock_frequency:
        func = lambda x, A, tau: A * np.sin(2 * pi * Hz * (x - tau) + pi / 2)
    else:
        func = lambda x, A, Hz, tau: A * np.sin(2 * pi * Hz * (x - tau) +
                                                pi / 2)
    #func = lambda x, A, Hz, tau: A * np.cos(2 * pi * Hz * (x - tau))

    #get peaks
    fitted_peaks = []
    for raw_peaks in [max_raw, min_raw]:
        peak_data = []
        for peak in raw_peaks:
            index = peak[0]
            x_data = x_axis[index - points // 2: index + points // 2 + 1]
            y_data = y_axis[index - points // 2: index + points // 2 + 1]
            # get a first approximation of tau (peak position in time)
            tau = x_axis[index]
            # get a first approximation of peak amplitude
            A = peak[1]

            # build list of approximations
            if lock_frequency:
                p0 = (A, tau)
            else:
                p0 = (A, Hz, tau)

            # subtract offset from waveshape
            y_data -= offset
            popt, pcov = curve_fit(func, x_data, y_data, p0)
            # retrieve tau and A i.e x and y value of peak
            x = popt[-1]
            y = popt[0]

            # create a high resolution data set for the fitted waveform
            x2 = np.linspace(x_data[0], x_data[-1], points * 10)
            y2 = func(x2, *popt)

            # add the offset to the results
            y += offset
            y2 += offset
            y_data += offset

            peak_data.append([x, y, [x2, y2]])

        fitted_peaks.append(peak_data)

    # structure date for output
    # NOTE(review): Python 2 map-returns-list assumption, as above.
    max_peaks = map(lambda x: [x[0], x[1]], fitted_peaks[0])
    max_fitted = map(lambda x: x[-1], fitted_peaks[0])
    min_peaks = map(lambda x: [x[0], x[1]], fitted_peaks[1])
    min_fitted = map(lambda x: x[-1], fitted_peaks[1])

    #pylab.plot(x_axis, y_axis)
    #pylab.hold(True)
    #for max_p, max_f in zip(max_peaks, max_fitted):
    #    pylab.plot(max_p[0], max_p[1], 'x')
    #    pylab.plot(max_f[0], max_f[1], 'o', markersize = 2)
    #for min_p, min_f in zip(min_peaks, min_fitted):
    #    pylab.plot(min_p[0], min_p[1], 'x')
    #    pylab.plot(min_f[0], min_f[1], 'o', markersize = 2)
    #pylab.show()

    return [max_peaks, min_peaks]
def peakdetect_sine_locked(y_axis, x_axis, points = 9):
    """
    Convenience wrapper around 'peakdetect_sine' with the model frequency
    locked to the estimate derived from the raw peaks.

    keyword arguments:
    y_axis -- A list containg the signal over which to find peaks
    x_axis -- A x-axis whose values correspond to the y_axis list and is used
        in the return to specify the postion of the peaks.
    points -- (optional) How many points around the peak should be used during
        curve fitting, must be odd (default: 9)

    return -- see 'peakdetect_sine'
    """
    return peakdetect_sine(y_axis, x_axis, points, lock_frequency=True)
def peakdetect_zero_crossing(y_axis, x_axis = None, window = 11):
    """
    Function for detecting local maximas and minmias in a signal.
    Discovers peaks by dividing the signal into bins and retrieving the
    maximum and minimum value of each the even and odd bins respectively.
    Division into bins is performed by smoothing the curve and finding the
    zero crossings.

    Suitable for repeatable signals, where some noise is tolerated. Excecutes
    faster than 'peakdetect', although this function will break if the offset
    of the signal is too large. It should also be noted that the first and
    last peak will probably not be found, as this function only can find peaks
    between the first and last zero crossing.

    keyword arguments:
    y_axis -- A list containg the signal over which to find peaks
    x_axis -- (optional) A x-axis whose values correspond to the y_axis list
        and is used in the return to specify the postion of the peaks. If
        omitted an index of the y_axis is used. (default: None)
    window -- the dimension of the smoothing window; should be an odd integer
        (default: 11)

    return -- two lists [max_peaks, min_peaks] containing the positive and
        negative peaks respectively. Each cell of the lists contains a tupple
        of: (position, peak_value)
    """
    # check input data
    x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis)

    zero_indices = zero_crossings(y_axis, window = window)
    period_lengths = np.diff(zero_indices)

    # slice the signal into half-period bins between zero crossings
    bins_y = [y_axis[index:index + diff] for index, diff in
        zip(zero_indices, period_lengths)]
    bins_x = [x_axis[index:index + diff] for index, diff in
        zip(zero_indices, period_lengths)]

    even_bins_y = bins_y[::2]
    odd_bins_y = bins_y[1::2]
    even_bins_x = bins_x[::2]
    odd_bins_x = bins_x[1::2]
    hi_peaks_x = []
    lo_peaks_x = []

    #check if even bin contains maxima
    # (NOTE: the loop variable `bin` shadows the builtin of the same name)
    if abs(even_bins_y[0].max()) > abs(even_bins_y[0].min()):
        hi_peaks = [bin.max() for bin in even_bins_y]
        lo_peaks = [bin.min() for bin in odd_bins_y]
        # get x values for peak
        for bin_x, bin_y, peak in zip(even_bins_x, even_bins_y, hi_peaks):
            hi_peaks_x.append(bin_x[np.where(bin_y==peak)[0][0]])
        for bin_x, bin_y, peak in zip(odd_bins_x, odd_bins_y, lo_peaks):
            lo_peaks_x.append(bin_x[np.where(bin_y==peak)[0][0]])
    else:
        hi_peaks = [bin.max() for bin in odd_bins_y]
        lo_peaks = [bin.min() for bin in even_bins_y]
        # get x values for peak
        for bin_x, bin_y, peak in zip(odd_bins_x, odd_bins_y, hi_peaks):
            hi_peaks_x.append(bin_x[np.where(bin_y==peak)[0][0]])
        for bin_x, bin_y, peak in zip(even_bins_x, even_bins_y, lo_peaks):
            lo_peaks_x.append(bin_x[np.where(bin_y==peak)[0][0]])

    max_peaks = [[x, y] for x,y in zip(hi_peaks_x, hi_peaks)]
    min_peaks = [[x, y] for x,y in zip(lo_peaks_x, lo_peaks)]

    return [max_peaks, min_peaks]
def _smooth(x, window_len=11, window='hanning'):
    """
    smooth the data using a window of the requested size.

    This method is based on the convolution of a scaled window on the signal.
    The signal is prepared by introducing reflected copies of the signal
    (with the window size) in both ends so that transient parts are minimized
    in the begining and end part of the output signal.

    input:
        x: the input signal
        window_len: the dimension of the smoothing window; should be an odd
            integer
        window: the type of window from 'flat', 'hanning', 'hamming',
            'bartlett', 'blackman'
            flat window will produce a moving average smoothing.

    output:
        the smoothed signal

    see also:
        numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman,
        numpy.convolve, scipy.signal.lfilter

    TODO: the window parameter could be the window itself if a list instead of
    a string
    """
    if x.ndim != 1:
        raise ValueError, "smooth only accepts 1 dimension arrays."
    if x.size < window_len:
        raise ValueError, "Input vector needs to be bigger than window size."
    if window_len<3:
        return x
    if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        # NOTE(review): raising a (ValueError, "...") tuple is a Python 2
        # pattern that appears to discard the message -- raise
        # ValueError(...) would be clearer; confirm intended behaviour.
        raise(ValueError,
            "Window is not one of '{0}', '{1}', '{2}', '{3}', '{4}'".format(
            *('flat', 'hanning', 'hamming', 'bartlett', 'blackman')))

    # mirror the signal at both ends to reduce boundary transients
    s = np.r_[x[window_len-1:0:-1], x, x[-1:-window_len:-1]]
    #print(len(s))
    if window == 'flat': #moving average
        w = np.ones(window_len,'d')
    else:
        # look up the numpy window function by name
        # NOTE(review): getattr(np, window) would avoid eval here.
        w = eval('np.' + window + '(window_len)')

    y = np.convolve(w / w.sum(), s, mode = 'valid')
    return y
def zero_crossings(y_axis, window = 11):
    """
    Algorithm to find zero crossings. Smoothens the curve and finds the
    zero-crossings by looking for a sign change.

    keyword arguments:
    y_axis -- A list containg the signal over which to find zero-crossings
    window -- the dimension of the smoothing window; should be an odd integer
        (default: 11)

    return -- the index for each zero-crossing
    """
    # smooth the curve
    length = len(y_axis)
    x_axis = np.asarray(range(length), int)

    # discard tail of smoothed signal
    y_axis = _smooth(y_axis, window)[:length]
    zero_crossings = np.where(np.diff(np.sign(y_axis)))[0]
    indices = [x_axis[index] for index in zero_crossings]

    # check if zero-crossings are valid
    # (a large spread in crossing spacing indicates a bad smoothing window
    # or a DC offset in the signal)
    diff = np.diff(indices)
    if diff.std() / diff.mean() > 0.2:
        print diff.std() / diff.mean()
        print np.diff(indices)
        # NOTE(review): Python 2 tuple-raise pattern; the message is not
        # attached to the raised exception.
        raise(ValueError,
            "False zero-crossings found, indicates problem {0} or {1}".format(
            "with smoothing window", "problem with offset"))
    # check if any zero crossings were found
    if len(zero_crossings) < 1:
        raise(ValueError, "No zero crossings found")

    return indices
# used this to test the fft function's sensitivity to spectral leakage
#return indices + np.asarray(30 * np.random.randn(len(indices)), int)
############################Frequency calculation#############################
# diff = np.diff(indices)
# time_p_period = diff.mean()
#
# if diff.std() / time_p_period > 0.1:
# raise ValueError,
# "smoothing window too small, false zero-crossing found"
#
# #return frequency
# return 1.0 / time_p_period
##############################################################################
def _test_zero():
    # Smoke test: run the zero-crossing detector on the module-level signal.
    _max, _min = peakdetect_zero_crossing(y,x)
def _test():
    # Smoke test: run the basic detector on the module-level signal.
    _max, _min = peakdetect(y,x, delta=0.30)
def _test_graph():
    # Visual comparison of this module's detector against an alternative one.
    i = 10000
    x = np.linspace(0,3.7*pi,i)
    y = (0.3*np.sin(x) + np.sin(1.3 * x) + 0.9 * np.sin(4.2 * x) + 0.06 *
        np.random.randn(i))
    y *= -1
    x = range(i)

    _max, _min = peakdetect(y,x,750, 0.30)
    xm = [p[0] for p in _max]
    ym = [p[1] for p in _max]
    xn = [p[0] for p in _min]
    yn = [p[1] for p in _min]

    plot = pylab.plot(x,y)
    # NOTE(review): pylab.hold was removed in matplotlib >= 3.0.
    pylab.hold(True)
    pylab.plot(xm, ym, 'r+')
    pylab.plot(xn, yn, 'g+')

    # NOTE(review): `peak_det_bad` is never imported in this module, so
    # calling this function raises NameError here -- confirm the intended
    # comparison module.
    _max, _min = peak_det_bad.peakdetect(y, 0.7, x)
    xm = [p[0] for p in _max]
    ym = [p[1] for p in _max]
    xn = [p[0] for p in _min]
    yn = [p[1] for p in _min]
    pylab.plot(xm, ym, 'y*')
    pylab.plot(xn, yn, 'k*')
    pylab.show()
if __name__ == "__main__":
    from math import pi
    import pylab

    # Demo: three sines of different frequencies plus noise, inverted.
    i = 10000
    x = np.linspace(0,3.7*pi,i)
    y = (0.3*np.sin(x) + np.sin(1.3 * x) + 0.9 * np.sin(4.2 * x) + 0.06 *
        np.random.randn(i))
    y *= -1

    # Detect and mark maxima (red) and minima (green).
    _max, _min = peakdetect(y, x, 750, 0.30)
    xm = [p[0] for p in _max]
    ym = [p[1] for p in _max]
    xn = [p[0] for p in _min]
    yn = [p[1] for p in _min]

    plot = pylab.plot(x, y)
    # NOTE(review): pylab.hold was removed in matplotlib >= 3.0.
    pylab.hold(True)
    pylab.plot(xm, ym, 'r+')
    pylab.plot(xn, yn, 'g+')

    pylab.show()
|
bmazin/SDR
|
Setup/WideSweep/peakdetect.py
|
Python
|
gpl-2.0
| 28,254
|
[
"TINKER"
] |
f2c9432ae72c463d4e54cb092d43be1c1679784b6ac3c88cfb52990271cc5126
|
'''
Set up a cyclic peptide nanotube and visualise the structure with matplotlib.
@author: Mark Oakley
'''
import numpy as np
import os
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from tubemaker.nanotube import read_amber_coords, orient_coords, build_tube
if __name__ == "__main__":
    # Locate Amber library
    # NOTE(review): if AMBERHOME is unset, os.environ.get returns None and
    # the concatenation below raises TypeError -- consider a clearer error.
    amber_home=os.environ.get('AMBERHOME')
    lib = amber_home+"/dat/leap/lib/all_amino03.lib"

    # Build nanotube from alanine residue coordinates read from the
    # Amber library.
    res_coords = read_amber_coords("ALA", lib)
    res_coords = orient_coords(res_coords)
    coords = build_tube(2, 8, res_coords)

    # Visualise structure as a 3-D scatter plot with fixed axis limits.
    fig = plt.figure(figsize=(5,5))
    ax = fig.add_subplot(111, projection='3d')
    ax.clear()
    xs = coords[:,0]
    ys = coords[:,1]
    zs = coords[:,2]
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    ax.set_xlim3d(-10,10)
    ax.set_ylim3d(-10,10)
    ax.set_zlim3d(-10,10)
    ax.scatter(xs,ys,zs,s=10)
    plt.show()
|
marktoakley/PeptideNanoTubes
|
example/Tubemaker/plot.py
|
Python
|
gpl-3.0
| 974
|
[
"Amber"
] |
318676abbba8f0aed64773b641a7949341f75ec942f0741b070899ca97140663
|
# This file is part of Jeedom.
#
# Jeedom is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Jeedom is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Jeedom. If not, see <http://www.gnu.org/licenses/>.
import logging
import string
import sys
import os
import time
import datetime
import traceback
import re
import signal
from optparse import OptionParser
from os.path import join
import json
import argparse
try:
from jeedom.jeedom import *
except ImportError:
print("Error: importing module jeedom.jeedom")
sys.exit(1)
def read_socket():
    """Drain one pending message from the Jeedom socket queue, if any.

    Messages must be JSON and carry the API key the daemon was started with.
    """
    global JEEDOM_SOCKET_MESSAGE
    if not JEEDOM_SOCKET_MESSAGE.empty():
        logging.debug("Message received in socket JEEDOM_SOCKET_MESSAGE")
        message = json.loads(jeedom_utils.stripped(JEEDOM_SOCKET_MESSAGE.get()))
        # Reject messages that do not carry our API key.
        if message['apikey'] != _apikey:
            logging.error("Invalid apikey from socket : " + str(message))
            return
        try:
            # Placeholder: actual command handling is not implemented yet.
            print ('read')
        except Exception, e:
            logging.error('Send command to demon error : '+str(e))
def listen():
    """Open the Jeedom socket and poll it until interrupted."""
    jeedom_socket.open()
    try:
        # Poll twice a second; Ctrl-C / SIGINT triggers a clean shutdown.
        while True:
            time.sleep(0.5)
            read_socket()
    except KeyboardInterrupt:
        shutdown()
# ----------------------------------------------------------------------------
def handler(signum=None, frame=None):
    """Handle a termination signal by shutting the daemon down."""
    msg = "Signal %i caught, exiting..." % int(signum)
    logging.debug(msg)
    shutdown()
def shutdown():
    """Best-effort cleanup (PID file, socket, serial port), then exit.

    Every cleanup step is allowed to fail independently; the daemon
    terminates with status 0 regardless.
    """
    logging.debug("Shutdown")
    logging.debug("Removing PID file " + str(_pidfile))
    try:
        os.remove(_pidfile)
    except OSError:  # PID file already gone / not removable -- best effort
        pass
    try:
        jeedom_socket.close()
    except Exception:  # narrowed from bare except: keep SystemExit/KeyboardInterrupt
        pass
    try:
        jeedom_serial.close()
    except Exception:  # also covers jeedom_serial never having been created
        pass
    logging.debug("Exit 0")
    sys.stdout.flush()
    # os._exit skips interpreter cleanup; acceptable since we cleaned up above.
    os._exit(0)
# ----------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Daemon configuration defaults; each may be overridden on the command line.
_log_level = "error"
_socket_port = 55009
_socket_host = 'localhost'
_device = 'auto'
_pidfile = '/tmp/demond.pid'
_apikey = ''
_callback = ''
_cycle = 0.3
parser = argparse.ArgumentParser(
    description='Desmond Daemon for Jeedom plugin')
parser.add_argument("--device", help="Device", type=str)
parser.add_argument("--loglevel", help="Log Level for the daemon", type=str)
parser.add_argument("--callback", help="Callback", type=str)
parser.add_argument("--apikey", help="Apikey", type=str)
parser.add_argument("--cycle", help="Cycle to send event", type=str)
parser.add_argument("--pid", help="Pid file", type=str)
parser.add_argument("--socketport", help="Port for Zigbee server", type=str)
args = parser.parse_args()
if args.device:
    _device = args.device
if args.loglevel:
    _log_level = args.loglevel
if args.callback:
    _callback = args.callback
if args.apikey:
    _apikey = args.apikey
if args.pid:
    _pidfile = args.pid
if args.cycle:
    _cycle = float(args.cycle)
if args.socketport:
    # BUGFIX: previously assigned to a dead variable "_socketport", so the
    # --socketport option was silently ignored and the default port kept.
    _socket_port = args.socketport
_socket_port = int(_socket_port)
jeedom_utils.set_log_level(_log_level)
logging.info('Start demond')
logging.info('Log level : '+str(_log_level))
logging.info('Socket port : '+str(_socket_port))
logging.info('Socket host : '+str(_socket_host))
logging.info('PID file : '+str(_pidfile))
logging.info('Apikey : '+str(_apikey))
logging.info('Device : '+str(_device))
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGTERM, handler)
try:
    jeedom_utils.write_pid(str(_pidfile))
    # NOTE: rebinds the jeedom_socket *class* name to the instance; works
    # because the daemon only ever creates this one socket.
    jeedom_socket = jeedom_socket(port=_socket_port, address=_socket_host)
    listen()
except Exception as e:
    logging.error('Fatal error : '+str(e))
    logging.info(traceback.format_exc())
    shutdown()
|
jeedom/plugin-template
|
resources/demond/demond.py
|
Python
|
gpl-2.0
| 3,838
|
[
"Desmond"
] |
45e98175697e5271fb595a07c869bddfbfb0b24526481a6e2d84285aabb363a7
|
import numpy as np
import netCDF4 as nc
from .ncutil import nc_copy
def reduce_kpoints(subset, fname_in, fname_out):
    """
    Reduce the number of kpoints by selecting a subset of indices.
    Read the file fname_in and write the reduced data in fname_out.
    The suffix of fname_in selects the reader; recognized extensions are
    EIG.nc, EIGR2D.nc, EIGI2D.nc, GKK.nc and FAN.nc.
    """
    # Pick the handler matching the file suffix, then dispatch once.
    if fname_in.endswith('EIG.nc'):
        handler = reduce_kpoints_eig
    elif fname_in.endswith(('EIGR2D.nc', 'EIGI2D.nc')):
        handler = reduce_kpoints_eigr2d
    elif fname_in.endswith('GKK.nc'):
        handler = reduce_kpoints_gkk
    elif fname_in.endswith('FAN.nc'):
        handler = reduce_kpoints_fan
    else:
        raise Exception('Unrecognized file type for file {}'.format(fname_in))
    return handler(subset, fname_in, fname_out)
def reduce_kpoints_eig(subset, fname_in, fname_out):
    """
    Operates on a EIG.nc file.
    Reduce the number of kpoints by selecting a subset of indices.
    Read the file fname_in and write the reduced data in fname_out.
    """
    # Variables of an EIG.nc file that carry the 'nkpt' dimension.
    kpt_variables = ('Eigenvalues', 'Kptns', 'NBandK')
    reduce_dim(subset, 'nkpt', kpt_variables, fname_in, fname_out)
def reduce_kpoints_eigr2d(subset, fname_in, fname_out):
    """
    Operates on a EIGR2D.nc or EIGI2D file.
    Reduce the number of kpoints by selecting a subset of indices.
    Read the file fname_in and write the reduced data in fname_out.
    """
    # Variables carrying the 'number_of_kpoints' dimension in these files.
    kpt_variables = (
        'reduced_coordinates_of_kpoints',
        'kpoint_weights',
        'number_of_states',
        'eigenvalues',
        'occupations',
        'istwfk',
        'second_derivative_eigenenergies',
    )
    reduce_dim(subset, 'number_of_kpoints', kpt_variables,
               fname_in, fname_out)
def reduce_kpoints_gkk(subset, fname_in, fname_out):
    """
    Operates on a GKK.nc file.
    Reduce the number of kpoints by selecting a subset of indices.
    Read the file fname_in and write the reduced data in fname_out.
    """
    # Variables carrying the 'number_of_kpoints' dimension in a GKK.nc file.
    kpt_variables = (
        'reduced_coordinates_of_kpoints',
        'kpoint_weights',
        'number_of_states',
        'eigenvalues',
        'occupations',
        'istwfk',
        'second_derivative_eigenenergies_actif',
    )
    reduce_dim(subset, 'number_of_kpoints', kpt_variables,
               fname_in, fname_out)
def reduce_kpoints_fan(subset, fname_in, fname_out):
    """
    Operates on a FAN.nc file.
    Reduce the number of kpoints by selecting a subset of indices.
    Read the file fname_in and write the reduced data in fname_out.
    """
    # Variables carrying the 'number_of_kpoints' dimension in a FAN.nc file
    # (same layout as GKK.nc).
    kpt_variables = (
        'reduced_coordinates_of_kpoints',
        'kpoint_weights',
        'number_of_states',
        'eigenvalues',
        'occupations',
        'istwfk',
        'second_derivative_eigenenergies_actif',
    )
    reduce_dim(subset, 'number_of_kpoints', kpt_variables,
               fname_in, fname_out)
def reduce_dim(subset, dname, varnames, fname_in, fname_out):
    """
    Copy a netcdf file from fname_in into fname_out, keeping only the
    'subset' indices along the dimension 'dname' for the variables
    listed in 'varnames'.
    """
    with nc.Dataset(fname_in, 'r') as src:
        with nc.Dataset(fname_out, 'w') as dest:
            # Copy everything except the reduced dimension and the
            # variables that depend on it; those are rebuilt below.
            nc_copy(src, dest,
                    except_dimensions=[dname],
                    except_variables=varnames)
            dest.createDimension(dname, len(subset))
            for varname in varnames:
                var = src.variables[varname]
                axis = var.dimensions.index(dname)
                out = dest.createVariable(varname, var.datatype,
                                          var.dimensions)
                # Select the requested k-point indices along 'dname'.
                out[...] = np.take(var[...], subset, axis)
|
jmbeuken/abinit
|
scripts/post_processing/ElectronPhononCoupling/ElectronPhononCoupling/util/reduce_kpoints.py
|
Python
|
gpl-3.0
| 4,306
|
[
"NetCDF"
] |
b97046b50408735ab10178cc748841327dc3bcf90cd51187d9b7826cc13b4670
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
PDB Topology Parser
=========================================================================
This topology parser uses a standard PDB file to build a minimum
internal structure representation (list of atoms).
The topology reader reads a PDB file line by line and ignores atom
numbers but only reads residue numbers up to 9,999 correctly. If you
have systems containing at least 10,000 residues then you need to use
a different file format (e.g. the "extended" PDB, *XPDB* format, see
:mod:`~MDAnalysis.topology.ExtendedPDBParser`) that can handle residue
numbers up to 99,999.
.. Note::
The parser processes atoms and their names. Masses are guessed and set to 0
if unknown. Partial charges are not set. Elements are parsed if they are
valid. If partially missing or incorrect, empty records are assigned.
See Also
--------
* :mod:`MDAnalysis.topology.ExtendedPDBParser`
* :class:`MDAnalysis.coordinates.PDB.PDBReader`
* :class:`MDAnalysis.core.universe.Universe`
Classes
-------
.. autoclass:: PDBParser
:members:
:inherited-members:
"""
import numpy as np
import warnings
from .guessers import guess_masses, guess_types
from .tables import SYMB2Z
from ..lib import util
from .base import TopologyReaderBase, change_squash
from ..core.topology import Topology
from ..core.topologyattrs import (
Atomnames,
Atomids,
AltLocs,
Bonds,
ChainIDs,
Atomtypes,
Elements,
ICodes,
Masses,
Occupancies,
RecordTypes,
Resids,
Resnames,
Resnums,
Segids,
Tempfactors,
)
def float_or_default(val, default):
    """Return ``float(val)``, or *default* when *val* is not a valid float."""
    try:
        result = float(val)
    except ValueError:
        result = default
    return result
# Digit alphabets for the hybrid-36 encoding used by wide PDB files.
DIGITS_UPPER = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
DIGITS_LOWER = DIGITS_UPPER.lower()
# Map each digit character to its base-36 value (0..35).
DIGITS_UPPER_VALUES = {digit: value for value, digit in enumerate(DIGITS_UPPER)}
DIGITS_LOWER_VALUES = {digit: value for value, digit in enumerate(DIGITS_LOWER)}
def decode_pure(digits_values, s):
    """Decode string *s* using the per-character digit/value mapping.

    Parameters
    ----------
    digits_values: dict
        Mapping from digit character to its numeric value in the base.
    s: str
        The contents of the pdb index columns.

    Returns
    -------
    The base-10 integer corresponding to *s* interpreted in the base
    implied by ``len(digits_values)``.
    """
    base = len(digits_values)
    total = 0
    # Standard positional decoding: shift the accumulator by one digit,
    # then add the value of the next character.
    for char in s:
        total = total * base + digits_values[char]
    return total
def hy36decode(width, s):
    """Decode a base-10 / upper-case base-36 / lower-case base-36 hybrid.

    Parameters
    ----------
    width: int
        The number of columns the pdb file uses to store the atom index.
    s: str
        The contents of the pdb index columns.

    Returns
    -------
    int
        Base-10 integer corresponding to the hybrid36 string.

    Raises
    ------
    ValueError
        If *s* is not exactly *width* characters or is not a valid
        hybrid36 literal.
    """
    if len(s) == width:
        first = s[0]
        if first == "-" or first == " " or first.isdigit():
            # Plain (possibly padded or negative) decimal number.
            return int(s)
        if first in DIGITS_UPPER_VALUES:
            # Upper-case base-36 block starts right after the decimal range.
            return (decode_pure(digits_values=DIGITS_UPPER_VALUES, s=s) -
                    10 * 36 ** (width - 1) + 10 ** width)
        if first in DIGITS_LOWER_VALUES:
            # Lower-case base-36 block follows the upper-case block.
            return (decode_pure(digits_values=DIGITS_LOWER_VALUES, s=s) +
                    16 * 36 ** (width - 1) + 10 ** width)
    raise ValueError("invalid number literal.")
class PDBParser(TopologyReaderBase):
    """Parser that obtains a list of atoms from a standard PDB file.
    Creates the following Attributes:
     - names
     - chainids
     - tempfactors
     - occupancies
     - record_types (ATOM/HETATM)
     - resids
     - resnames
     - segids
     - elements
     - bonds
    Guesses the following Attributes:
     - masses
    See Also
    --------
    :class:`MDAnalysis.coordinates.PDB.PDBReader`
    .. versionadded:: 0.8
    .. versionchanged:: 0.18.0
       Added parsing of Record types
    .. versionchanged:: 1.0.0
       Added parsing of valid Elements
    .. versionchanged:: 2.0.0
       Bonds attribute is not added if no bonds are present in PDB file.
       If elements are invalid or partially missing, empty elements records
       are now assigned (Issue #2422).
       Aliased ``bfactors`` topologyattribute to ``tempfactors``.
       ``bfactors`` is deprecated and will be removed in 3.0 (Issue #1901)
    """
    format = ['PDB', 'ENT']
    def parse(self, **kwargs):
        """Parse atom information from PDB file

        Returns
        -------
        MDAnalysis Topology object
        """
        top = self._parseatoms()
        # Bond parsing needs valid serials; _parsebonds raises AttributeError
        # when serials wrapped and the mapping would be ambiguous.
        try:
            bonds = self._parsebonds(top.ids.values)
        except AttributeError:
            warnings.warn("Invalid atom serials were present, "
                          "bonds will not be parsed")
        else:
            # Issue 2832: don't append Bonds if there are no bonds
            if bonds:
                top.add_TopologyAttr(bonds)
        return top
    def _parseatoms(self):
        """Create the initial Topology object

        Reads every ATOM/HETATM record by fixed column positions
        (PDB format), collects per-atom fields, then squashes them into
        residue- and segment-level arrays.
        """
        resid_prev = 0  # resid looping hack
        record_types = []
        serials = []
        names = []
        altlocs = []
        chainids = []
        icodes = []
        tempfactors = []
        occupancies = []
        resids = []
        resnames = []
        segids = []
        elements = []
        self._wrapped_serials = False  # did serials go over 100k?
        last_wrapped_serial = 100000  # if serials wrap, start from here
        with util.openany(self.filename) as f:
            for line in f:
                line = line.strip()  # Remove extra spaces
                if not line:  # Skip line if empty
                    continue
                if line.startswith('END'):
                    break
                if not line.startswith(('ATOM', 'HETATM')):
                    continue
                record_types.append(line[:6].strip())
                try:
                    serial = int(line[6:11])
                except:  # NOTE(review): bare except; presumably ValueError is meant
                    try:
                        # Fall back to hybrid-36 encoded serials (wide PDBs).
                        serial = hy36decode(5, line[6:11])
                    except ValueError:
                        # serial can become '***' when they get too high
                        self._wrapped_serials = True
                        serial = last_wrapped_serial
                        last_wrapped_serial += 1
                    finally:
                        serials.append(serial)
                names.append(line[12:16].strip())
                altlocs.append(line[16:17].strip())
                resnames.append(line[17:21].strip())
                chainids.append(line[21:22].strip())
                elements.append(line[76:78].strip())
                # Resids are optional
                try:
                    if self.format == "XPDB":  # fugly but keeps code DRY
                        # extended non-standard format used by VMD
                        resid = int(line[22:27])
                    else:
                        resid = int(line[22:26])
                    # Wrapping
                    while resid - resid_prev < -5000:
                        resid += 10000
                    resid_prev = resid
                except ValueError:
                    warnings.warn("PDB file is missing resid information. "
                                  "Defaulted to '1'")
                    resid = 1
                finally:
                    resids.append(resid)
                icodes.append(line[26:27].strip())
                occupancies.append(float_or_default(line[54:60], 0.0))
                tempfactors.append(float_or_default(line[60:66], 1.0))  # AKA bfactor
                segids.append(line[66:76].strip())
        # Warn about wrapped serials
        if self._wrapped_serials:
            warnings.warn("Serial numbers went over 100,000. "
                          "Higher serials have been guessed")
        # If segids not present, try to use chainids
        if not any(segids):
            segids = chainids
        n_atoms = len(serials)
        attrs = []
        # Make Atom TopologyAttrs
        for vals, Attr, dtype in (
                (names, Atomnames, object),
                (altlocs, AltLocs, object),
                (chainids, ChainIDs, object),
                (record_types, RecordTypes, object),
                (serials, Atomids, np.int32),
                (tempfactors, Tempfactors, np.float32),
                (occupancies, Occupancies, np.float32),
        ):
            attrs.append(Attr(np.array(vals, dtype=dtype)))
        # Guessed attributes
        # masses from types if they exist
        # OPT: We do this check twice, maybe could refactor to avoid this
        if not any(elements):
            atomtypes = guess_types(names)
            attrs.append(Atomtypes(atomtypes, guessed=True))
            warnings.warn("Element information is missing, elements attribute "
                          "will not be populated. If needed these can be "
                          "guessed using MDAnalysis.topology.guessers.")
        else:
            # Feed atomtypes as raw element column, but validate elements
            atomtypes = elements
            attrs.append(Atomtypes(np.array(elements, dtype=object)))
            validated_elements = []
            for elem in elements:
                # Only symbols found in the periodic-table mapping survive;
                # anything else becomes an empty record (Issue #2422).
                if elem.capitalize() in SYMB2Z:
                    validated_elements.append(elem.capitalize())
                else:
                    wmsg = (f"Unknown element {elem} found for some atoms. "
                            f"These have been given an empty element record. "
                            f"If needed they can be guessed using "
                            f"MDAnalysis.topology.guessers.")
                    warnings.warn(wmsg)
                    validated_elements.append('')
            attrs.append(Elements(np.array(validated_elements, dtype=object)))
        masses = guess_masses(atomtypes)
        attrs.append(Masses(masses, guessed=True))
        # Residue level stuff from here
        resids = np.array(resids, dtype=np.int32)
        resnames = np.array(resnames, dtype=object)
        if self.format == 'XPDB':  # XPDB doesn't have icodes
            icodes = [''] * n_atoms
        icodes = np.array(icodes, dtype=object)
        resnums = resids.copy()
        segids = np.array(segids, dtype=object)
        # Squash per-atom arrays into per-residue arrays; note the change
        # keys deliberately omit resnums (resnums follow resids here).
        residx, (resids, resnames, icodes, resnums, segids) = change_squash(
            (resids, resnames, icodes, segids), (resids, resnames, icodes, resnums, segids))
        n_residues = len(resids)
        attrs.append(Resnums(resnums))
        attrs.append(Resids(resids))
        # NOTE(review): Resnums is appended twice (also two lines above);
        # one of the two appends is presumably redundant -- confirm against
        # upstream before removing either.
        attrs.append(Resnums(resids.copy()))
        attrs.append(ICodes(icodes))
        attrs.append(Resnames(resnames))
        if any(segids) and not any(val is None for val in segids):
            segidx, (segids,) = change_squash((segids,), (segids,))
            n_segments = len(segids)
            attrs.append(Segids(segids))
        else:
            # No usable segids: put every residue in a single 'SYSTEM' segment.
            n_segments = 1
            attrs.append(Segids(np.array(['SYSTEM'], dtype=object)))
            segidx = None
        top = Topology(n_atoms, n_residues, n_segments,
                       attrs=attrs,
                       atom_resindex=residx,
                       residue_segindex=segidx)
        return top
    def _parsebonds(self, serials):
        """Build a Bonds attribute from CONECT records.

        Parameters
        ----------
        serials : array-like
            Atom serial numbers, in file order, used to map serials back
            to 0-based atom indices.
        """
        # Could optimise this by saving lines in the main loop
        # then doing post processing after all Atoms have been read
        # ie do one pass through the file only
        # Problem is that in multiframe PDB, the CONECT is at end of file,
        # so the "break" call happens before bonds are reached.
        # If the serials wrapped, this won't work
        if self._wrapped_serials:
            warnings.warn("Invalid atom serials were present, bonds will not"
                          " be parsed")
            raise AttributeError  # gets caught in parse
        # Mapping between the atom array indicies a.index and atom ids
        # (serial) in the original PDB file
        mapping = dict((s, i) for i, s in enumerate(serials))
        bonds = set()
        with util.openany(self.filename) as f:
            lines = (line for line in f if line[:6] == "CONECT")
            for line in lines:
                atom, atoms = _parse_conect(line.strip())
                for a in atoms:
                    try:
                        bond = tuple([mapping[atom], mapping[a]])
                    except KeyError:
                        # Bonds to TER records have no mapping
                        # Ignore these as they are not real atoms
                        warnings.warn(
                            "PDB file contained CONECT record to TER entry. "
                            "These are not included in bonds.")
                    else:
                        bonds.add(bond)
        bonds = tuple(bonds)
        return Bonds(bonds)
def _parse_conect(conect):
"""parse a CONECT record from pdbs
Parameters
----------
conect : str
white space striped CONECT record
Returns
-------
atom_id : int
atom index of bond
bonds : set
atom ids of bonded atoms
Raises
------
RuntimeError
Raised if ``conect`` is not a valid CONECT record
"""
atom_id = int(conect[6:11])
n_bond_atoms = len(conect[11:]) // 5
try:
if len(conect[11:]) % n_bond_atoms != 0:
raise RuntimeError("Bond atoms aren't aligned proberly for CONECT "
"record: {}".format(conect))
except ZeroDivisionError:
# Conect record with only one entry (CONECT A\n)
warnings.warn("Found CONECT record with single entry, ignoring this")
return atom_id, [] # return empty list to allow iteration over nothing
bond_atoms = (int(conect[11 + i * 5: 16 + i * 5]) for i in
range(n_bond_atoms))
return atom_id, bond_atoms
|
MDAnalysis/mdanalysis
|
package/MDAnalysis/topology/PDBParser.py
|
Python
|
gpl-2.0
| 15,051
|
[
"MDAnalysis",
"VMD"
] |
2065290cf9a2c456017ef89cdd8a1f6d4ec3f8d4c63f467b1810aebe8c673799
|
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
from copy import deepcopy
import re
import numpy as np
from scipy import linalg
from .cov import read_cov, _get_whitener_data
from .io.constants import FIFF
from .io.pick import pick_types, channel_type
from .io.proj import make_projector, _needs_eeg_average_ref_proj
from .bem import _fit_sphere
from .evoked import _read_evoked, _aspect_rev, _write_evokeds
from .transforms import (_print_coord_trans, _coord_frame_name,
apply_trans, invert_transform, Transform)
from .viz.evoked import _plot_evoked
from .forward._make_forward import (_get_trans, _setup_bem,
_prep_meg_channels, _prep_eeg_channels)
from .forward._compute_forward import (_compute_forwards_meeg,
_prep_field_computation)
from .externals.six import string_types
from .surface import (transform_surface_to, _normalize_vectors,
_get_ico_surface, _compute_nearest)
from .bem import _bem_find_surface, _bem_explain_surface
from .source_space import (_make_volume_source_space, SourceSpaces,
_points_outside_surface)
from .parallel import parallel_func
from .fixes import partial
from .utils import logger, verbose, _time_mask, warn, _check_fname, check_fname
class Dipole(object):
    """Dipole class for sequential dipole fits

    .. note:: This class should usually not be instantiated directly,
              instead :func:`mne.read_dipole` should be used.

    Used to store positions, orientations, amplitudes, times, goodness of fit
    of dipoles, typically obtained with Neuromag/xfit, mne_dipole_fit
    or certain inverse solvers. Note that dipole position vectors are given in
    the head coordinate frame.

    Parameters
    ----------
    times : array, shape (n_dipoles,)
        The time instants at which each dipole was fitted (sec).
    pos : array, shape (n_dipoles, 3)
        The dipoles positions (m) in head coordinates.
    amplitude : array, shape (n_dipoles,)
        The amplitude of the dipoles (nAm).
    ori : array, shape (n_dipoles, 3)
        The dipole orientations (normalized to unit length).
    gof : array, shape (n_dipoles,)
        The goodness of fit.
    name : str | None
        Name of the dipole.

    See Also
    --------
    read_dipole
    DipoleFixed

    Notes
    -----
    This class is for sequential dipole fits, where the position
    changes as a function of time. For fixed dipole fits, where the
    position is fixed as a function of time, use :class:`mne.DipoleFixed`.
    """
    def __init__(self, times, pos, amplitude, ori, gof, name=None):
        # Coerce all fields to ndarrays so slicing/cropping is uniform.
        self.times = np.array(times)
        self.pos = np.array(pos)
        self.amplitude = np.array(amplitude)
        self.ori = np.array(ori)
        self.gof = np.array(gof)
        self.name = name
    def __repr__(self):
        s = "n_times : %s" % len(self.times)
        s += ", tmin : %s" % np.min(self.times)
        s += ", tmax : %s" % np.max(self.times)
        return "<Dipole | %s>" % s
    def save(self, fname):
        """Save dipole in a .dip file

        Parameters
        ----------
        fname : str
            The name of the .dip file.
        """
        fmt = "  %7.1f %7.1f %8.2f %8.2f %8.2f %8.3f %8.3f %8.3f %8.3f %6.1f"
        # NB CoordinateSystem is hard-coded as Head here
        with open(fname, 'wb') as fid:
            fid.write('# CoordinateSystem "Head"\n'.encode('utf-8'))
            fid.write('#   begin     end   X (mm)   Y (mm)   Z (mm)'
                      '   Q(nAm)  Qx(nAm)  Qy(nAm)  Qz(nAm)    g/%\n'
                      .encode('utf-8'))
            # Times are written twice (begin/end columns), converted to ms;
            # positions to mm and amplitudes to nAm to match the format.
            t = self.times[:, np.newaxis] * 1000.
            gof = self.gof[:, np.newaxis]
            amp = 1e9 * self.amplitude[:, np.newaxis]
            out = np.concatenate((t, t, self.pos / 1e-3, amp,
                                  self.ori * amp, gof), axis=-1)
            np.savetxt(fid, out, fmt=fmt)
            if self.name is not None:
                fid.write(('## Name "%s dipoles" Style "Dipoles"'
                           % self.name).encode('utf-8'))
    def crop(self, tmin=None, tmax=None):
        """Crop data to a given time interval

        Parameters
        ----------
        tmin : float | None
            Start time of selection in seconds.
        tmax : float | None
            End time of selection in seconds.
        """
        # Estimate the sampling rate from the median time step (needs at
        # least two time points); used for tolerance in the time mask.
        sfreq = None
        if len(self.times) > 1:
            sfreq = 1. / np.median(np.diff(self.times))
        mask = _time_mask(self.times, tmin, tmax, sfreq=sfreq)
        for attr in ('times', 'pos', 'gof', 'amplitude', 'ori'):
            setattr(self, attr, getattr(self, attr)[mask])
    def copy(self):
        """Copy the Dipoles object

        Returns
        -------
        dip : instance of Dipole
            The copied dipole instance.
        """
        return deepcopy(self)
    @verbose
    def plot_locations(self, trans, subject, subjects_dir=None,
                       bgcolor=(1, 1, 1), opacity=0.3,
                       brain_color=(1, 1, 0), fig_name=None,
                       fig_size=(600, 600), mode='cone',
                       scale_factor=0.1e-1, colors=None, verbose=None):
        """Plot dipole locations as arrows

        Parameters
        ----------
        trans : dict
            The mri to head trans.
        subject : str
            The subject name corresponding to FreeSurfer environment
            variable SUBJECT.
        subjects_dir : None | str
            The path to the freesurfer subjects reconstructions.
            It corresponds to Freesurfer environment variable SUBJECTS_DIR.
            The default is None.
        bgcolor : tuple of length 3
            Background color in 3D.
        opacity : float in [0, 1]
            Opacity of brain mesh.
        brain_color : tuple of length 3
            Brain color.
        fig_name : tuple of length 2
            Mayavi figure name.
        fig_size : tuple of length 2
            Mayavi figure size.
        mode : str
            Should be ``'cone'`` or ``'sphere'`` to specify how the
            dipoles should be shown.
        scale_factor : float
            The scaling applied to amplitudes for the plot.
        colors: list of colors | None
            Color to plot with each dipole. If None defaults colors are used.
        verbose : bool, str, int, or None
            If not None, override default verbose level (see mne.verbose).

        Returns
        -------
        fig : instance of mlab.Figure
            The mayavi figure.
        """
        from .viz import plot_dipole_locations
        # Split into one single-time Dipole per time point, as expected by
        # the plotting routine.
        dipoles = []
        for t in self.times:
            dipoles.append(self.copy())
            dipoles[-1].crop(t, t)
        return plot_dipole_locations(
            dipoles, trans, subject, subjects_dir, bgcolor, opacity,
            brain_color, fig_name, fig_size, mode, scale_factor,
            colors)
    def plot_amplitudes(self, color='k', show=True):
        """Plot the dipole amplitudes as a function of time

        Parameters
        ----------
        color: matplotlib Color
            Color to use for the trace.
        show : bool
            Show figure if True.

        Returns
        -------
        fig : matplotlib.figure.Figure
            The figure object containing the plot.
        """
        from .viz import plot_dipole_amplitudes
        return plot_dipole_amplitudes([self], [color], show)
    def __getitem__(self, item):
        """Get a time slice

        Parameters
        ----------
        item : array-like or slice
            The slice of time points to use.

        Returns
        -------
        dip : instance of Dipole
            The sliced dipole.
        """
        if isinstance(item, int):  # make sure attributes stay 2d
            item = [item]
        selected_times = self.times[item].copy()
        selected_pos = self.pos[item, :].copy()
        selected_amplitude = self.amplitude[item].copy()
        selected_ori = self.ori[item, :].copy()
        selected_gof = self.gof[item].copy()
        selected_name = self.name
        return Dipole(
            selected_times, selected_pos, selected_amplitude, selected_ori,
            selected_gof, selected_name)
    def __len__(self):
        """The number of dipoles

        Returns
        -------
        len : int
            The number of dipoles.

        Examples
        --------
        This can be used as::

            >>> len(dipoles)  # doctest: +SKIP
            10
        """
        return self.pos.shape[0]
def _read_dipole_fixed(fname):
    """Helper to read a fixed dipole FIF file"""
    logger.info('Reading %s ...' % fname)
    _check_fname(fname, overwrite=True, must_exist=True)
    # Fixed dipoles are stored as evoked data; reuse the evoked reader.
    (info, nave, aspect_kind, first, last, comment, times,
     data) = _read_evoked(fname)
    return DipoleFixed(info=info, data=data, times=times, nave=nave,
                       aspect_kind=aspect_kind, first=first, last=last,
                       comment=comment)
class DipoleFixed(object):
    """Dipole class for fixed-position dipole fits

    .. note:: This class should usually not be instantiated directly,
              instead :func:`mne.read_dipole` should be used.

    Parameters
    ----------
    info : instance of Info
        The measurement info.
    data : array, shape (n_channels, n_times)
        The dipole data.
    times : array, shape (n_times,)
        The time points.
    nave : int
        Number of averages.
    aspect_kind : int
        The kind of data.
    first : int
        First sample.
    last : int
        Last sample.
    comment : str
        The dipole comment.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    See Also
    --------
    read_dipole
    Dipole

    Notes
    -----
    This class is for fixed-position dipole fits, where the position
    (and maybe orientation) is static over time. For sequential dipole fits,
    where the position can change a function of time, use :class:`mne.Dipole`.

    .. versionadded:: 0.12
    """
    @verbose
    def __init__(self, info, data, times, nave, aspect_kind, first, last,
                 comment, verbose=None):
        self.info = info
        self.nave = nave
        self._aspect_kind = aspect_kind
        # Human-readable aspect name; 'Unknown' when the kind is unmapped.
        self.kind = _aspect_rev.get(str(aspect_kind), 'Unknown')
        self.first = first
        self.last = last
        self.comment = comment
        self.times = times
        self.data = data
        self.verbose = verbose
    @property
    def ch_names(self):
        # Channel names, taken straight from the measurement info.
        return self.info['ch_names']
    @verbose
    def save(self, fname, verbose=None):
        """Save dipole in a .fif file

        Parameters
        ----------
        fname : str
            The name of the .fif file. Must end with ``'.fif'`` or
            ``'.fif.gz'`` to make it explicit that the file contains
            dipole information in FIF format.
        verbose : bool, str, int, or None
            If not None, override default verbose level (see mne.verbose).
        """
        # Enforce the conventional -dip.fif(.gz) naming before writing;
        # the data itself is written via the evoked writer.
        check_fname(fname, 'DipoleFixed', ('-dip.fif', '-dip.fif.gz'),
                    ('.fif', '.fif.gz'))
        _write_evokeds(fname, self, check=False)
    def plot(self, show=True):
        """Plot dipole data

        Parameters
        ----------
        show : bool
            Call pyplot.show() at the end or not.

        Returns
        -------
        fig : instance of matplotlib.figure.Figure
            The figure containing the time courses.
        """
        # Delegate to the evoked butterfly plot with fixed options.
        return _plot_evoked(self, picks=None, exclude=(), unit=True, show=show,
                            ylim=None, xlim='tight', proj=False, hline=None,
                            units=None, scalings=None, titles=None, axes=None,
                            gfp=False, window_title=None, spatial_colors=False,
                            plot_type="butterfly", selectable=False)
# #############################################################################
# IO
@verbose
def read_dipole(fname, verbose=None):
    """Read .dip file from Neuromag/xfit or MNE

    Parameters
    ----------
    fname : str
        The name of the .dip or .fif file.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    dipole : instance of Dipole or DipoleFixed
        The dipole.

    See Also
    --------
    mne.Dipole
    mne.DipoleFixed
    """
    _check_fname(fname, overwrite=True, must_exist=True)
    if fname.endswith(('.fif', '.fif.gz')):
        return _read_dipole_fixed(fname)
    # .dip text files may use either '%' or '#' as the comment character.
    try:
        data = np.loadtxt(fname, comments='%')
    except ValueError:  # was a bare except: don't mask KeyboardInterrupt etc.
        data = np.loadtxt(fname, comments='#')
    # Recover the optional dipole name from a trailing comment line.
    name = None
    with open(fname, 'r') as fid:
        for line in fid.readlines():
            if line.startswith('##') or line.startswith('%%'):
                m = re.search('Name "(.*) dipoles"', line)
                if m:
                    name = m.group(1)
                    break
    if data.ndim == 1:
        # A single dipole loads as 1D; keep everything 2D downstream.
        data = data[None, :]
    logger.info("%d dipole(s) found" % len(data))
    times = data[:, 0] / 1000.  # file stores ms
    pos = 1e-3 * data[:, 2:5]  # put data in meters
    amplitude = data[:, 5]
    norm = amplitude.copy()
    amplitude /= 1e9  # nAm -> Am
    norm[norm == 0] = 1  # avoid division by zero for zero-amplitude rows
    ori = data[:, 6:9] / norm[:, np.newaxis]  # normalize orientations
    gof = data[:, 9]
    return Dipole(times, pos, amplitude, ori, gof, name)
# #############################################################################
# Fitting
def _dipole_forwards(fwd_data, whitener, rr, n_jobs=1):
    """Compute the forward solution and do other nice stuff"""
    fwd = _compute_forwards_meeg(rr, fwd_data, n_jobs, verbose=False)
    fwd = np.concatenate(fwd, axis=1)
    fwd_orig = fwd.copy()
    # Apply projection and whiten (the covariance already includes the
    # projections).
    fwd = np.dot(fwd, whitener.T)
    # Column normalization would not change the fit, so it is skipped;
    # unit scales are returned instead.
    scales = np.ones(3)
    return fwd, fwd_orig, scales
def _make_guesses(surf_or_rad, r0, grid, exclude, mindist, n_jobs):
    """Make a guess space inside a sphere or BEM surface"""
    # A dict argument is treated as a BEM surface; anything else as a
    # sequence whose first element is a sphere radius around r0.
    if isinstance(surf_or_rad, dict):
        surf = surf_or_rad
        logger.info('Guess surface (%s) is in %s coordinates'
                    % (_bem_explain_surface(surf['id']),
                       _coord_frame_name(surf['coord_frame'])))
    else:
        radius = surf_or_rad[0]
        logger.info('Making a spherical guess space with radius %7.1f mm...'
                    % (1000 * radius))
        # Build the sphere from a level-3 icosahedral tessellation: unit
        # vectors scaled to the radius, then shifted to the center r0.
        surf = _get_ico_surface(3)
        _normalize_vectors(surf['rr'])
        surf['rr'] *= radius
        surf['rr'] += r0
    logger.info('Filtering (grid = %6.f mm)...' % (1000 * grid))
    # mindist is passed in meters and converted to mm for the source-space
    # helper; neighbor computation is skipped since only positions are used.
    src = _make_volume_source_space(surf, grid, exclude, 1000 * mindist,
                                    do_neighbors=False, n_jobs=n_jobs)
    # simplify the result to make things easier later
    src = dict(rr=src['rr'][src['vertno']], nn=src['nn'][src['vertno']],
               nuse=src['nuse'], coord_frame=src['coord_frame'],
               vertno=np.arange(src['nuse']))
    return SourceSpaces([src])
def _fit_eval(rd, B, B2, fwd_svd=None, fwd_data=None, whitener=None):
    """Calculate the residual sum of squares"""
    # Compute the forward SVD on the fly unless a precomputed one is given.
    if fwd_svd is None:
        fwd = _dipole_forwards(fwd_data, whitener, rd[np.newaxis, :])[0]
        fwd_svd = linalg.svd(fwd, overwrite_a=True, full_matrices=False)
    uu, sing, vv = fwd_svd
    gof = _dipole_gof(uu, sing, vv, B, B2)[0]
    # mne-c uses fitness=B2-Bm2, but ours (1-gof) is just a normalized version
    return 1. - gof
def _dipole_gof(uu, sing, vv, B, B2):
"""Calculate the goodness of fit from the forward SVD"""
ncomp = 3 if sing[2] / sing[0] > 0.2 else 2
one = np.dot(vv[:ncomp], B)
Bm2 = np.sum(one * one)
gof = Bm2 / B2
return gof, one
def _fit_Q(fwd_data, whitener, proj_op, B, B2, B_orig, rd, ori=None):
    """Fit the dipole moment once the location is known"""
    if 'fwd' in fwd_data:
        # should be a single precomputed "guess" (i.e., fixed position)
        assert rd is None
        fwd = fwd_data['fwd']
        assert fwd.shape[0] == 3
        fwd_orig = fwd_data['fwd_orig']
        assert fwd_orig.shape[0] == 3
        scales = fwd_data['scales']
        assert scales.shape == (3,)
        fwd_svd = fwd_data['fwd_svd'][0]
    else:
        # Free position: compute the forward for this candidate location.
        fwd, fwd_orig, scales = _dipole_forwards(fwd_data, whitener,
                                                 rd[np.newaxis, :])
        fwd_svd = None
    if ori is None:
        # Free orientation: recover the moment from the forward SVD.
        if fwd_svd is None:
            fwd_svd = linalg.svd(fwd, full_matrices=False)
        uu, sing, vv = fwd_svd
        gof, one = _dipole_gof(uu, sing, vv, B, B2)
        ncomp = len(one)
        # Counteract the effect of column normalization
        Q = scales[0] * np.sum(uu.T[:ncomp] *
                               (one / sing[:ncomp])[:, np.newaxis], axis=0)
    else:
        # Fixed orientation: project the forward onto the given direction
        # and fit a scalar amplitude along it.
        fwd = np.dot(ori[np.newaxis], fwd)
        sing = np.linalg.norm(fwd)
        one = np.dot(fwd / sing, B)
        gof = (one * one)[0] / B2
        Q = ori * (scales[0] * np.sum(one / sing))
    B_residual = _compute_residual(proj_op, B_orig, fwd_orig, Q)
    return Q, gof, B_residual
def _compute_residual(proj_op, B_orig, fwd_orig, Q):
"""Compute the residual"""
# apply the projector to both elements
return np.dot(proj_op, B_orig) - np.dot(np.dot(Q, fwd_orig), proj_op.T)
def _fit_dipoles(fun, min_dist_to_inner_skull, data, times, guess_rrs,
                 guess_data, fwd_data, whitener, proj_op, ori, n_jobs):
    """Fit one dipole per time point to the whitened, projected data."""
    from scipy.optimize import fmin_cobyla
    parallel, p_fun, _ = parallel_func(fun, n_jobs)
    # one independent fit per time sample, distributed over jobs
    fits = parallel(p_fun(min_dist_to_inner_skull, B, t, guess_rrs,
                          guess_data, fwd_data, whitener, proj_op,
                          fmin_cobyla, ori)
                    for B, t in zip(data.T, times))
    pos = np.array([fit[0] for fit in fits])
    amp = np.array([fit[1] for fit in fits])
    ori = np.array([fit[2] for fit in fits])
    gof = np.array([fit[3] for fit in fits]) * 100  # convert to percentage
    residual = np.array([fit[4] for fit in fits]).T
    return pos, amp, ori, gof, residual
'''Simplex code in case we ever want/need it for testing
def _make_tetra_simplex():
"""Make the initial tetrahedron"""
#
# For this definition of a regular tetrahedron, see
#
# http://mathworld.wolfram.com/Tetrahedron.html
#
x = np.sqrt(3.0) / 3.0
r = np.sqrt(6.0) / 12.0
R = 3 * r
d = x / 2.0
simplex = 1e-2 * np.array([[x, 0.0, -r],
[-d, 0.5, -r],
[-d, -0.5, -r],
[0., 0., R]])
return simplex
def try_(p, y, psum, ndim, fun, ihi, neval, fac):
"""Helper to try a value"""
ptry = np.empty(ndim)
fac1 = (1.0 - fac) / ndim
fac2 = fac1 - fac
ptry = psum * fac1 - p[ihi] * fac2
ytry = fun(ptry)
neval += 1
if ytry < y[ihi]:
y[ihi] = ytry
psum[:] += ptry - p[ihi]
p[ihi] = ptry
return ytry, neval
def _simplex_minimize(p, ftol, stol, fun, max_eval=1000):
"""Minimization with the simplex algorithm
Modified from Numerical recipes"""
y = np.array([fun(s) for s in p])
ndim = p.shape[1]
assert p.shape[0] == ndim + 1
mpts = ndim + 1
neval = 0
psum = p.sum(axis=0)
loop = 1
while(True):
ilo = 1
if y[1] > y[2]:
ihi = 1
inhi = 2
else:
ihi = 2
inhi = 1
for i in range(mpts):
if y[i] < y[ilo]:
ilo = i
if y[i] > y[ihi]:
inhi = ihi
ihi = i
elif y[i] > y[inhi]:
if i != ihi:
inhi = i
rtol = 2 * np.abs(y[ihi] - y[ilo]) / (np.abs(y[ihi]) + np.abs(y[ilo]))
if rtol < ftol:
break
if neval >= max_eval:
raise RuntimeError('Maximum number of evaluations exceeded.')
if stol > 0: # Has the simplex collapsed?
dsum = np.sqrt(np.sum((p[ilo] - p[ihi]) ** 2))
if loop > 5 and dsum < stol:
break
ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, -1.)
if ytry <= y[ilo]:
ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, 2.)
elif ytry >= y[inhi]:
ysave = y[ihi]
ytry, neval = try_(p, y, psum, ndim, fun, ihi, neval, 0.5)
if ytry >= ysave:
for i in range(mpts):
if i != ilo:
psum[:] = 0.5 * (p[i] + p[ilo])
p[i] = psum
y[i] = fun(psum)
neval += ndim
psum = p.sum(axis=0)
loop += 1
'''
def _surface_constraint(rd, surf, min_dist_to_inner_skull):
    """Constraint: signed distance to the inner skull minus the margin."""
    point = rd[np.newaxis, :]
    dist = _compute_nearest(surf['rr'], point, return_dists=True)[1][0]
    # flip the sign when the candidate lies outside the surface
    if _points_outside_surface(point, surf, 1)[0]:
        dist = -dist
    # Once we know the dipole is below the inner skull,
    # let's check if its distance to the inner skull is at least
    # min_dist_to_inner_skull. This can be enforced by adding a
    # constrain proportional to its distance.
    return dist - min_dist_to_inner_skull
def _sphere_constraint(rd, r0, R_adj):
"""Sphere fitting constraint"""
return R_adj - np.sqrt(np.sum((rd - r0) ** 2))
def _fit_dipole(min_dist_to_inner_skull, B_orig, t, guess_rrs,
                guess_data, fwd_data, whitener, proj_op,
                fmin_cobyla, ori):
    """Fit a single bit of data.

    Optimizes the dipole position with COBYLA (constrained to stay inside
    the inner skull / sphere), then fits the moment there. Returns
    ``(position, amplitude, orientation, gof, residual)``.
    """
    B = np.dot(whitener, B_orig)
    # make constraint function to keep the solver within the inner skull
    if isinstance(fwd_data['inner_skull'], dict):  # bem
        surf = fwd_data['inner_skull']
        constraint = partial(_surface_constraint, surf=surf,
                             min_dist_to_inner_skull=min_dist_to_inner_skull)
    else:  # sphere
        surf = None
        R, r0 = fwd_data['inner_skull']
        constraint = partial(_sphere_constraint, r0=r0,
                             R_adj=R - min_dist_to_inner_skull)
        del R, r0
    # Find a good starting point (find_best_guess in C)
    B2 = np.dot(B, B)
    if B2 == 0:
        warn('Zero field found for time %s' % t)
        return np.zeros(3), 0, np.zeros(3), 0, B
    # best guess = lowest residual among the precomputed guess forwards
    idx = np.argmin([_fit_eval(guess_rrs[[fi], :], B, B2, fwd_svd)
                     for fi, fwd_svd in enumerate(guess_data['fwd_svd'])])
    x0 = guess_rrs[idx]
    fun = partial(_fit_eval, B=B, B2=B2, fwd_data=fwd_data, whitener=whitener)
    # Tested minimizers:
    #    Simplex, BFGS, CG, COBYLA, L-BFGS-B, Powell, SLSQP, TNC
    # Several were similar, but COBYLA won for having a handy constraint
    # function we can use to ensure we stay inside the inner skull /
    # smallest sphere
    rd_final = fmin_cobyla(fun, x0, (constraint,), consargs=(),
                           rhobeg=5e-2, rhoend=5e-5, disp=False)
    # simplex = _make_tetra_simplex() + x0
    # _simplex_minimize(simplex, 1e-4, 2e-4, fun)
    # rd_final = simplex[0]
    # Compute the dipole moment at the final point
    Q, gof, residual = _fit_Q(fwd_data, whitener, proj_op, B, B2, B_orig,
                              rd_final, ori=ori)
    amp = np.sqrt(np.dot(Q, Q))
    # guard against division by zero for a null moment
    norm = 1. if amp == 0. else amp
    ori = Q / norm
    msg = '---- Fitted : %7.1f ms' % (1000. * t)
    if surf is not None:
        dist_to_inner_skull = _compute_nearest(
            surf['rr'], rd_final[np.newaxis, :], return_dists=True)[1][0]
        msg += (", distance to inner skull : %2.4f mm"
                % (dist_to_inner_skull * 1000.))
    logger.info(msg)
    return rd_final, amp, ori, gof, residual
def _fit_dipole_fixed(min_dist_to_inner_skull, B_orig, t, guess_rrs,
                      guess_data, fwd_data, whitener, proj_op,
                      fmin_cobyla, ori):
    """Fit a data using a fixed position.

    Only the dipole moment (and, if ``ori`` is None, the orientation) is
    fitted for this time point; the position is the precomputed guess.
    Returns ``(position, amplitude, orientation, gof, residual)``.
    """
    B = np.dot(whitener, B_orig)
    B2 = np.dot(B, B)
    if B2 == 0:
        warn('Zero field found for time %s' % t)
        # BUG FIX: this early return previously yielded only four values,
        # but _fit_dipoles unpacks five (r[4] is the residual) -> IndexError.
        # Return the whitened data (all zeros here, since B2 == 0) as the
        # residual, matching _fit_dipole's zero-field behavior.
        return np.zeros(3), 0, np.zeros(3), 0, B
    # Compute the dipole moment
    Q, gof, residual = _fit_Q(guess_data, whitener, proj_op, B, B2, B_orig,
                              rd=None, ori=ori)
    if ori is None:
        amp = np.sqrt(np.dot(Q, Q))
        # guard against division by zero for a null moment
        norm = 1. if amp == 0. else amp
        ori = Q / norm
    else:
        amp = np.dot(Q, ori)
    # No corresponding 'logger' message here because it should go *very* fast
    return guess_rrs[0], amp, ori, gof, residual
@verbose
def fit_dipole(evoked, cov, bem, trans=None, min_dist=5., n_jobs=1,
               pos=None, ori=None, verbose=None):
    """Fit a dipole.

    Parameters
    ----------
    evoked : instance of Evoked
        The dataset to fit.
    cov : str | instance of Covariance
        The noise covariance.
    bem : str | instance of ConductorModel
        The BEM filename (str) or conductor model.
    trans : str | None
        The head<->MRI transform filename. Must be provided unless BEM
        is a sphere model.
    min_dist : float
        Minimum distance (in millimeters) from the dipole to the inner skull.
        Must be positive. Note that because this is a constraint passed to
        a solver it is not strict but close, i.e. for a ``min_dist=5.`` the
        fits could be 4.9 mm from the inner skull.
    n_jobs : int
        Number of jobs to run in parallel (used in field computation
        and fitting).
    pos : ndarray, shape (3,) | None
        Position of the dipole to use. If None (default), sequential
        fitting (different position and orientation for each time instance)
        is performed. If a position (in head coords) is given as an array,
        the position is fixed during fitting.

        .. versionadded:: 0.12
    ori : ndarray, shape (3,) | None
        Orientation of the dipole to use. If None (default), the
        orientation is free to change as a function of time. If an
        orientation (in head coordinates) is given as an array, ``pos``
        must also be provided, and the routine computes the amplitude and
        goodness of fit of the dipole at the given position and orientation
        for each time instant.

        .. versionadded:: 0.12
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    dip : instance of Dipole or DipoleFixed
        The dipole fits. A :class:`mne.DipoleFixed` is returned if
        ``pos`` and ``ori`` are both not None.
    residual : ndarray, shape (n_meeg_channels, n_times)
        The good M-EEG data channels with the fitted dipolar activity
        removed.

    See Also
    --------
    mne.beamformer.rap_music

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    # This could eventually be adapted to work with other inputs, these
    # are what is needed:
    evoked = evoked.copy()
    # Determine if a list of projectors has an average EEG ref
    if _needs_eeg_average_ref_proj(evoked.info):
        raise ValueError('EEG average reference is mandatory for dipole '
                         'fitting.')
    if min_dist < 0:
        raise ValueError('min_dist should be positive. Got %s' % min_dist)
    if ori is not None and pos is None:
        raise ValueError('pos must be provided if ori is not None')
    data = evoked.data
    info = evoked.info
    times = evoked.times.copy()
    comment = evoked.comment
    # Convert the min_dist to meters
    min_dist_to_inner_skull = min_dist / 1000.
    del min_dist
    # Figure out our inputs
    neeg = len(pick_types(info, meg=False, eeg=True, ref_meg=False,
                          exclude=[]))
    if isinstance(bem, string_types):
        bem_extra = bem
    else:
        bem_extra = repr(bem)
    logger.info('BEM : %s' % bem_extra)
    if trans is not None:
        logger.info('MRI transform : %s' % trans)
        mri_head_t, trans = _get_trans(trans)
    else:
        # no transform needed for a sphere model in head coordinates
        mri_head_t = Transform('head', 'mri', np.eye(4))
    bem = _setup_bem(bem, bem_extra, neeg, mri_head_t, verbose=False)
    if not bem['is_sphere']:
        if trans is None:
            raise ValueError('mri must not be None if BEM is provided')
        # Find the best-fitting sphere
        inner_skull = _bem_find_surface(bem, 'inner_skull')
        inner_skull = inner_skull.copy()
        R, r0 = _fit_sphere(inner_skull['rr'], disp=False)
        # r0 back to head frame for logging
        r0 = apply_trans(mri_head_t['trans'], r0[np.newaxis, :])[0]
        logger.info('Head origin : '
                    '%6.1f %6.1f %6.1f mm rad = %6.1f mm.'
                    % (1000 * r0[0], 1000 * r0[1], 1000 * r0[2], 1000 * R))
    else:
        r0 = bem['r0']
        if len(bem.get('layers', [])) > 0:
            R = bem['layers'][0]['rad']
            kind = 'rad'
        else:  # MEG-only
            # Use the minimum distance to the MEG sensors as the radius then
            R = np.dot(linalg.inv(info['dev_head_t']['trans']),
                       np.hstack([r0, [1.]]))[:3]  # r0 -> device
            R = R - [info['chs'][pick]['loc'][:3]
                     for pick in pick_types(info, meg=True, exclude=[])]
            if len(R) == 0:
                raise RuntimeError('No MEG channels found, but MEG-only '
                                   'sphere model used')
            R = np.min(np.sqrt(np.sum(R * R, axis=1)))  # use dist to sensors
            kind = 'max_rad'
        logger.info('Sphere model : origin at (% 7.2f % 7.2f % 7.2f) mm, '
                    '%s = %6.1f mm'
                    % (1000 * r0[0], 1000 * r0[1], 1000 * r0[2], kind, R))
        inner_skull = [R, r0]  # NB sphere model defined in head frame
    r0_mri = apply_trans(invert_transform(mri_head_t)['trans'],
                         r0[np.newaxis, :])[0]
    accurate = False  # can be an option later (shouldn't make big diff)
    # Deal with DipoleFixed cases here
    if pos is not None:
        fixed_position = True
        pos = np.array(pos, float)
        if pos.shape != (3,):
            raise ValueError('pos must be None or a 3-element array-like,'
                             ' got %s' % (pos,))
        logger.info('Fixed position : %6.1f %6.1f %6.1f mm'
                    % tuple(1000 * pos))
        if ori is not None:
            ori = np.array(ori, float)
            if ori.shape != (3,):
                raise ValueError('oris must be None or a 3-element array-like,'
                                 ' got %s' % (ori,))
            norm = np.sqrt(np.sum(ori * ori))
            if not np.isclose(norm, 1):
                raise ValueError('ori must be a unit vector, got length %s'
                                 % (norm,))
            logger.info('Fixed orientation : %6.4f %6.4f %6.4f mm'
                        % tuple(ori))
        else:
            logger.info('Free orientation : <time-varying>')
        fit_n_jobs = 1  # only use 1 job to do the guess fitting
    else:
        fixed_position = False
        # Eventually these could be parameters, but they are just used for
        # the initial grid anyway
        guess_grid = 0.02  # MNE-C uses 0.01, but this is faster w/similar perf
        guess_mindist = max(0.005, min_dist_to_inner_skull)
        guess_exclude = 0.02
        logger.info('Guess grid : %6.1f mm' % (1000 * guess_grid,))
        if guess_mindist > 0.0:
            logger.info('Guess mindist : %6.1f mm'
                        % (1000 * guess_mindist,))
        if guess_exclude > 0:
            logger.info('Guess exclude : %6.1f mm'
                        % (1000 * guess_exclude,))
        logger.info('Using %s MEG coil definitions.'
                    % ("accurate" if accurate else "standard"))
        fit_n_jobs = n_jobs
    if isinstance(cov, string_types):
        logger.info('Noise covariance : %s' % (cov,))
        cov = read_cov(cov, verbose=False)
    logger.info('')
    _print_coord_trans(mri_head_t)
    _print_coord_trans(info['dev_head_t'])
    logger.info('%d bad channels total' % len(info['bads']))
    # Forward model setup (setup_forward_model from setup.c)
    ch_types = [channel_type(info, idx) for idx in range(info['nchan'])]
    megcoils, compcoils, megnames, meg_info = [], [], [], None
    eegels, eegnames = [], []
    if 'grad' in ch_types or 'mag' in ch_types:
        megcoils, compcoils, megnames, meg_info = \
            _prep_meg_channels(info, exclude='bads',
                               accurate=accurate, verbose=verbose)
    if 'eeg' in ch_types:
        eegels, eegnames = _prep_eeg_channels(info, exclude='bads',
                                              verbose=verbose)
    # Ensure that MEG and/or EEG channels are present
    if len(megcoils + eegels) == 0:
        raise RuntimeError('No MEG or EEG channels found.')
    # Whitener for the data
    logger.info('Decomposing the sensor noise covariance matrix...')
    picks = pick_types(info, meg=True, eeg=True, ref_meg=False)
    # In case we want to more closely match MNE-C for debugging:
    # from .io.pick import pick_info
    # from .cov import prepare_noise_cov
    # info_nb = pick_info(info, picks)
    # cov = prepare_noise_cov(cov, info_nb, info_nb['ch_names'], verbose=False)
    # nzero = (cov['eig'] > 0)
    # n_chan = len(info_nb['ch_names'])
    # whitener = np.zeros((n_chan, n_chan), dtype=np.float)
    # whitener[nzero, nzero] = 1.0 / np.sqrt(cov['eig'][nzero])
    # whitener = np.dot(whitener, cov['eigvec'])
    whitener = _get_whitener_data(info, cov, picks, verbose=False)
    # Proceed to computing the fits (make_guess_data)
    if fixed_position:
        guess_src = dict(nuse=1, rr=pos[np.newaxis], inuse=np.array([True]))
        logger.info('Compute forward for dipole location...')
    else:
        logger.info('\n---- Computing the forward solution for the guesses...')
        guess_src = _make_guesses(inner_skull, r0_mri,
                                  guess_grid, guess_exclude, guess_mindist,
                                  n_jobs=n_jobs)[0]
        # grid coordinates go from mri to head frame
        transform_surface_to(guess_src, 'head', mri_head_t)
        logger.info('Go through all guess source locations...')
    # inner_skull goes from mri to head frame
    if isinstance(inner_skull, dict):
        transform_surface_to(inner_skull, 'head', mri_head_t)
    if fixed_position:
        # validate that the fixed position respects the min_dist constraint
        if isinstance(inner_skull, dict):
            check = _surface_constraint(pos, inner_skull,
                                        min_dist_to_inner_skull)
        else:
            check = _sphere_constraint(pos, r0,
                                       R_adj=R - min_dist_to_inner_skull)
        if check <= 0:
            raise ValueError('fixed position is %0.1fmm outside the inner '
                             'skull boundary' % (-1000 * check,))
    # C code computes guesses w/sphere model for speed, don't bother here
    fwd_data = dict(coils_list=[megcoils, eegels], infos=[meg_info, None],
                    ccoils_list=[compcoils, None], coil_types=['meg', 'eeg'],
                    inner_skull=inner_skull)
    # fwd_data['inner_skull'] in head frame, bem in mri, confusing...
    _prep_field_computation(guess_src['rr'], bem, fwd_data, n_jobs,
                            verbose=False)
    guess_fwd, guess_fwd_orig, guess_fwd_scales = _dipole_forwards(
        fwd_data, whitener, guess_src['rr'], n_jobs=fit_n_jobs)
    # decompose ahead of time
    guess_fwd_svd = [linalg.svd(fwd, overwrite_a=False, full_matrices=False)
                     for fwd in np.array_split(guess_fwd,
                                               len(guess_src['rr']))]
    guess_data = dict(fwd=guess_fwd, fwd_svd=guess_fwd_svd,
                      fwd_orig=guess_fwd_orig, scales=guess_fwd_scales)
    del guess_fwd, guess_fwd_svd, guess_fwd_orig, guess_fwd_scales  # destroyed
    pl = '' if guess_src['nuse'] == 1 else 's'
    logger.info('[done %d source%s]' % (guess_src['nuse'], pl))
    # Do actual fits
    data = data[picks]
    ch_names = [info['ch_names'][p] for p in picks]
    proj_op = make_projector(info['projs'], ch_names, info['bads'])[0]
    # dispatch to the fixed-position or free-position per-time-point fitter
    fun = _fit_dipole_fixed if fixed_position else _fit_dipole
    # out = (pos, amp, ori, gof, residual)
    out = _fit_dipoles(
        fun, min_dist_to_inner_skull, data, times, guess_src['rr'],
        guess_data, fwd_data, whitener, proj_op, ori, n_jobs)
    if fixed_position and ori is not None:
        # DipoleFixed
        data = np.array([out[1], out[3]])
        out_info = deepcopy(info)
        loc = np.concatenate([pos, ori, np.zeros(6)])
        out_info['chs'] = [
            dict(ch_name='dip 01', loc=loc, kind=FIFF.FIFFV_DIPOLE_WAVE,
                 coord_frame=FIFF.FIFFV_COORD_UNKNOWN, unit=FIFF.FIFF_UNIT_AM,
                 coil_type=FIFF.FIFFV_COIL_DIPOLE,
                 unit_mul=0, range=1, cal=1., scanno=1, logno=1),
            dict(ch_name='goodness', loc=np.zeros(12),
                 kind=FIFF.FIFFV_GOODNESS_FIT, unit=FIFF.FIFF_UNIT_AM,
                 coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
                 coil_type=FIFF.FIFFV_COIL_NONE,
                 unit_mul=0, range=1., cal=1., scanno=2, logno=100)]
        for key in ['hpi_meas', 'hpi_results', 'projs']:
            out_info[key] = list()
        for key in ['acq_pars', 'acq_stim', 'description', 'dig',
                    'experimenter', 'hpi_subsystem', 'proj_id', 'proj_name',
                    'subject_info']:
            out_info[key] = None
        out_info._update_redundant()
        out_info._check_consistency()
        dipoles = DipoleFixed(out_info, data, times, evoked.nave,
                              evoked._aspect_kind, evoked.first, evoked.last,
                              comment)
    else:
        dipoles = Dipole(times, out[0], out[1], out[2], out[3], comment)
    residual = out[4]
    logger.info('%d time points fitted' % len(dipoles.times))
    return dipoles, residual
def get_phantom_dipoles(kind='vectorview'):
    """Get standard phantom dipole locations and orientations.

    Parameters
    ----------
    kind : str
        Get the information for the given system.

            ``vectorview`` (default)
              The Neuromag VectorView phantom.
            ``122``
              The Neuromag-122 phantom. This has the same dipoles
              as the VectorView phantom, but in a different order.

    Returns
    -------
    pos : ndarray, shape (n_dipoles, 3)
        The dipole positions.
    ori : ndarray, shape (n_dipoles, 3)
        The dipole orientations.
    """
    # BUG FIX: the default used to be 'elekta', which is not in
    # _valid_types, so calling with no argument always raised; the
    # docstring already documented 'vectorview' as the default.
    _valid_types = ('122', 'vectorview')
    if not isinstance(kind, string_types) or kind not in _valid_types:
        raise ValueError('kind must be one of %s, got %s'
                         % (_valid_types, kind,))
    if kind in ('122', 'vectorview'):
        a = np.array([59.7, 48.6, 35.8, 24.8, 37.2, 27.5, 15.8, 7.9])
        b = np.array([46.1, 41.9, 38.3, 31.5, 13.9, 16.2, 20, 19.3])
        x = np.concatenate((a, [0] * 8, -b, [0] * 8))
        y = np.concatenate(([0] * 8, -a, [0] * 8, b))
        c = [22.9, 23.5, 25.5, 23.1, 52, 46.4, 41, 33]
        d = [44.4, 34, 21.6, 12.7, 62.4, 51.5, 39.1, 27.9]
        z = np.concatenate((c, c, d, d))
        pos = np.vstack((x, y, z)).T / 1000.
        # BUG FIX: kind is a string, so compare against '122' (the old
        # ``kind == 122`` was always False and the reorder never happened),
        # and close the list() around each range() before concatenating
        # (``range(...) + list(...)`` is a TypeError on Python 3).
        if kind == '122':
            reorder = (list(range(8, 16)) + list(range(0, 8)) +
                       list(range(24, 32)) + list(range(16, 24)))
            pos = pos[reorder]
        # Locs are always in XZ or YZ, and so are the oris. The oris are
        # also in the same plane and tangential, so it's easy to determine
        # the orientation.
        ori = list()
        for this_pos in pos:
            this_ori = np.zeros(3)
            idx = np.where(this_pos == 0)[0]
            # assert len(idx) == 1
            idx = np.setdiff1d(np.arange(3), idx[0])
            this_ori[idx] = (this_pos[idx][::-1] /
                             np.linalg.norm(this_pos[idx])) * [1, -1]
            # Now we have this quality, which we could uncomment to
            # double-check:
            # np.testing.assert_allclose(np.dot(this_ori, this_pos) /
            #                            np.linalg.norm(this_pos), 0,
            #                            atol=1e-15)
            ori.append(this_ori)
        ori = np.array(ori)
    return pos, ori
|
alexandrebarachant/mne-python
|
mne/dipole.py
|
Python
|
bsd-3-clause
| 41,062
|
[
"Mayavi"
] |
4ac8b483b25d7d66f10162934825cbc6487567e1bc91bfcc22c775ec312fb7cc
|
#! /usr/bin/python
'''pysam - a python module for reading, manipulating and writing
genomic data sets.
pysam is a lightweight wrapper of the htslib C-API and provides
facilities to read and write SAM/BAM/VCF/BCF/BED/GFF/GTF/FASTA/FASTQ
files as well as access to the command line functionality of the
samtools and bcftools packages. The module supports compression and
random access through indexing.
This module provides a low-level wrapper around the htslib C-API as
using cython and a high-level API for convenient access to the data
within standard genomic file formats.
The current version wraps htslib-1.7, samtools-1.7 and bcftools-1.6.
See:
http://www.htslib.org
https://github.com/pysam-developers/pysam
http://pysam.readthedocs.org/en/stable
'''
import collections
import glob
import os
import platform
import re
import subprocess
import sys
import sysconfig
from contextlib import contextmanager
from setuptools import Extension, setup
from cy_build import CyExtension as Extension, cy_build_ext as build_ext
try:
import cython
HAVE_CYTHON = True
except ImportError:
HAVE_CYTHON = False
IS_PYTHON3 = sys.version_info.major >= 3
@contextmanager
def changedir(path):
    """Temporarily change the working directory to *path*."""
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        # always restore the original directory, even if the body raised
        os.chdir(previous)
def run_configure(option):
    """Run ``./configure`` in the current directory with *option*.

    Returns True when configure exits with status 0, False when it fails
    or cannot be started at all.
    """
    try:
        retcode = subprocess.call(
            " ".join(("./configure", option)),
            shell=True)
    except OSError:
        # e.g. no shell available
        return False
    # a zero exit status means configure succeeded
    return retcode == 0
def run_make_print_config():
    """Return the key/value pairs printed by ``make -s print-config``."""
    stdout = subprocess.check_output(["make", "-s", "print-config"])
    if IS_PYTHON3:
        stdout = stdout.decode("ascii")
    config = {}
    for line in stdout.splitlines():
        if "=" not in line:
            continue
        fields = line.split("=")
        # keep only well-formed "key = value" lines (a single '=')
        if len(fields) == 2:
            config[fields[0].strip()] = fields[1].strip()
    return config
def configure_library(library_dir, env_options=None, options=()):
    """Run ``configure`` in *library_dir*, trying option sets in order.

    ``env_options`` (typically from the environment) is tried first, then
    each entry of ``options``. Returns the first option string for which
    configure succeeds, or None if all attempts fail.

    Raises ValueError if no configure script exists in *library_dir*.

    Note: the ``options`` default was changed from a mutable ``[]`` to an
    immutable ``()`` (same behavior; avoids the shared-mutable-default
    pitfall).
    """
    configure_script = os.path.join(library_dir, "configure")
    on_rtd = os.environ.get("READTHEDOCS") == "True"
    # RTD has no bzip2 development libraries installed:
    if on_rtd:
        env_options = "--disable-bz2"
    if not os.path.exists(configure_script):
        raise ValueError(
            "configure script {} does not exist".format(configure_script))
    with changedir(library_dir):
        if env_options is not None:
            if run_configure(env_options):
                return env_options
        for option in options:
            if run_configure(option):
                return option
    return None
def distutils_dir_name(dname):
    """Returns the name of a distutils build directory

    see: http://stackoverflow.com/questions/14320220/
    testing-python-c-libraries-get-build-path
    """
    template = "{dirname}.{platform}-{version[0]}.{version[1]}"
    return template.format(dirname=dname,
                           platform=sysconfig.get_platform(),
                           version=sys.version_info)
def get_pysam_version():
    """Return the pysam version string from the source tree."""
    # Put the package directory first on sys.path so that the in-tree
    # ``pysam/version.py`` is imported (not an installed pysam).
    # NOTE(review): this mutates sys.path as a side effect and leaves the
    # entry in place.
    sys.path.insert(0, "pysam")
    import version
    return version.__version__
# How to link against HTSLIB
# shared: build shared chtslib from builtin htslib code.
# external: use shared libhts.so compiled outside of
# pysam
# separate: use included htslib and include in each extension
# module. No dependencies between modules and works with
# setup.py install, but wasteful in terms of memory and
# compilation time. Fallback if shared module compilation
# fails.
HTSLIB_MODE = os.environ.get("HTSLIB_MODE", "shared")
HTSLIB_LIBRARY_DIR = os.environ.get("HTSLIB_LIBRARY_DIR", None)
HTSLIB_INCLUDE_DIR = os.environ.get("HTSLIB_INCLUDE_DIR", None)
HTSLIB_CONFIGURE_OPTIONS = os.environ.get("HTSLIB_CONFIGURE_OPTIONS", None)
HTSLIB_SOURCE = None
package_list = ['pysam',
'pysam.include',
'pysam.include.samtools',
'pysam.include.bcftools',
'pysam.include.samtools.win32']
package_dirs = {'pysam': 'pysam',
'pysam.include.samtools': 'samtools',
'pysam.include.bcftools': 'bcftools'}
# list of config files that will be automatically generated should
# they not already exist or be created by configure scripts in the
# subpackages.
config_headers = ["samtools/config.h",
"bcftools/config.h"]
cmdclass = {'build_ext': build_ext}
# If cython is available, the pysam will be built using cython from
# the .pyx files. If no cython is available, the C-files included in the
# distribution will be used.
if HAVE_CYTHON:
print ("# pysam: cython is available - using cythonize if necessary")
source_pattern = "pysam/libc%s.pyx"
else:
print ("# pysam: no cython available - using pre-compiled C")
source_pattern = "pysam/libc%s.c"
# Exit if there are no pre-compiled files and no cython available
fn = source_pattern % "htslib"
if not os.path.exists(fn):
raise ValueError(
"no cython installed, but can not find {}."
"Make sure that cython is installed when building "
"from the repository"
.format(fn))
# exclude sources that contain a main function
EXCLUDE = {
"samtools": (
),
"bcftools": (
"test", "plugins", "peakfit.c",
"peakfit.h",
# needs to renamed, name conflict with samtools reheader
"reheader.c",
"polysomy.c"),
"htslib": (
'htslib/tabix.c',
'htslib/bgzip.c',
'htslib/htsfile.c'),
}
print ("# pysam: htslib mode is {}".format(HTSLIB_MODE))
print ("# pysam: HTSLIB_CONFIGURE_OPTIONS={}".format(
HTSLIB_CONFIGURE_OPTIONS))
htslib_configure_options = None
if HTSLIB_MODE in ['shared', 'separate']:
package_list += ['pysam.include.htslib',
'pysam.include.htslib.htslib']
package_dirs.update({'pysam.include.htslib':'htslib'})
htslib_configure_options = configure_library(
"htslib",
HTSLIB_CONFIGURE_OPTIONS,
["--enable-libcurl",
"--disable-libcurl"])
HTSLIB_SOURCE = "builtin"
print ("# pysam: htslib configure options: {}".format(
str(htslib_configure_options)))
config_headers += ["htslib/config.h"]
if htslib_configure_options is None:
# create empty config.h file
with open("htslib/config.h", "w") as outf:
outf.write(
"/* empty config.h created by pysam */\n")
outf.write(
"/* conservative compilation options */\n")
with changedir("htslib"):
htslib_make_options = run_make_print_config()
for key, value in htslib_make_options.items():
print ("# pysam: htslib_config {}={}".format(key, value))
external_htslib_libraries = ['z']
if "LIBS" in htslib_make_options:
external_htslib_libraries.extend(
[re.sub("^-l", "", x) for x in htslib_make_options["LIBS"].split(" ") if x.strip()])
shared_htslib_sources = [re.sub("\.o", ".c", os.path.join("htslib", x))
for x in
htslib_make_options["LIBHTS_OBJS"].split(" ")]
htslib_sources = []
if HTSLIB_LIBRARY_DIR:
# linking against a shared, externally installed htslib version, no
# sources required for htslib
htslib_sources = []
shared_htslib_sources = []
chtslib_sources = []
htslib_library_dirs = [HTSLIB_LIBRARY_DIR]
htslib_include_dirs = [HTSLIB_INCLUDE_DIR]
external_htslib_libraries = ['z', 'hts']
elif HTSLIB_MODE == 'separate':
# add to each pysam component a separately compiled
# htslib
htslib_sources = shared_htslib_sources
shared_htslib_sources = htslib_sources
htslib_library_dirs = []
htslib_include_dirs = ['htslib']
elif HTSLIB_MODE == 'shared':
# link each pysam component against the same
# htslib built from sources included in the pysam
# package.
htslib_library_dirs = [
"pysam", # when using setup.py develop?
".", # when using setup.py develop?
os.path.join("build", distutils_dir_name("lib"), "pysam")]
htslib_include_dirs = ['htslib']
else:
raise ValueError("unknown HTSLIB value '%s'" % HTSLIB_MODE)
# build config.py
with open(os.path.join("pysam", "config.py"), "w") as outf:
outf.write('HTSLIB = "{}"\n'.format(HTSLIB_SOURCE))
config_values = collections.defaultdict(int)
if HTSLIB_SOURCE == "builtin":
with open(os.path.join("htslib", "config.h")) as inf:
for line in inf:
if line.startswith("#define"):
key, value = re.match(
"#define (\S+)\s+(\S+)", line).groups()
config_values[key] = value
for key in ["ENABLE_PLUGINS",
"HAVE_COMMONCRYPTO",
"HAVE_GMTIME_R",
"HAVE_HMAC",
"HAVE_IRODS",
"HAVE_LIBCURL",
"HAVE_MMAP"]:
outf.write("{} = {}\n".format(key, config_values[key]))
print ("# pysam: config_option: {}={}".format(key, config_values[key]))
# create empty config.h files if they have not been created automatically
# or created by the user:
for fn in config_headers:
if not os.path.exists(fn):
with open(fn, "w") as outf:
outf.write(
"/* empty config.h created by pysam */\n")
outf.write(
"/* conservative compilation options */\n")
#######################################################
# Windows compatibility - untested
if platform.system() == 'Windows':
include_os = ['win32']
os_c_files = ['win32/getopt.c']
extra_compile_args = []
else:
include_os = []
os_c_files = []
# for python 3.4, see for example
# http://stackoverflow.com/questions/25587039/
# error-compiling-rpy2-on-python3-4-due-to-werror-
# declaration-after-statement
extra_compile_args = [
"-Wno-unused",
"-Wno-strict-prototypes",
"-Wno-sign-compare",
"-Wno-error=declaration-after-statement"]
define_macros = []
suffix = sysconfig.get_config_var('EXT_SUFFIX')
if not suffix:
suffix = sysconfig.get_config_var('SO')
internal_htslib_libraries = [
os.path.splitext("chtslib{}".format(suffix))[0]]
internal_samtools_libraries = [
os.path.splitext("csamtools{}".format(suffix))[0],
os.path.splitext("cbcftools{}".format(suffix))[0],
]
internal_pysamutil_libraries = [
os.path.splitext("cutils{}".format(suffix))[0]]
libraries_for_pysam_module = external_htslib_libraries + internal_htslib_libraries + internal_pysamutil_libraries
# Order of modules matters in order to make sure that dependencies are resolved.
# The structures of dependencies is as follows:
# libchtslib: htslib utility functions and htslib itself if builtin is set.
# libcsamtools: samtools code (builtin)
# libcbcftools: bcftools code (builtin)
# libcutils: General utility functions, depends on all of the above
# libcXXX (pysam module): depends on libchtslib and libcutils
# The list below uses the union of include_dirs and library_dirs for
# reasons of simplicity.
modules = [
dict(name="pysam.libchtslib",
sources=[source_pattern % "htslib", "pysam/htslib_util.c"] + shared_htslib_sources + os_c_files,
libraries=external_htslib_libraries),
dict(name="pysam.libcsamtools",
sources=[source_pattern % "samtools"] + glob.glob(os.path.join("samtools", "*.pysam.c")) +
[os.path.join("samtools", "lz4", "lz4.c")] + htslib_sources + os_c_files,
libraries=external_htslib_libraries + internal_htslib_libraries),
dict(name="pysam.libcbcftools",
sources=[source_pattern % "bcftools"] + glob.glob(os.path.join("bcftools", "*.pysam.c")) + htslib_sources + os_c_files,
libraries=external_htslib_libraries + internal_htslib_libraries),
dict(name="pysam.libcutils",
sources=[source_pattern % "utils", "pysam/pysam_util.c"] + htslib_sources + os_c_files,
libraries=external_htslib_libraries + internal_htslib_libraries + internal_samtools_libraries),
dict(name="pysam.libcalignmentfile",
sources=[source_pattern % "alignmentfile"] + htslib_sources + os_c_files,
libraries=libraries_for_pysam_module),
dict(name="pysam.libcsamfile",
sources=[source_pattern % "samfile"] + htslib_sources + os_c_files,
libraries=libraries_for_pysam_module),
dict(name="pysam.libcalignedsegment",
sources=[source_pattern % "alignedsegment"] + htslib_sources + os_c_files,
libraries=libraries_for_pysam_module),
dict(name="pysam.libctabix",
sources=[source_pattern % "tabix"] + htslib_sources + os_c_files,
libraries=libraries_for_pysam_module),
dict(name="pysam.libcfaidx",
sources=[source_pattern % "faidx"] + htslib_sources + os_c_files,
libraries=libraries_for_pysam_module),
dict(name="pysam.libcbcf",
sources=[source_pattern % "bcf"] + htslib_sources + os_c_files,
libraries=libraries_for_pysam_module),
dict(name="pysam.libcbgzf",
sources=[source_pattern % "bgzf"] + htslib_sources + os_c_files,
libraries=libraries_for_pysam_module),
dict(name="pysam.libctabixproxies",
sources=[source_pattern % "tabixproxies"] + htslib_sources + os_c_files,
libraries=libraries_for_pysam_module),
dict(name="pysam.libcvcf",
sources=[source_pattern % "vcf"] + htslib_sources + os_c_files,
libraries=libraries_for_pysam_module),
]
common_options = dict(
language="c",
extra_compile_args=extra_compile_args,
define_macros=define_macros,
# for out-of-tree compilation, use absolute paths
library_dirs=[os.path.abspath(x) for x in ["pysam"] + htslib_library_dirs],
include_dirs=[os.path.abspath(x) for x in htslib_include_dirs + \
["samtools", "samtools/lz4", "bcftools", "pysam", "."] + include_os])
# add common options (in python >3.5, could use n = {**a, **b}
for module in modules:
module.update(**common_options)
classifiers = """
Development Status :: 4 - Beta
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved
Programming Language :: Python
Topic :: Software Development
Topic :: Scientific/Engineering
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
metadata = {
'name': "pysam",
'version': get_pysam_version(),
'description': "pysam",
'long_description': __doc__,
'author': "Andreas Heger",
'author_email': "andreas.heger@gmail.com",
'license': "MIT",
'platforms': ["POSIX", "UNIX", "MacOS"],
'classifiers': [_f for _f in classifiers.split("\n") if _f],
'url': "https://github.com/pysam-developers/pysam",
'packages': package_list,
'requires': ['cython (>=0.21)'],
'ext_modules': [Extension(**opts) for opts in modules],
'cmdclass': cmdclass,
'package_dir': package_dirs,
'package_data': {'': ['*.pxd', '*.h'], },
# do not pack in order to permit linking to csamtools.so
'zip_safe': False,
'use_2to3': True,
}
if __name__ == '__main__':
dist = setup(**metadata)
|
kyleabeauchamp/pysam
|
setup.py
|
Python
|
mit
| 15,401
|
[
"pysam"
] |
83dc9e49bf6309312150fce1dfc8fa15bc45d04656af3fe1f3e930e967681a26
|
from ase import Atoms
from ase.structure import molecule
from ase.parallel import paropen
from gpaw import GPAW, Mixer, MixerDif
from gpaw.utilities.tools import split_formula
# Cubic simulation cell (Angstrom), large enough to isolate each molecule
# from its periodic images.
cell = [14.4, 14.4, 14.4]
# Raw-energy log; paropen is MPI-safe, mode 'a' appends across restarts.
data = paropen('data.txt', 'a')
##Reference from J. Chem. Phys. Vol 120 No. 15, 15 April 2004, page 6898
# (formula, reference TPSS atomization energy) -- presumably kcal/mol, to
# match the eV -> kcal/mol conversion applied below; TODO confirm vs. paper.
tpss_de = [
    ('H2' , 112.9),
    ('LiH', 59.1),
    ('OH' , 106.8),
    ('HF' , 139.1),
    ('Li2', 22.5),
    ('LiF', 135.7),
    ('Be2', 8.1),
    ('CO' , 254.2),
    ('N2' , 227.7),
    ('O2' , 126.9),
    ('F2' , 46.4),
    ('P2' , 116.1),
    ('Cl2', 60.8)
]
# (formula, experimental bond length in Angstrom, experimental atomization
# energy).  The bond length is used below to set each dimer's geometry; the
# last column is printed as the "Exp" reference in atom_en.dat.
exp_bonds_dE = [
    ('H2' , 0.741,109.5),
    ('LiH', 1.595,57.8),
    ('OH' , 0.970,106.4),
    ('HF' , 0.917,140.8),
    ('Li2', 2.673,24.4),
    ('LiF', 1.564,138.9),
    ('Be2', 2.440,3.0),
    ('CO' , 1.128,259.3),
    ('N2' , 1.098,228.5),
    ('O2' , 1.208,120.5),
    ('F2' , 1.412,38.5),
    ('P2' , 1.893,117.3),
    ('Cl2', 1.988,58.0)
]
# The 13 benchmark molecules; their constituent atoms are appended below so
# isolated-atom energies are computed too.
systems = [ a[0] for a in tpss_de ]
ref = [ a[1] for a in tpss_de ]
# Add atoms
# Appending while iterating is deliberate: the loop also visits the appended
# single-atom entries, but they are already in the list so no further growth
# occurs.  (NOTE(review): relies on split_formula('X') == ['X'] for a bare
# atomic symbol -- confirm against gpaw.utilities.tools.)
for formula in systems:
    temp = split_formula(formula)
    for atom in temp:
        if atom not in systems:
            systems.append(atom)
# formula -> (E_PBE, E_TPSS, E_revTPSS, E_M06L), all in eV.
energies = {}
# Calculate energies
# i tracks exp_bonds_dE in step with the first 13 entries of `systems` (the
# molecules).  For the atom entries appended after them i runs past the
# table, but exp_bonds_dE[i] is only read in the len(loa) > 1 branch, which
# single atoms never take.
i = 0
for formula in systems:
    if formula == 'Be2':
        # ase.structure.molecule has no Be2 entry; build the dimer by hand.
        # NOTE(review): this 2.0212 A spacing is immediately overwritten
        # below with the experimental 2.440 A bond length.
        loa = Atoms('Be2', [(0, 0, 0), (0, 0, 2.0212)])
    else:
        loa = molecule(formula)
    loa.set_cell(cell)
    loa.center()
    width = 0.0  # NOTE(review): unused -- no smearing is ever passed to GPAW.
    calc = GPAW(h=.18,                    # real-space grid spacing (Angstrom)
                nbands=-5,                # five empty bands above the occupied ones
                xc='PBE',                 # SCF in PBE; meta-GGAs evaluated non-self-consistently below
                txt=formula + '.txt')
    if len(loa) == 1:
        # Isolated atom: fixed Hund's-rule moments with a spin-aware mixer.
        calc.set(hund=True)
        calc.set(fixmom=True)
        calc.set(mixer=MixerDif())
    else:
        calc.set(mixer=Mixer())
        # Place the diatomic along x at the experimental bond length.
        pos = loa.get_positions()
        pos[1,:] = pos[0,:] + [exp_bonds_dE[i][1],0.0,0.0]
        loa.set_positions(pos)
        loa.center()
    loa.set_calculator(calc)
    try:
        energy = loa.get_potential_energy()
        # Non-self-consistent meta-GGA corrections relative to the PBE energy.
        difft = calc.get_xc_difference('TPSS')
        diffr = calc.get_xc_difference('revTPSS')
        diffm = calc.get_xc_difference('M06L')
        energies[formula]=(energy, energy+difft, energy+diffr,energy+diffm)
    except:
        # Best-effort batch run (Python 2 print-to-file syntax): record the
        # failure and continue with the next system.
        print >>data, formula, 'Error'
    else:
        print >>data, formula, energy, energy+difft, energy+diffr, energy+diffm
        data.flush()
    i += 1
#calculate atomization energies
# De = sum(E_atoms) - E_molecule, converted from eV to kcal/mol via the
# factor 627.5/27.211 (kcal/mol per Hartree over eV per Hartree).
ii =0
# NOTE(review): `file` shadows the builtin of the same name (harmless here).
file = paropen('atom_en.dat', 'a')
print >>file, "# formula \t PBE \t TPSS \t revTPSS \t M06L \t Exp"
for formula in systems[:13]:
    try:
        atoms_formula = split_formula(formula)
        # Start from minus the molecular energy ...
        de_tpss = -1.0 * energies[formula][1]
        de_revtpss = -1.0 * energies[formula][2]
        de_m06l = -1.0 * energies[formula][3]
        de_pbe = -1.0 * energies[formula][0]
        # ... and add each isolated-atom energy.
        for atom_formula in atoms_formula:
            de_tpss += energies[atom_formula][1]
            de_revtpss += energies[atom_formula][2]
            de_m06l += energies[atom_formula][3]
            de_pbe += energies[atom_formula][0]
    except:
        # A KeyError from a failed calculation above lands here.
        print >>file, formula, 'Error'
    else:
        # eV -> kcal/mol
        de_tpss *= 627.5/27.211
        de_revtpss *= 627.5/27.211
        de_m06l *= 627.5/27.211
        de_pbe *= 627.5/27.211
        out = "%s\t%.1f \t%.1f \t%.1f \t%.1f \t%.1f" %(formula, de_pbe, de_tpss, de_revtpss, de_m06l ,exp_bonds_dE[ii][2])
        print >>file, out
        file.flush()
    ii += 1
|
ajylee/gpaw-rtxs
|
gpaw/test/big/tpss/tpss.py
|
Python
|
gpl-3.0
| 3,230
|
[
"ASE",
"GPAW"
] |
4016b52ff720490f3382fdc6dbc45462b27300c1bcabff7f0780e97f2aaebad1
|
#!/usr/bin/env python
# Mesa 3-D graphics library
#
# Copyright (C) 1999-2006 Brian Paul All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# This script is used to generate the get.c file:
# python get_gen.py > get.c
import string
import sys
# Numeric codes for the GL state datatypes used throughout the tables below.
GLint = 1
GLenum = 2
GLfloat = 3
GLdouble = 4
GLboolean = 5
GLfloatN = 6    # A normalized value, such as a color or depth range
GLfixed = 7
# Map from type code to the C type name emitted into get.c.
# GLfloatN is deliberately absent: ConversionFunc() folds it into GLfloat
# before looking a name up here.
TypeStrings = {
    GLint : "GLint",
    GLenum : "GLenum",
    GLfloat : "GLfloat",
    GLdouble : "GLdouble",
    GLboolean : "GLboolean",
    GLfixed : "GLfixed"
}
# Each entry is a tuple of:
# - the GL state name, such as GL_CURRENT_COLOR
# - the state datatype, one of GLint, GLfloat, GLboolean or GLenum
# - list of code fragments to get the state, such as ["ctx->Foo.Bar"]
# - optional extra code or empty string
# - optional extensions to check, or None
#
# Present in ES 1.x and 2.x:
StateVars_common = [
( "GL_ALPHA_BITS", GLint, ["ctx->DrawBuffer->Visual.alphaBits"],
"", None ),
( "GL_BLEND", GLboolean, ["ctx->Color.BlendEnabled"], "", None ),
( "GL_BLEND_SRC", GLenum, ["ctx->Color.BlendSrcRGB"], "", None ),
( "GL_BLUE_BITS", GLint, ["ctx->DrawBuffer->Visual.blueBits"], "", None ),
( "GL_COLOR_CLEAR_VALUE", GLfloatN,
[ "ctx->Color.ClearColor[0]",
"ctx->Color.ClearColor[1]",
"ctx->Color.ClearColor[2]",
"ctx->Color.ClearColor[3]" ], "", None ),
( "GL_COLOR_WRITEMASK", GLint,
[ "ctx->Color.ColorMask[RCOMP] ? 1 : 0",
"ctx->Color.ColorMask[GCOMP] ? 1 : 0",
"ctx->Color.ColorMask[BCOMP] ? 1 : 0",
"ctx->Color.ColorMask[ACOMP] ? 1 : 0" ], "", None ),
( "GL_CULL_FACE", GLboolean, ["ctx->Polygon.CullFlag"], "", None ),
( "GL_CULL_FACE_MODE", GLenum, ["ctx->Polygon.CullFaceMode"], "", None ),
( "GL_DEPTH_BITS", GLint, ["ctx->DrawBuffer->Visual.depthBits"],
"", None ),
( "GL_DEPTH_CLEAR_VALUE", GLfloatN, ["ctx->Depth.Clear"], "", None ),
( "GL_DEPTH_FUNC", GLenum, ["ctx->Depth.Func"], "", None ),
( "GL_DEPTH_RANGE", GLfloatN,
[ "ctx->Viewport.Near", "ctx->Viewport.Far" ], "", None ),
( "GL_DEPTH_TEST", GLboolean, ["ctx->Depth.Test"], "", None ),
( "GL_DEPTH_WRITEMASK", GLboolean, ["ctx->Depth.Mask"], "", None ),
( "GL_DITHER", GLboolean, ["ctx->Color.DitherFlag"], "", None ),
( "GL_FRONT_FACE", GLenum, ["ctx->Polygon.FrontFace"], "", None ),
( "GL_GREEN_BITS", GLint, ["ctx->DrawBuffer->Visual.greenBits"],
"", None ),
( "GL_LINE_WIDTH", GLfloat, ["ctx->Line.Width"], "", None ),
( "GL_ALIASED_LINE_WIDTH_RANGE", GLfloat,
["ctx->Const.MinLineWidth",
"ctx->Const.MaxLineWidth"], "", None ),
( "GL_MAX_ELEMENTS_INDICES", GLint, ["ctx->Const.MaxArrayLockSize"], "", None ),
( "GL_MAX_ELEMENTS_VERTICES", GLint, ["ctx->Const.MaxArrayLockSize"], "", None ),
( "GL_MAX_TEXTURE_SIZE", GLint, ["1 << (ctx->Const.MaxTextureLevels - 1)"], "", None ),
( "GL_MAX_VIEWPORT_DIMS", GLint,
["ctx->Const.MaxViewportWidth", "ctx->Const.MaxViewportHeight"],
"", None ),
( "GL_PACK_ALIGNMENT", GLint, ["ctx->Pack.Alignment"], "", None ),
( "GL_ALIASED_POINT_SIZE_RANGE", GLfloat,
["ctx->Const.MinPointSize",
"ctx->Const.MaxPointSize"], "", None ),
( "GL_POLYGON_OFFSET_FACTOR", GLfloat, ["ctx->Polygon.OffsetFactor "], "", None ),
( "GL_POLYGON_OFFSET_UNITS", GLfloat, ["ctx->Polygon.OffsetUnits "], "", None ),
( "GL_RED_BITS", GLint, ["ctx->DrawBuffer->Visual.redBits"], "", None ),
( "GL_SCISSOR_BOX", GLint,
["ctx->Scissor.X",
"ctx->Scissor.Y",
"ctx->Scissor.Width",
"ctx->Scissor.Height"], "", None ),
( "GL_SCISSOR_TEST", GLboolean, ["ctx->Scissor.Enabled"], "", None ),
( "GL_STENCIL_BITS", GLint, ["ctx->DrawBuffer->Visual.stencilBits"], "", None ),
( "GL_STENCIL_CLEAR_VALUE", GLint, ["ctx->Stencil.Clear"], "", None ),
( "GL_STENCIL_FAIL", GLenum,
["ctx->Stencil.FailFunc[ctx->Stencil.ActiveFace]"], "", None ),
( "GL_STENCIL_FUNC", GLenum,
["ctx->Stencil.Function[ctx->Stencil.ActiveFace]"], "", None ),
( "GL_STENCIL_PASS_DEPTH_FAIL", GLenum,
["ctx->Stencil.ZFailFunc[ctx->Stencil.ActiveFace]"], "", None ),
( "GL_STENCIL_PASS_DEPTH_PASS", GLenum,
["ctx->Stencil.ZPassFunc[ctx->Stencil.ActiveFace]"], "", None ),
( "GL_STENCIL_REF", GLint,
["ctx->Stencil.Ref[ctx->Stencil.ActiveFace]"], "", None ),
( "GL_STENCIL_TEST", GLboolean, ["ctx->Stencil.Enabled"], "", None ),
( "GL_STENCIL_VALUE_MASK", GLint,
["ctx->Stencil.ValueMask[ctx->Stencil.ActiveFace]"], "", None ),
( "GL_STENCIL_WRITEMASK", GLint,
["ctx->Stencil.WriteMask[ctx->Stencil.ActiveFace]"], "", None ),
( "GL_SUBPIXEL_BITS", GLint, ["ctx->Const.SubPixelBits"], "", None ),
( "GL_TEXTURE_BINDING_2D", GLint,
["ctx->Texture.Unit[ctx->Texture.CurrentUnit].CurrentTex[TEXTURE_2D_INDEX]->Name"], "", None ),
( "GL_UNPACK_ALIGNMENT", GLint, ["ctx->Unpack.Alignment"], "", None ),
( "GL_VIEWPORT", GLint, [ "ctx->Viewport.X", "ctx->Viewport.Y",
"ctx->Viewport.Width", "ctx->Viewport.Height" ], "", None ),
# GL_ARB_multitexture
( "GL_ACTIVE_TEXTURE_ARB", GLint,
[ "GL_TEXTURE0_ARB + ctx->Texture.CurrentUnit"], "", ["ARB_multitexture"] ),
# Note that all the OES_* extensions require that the Mesa
# "struct gl_extensions" include a member with the name of
# the extension. That structure does not yet include OES
# extensions (and we're not sure whether it will). If
# it does, all the OES_* extensions below should mark the
# dependency.
# OES_texture_cube_map
( "GL_TEXTURE_BINDING_CUBE_MAP_ARB", GLint,
["ctx->Texture.Unit[ctx->Texture.CurrentUnit].CurrentTex[TEXTURE_CUBE_INDEX]->Name"],
"", None),
( "GL_MAX_CUBE_MAP_TEXTURE_SIZE_ARB", GLint,
["(1 << (ctx->Const.MaxCubeTextureLevels - 1))"],
"", None),
# OES_blend_subtract
( "GL_BLEND_SRC_RGB_EXT", GLenum, ["ctx->Color.BlendSrcRGB"], "", None),
( "GL_BLEND_DST_RGB_EXT", GLenum, ["ctx->Color.BlendDstRGB"], "", None),
( "GL_BLEND_SRC_ALPHA_EXT", GLenum, ["ctx->Color.BlendSrcA"], "", None),
( "GL_BLEND_DST_ALPHA_EXT", GLenum, ["ctx->Color.BlendDstA"], "", None),
# GL_BLEND_EQUATION_RGB, which is what we're really after,
# is defined identically to GL_BLEND_EQUATION.
( "GL_BLEND_EQUATION", GLenum, ["ctx->Color.BlendEquationRGB "], "", None),
( "GL_BLEND_EQUATION_ALPHA_EXT", GLenum, ["ctx->Color.BlendEquationA "],
"", None),
# GL_ARB_texture_compression */
# ( "GL_NUM_COMPRESSED_TEXTURE_FORMATS_ARB", GLint,
# ["_mesa_get_compressed_formats(ctx, NULL, GL_FALSE)"],
# "", ["ARB_texture_compression"] ),
# ( "GL_COMPRESSED_TEXTURE_FORMATS_ARB", GLenum,
# [],
# """GLint formats[100];
# GLuint i, n = _mesa_get_compressed_formats(ctx, formats, GL_FALSE);
# ASSERT(n <= 100);
# for (i = 0; i < n; i++)
# params[i] = ENUM_TO_INT(formats[i]);""",
# ["ARB_texture_compression"] ),
# GL_ARB_multisample
( "GL_SAMPLE_ALPHA_TO_COVERAGE_ARB", GLboolean,
["ctx->Multisample.SampleAlphaToCoverage"], "", ["ARB_multisample"] ),
( "GL_SAMPLE_COVERAGE_ARB", GLboolean,
["ctx->Multisample.SampleCoverage"], "", ["ARB_multisample"] ),
( "GL_SAMPLE_COVERAGE_VALUE_ARB", GLfloat,
["ctx->Multisample.SampleCoverageValue"], "", ["ARB_multisample"] ),
( "GL_SAMPLE_COVERAGE_INVERT_ARB", GLboolean,
["ctx->Multisample.SampleCoverageInvert"], "", ["ARB_multisample"] ),
( "GL_SAMPLE_BUFFERS_ARB", GLint,
["ctx->DrawBuffer->Visual.sampleBuffers"], "", ["ARB_multisample"] ),
( "GL_SAMPLES_ARB", GLint,
["ctx->DrawBuffer->Visual.samples"], "", ["ARB_multisample"] ),
# GL_SGIS_generate_mipmap
( "GL_GENERATE_MIPMAP_HINT_SGIS", GLenum, ["ctx->Hint.GenerateMipmap"],
"", ["SGIS_generate_mipmap"] ),
# GL_ARB_vertex_buffer_object
( "GL_ARRAY_BUFFER_BINDING_ARB", GLint,
["ctx->Array.ArrayBufferObj->Name"], "", ["ARB_vertex_buffer_object"] ),
# GL_WEIGHT_ARRAY_BUFFER_BINDING_ARB - not supported
( "GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB", GLint,
["ctx->Array.ElementArrayBufferObj->Name"],
"", ["ARB_vertex_buffer_object"] ),
# GL_OES_read_format
( "GL_IMPLEMENTATION_COLOR_READ_TYPE_OES", GLint,
["_mesa_get_color_read_type(ctx)"], "", ["OES_read_format"] ),
( "GL_IMPLEMENTATION_COLOR_READ_FORMAT_OES", GLint,
["_mesa_get_color_read_format(ctx)"], "", ["OES_read_format"] ),
# GL_OES_framebuffer_object
( "GL_FRAMEBUFFER_BINDING_EXT", GLint, ["ctx->DrawBuffer->Name"], "",
None),
( "GL_RENDERBUFFER_BINDING_EXT", GLint,
["ctx->CurrentRenderbuffer ? ctx->CurrentRenderbuffer->Name : 0"], "",
None),
( "GL_MAX_RENDERBUFFER_SIZE_EXT", GLint,
["ctx->Const.MaxRenderbufferSize"], "",
None),
# OpenGL ES 1/2 special:
( "GL_NUM_COMPRESSED_TEXTURE_FORMATS_ARB", GLint,
[ "ARRAY_SIZE(compressed_formats)" ],
"",
None ),
("GL_COMPRESSED_TEXTURE_FORMATS_ARB", GLint,
[],
"""
int i;
for (i = 0; i < ARRAY_SIZE(compressed_formats); i++) {
params[i] = compressed_formats[i];
}""",
None ),
( "GL_POLYGON_OFFSET_FILL", GLboolean, ["ctx->Polygon.OffsetFill"], "", None ),
]
# Only present in ES 1.x:
StateVars_es1 = [
( "GL_MAX_LIGHTS", GLint, ["ctx->Const.MaxLights"], "", None ),
( "GL_LIGHT0", GLboolean, ["ctx->Light.Light[0].Enabled"], "", None ),
( "GL_LIGHT1", GLboolean, ["ctx->Light.Light[1].Enabled"], "", None ),
( "GL_LIGHT2", GLboolean, ["ctx->Light.Light[2].Enabled"], "", None ),
( "GL_LIGHT3", GLboolean, ["ctx->Light.Light[3].Enabled"], "", None ),
( "GL_LIGHT4", GLboolean, ["ctx->Light.Light[4].Enabled"], "", None ),
( "GL_LIGHT5", GLboolean, ["ctx->Light.Light[5].Enabled"], "", None ),
( "GL_LIGHT6", GLboolean, ["ctx->Light.Light[6].Enabled"], "", None ),
( "GL_LIGHT7", GLboolean, ["ctx->Light.Light[7].Enabled"], "", None ),
( "GL_LIGHTING", GLboolean, ["ctx->Light.Enabled"], "", None ),
( "GL_LIGHT_MODEL_AMBIENT", GLfloatN,
["ctx->Light.Model.Ambient[0]",
"ctx->Light.Model.Ambient[1]",
"ctx->Light.Model.Ambient[2]",
"ctx->Light.Model.Ambient[3]"], "", None ),
( "GL_LIGHT_MODEL_TWO_SIDE", GLboolean, ["ctx->Light.Model.TwoSide"], "", None ),
( "GL_ALPHA_TEST", GLboolean, ["ctx->Color.AlphaEnabled"], "", None ),
( "GL_ALPHA_TEST_FUNC", GLenum, ["ctx->Color.AlphaFunc"], "", None ),
( "GL_ALPHA_TEST_REF", GLfloatN, ["ctx->Color.AlphaRef"], "", None ),
( "GL_BLEND_DST", GLenum, ["ctx->Color.BlendDstRGB"], "", None ),
( "GL_MAX_CLIP_PLANES", GLint, ["ctx->Const.MaxClipPlanes"], "", None ),
( "GL_CLIP_PLANE0", GLboolean,
[ "(ctx->Transform.ClipPlanesEnabled >> 0) & 1" ], "", None ),
( "GL_CLIP_PLANE1", GLboolean,
[ "(ctx->Transform.ClipPlanesEnabled >> 1) & 1" ], "", None ),
( "GL_CLIP_PLANE2", GLboolean,
[ "(ctx->Transform.ClipPlanesEnabled >> 2) & 1" ], "", None ),
( "GL_CLIP_PLANE3", GLboolean,
[ "(ctx->Transform.ClipPlanesEnabled >> 3) & 1" ], "", None ),
( "GL_CLIP_PLANE4", GLboolean,
[ "(ctx->Transform.ClipPlanesEnabled >> 4) & 1" ], "", None ),
( "GL_CLIP_PLANE5", GLboolean,
[ "(ctx->Transform.ClipPlanesEnabled >> 5) & 1" ], "", None ),
( "GL_COLOR_MATERIAL", GLboolean,
["ctx->Light.ColorMaterialEnabled"], "", None ),
( "GL_CURRENT_COLOR", GLfloatN,
[ "ctx->Current.Attrib[VERT_ATTRIB_COLOR0][0]",
"ctx->Current.Attrib[VERT_ATTRIB_COLOR0][1]",
"ctx->Current.Attrib[VERT_ATTRIB_COLOR0][2]",
"ctx->Current.Attrib[VERT_ATTRIB_COLOR0][3]" ],
"FLUSH_CURRENT(ctx, 0);", None ),
( "GL_CURRENT_NORMAL", GLfloatN,
[ "ctx->Current.Attrib[VERT_ATTRIB_NORMAL][0]",
"ctx->Current.Attrib[VERT_ATTRIB_NORMAL][1]",
"ctx->Current.Attrib[VERT_ATTRIB_NORMAL][2]"],
"FLUSH_CURRENT(ctx, 0);", None ),
( "GL_CURRENT_TEXTURE_COORDS", GLfloat,
["ctx->Current.Attrib[VERT_ATTRIB_TEX0 + texUnit][0]",
"ctx->Current.Attrib[VERT_ATTRIB_TEX0 + texUnit][1]",
"ctx->Current.Attrib[VERT_ATTRIB_TEX0 + texUnit][2]",
"ctx->Current.Attrib[VERT_ATTRIB_TEX0 + texUnit][3]"],
"const GLuint texUnit = ctx->Texture.CurrentUnit;", None ),
( "GL_DISTANCE_ATTENUATION_EXT", GLfloat,
["ctx->Point.Params[0]",
"ctx->Point.Params[1]",
"ctx->Point.Params[2]"], "", None ),
( "GL_FOG", GLboolean, ["ctx->Fog.Enabled"], "", None ),
( "GL_FOG_COLOR", GLfloatN,
[ "ctx->Fog.Color[0]",
"ctx->Fog.Color[1]",
"ctx->Fog.Color[2]",
"ctx->Fog.Color[3]" ], "", None ),
( "GL_FOG_DENSITY", GLfloat, ["ctx->Fog.Density"], "", None ),
( "GL_FOG_END", GLfloat, ["ctx->Fog.End"], "", None ),
( "GL_FOG_HINT", GLenum, ["ctx->Hint.Fog"], "", None ),
( "GL_FOG_MODE", GLenum, ["ctx->Fog.Mode"], "", None ),
( "GL_FOG_START", GLfloat, ["ctx->Fog.Start"], "", None ),
( "GL_LINE_SMOOTH", GLboolean, ["ctx->Line.SmoothFlag"], "", None ),
( "GL_LINE_SMOOTH_HINT", GLenum, ["ctx->Hint.LineSmooth"], "", None ),
( "GL_LINE_WIDTH_RANGE", GLfloat,
["ctx->Const.MinLineWidthAA",
"ctx->Const.MaxLineWidthAA"], "", None ),
( "GL_COLOR_LOGIC_OP", GLboolean, ["ctx->Color.ColorLogicOpEnabled"], "", None ),
( "GL_LOGIC_OP_MODE", GLenum, ["ctx->Color.LogicOp"], "", None ),
( "GL_MATRIX_MODE", GLenum, ["ctx->Transform.MatrixMode"], "", None ),
( "GL_MAX_MODELVIEW_STACK_DEPTH", GLint, ["MAX_MODELVIEW_STACK_DEPTH"], "", None ),
( "GL_MAX_PROJECTION_STACK_DEPTH", GLint, ["MAX_PROJECTION_STACK_DEPTH"], "", None ),
( "GL_MAX_TEXTURE_STACK_DEPTH", GLint, ["MAX_TEXTURE_STACK_DEPTH"], "", None ),
( "GL_MODELVIEW_MATRIX", GLfloat,
[ "matrix[0]", "matrix[1]", "matrix[2]", "matrix[3]",
"matrix[4]", "matrix[5]", "matrix[6]", "matrix[7]",
"matrix[8]", "matrix[9]", "matrix[10]", "matrix[11]",
"matrix[12]", "matrix[13]", "matrix[14]", "matrix[15]" ],
"const GLfloat *matrix = ctx->ModelviewMatrixStack.Top->m;", None ),
( "GL_MODELVIEW_STACK_DEPTH", GLint, ["ctx->ModelviewMatrixStack.Depth + 1"], "", None ),
( "GL_NORMALIZE", GLboolean, ["ctx->Transform.Normalize"], "", None ),
( "GL_PACK_SKIP_IMAGES_EXT", GLint, ["ctx->Pack.SkipImages"], "", None ),
( "GL_PERSPECTIVE_CORRECTION_HINT", GLenum,
["ctx->Hint.PerspectiveCorrection"], "", None ),
( "GL_POINT_SIZE", GLfloat, ["ctx->Point.Size"], "", None ),
( "GL_POINT_SIZE_RANGE", GLfloat,
["ctx->Const.MinPointSizeAA",
"ctx->Const.MaxPointSizeAA"], "", None ),
( "GL_POINT_SMOOTH", GLboolean, ["ctx->Point.SmoothFlag"], "", None ),
( "GL_POINT_SMOOTH_HINT", GLenum, ["ctx->Hint.PointSmooth"], "", None ),
( "GL_POINT_SIZE_MIN_EXT", GLfloat, ["ctx->Point.MinSize"], "", None ),
( "GL_POINT_SIZE_MAX_EXT", GLfloat, ["ctx->Point.MaxSize"], "", None ),
( "GL_POINT_FADE_THRESHOLD_SIZE_EXT", GLfloat,
["ctx->Point.Threshold"], "", None ),
( "GL_PROJECTION_MATRIX", GLfloat,
[ "matrix[0]", "matrix[1]", "matrix[2]", "matrix[3]",
"matrix[4]", "matrix[5]", "matrix[6]", "matrix[7]",
"matrix[8]", "matrix[9]", "matrix[10]", "matrix[11]",
"matrix[12]", "matrix[13]", "matrix[14]", "matrix[15]" ],
"const GLfloat *matrix = ctx->ProjectionMatrixStack.Top->m;", None ),
( "GL_PROJECTION_STACK_DEPTH", GLint,
["ctx->ProjectionMatrixStack.Depth + 1"], "", None ),
( "GL_RESCALE_NORMAL", GLboolean,
["ctx->Transform.RescaleNormals"], "", None ),
( "GL_SHADE_MODEL", GLenum, ["ctx->Light.ShadeModel"], "", None ),
( "GL_TEXTURE_2D", GLboolean, ["_mesa_IsEnabled(GL_TEXTURE_2D)"], "", None ),
( "GL_TEXTURE_MATRIX", GLfloat,
["matrix[0]", "matrix[1]", "matrix[2]", "matrix[3]",
"matrix[4]", "matrix[5]", "matrix[6]", "matrix[7]",
"matrix[8]", "matrix[9]", "matrix[10]", "matrix[11]",
"matrix[12]", "matrix[13]", "matrix[14]", "matrix[15]" ],
"const GLfloat *matrix = ctx->TextureMatrixStack[ctx->Texture.CurrentUnit].Top->m;", None ),
( "GL_TEXTURE_STACK_DEPTH", GLint,
["ctx->TextureMatrixStack[ctx->Texture.CurrentUnit].Depth + 1"], "", None ),
( "GL_VERTEX_ARRAY", GLboolean, ["ctx->Array.ArrayObj->Vertex.Enabled"], "", None ),
( "GL_VERTEX_ARRAY_SIZE", GLint, ["ctx->Array.ArrayObj->Vertex.Size"], "", None ),
( "GL_VERTEX_ARRAY_TYPE", GLenum, ["ctx->Array.ArrayObj->Vertex.Type"], "", None ),
( "GL_VERTEX_ARRAY_STRIDE", GLint, ["ctx->Array.ArrayObj->Vertex.Stride"], "", None ),
( "GL_NORMAL_ARRAY", GLenum, ["ctx->Array.ArrayObj->Normal.Enabled"], "", None ),
( "GL_NORMAL_ARRAY_TYPE", GLenum, ["ctx->Array.ArrayObj->Normal.Type"], "", None ),
( "GL_NORMAL_ARRAY_STRIDE", GLint, ["ctx->Array.ArrayObj->Normal.Stride"], "", None ),
( "GL_COLOR_ARRAY", GLboolean, ["ctx->Array.ArrayObj->Color.Enabled"], "", None ),
( "GL_COLOR_ARRAY_SIZE", GLint, ["ctx->Array.ArrayObj->Color.Size"], "", None ),
( "GL_COLOR_ARRAY_TYPE", GLenum, ["ctx->Array.ArrayObj->Color.Type"], "", None ),
( "GL_COLOR_ARRAY_STRIDE", GLint, ["ctx->Array.ArrayObj->Color.Stride"], "", None ),
( "GL_TEXTURE_COORD_ARRAY", GLboolean,
["ctx->Array.ArrayObj->TexCoord[ctx->Array.ActiveTexture].Enabled"], "", None ),
( "GL_TEXTURE_COORD_ARRAY_SIZE", GLint,
["ctx->Array.ArrayObj->TexCoord[ctx->Array.ActiveTexture].Size"], "", None ),
( "GL_TEXTURE_COORD_ARRAY_TYPE", GLenum,
["ctx->Array.ArrayObj->TexCoord[ctx->Array.ActiveTexture].Type"], "", None ),
( "GL_TEXTURE_COORD_ARRAY_STRIDE", GLint,
["ctx->Array.ArrayObj->TexCoord[ctx->Array.ActiveTexture].Stride"], "", None ),
# GL_ARB_multitexture
( "GL_MAX_TEXTURE_UNITS_ARB", GLint,
["ctx->Const.MaxTextureUnits"], "", ["ARB_multitexture"] ),
( "GL_CLIENT_ACTIVE_TEXTURE_ARB", GLint,
["GL_TEXTURE0_ARB + ctx->Array.ActiveTexture"], "", ["ARB_multitexture"] ),
# OES_texture_cube_map
( "GL_TEXTURE_CUBE_MAP_ARB", GLboolean,
["_mesa_IsEnabled(GL_TEXTURE_CUBE_MAP_ARB)"], "", None),
( "GL_TEXTURE_GEN_STR_OES", GLboolean,
# S, T, and R are always set at the same time
["((ctx->Texture.Unit[ctx->Texture.CurrentUnit].TexGenEnabled & S_BIT) ? 1 : 0)"], "", None),
# ARB_multisample
( "GL_MULTISAMPLE_ARB", GLboolean,
["ctx->Multisample.Enabled"], "", ["ARB_multisample"] ),
( "GL_SAMPLE_ALPHA_TO_ONE_ARB", GLboolean,
["ctx->Multisample.SampleAlphaToOne"], "", ["ARB_multisample"] ),
( "GL_VERTEX_ARRAY_BUFFER_BINDING_ARB", GLint,
["ctx->Array.ArrayObj->Vertex.BufferObj->Name"], "", ["ARB_vertex_buffer_object"] ),
( "GL_NORMAL_ARRAY_BUFFER_BINDING_ARB", GLint,
["ctx->Array.ArrayObj->Normal.BufferObj->Name"], "", ["ARB_vertex_buffer_object"] ),
( "GL_COLOR_ARRAY_BUFFER_BINDING_ARB", GLint,
["ctx->Array.ArrayObj->Color.BufferObj->Name"], "", ["ARB_vertex_buffer_object"] ),
( "GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING_ARB", GLint,
["ctx->Array.ArrayObj->TexCoord[ctx->Array.ActiveTexture].BufferObj->Name"],
"", ["ARB_vertex_buffer_object"] ),
# OES_point_sprite
( "GL_POINT_SPRITE_NV", GLboolean, ["ctx->Point.PointSprite"], # == GL_POINT_SPRITE_ARB
"", None),
# GL_ARB_fragment_shader
( "GL_MAX_FRAGMENT_UNIFORM_COMPONENTS_ARB", GLint,
["ctx->Const.FragmentProgram.MaxUniformComponents"], "",
["ARB_fragment_shader"] ),
# GL_ARB_vertex_shader
( "GL_MAX_VERTEX_UNIFORM_COMPONENTS_ARB", GLint,
["ctx->Const.VertexProgram.MaxUniformComponents"], "",
["ARB_vertex_shader"] ),
( "GL_MAX_VARYING_FLOATS_ARB", GLint,
["ctx->Const.MaxVarying * 4"], "", ["ARB_vertex_shader"] ),
# OES_matrix_get
( "GL_MODELVIEW_MATRIX_FLOAT_AS_INT_BITS_OES", GLint, [],
"""
/* See GL_OES_matrix_get */
{
const GLfloat *matrix = ctx->ModelviewMatrixStack.Top->m;
memcpy(params, matrix, 16 * sizeof(GLint));
}""",
None),
( "GL_PROJECTION_MATRIX_FLOAT_AS_INT_BITS_OES", GLint, [],
"""
/* See GL_OES_matrix_get */
{
const GLfloat *matrix = ctx->ProjectionMatrixStack.Top->m;
memcpy(params, matrix, 16 * sizeof(GLint));
}""",
None),
( "GL_TEXTURE_MATRIX_FLOAT_AS_INT_BITS_OES", GLint, [],
"""
/* See GL_OES_matrix_get */
{
const GLfloat *matrix =
ctx->TextureMatrixStack[ctx->Texture.CurrentUnit].Top->m;
memcpy(params, matrix, 16 * sizeof(GLint));
}""",
None),
# OES_point_size_array
("GL_POINT_SIZE_ARRAY_OES", GLboolean,
["ctx->Array.ArrayObj->PointSize.Enabled"], "", None),
("GL_POINT_SIZE_ARRAY_TYPE_OES", GLenum,
["ctx->Array.ArrayObj->PointSize.Type"], "", None),
("GL_POINT_SIZE_ARRAY_STRIDE_OES", GLint,
["ctx->Array.ArrayObj->PointSize.Stride"], "", None),
("GL_POINT_SIZE_ARRAY_BUFFER_BINDING_OES", GLint,
["ctx->Array.ArrayObj->PointSize.BufferObj->Name"], "", None),
# GL_EXT_texture_lod_bias
( "GL_MAX_TEXTURE_LOD_BIAS_EXT", GLfloat,
["ctx->Const.MaxTextureLodBias"], "", ["EXT_texture_lod_bias"]),
# GL_EXT_texture_filter_anisotropic
( "GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT", GLfloat,
["ctx->Const.MaxTextureMaxAnisotropy"], "", ["EXT_texture_filter_anisotropic"]),
]
# Only present in ES 2.x:
StateVars_es2 = [
# XXX These entries are not spec'ed for GLES 2, but are
# needed for Mesa's GLSL:
( "GL_MAX_LIGHTS", GLint, ["ctx->Const.MaxLights"], "", None ),
( "GL_MAX_CLIP_PLANES", GLint, ["ctx->Const.MaxClipPlanes"], "", None ),
( "GL_MAX_TEXTURE_COORDS_ARB", GLint, # == GL_MAX_TEXTURE_COORDS_NV
["ctx->Const.MaxTextureCoordUnits"], "",
["ARB_fragment_program", "NV_fragment_program"] ),
( "GL_MAX_DRAW_BUFFERS_ARB", GLint,
["ctx->Const.MaxDrawBuffers"], "", ["ARB_draw_buffers"] ),
( "GL_BLEND_COLOR_EXT", GLfloatN,
[ "ctx->Color.BlendColor[0]",
"ctx->Color.BlendColor[1]",
"ctx->Color.BlendColor[2]",
"ctx->Color.BlendColor[3]"], "", None ),
# This is required for GLES2, but also needed for GLSL:
( "GL_MAX_TEXTURE_IMAGE_UNITS_ARB", GLint, # == GL_MAX_TEXTURE_IMAGE_UNI
["ctx->Const.MaxTextureImageUnits"], "",
["ARB_fragment_program", "NV_fragment_program"] ),
( "GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS_ARB", GLint,
["ctx->Const.MaxVertexTextureImageUnits"], "", ["ARB_vertex_shader"] ),
( "GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS_ARB", GLint,
["MAX_COMBINED_TEXTURE_IMAGE_UNITS"], "", ["ARB_vertex_shader"] ),
# GL_ARB_shader_objects
# Actually, this token isn't part of GL_ARB_shader_objects, but is
# close enough for now.
( "GL_CURRENT_PROGRAM", GLint,
["ctx->Shader.CurrentProgram ? ctx->Shader.CurrentProgram->Name : 0"],
"", ["ARB_shader_objects"] ),
# OpenGL 2.0
( "GL_STENCIL_BACK_FUNC", GLenum, ["ctx->Stencil.Function[1]"], "", None ),
( "GL_STENCIL_BACK_VALUE_MASK", GLint, ["ctx->Stencil.ValueMask[1]"], "", None ),
( "GL_STENCIL_BACK_WRITEMASK", GLint, ["ctx->Stencil.WriteMask[1]"], "", None ),
( "GL_STENCIL_BACK_REF", GLint, ["ctx->Stencil.Ref[1]"], "", None ),
( "GL_STENCIL_BACK_FAIL", GLenum, ["ctx->Stencil.FailFunc[1]"], "", None ),
( "GL_STENCIL_BACK_PASS_DEPTH_FAIL", GLenum, ["ctx->Stencil.ZFailFunc[1]"], "", None ),
( "GL_STENCIL_BACK_PASS_DEPTH_PASS", GLenum, ["ctx->Stencil.ZPassFunc[1]"], "", None ),
( "GL_MAX_VERTEX_ATTRIBS_ARB", GLint,
["ctx->Const.VertexProgram.MaxAttribs"], "", ["ARB_vertex_program"] ),
# OES_texture_3D
( "GL_TEXTURE_BINDING_3D", GLint,
["ctx->Texture.Unit[ctx->Texture.CurrentUnit].CurrentTex[TEXTURE_3D_INDEX]->Name"], "", None),
( "GL_MAX_3D_TEXTURE_SIZE", GLint, ["1 << (ctx->Const.Max3DTextureLevels - 1)"], "", None),
# OES_standard_derivatives
( "GL_FRAGMENT_SHADER_DERIVATIVE_HINT_ARB", GLenum,
["ctx->Hint.FragmentShaderDerivative"], "", ["ARB_fragment_shader"] ),
# Unique to ES 2 (not in full GL)
( "GL_MAX_FRAGMENT_UNIFORM_VECTORS", GLint,
["ctx->Const.FragmentProgram.MaxUniformComponents / 4"], "", None),
( "GL_MAX_VARYING_VECTORS", GLint,
["ctx->Const.MaxVarying"], "", None),
( "GL_MAX_VERTEX_UNIFORM_VECTORS", GLint,
["ctx->Const.VertexProgram.MaxUniformComponents / 4"], "", None),
( "GL_SHADER_COMPILER", GLint, ["1"], "", None),
# OES_get_program_binary
( "GL_NUM_SHADER_BINARY_FORMATS", GLint, ["0"], "", None),
( "GL_SHADER_BINARY_FORMATS", GLint, [], "", None),
]
def ConversionFunc(fromType, toType):
    """Return the name of the macro to convert between two data types.

    Returns "" when no conversion macro is needed (identical types, or a
    cast the C compiler performs implicitly).  The generic case builds
    names such as "ENUM_TO_FIXED" from the C type strings with the "GL"
    prefix stripped; GLfloatN (a normalized float) converts exactly like
    GLfloat.
    """
    if fromType == toType:
        return ""
    elif fromType == GLfloat and toType == GLint:
        return "IROUND"
    elif fromType == GLfloatN and toType == GLfloat:
        return ""
    elif fromType == GLint and toType == GLfloat: # but not GLfloatN!
        return "(GLfloat)"
    else:
        if fromType == GLfloatN:
            fromType = GLfloat
        # Use the str.upper() method rather than string.upper(): the
        # string-module function forms were removed in Python 3, and the
        # method works identically on Python 2.
        fromStr = TypeStrings[fromType][2:].upper()
        toStr = TypeStrings[toType][2:].upper()
        return fromStr + "_TO_" + toStr
def EmitGetFunction(stateVars, returnType):
    """Emit the code to implement glGetBooleanv, glGetIntegerv or glGetFloatv.

    Prints (Python 2 print statements) one complete C function to stdout:
    a switch over pname with one case per stateVars entry, including the
    CHECK_EXTn extension guards, any per-case setup code, and the value
    conversion macro chosen by ConversionFunc().
    """
    assert (returnType == GLboolean or
            returnType == GLint or
            returnType == GLfloat or
            returnType == GLfixed)
    strType = TypeStrings[returnType]
    # Capitalize first letter of return type
    if returnType == GLint:
        function = "_mesa_GetIntegerv"
    elif returnType == GLboolean:
        function = "_mesa_GetBooleanv"
    elif returnType == GLfloat:
        function = "_mesa_GetFloatv"
    elif returnType == GLfixed:
        function = "_mesa_GetFixedv"
    else:
        # NOTE(review): abort() is not a Python builtin; unreachable given
        # the assert above, but would raise NameError if ever hit.
        abort()
    # Function prologue: validate params, bring derived state up to date.
    print "void GLAPIENTRY"
    print "%s( GLenum pname, %s *params )" % (function, strType)
    print "{"
    print " GET_CURRENT_CONTEXT(ctx);"
    print " ASSERT_OUTSIDE_BEGIN_END(ctx);"
    print ""
    print " if (!params)"
    print " return;"
    print ""
    print " if (ctx->NewState)"
    print " _mesa_update_state(ctx);"
    print ""
    print " switch (pname) {"
    # One case per state variable.
    for (name, varType, state, optionalCode, extensions) in stateVars:
        print " case " + name + ":"
        # Guard the case with the required extension check(s), if any.
        if extensions:
            if len(extensions) == 1:
                print (' CHECK_EXT1(%s, "%s");' %
                       (extensions[0], function))
            elif len(extensions) == 2:
                print (' CHECK_EXT2(%s, %s, "%s");' %
                       (extensions[0], extensions[1], function))
            elif len(extensions) == 3:
                print (' CHECK_EXT3(%s, %s, %s, "%s");' %
                       (extensions[0], extensions[1], extensions[2], function))
            else:
                assert len(extensions) == 4
                print (' CHECK_EXT4(%s, %s, %s, %s, "%s");' %
                       (extensions[0], extensions[1], extensions[2], extensions[3], function))
        # Per-case setup code needs its own scope for local declarations.
        if optionalCode:
            print " {"
            print " " + optionalCode
        conversion = ConversionFunc(varType, returnType)
        # Assign each state expression to params[i], converted if needed.
        n = len(state)
        for i in range(n):
            if conversion:
                print " params[%d] = %s(%s);" % (i, conversion, state[i])
            else:
                print " params[%d] = %s;" % (i, state[i])
        if optionalCode:
            print " }"
        print " break;"
    print " default:"
    print ' _mesa_error(ctx, GL_INVALID_ENUM, "gl%s(pname=0x%%x)", pname);' % function
    print " }"
    print "}"
    print ""
    return
def EmitHeader():
"""Print the get.c file header."""
print """
/***
*** NOTE!!! DO NOT EDIT THIS FILE!!! IT IS GENERATED BY get_gen.py
***/
#include "main/glheader.h"
#include "main/context.h"
#include "main/enable.h"
#include "main/extensions.h"
#include "main/fbobject.h"
#include "main/get.h"
#include "main/macros.h"
#include "main/mtypes.h"
#include "main/state.h"
#include "main/texcompress.h"
#include "main/framebuffer.h"
/* ES1 tokens that should be in gl.h but aren't */
#define GL_MAX_ELEMENTS_INDICES 0x80E9
#define GL_MAX_ELEMENTS_VERTICES 0x80E8
/* ES2 special tokens */
#define GL_MAX_FRAGMENT_UNIFORM_VECTORS 0x8DFD
#define GL_MAX_VARYING_VECTORS 0x8DFC
#define GL_MAX_VARYING_VECTORS 0x8DFC
#define GL_MAX_VERTEX_UNIFORM_VECTORS 0x8DFB
#define GL_SHADER_COMPILER 0x8DFA
#define GL_PLATFORM_BINARY 0x8D63
#define GL_SHADER_BINARY_FORMATS 0x8DF8
#define GL_NUM_SHADER_BINARY_FORMATS 0x8DF9
#ifndef GL_OES_matrix_get
#define GL_MODELVIEW_MATRIX_FLOAT_AS_INT_BITS_OES 0x898D
#define GL_PROJECTION_MATRIX_FLOAT_AS_INT_BITS_OES 0x898E
#define GL_TEXTURE_MATRIX_FLOAT_AS_INT_BITS_OES 0x898F
#endif
#ifndef GL_OES_compressed_paletted_texture
#define GL_PALETTE4_RGB8_OES 0x8B90
#define GL_PALETTE4_RGBA8_OES 0x8B91
#define GL_PALETTE4_R5_G6_B5_OES 0x8B92
#define GL_PALETTE4_RGBA4_OES 0x8B93
#define GL_PALETTE4_RGB5_A1_OES 0x8B94
#define GL_PALETTE8_RGB8_OES 0x8B95
#define GL_PALETTE8_RGBA8_OES 0x8B96
#define GL_PALETTE8_R5_G6_B5_OES 0x8B97
#define GL_PALETTE8_RGBA4_OES 0x8B98
#define GL_PALETTE8_RGB5_A1_OES 0x8B99
#endif
/* GL_OES_texture_cube_map */
#ifndef GL_OES_texture_cube_map
#define GL_TEXTURE_GEN_STR_OES 0x8D60
#endif
#define FLOAT_TO_BOOLEAN(X) ( (X) ? GL_TRUE : GL_FALSE )
#define FLOAT_TO_FIXED(F) ( ((F) * 65536.0f > INT_MAX) ? INT_MAX : \\
((F) * 65536.0f < INT_MIN) ? INT_MIN : \\
(GLint) ((F) * 65536.0f) )
#define INT_TO_BOOLEAN(I) ( (I) ? GL_TRUE : GL_FALSE )
#define INT_TO_FIXED(I) ( ((I) > SHRT_MAX) ? INT_MAX : \\
((I) < SHRT_MIN) ? INT_MIN : \\
(GLint) ((I) * 65536) )
#define BOOLEAN_TO_INT(B) ( (GLint) (B) )
#define BOOLEAN_TO_FLOAT(B) ( (B) ? 1.0F : 0.0F )
#define BOOLEAN_TO_FIXED(B) ( (GLint) ((B) ? 1 : 0) << 16 )
#define ENUM_TO_FIXED(E) (E)
/*
* Check if named extension is enabled, if not generate error and return.
*/
#define CHECK_EXT1(EXT1, FUNC) \\
if (!ctx->Extensions.EXT1) { \\
_mesa_error(ctx, GL_INVALID_ENUM, FUNC "(0x%x)", (int) pname); \\
return; \\
}
/*
* Check if either of two extensions is enabled.
*/
#define CHECK_EXT2(EXT1, EXT2, FUNC) \\
if (!ctx->Extensions.EXT1 && !ctx->Extensions.EXT2) { \\
_mesa_error(ctx, GL_INVALID_ENUM, FUNC "(0x%x)", (int) pname); \\
return; \\
}
/*
* Check if either of three extensions is enabled.
*/
#define CHECK_EXT3(EXT1, EXT2, EXT3, FUNC) \\
if (!ctx->Extensions.EXT1 && !ctx->Extensions.EXT2 && \\
!ctx->Extensions.EXT3) { \\
_mesa_error(ctx, GL_INVALID_ENUM, FUNC "(0x%x)", (int) pname); \\
return; \\
}
/*
* Check if either of four extensions is enabled.
*/
#define CHECK_EXT4(EXT1, EXT2, EXT3, EXT4, FUNC) \\
if (!ctx->Extensions.EXT1 && !ctx->Extensions.EXT2 && \\
!ctx->Extensions.EXT3 && !ctx->Extensions.EXT4) { \\
_mesa_error(ctx, GL_INVALID_ENUM, FUNC "(0x%x)", (int) pname); \\
return; \\
}
/**
* List of compressed texture formats supported by ES.
*/
static GLenum compressed_formats[] = {
GL_PALETTE4_RGB8_OES,
GL_PALETTE4_RGBA8_OES,
GL_PALETTE4_R5_G6_B5_OES,
GL_PALETTE4_RGBA4_OES,
GL_PALETTE4_RGB5_A1_OES,
GL_PALETTE8_RGB8_OES,
GL_PALETTE8_RGBA8_OES,
GL_PALETTE8_R5_G6_B5_OES,
GL_PALETTE8_RGBA4_OES,
GL_PALETTE8_RGB5_A1_OES
};
#define ARRAY_SIZE(A) (sizeof(A) / sizeof(A[0]))
void GLAPIENTRY
_mesa_GetFixedv( GLenum pname, GLfixed *params );
"""
return
def EmitAll(stateVars, API):
    """Emit the generated C source: header boilerplate followed by one
    glGet* query function per return type (boolean, float, int, and —
    for ES1 only — fixed point)."""
    EmitHeader()
    return_types = [GLboolean, GLfloat, GLint]
    if API == 1:
        # Fixed-point queries exist only in the OpenGL ES 1.x API.
        return_types.append(GLfixed)
    for rtype in return_types:
        EmitGetFunction(stateVars, rtype)
def main(args):
    """Entry point: pick the ES1 or ES2 state-variable tables based on
    argv[1] ("1" or "2"; anything else defaults to ES1) and emit the
    corresponding glGet* implementation file."""
    # Default to the ES1 API unless "2" is explicitly requested.
    API = 2 if (len(args) > 1 and args[1] == "2") else 1
    if API == 1:
        selected = StateVars_common + StateVars_es1
    else:
        selected = StateVars_common + StateVars_es2
    EmitAll(selected, API)
# Run the generator immediately.
# NOTE(review): there is no ``if __name__ == '__main__'`` guard, so merely
# importing this module executes it — confirm that is intended before
# adding a guard.
main(sys.argv)
|
CPFDSoftware-Tony/gmv
|
utils/Mesa/Mesa-7.8.2/src/mesa/es/main/get_gen.py
|
Python
|
gpl-3.0
| 33,026
|
[
"Brian"
] |
34f5400ed8914738056eedb6c002d64dc9381b4ca08b50c639e3c06f8783d73b
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import re
import sys
import string
import hashlib
import itertools
from collections import defaultdict
try:
from collections import OrderedDict
except ImportError:
from .oldpymodules import OrderedDict
from .exceptions import *
from .psiutil import search_file
from .molecule import Molecule
from .periodictable import *
from .libmintsgshell import GaussianShell, ShellInfo
from .libmintsbasissetparser import Gaussian94BasisSetParser
from .basislist import corresponding_basis
# Python 3 removed ``basestring``; alias it to ``str`` so the
# isinstance(..., basestring) checks below work on both major versions.
if sys.version_info >= (3,0):
    basestring = str
# Module-level registry of named basis-set specifications.
# NOTE(review): not populated anywhere in this chunk — presumably filled
# in by callers elsewhere; verify before removing.
basishorde = {}
class BasisSet(object):
"""Basis set container class
Reads the basis set from a checkpoint file object. Also reads the molecule
from the checkpoint file storing the information in an internal Molecule class
which can be accessed using molecule().
"""
# <<< Globals >>>
# Has static information been initialized?
initialized_shared = False
# Global arrays of x, y, z exponents (Need libmint for max ang mom)
LIBINT_MAX_AM = 6 # TODO
exp_ao = [[] for l in range(LIBINT_MAX_AM)]
    def __init__(self, *args):
        """Construct a BasisSet, dispatching on the argument pattern.

        Supported call signatures:
        * ``BasisSet()`` -- zero (dummy) AO basis: one s function at the
          origin (delegates to constructor_zero_ao_basis).
        * ``BasisSet(bs, center)`` -- one-atom basis extracted from the
          existing BasisSet *bs* (delegates to constructor_basisset_center).
        * ``BasisSet(role, mol, shell_map)`` -- full basis for Molecule
          *mol* (delegates to constructor_role_mol_shellmap).

        Raises:
            ValidationError: if the arguments match none of the above.

        NOTE(review): several attribute names assigned here (``name``,
        ``molecule``, ``function_to_shell``, ``ao_to_shell``) shadow
        same-named methods on instances; the methods become unreachable
        once the attributes are set.
        """
        # <<< Basic BasisSet Information >>>

        # The name of this basis set (e.g. "BASIS", "RI BASIS")
        self.name = None
        # Array of gaussian shells
        self.shells = None
        # Molecule object.
        self.molecule = None
        # Shell information
        self.atom_basis_shell = None

        # <<< Scalars >>>

        # Number of atomic orbitals (Cartesian)
        self.PYnao = None
        # Number of basis functions (either cartesian or spherical)
        self.PYnbf = None
        # The number of unique primitives
        self.n_uprimitive = None
        # The number of shells
        self.n_shells = None
        # The number of primitives
        self.PYnprimitive = None
        # The maximum angular momentum
        self.PYmax_am = None
        # The maximum number of primitives in a shell
        self.PYmax_nprimitive = None
        # Whether the basis set is uses spherical basis functions or not
        self.puream = None

        # <<< Arrays >>>

        # The number of primitives (and exponents) in each shell
        self.n_prim_per_shell = None
        # The first (Cartesian) atomic orbital in each shell
        self.shell_first_ao = None
        # The first (Cartesian / spherical) basis function in each shell
        self.shell_first_basis_function = None
        # Shell number to atomic center.
        self.shell_center = None
        # Which shell does a given (Cartesian / spherical) function belong to?
        self.function_to_shell = None
        # Which shell does a given Cartesian function belong to?
        self.ao_to_shell = None
        # Which center is a given function on?
        self.function_center = None
        # How many shells are there on each center?
        self.center_to_nshell = None
        # What's the first shell on each center?
        self.center_to_shell = None
        # The flattened lists of unique exponents
        self.uexponents = None
        # The flattened lists of unique contraction coefficients (normalized)
        self.ucoefficients = None
        # The flattened lists of unique contraction coefficients (as provided by the user)
        self.uoriginal_coefficients = None
        # The flattened lists of ERD normalized contraction coefficients
        self.uerd_coefficients = None
        # The flattened list of Cartesian coordinates for each atom
        self.xyz = None

        # Divert to constructor functions
        if len(args) == 0:
            self.constructor_zero_ao_basis()
        elif len(args) == 2 and \
                isinstance(args[0], BasisSet) and \
                isinstance(args[1], int):
            self.constructor_basisset_center(*args)
        elif len(args) == 3 and \
                isinstance(args[0], basestring) and \
                isinstance(args[1], Molecule) and \
                isinstance(args[2], OrderedDict):
            self.constructor_role_mol_shellmap(*args)
        else:
            raise ValidationError('BasisSet::constructor: Inappropriate configuration of constructor arguments')
# <<< Methods for Construction >>>
def initialize_singletons(self):
"""Initialize singleton values that are shared by all basis set objects."""
# Populate the exp_ao arrays
for l in range(self.LIBINT_MAX_AM):
for i in range(l + 1):
x = l - i
for j in range(i + 1):
y = i - j
z = j
self.exp_ao[l].append([x, y, z])
    def constructor_zero_ao_basis(self):
        """Constructs a zero AO basis set: a single s shell (am = 0) with
        one primitive of exponent 0.0 and coefficient 1.0, attached to a
        dummy atom (Z = 0) at the origin.
        """
        if not self.initialized_shared:
            self.initialize_singletons()
        # NOTE(review): this creates an *instance* attribute shadowing the
        # class-level ``initialized_shared`` flag, so the class flag never
        # flips True and each new instance re-runs initialize_singletons(),
        # appending duplicate triples to the shared exp_ao lists — confirm.
        self.initialized_shared = True
        # Add a dummy atom at the origin, to hold this basis function
        self.molecule = Molecule()
        self.molecule.add_atom(0, 0.0, 0.0, 0.0)
        # Fill with data representing a single S function, at the origin, with 0 exponent
        self.n_uprimitive = 1
        self.n_shells = 1
        self.PYnprimitive = 1
        self.PYnao = 1
        self.PYnbf = 1
        self.uerd_coefficients = [1.0]
        self.n_prim_per_shell = [1]
        self.uexponents = [0.0]
        self.ucoefficients = [1.0]
        self.uoriginal_coefficients = [1.0]
        self.shell_first_ao = [0]
        self.shell_first_basis_function = [0]
        self.ao_to_shell = [0]
        self.function_to_shell = [0]
        self.function_center = [0]
        self.shell_center = [0]
        self.center_to_nshell = [0]
        self.center_to_shell = [0]
        self.puream = False
        self.PYmax_am = 0
        self.PYmax_nprimitive = 1
        self.xyz = [0.0, 0.0, 0.0]
        self.name = '(Empty Basis Set)'
        self.shells = []
        self.shells.append(GaussianShell(0, self.PYnprimitive,
            self.uoriginal_coefficients, self.ucoefficients, self.uerd_coefficients,
            self.uexponents, 'Cartesian', 0, self.xyz, 0))
    def constructor_role_mol_shellmap(self, role, mol, shell_map):
        """The most commonly used constructor. Extracts basis set name for *role*
        from each atom of *mol*, looks up basis and role entries in the
        *shell_map* dictionary, retrieves the GaussianShell objects and returns
        the BasisSet.

        Args:
            role: keyword naming this basis (e.g. 'BASIS'); stored as self.name.
            mol: Molecule whose atom entries carry per-atom basis assignments.
            shell_map: OrderedDict of label -> {basis name -> [GaussianShell]}.
        """
        self.molecule = mol
        self.name = role
        self.xyz = self.molecule.geometry()  # not used in libmints but this seems to be the intent
        self.atom_basis_shell = shell_map
        natom = self.molecule.natom()

        # Singletons
        if not self.initialized_shared:
            self.initialize_singletons()
        # NOTE(review): instance attribute shadows the class flag; see
        # constructor_zero_ao_basis for the same pattern.
        self.initialized_shared = True

        # These will tell us where the primitives for [basis][symbol] start and end in the compact array
        primitive_start = {}
        primitive_end = {}

        # First, loop over the unique primitives, and store them
        uexps = []
        ucoefs = []
        uoriginal_coefs = []
        uerd_coefs = []
        self.n_uprimitive = 0
        for symbolfirst, symbolsecond in shell_map.items():
            label = symbolfirst
            basis_map = symbolsecond
            primitive_start[label] = {}
            primitive_end[label] = {}
            for basisfirst, basissecond in basis_map.items():
                basis = basisfirst
                shells = basis_map[basis]  # symbol --> label
                primitive_start[label][basis] = self.n_uprimitive  # symbol --> label
                for i in range(len(shells)):
                    shell = shells[i]
                    for prim in range(shell.nprimitive()):
                        uexps.append(shell.exp(prim))
                        ucoefs.append(shell.coef(prim))
                        uoriginal_coefs.append(shell.original_coef(prim))
                        uerd_coefs.append(shell.erd_coef(prim))
                        self.n_uprimitive += 1
                primitive_end[label][basis] = self.n_uprimitive  # symbol --> label

        # Count basis functions, shells and primitives
        self.n_shells = 0
        self.PYnprimitive = 0
        self.PYnao = 0
        self.PYnbf = 0
        for n in range(natom):
            atom = self.molecule.atom_entry(n)
            basis = atom.basisset(role)
            label = atom.label()  # symbol --> label
            shells = shell_map[label][basis]  # symbol --> label
            for i in range(len(shells)):
                shell = shells[i]
                nprim = shell.nprimitive()
                self.PYnprimitive += nprim
                self.n_shells += 1
                self.PYnao += shell.ncartesian()
                self.PYnbf += shell.nfunction()

        # Allocate arrays
        self.n_prim_per_shell = [0] * self.n_shells
        # The unique primitives
        self.uexponents = [0.0] * self.n_uprimitive
        self.ucoefficients = [0.0] * self.n_uprimitive
        self.uoriginal_coefficients = [0.0] * self.n_uprimitive
        self.uerd_coefficients = [0.0] * self.n_uprimitive
        for i in range(self.n_uprimitive):
            self.uexponents[i] = uexps[i]
            self.ucoefficients[i] = ucoefs[i]
            self.uoriginal_coefficients[i] = uoriginal_coefs[i]
            self.uerd_coefficients[i] = uerd_coefs[i]
        self.shell_first_ao = [0] * self.n_shells
        self.shell_first_basis_function = [0] * self.n_shells
        self.shells = [None] * self.n_shells
        self.ao_to_shell = [0] * self.PYnao
        self.function_to_shell = [0] * self.PYnbf
        self.function_center = [0] * self.PYnbf
        self.shell_center = [0] * self.n_shells
        self.center_to_nshell = [0] * natom
        self.center_to_shell = [0] * natom

        # Now loop over all atoms, and point to the appropriate unique data
        shell_count = 0
        ao_count = 0
        bf_count = 0
        xyz_ptr = [0.0, 0.0, 0.0]  # libmints seems to be always passing GaussianShell zeros, so following suit
        self.puream = False
        self.PYmax_am = 0
        self.PYmax_nprimitive = 0
        for n in range(natom):
            atom = self.molecule.atom_entry(n)
            basis = atom.basisset(role)
            label = atom.label()  # symbol --> label
            shells = shell_map[label][basis]  # symbol --> label
            ustart = primitive_start[label][basis]  # symbol --> label
            uend = primitive_end[label][basis]  # symbol --> label
            nshells = len(shells)
            self.center_to_nshell[n] = nshells
            self.center_to_shell[n] = shell_count
            atom_nprim = 0
            for i in range(nshells):
                thisshell = shells[i]
                self.shell_first_ao[shell_count] = ao_count
                self.shell_first_basis_function[shell_count] = bf_count
                shell_nprim = thisshell.nprimitive()
                am = thisshell.am()
                self.PYmax_nprimitive = max(shell_nprim, self.PYmax_nprimitive)
                self.PYmax_am = max(am, self.PYmax_am)
                self.shell_center[shell_count] = n
                # NOTE(review): puream ends up reflecting the *last* shell
                # processed, not a per-basis invariant — confirm intended.
                self.puream = thisshell.is_pure()
                # Slice of the unique-primitive arrays belonging to this shell
                tst = ustart + atom_nprim
                tsp = ustart + atom_nprim + shell_nprim
                self.shells[shell_count] = GaussianShell(am, shell_nprim,
                    self.uoriginal_coefficients[tst:tsp],
                    self.ucoefficients[tst:tsp],
                    self.uerd_coefficients[tst:tsp],
                    self.uexponents[tst:tsp],
                    'Pure' if self.puream else 'Cartesian',
                    n, xyz_ptr, bf_count)
                for thisbf in range(thisshell.nfunction()):
                    self.function_to_shell[bf_count] = shell_count
                    self.function_center[bf_count] = n
                    bf_count += 1
                for thisao in range(thisshell.ncartesian()):
                    self.ao_to_shell[ao_count] = shell_count
                    ao_count += 1
                atom_nprim += shell_nprim
                shell_count += 1
            # Sanity check: primitives consumed must match the unique-array span
            if atom_nprim != uend - ustart:
                raise ValidationError("Problem with nprimitive in basis set construction!")
    def constructor_basisset_center(self, bs, center):
        """
        * Creates a new basis set object for an atom, from an existing basis set
        * bs: the basis set to copy data from
        * center: the atom in bs to copy over

        The resulting BasisSet contains only the shells of *bs* centered on
        atom *center*, together with a single-atom Molecule built from that
        atom's data.
        """
        # Singletons; these should've been initialized by this point, but just in case
        if not self.initialized_shared:
            self.initialize_singletons()
        self.initialized_shared = True

        # First, find the shells we need, and grab the data
        uexps = []
        ucoefs = []
        uoriginal_coefs = []
        uerd_coefs = []
        self.name = bs.name
        self.n_shells = 0
        self.n_uprimitive = 0
        self.PYnao = 0
        self.PYnbf = 0
        for shelln in range(bs.nshell()):
            shell = bs.shell(shelln)
            if shell.ncenter() == center:
                nprim = shell.nprimitive()
                for prim in range(nprim):
                    uexps.append(shell.exp(prim))
                    ucoefs.append(shell.coef(prim))
                    uoriginal_coefs.append(shell.original_coef(prim))
                    uerd_coefs.append(shell.erd_coef(prim))
                    self.n_uprimitive += 1
                self.n_shells += 1
                self.PYnao += shell.ncartesian()
                self.PYnbf += shell.nfunction()
        # For a single atom every primitive is "unique"
        self.PYnprimitive = self.n_uprimitive

        # Create a "molecule", i.e., an atom, with 1 fragment
        mol = bs.molecule
        self.molecule = Molecule()
        self.molecule.add_atom(mol.Z(center),
            mol.x(center), mol.y(center), mol.z(center),
            mol.label(center), mol.mass(center), mol.charge(center))
        self.molecule.fragments.append([0, 0])
        self.molecule.fragment_types.append('Real')
        self.molecule.fragment_charges.append(0)
        self.molecule.fragment_multiplicities.append(1)
        self.molecule.PYmove_to_com = False
        self.molecule.set_units('Bohr')
        self.molecule.update_geometry()

        # Allocate arrays
        self.n_prim_per_shell = [0] * self.n_shells
        # The unique primitives
        self.uexponents = [0.0] * self.n_uprimitive
        self.ucoefficients = [0.0] * self.n_uprimitive
        self.uoriginal_coefficients = [0.0] * self.n_uprimitive
        self.uerd_coefficients = [0.0] * self.n_uprimitive
        for i in range(self.n_uprimitive):
            self.uexponents[i] = uexps[i]
            self.ucoefficients[i] = ucoefs[i]
            self.uoriginal_coefficients[i] = uoriginal_coefs[i]
            self.uerd_coefficients[i] = uerd_coefs[i]
        self.shell_first_ao = [0] * self.n_shells
        self.shell_first_basis_function = [0] * self.n_shells
        self.shells = [None] * self.n_shells
        self.ao_to_shell = [0] * self.PYnao
        self.function_to_shell = [0] * self.PYnbf
        self.function_center = [0] * self.PYnbf
        self.shell_center = [0] * self.n_shells
        # Single-center basis: one entry each
        self.center_to_nshell = [0]
        self.center_to_shell = [0]
        self.xyz = mol.xyz(center)

        # Now loop over shell for this atom, and point to the appropriate unique data
        shell_count = 0
        ao_count = 0
        bf_count = 0
        self.puream = False
        self.PYmax_am = 0
        self.PYmax_nprimitive = 0
        prim_count = 0
        for shelln in range(bs.nshell()):
            shell = bs.shell(shelln)
            if shell.ncenter() == center:
                self.center_to_nshell[0] = self.n_shells
                #self.center_to_shell[0] = shell_count  # diff from libmints
                self.shell_first_ao[shell_count] = ao_count
                self.shell_first_basis_function[shell_count] = bf_count
                shell_nprim = shell.nprimitive()
                am = shell.am()
                self.PYmax_nprimitive = shell_nprim if shell_nprim > self.PYmax_nprimitive else self.PYmax_nprimitive
                self.PYmax_am = max(self.PYmax_am, am)
                self.shell_center[shell_count] = center
                self.puream = shell.is_pure()
                tst = prim_count
                tsp = prim_count + shell_nprim
                self.shells[shell_count] = GaussianShell(am, shell_nprim,
                    self.uoriginal_coefficients[tst:tsp],
                    self.ucoefficients[tst:tsp],
                    self.uerd_coefficients[tst:tsp],
                    self.uexponents[tst:tsp],
                    'Pure' if self.puream else 'Cartesian',
                    center, self.xyz, bf_count)
                # NOTE(review): prints every shell on construction — looks
                # like leftover debugging output; confirm before removing.
                self.shells[shell_count].pyprint()
                for thisbf in range(shell.nfunction()):
                    self.function_to_shell[bf_count] = shell_count
                    self.function_center[bf_count] = center
                    bf_count += 1
                for thisao in range(shell.ncartesian()):
                    self.ao_to_shell[ao_count] = shell_count
                    ao_count += 1
                shell_count += 1
                prim_count += shell_nprim
# <<< Methods for Construction by Another Name >>>
@staticmethod
def zero_ao_basis_set():
"""Returns an empty basis set object.
Returns a BasisSet object that actually has a single s-function
at the origin with an exponent of 0.0 and contraction of 1.0.
* @return A new empty BasisSet object.
"""
# In the new implementation, we simply call the default constructor
return BasisSet()
def atomic_basis_set(self, center):
"""Return a BasisSet object containing all shells at center i (0-index)
* Used for Atomic HF computations for SAD Guesses
* @param center Atomic center to provide a basis object for.
* @returns A new basis set object for the atomic center.
"""
return BasisSet(self, center)
    @staticmethod
    def build(molecule, shells):
        """Builder factory method
        * @param molecule the molecule to build the BasisSet around
        * @param shells array of *atom-numbered* GaussianShells to build the BasisSet from
        * @return BasisSet corresponding to this molecule and set of shells

        Raises:
            FeatureNotImplemented: always — this libmints analog has not
            been ported to the Python side.
        """
        raise FeatureNotImplemented('BasisSet::build')
#TRIAL# @staticmethod
#TRIAL# def pyconstruct_combined(mol, keys, targets, fitroles, others):
#TRIAL#
#TRIAL# # make sure the lengths are all the same
#TRIAL# if len(keys) != len(targets) or len(keys) != len(fitroles):
#TRIAL# raise ValidationError("""Lengths of keys, targets, and fitroles must be equal""")
#TRIAL#
#TRIAL# # Create (if necessary) and update qcdb.Molecule
#TRIAL# if isinstance(mol, basestring):
#TRIAL# mol = Molecule(mol)
#TRIAL# returnBasisSet = False
#TRIAL# elif isinstance(mol, Molecule):
#TRIAL# returnBasisSet = True
#TRIAL# else:
#TRIAL# raise ValidationError("""Argument mol must be psi4string or qcdb.Molecule""")
#TRIAL# mol.update_geometry()
#TRIAL#
#TRIAL# # load in the basis sets
#TRIAL# sets = []
#TRIAL# name = ""
#TRIAL# for at in range(len(keys)):
#TRIAL# bas = BasisSet.pyconstruct(mol, keys[at], targets[at], fitroles[at], others[at])
#TRIAL# name += targets[at] + " + "
#TRIAL# sets.append(bas)
#TRIAL#
#TRIAL# name = name[:-3].strip()
#TRIAL# # work our way through the sets merging them
#TRIAL# combined_atom_basis_shell = OrderedDict()
#TRIAL# for at in range(len(sets)):
#TRIAL# atom_basis_shell = sets[at].atom_basis_shell
#TRIAL#
#TRIAL# for label, basis_map in atom_basis_shell.items():
#TRIAL# if label not in combined_atom_basis_shell:
#TRIAL# combined_atom_basis_shell[label] = OrderedDict()
#TRIAL# combined_atom_basis_shell[label][name] = []
#TRIAL# for basis, shells in basis_map.items():
#TRIAL# combined_atom_basis_shell[label][name].extend(shells)
#TRIAL#
#TRIAL# #for label, basis_map in combined_atom_basis_shell.items():
#TRIAL# # # sort the shells by angular momentum
#TRIAL# # combined_atom_basis_shell[label][name] = sorted(combined_atom_basis_shell[label][name], key=lambda shell: she
#TRIAL#
#TRIAL# # Molecule and parser prepped, call the constructor
#TRIAL# mol.set_basis_all_atoms(name, "CABS")
#TRIAL#
#TRIAL# # Construct the grand BasisSet for mol
#TRIAL# basisset = BasisSet("CABS", mol, combined_atom_basis_shell)
#TRIAL#
#TRIAL# # Construct all the one-atom BasisSet-s for mol's CoordEntry-s
#TRIAL# for at in range(mol.natom()):
#TRIAL# oneatombasis = BasisSet(basisset, at)
#TRIAL# oneatombasishash = hashlib.sha1(oneatombasis.print_detail(numbersonly=True).encode('utf-8')).hexdigest()
#TRIAL# mol.set_shell_by_number(at, oneatombasishash, role="CABS")
#TRIAL# mol.update_geometry() # re-evaluate symmetry taking basissets into account
#TRIAL#
#TRIAL# text = """ => Creating Basis Set <=\n\n"""
#TRIAL# text += """ Role: %s\n""" % (fitroles)
#TRIAL# text += """ Keyword: %s\n""" % (keys)
#TRIAL# text += """ Name: %s\n""" % (name)
#TRIAL#
#TRIAL# if returnBasisSet:
#TRIAL# print(text)
#TRIAL# return basisset
#TRIAL# else:
#TRIAL# bsdict = {}
#TRIAL# bsdict['message'] = text
#TRIAL# bsdict['name'] = basisset.name
#TRIAL# bsdict['puream'] = int(basisset.has_puream())
#TRIAL# bsdict['shell_map'] = basisset.export_for_libmints("CABS")
#TRIAL# return bsdict
    @staticmethod
    def pyconstruct(mol, key, target, fitrole='BASIS', other=None, return_atomlist=False):
        """Builds a BasisSet object for *mol* (either a qcdb.Molecule or
        a string that can be instantiated into one) from basis set
        specifications passed in as python functions or as a string that
        names a basis to be applied to all atoms. Always required is the
        keyword *key* and string/function *target* of the basis to be
        constructed. For orbital basis sets, *key* will likely be 'BASIS'
        and, together with *target*, these arguments suffice.
            ``pyconstruct(smol, "BASIS", basisspec_psi4_yo_631pg_d_p_)``
            ``pyconstruct(mol, "BASIS", "6-31+G**")``
        When building an auxiliary basis, *key* is again the keyword,
        *target* is the string or function for the fitting basis (this
        may also be an empty string). In case the fitting basis isn't
        fully specified, also provide a *fitrole* and the string/function
        of the orbital basis as *other*, so that orbital hints can be
        used to look up a suitable default basis in BasisFamily.
            ``pyconstruct(smol, "DF_BASIS_MP2", basisspec_psi4_yo_ccpvdzri, 'RIFIT', basisspec_psi4_yo_631pg_d_p_)``
            ``pyconstruct(mol, "DF_BASIS_MP2", "", "RIFIT", "6-31+G(d,p)")``

        Returns either the BasisSet (when *mol* came in as a Molecule) or
        a dict of exportable data (when *mol* came in as a string); with
        *return_atomlist* a per-atom list of the same.
        """
        #print(type(mol), type(key), type(target), type(fitrole), type(other))
        # Orbital-only construction when no auxiliary hints were given
        orbonly = True if (fitrole == 'BASIS' and other is None) else False
        if orbonly:
            orb = target
            aux = None
        else:
            orb = other
            aux = target
        #print('BasisSet::pyconstructP', 'key =', key, 'aux =', aux, 'fitrole =', fitrole, 'orb =', orb, 'orbonly =', orbonly)  #, mol)

        # Create (if necessary) and update qcdb.Molecule
        if isinstance(mol, basestring):
            mol = Molecule(mol)
            returnBasisSet = False
        elif isinstance(mol, Molecule):
            returnBasisSet = True
        else:
            raise ValidationError("""Argument mol must be psi4string or qcdb.Molecule""")
        mol.update_geometry()

        # Apply requested basis set(s) to the molecule
        #   - basstrings only a temp object so using fitrole as dict key instead of psi4 keyword
        #   - error checking not needed since C-side already checked for NULL ptr
        mol.clear_basis_all_atoms()
        # TODO now need to clear shells, too
        basstrings = defaultdict(dict)
        if orb is None or orb == '':
            raise ValidationError("""Orbital basis argument must not be empty.""")
        elif callable(orb):
            basstrings['BASIS'] = orb(mol, 'BASIS')
        elif isinstance(orb, basestring):
            mol.set_basis_all_atoms(orb, role='BASIS')
        else:
            raise ValidationError("""Orbital basis argument must be function that applies basis sets to Molecule or a string of the basis to be applied to all atoms.""")

        if aux is None or aux == '':
            pass
        elif callable(aux):
            basstrings[fitrole] = aux(mol, fitrole)
        elif isinstance(aux, basestring):
            mol.set_basis_all_atoms(aux, role=fitrole)
        else:
            raise ValidationError("""Auxiliary basis argument must be function that applies basis sets to Molecule or a string of the basis to be applied to all atoms.""")

        # Not like we're ever using a non-G94 format
        parser = Gaussian94BasisSetParser()

        # Molecule and parser prepped, call the constructor
        bs, msg = BasisSet.construct(parser, mol, fitrole, None if fitrole == 'BASIS' else fitrole, basstrings[fitrole],
                                     return_atomlist=return_atomlist)
        # mol.update_geometry()

        text = """   => Loading Basis Set <=\n\n"""
        text += """    Role: %s\n""" % (fitrole)
        text += """    Keyword: %s\n""" % (key)
        text += """    Name: %s\n""" % (target)
        text += msg

        if return_atomlist:
            if returnBasisSet:
                return bs
            else:
                # Export each single-atom BasisSet as a plain dict
                atom_basis_list = []
                for atbs in bs:
                    bsdict = {}
                    bsdict['message'] = text
                    bsdict['name'] = atbs.name
                    bsdict['puream'] = int(atbs.has_puream())
                    bsdict['shell_map'] = atbs.export_for_libmints(fitrole)
                    bsdict['molecule'] = atbs.molecule.create_psi4_string_from_molecule(force_c1=True)
                    atom_basis_list.append(bsdict)
                return atom_basis_list
        if returnBasisSet:
            #print(text)
            return bs
        else:
            bsdict = {}
            bsdict['message'] = text
            bsdict['name'] = bs.name
            bsdict['puream'] = int(bs.has_puream())
            bsdict['shell_map'] = bs.export_for_libmints(fitrole)
            return bsdict
    @classmethod
    def construct(cls, parser, mol, role, deffit=None, basstrings=None, return_atomlist=False):
        """Returns a new BasisSet object configured from the *mol*
        Molecule object for *role* (generally a Psi4 keyword: BASIS,
        DF_BASIS_SCF, etc.). Fails utterly if a basis has not been set for
        *role* for every atom in *mol*, unless *deffit* is set (JFIT,
        JKFIT, or RIFIT), whereupon empty atoms are assigned to *role*
        from the :py:class:`~BasisFamily`. This function is significantly
        re-worked from its libmints analog.

        Returns:
            (BasisSet, str) summary pair, or (list of one-atom BasisSets,
            str) when *return_atomlist* is set.
        """
        # Update geometry in molecule, if there is a problem an exception is thrown.
        mol.update_geometry()

        # Paths to search for gbs files: here + PSIPATH + library
        psidatadir = os.environ.get('PSIDATADIR', None)
        #nolongerpredicatble psidatadir = __file__ + '/../../..' if psidatadir is None else psidatadir
        # NOTE(review): if PSIDATADIR is unset, os.path.abspath(None) below
        # raises — confirm the environment variable is guaranteed here.
        libraryPath = ':' + os.path.abspath(psidatadir) + '/basis'
        basisPath = os.path.abspath('.') + \
            ':' + ':'.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(':')]) + \
            libraryPath

        # Validate deffit for role
        # (bastitle, gbs basename, post-processing function) triples
        univdef = {'JFIT': ('def2-qzvpp-jfit', 'def2-qzvpp-jfit', None),
                   'JKFIT': ('def2-qzvpp-jkfit', 'def2-qzvpp-jkfit', None),
                   'RIFIT': ('def2-qzvpp-ri', 'def2-qzvpp-ri', None),
                   'DECON': (None, None, BasisSet.decontract),
                   'F12': ('def2-qzvpp-f12', 'def2-qzvpp-f12', None)}
        if deffit is not None:
            if deffit not in univdef.keys():
                raise ValidationError("""BasisSet::construct: deffit argument invalid: %s""" % (deffit))

        # Map of GaussianShells
        atom_basis_shell = OrderedDict()
        names = {}
        summary = []

        for at in range(mol.natom()):
            symbol = mol.atom_entry(at).symbol()  # O, He
            label = mol.atom_entry(at).label()  # O3, C_Drot, He
            basdict = mol.atom_entry(at).basissets()  # {'BASIS': 'sto-3g', 'DF_BASIS_MP2': 'cc-pvtz-ri'}

            if label not in atom_basis_shell:
                atom_basis_shell[label] = OrderedDict()

            # Establish search parameters for what/where basis entries suitable for atom
            seek = {}
            try:
                requested_basname = basdict[role]
            except KeyError:
                if role == 'BASIS' or deffit is None:
                    raise BasisSetNotDefined("""BasisSet::construct: No basis set specified for %s and %s.""" %
                                             (symbol, role))
                else:
                    # No auxiliary basis set for atom, so try darnedest to find one.
                    #   This involves querying the BasisFamily for default and
                    #   default-default and finally the universal default (defined
                    #   in this function). Since user hasn't indicated any specifics,
                    #   look only in Psi4's library and for symbol only, not label.
                    tmp = []
                    tmp.append(corresponding_basis(basdict['BASIS'], deffit))
                    #NYI#tmp.append(corresponding_basis(basdict['BASIS'], deffit + '-DEFAULT'))
                    tmp.append(univdef[deffit])
                    seek['basis'] = [item for item in tmp if item != (None, None, None)]
                    seek['entry'] = [symbol]
                    seek['path'] = libraryPath
                    seek['strings'] = ''
            else:
                # User (I hope ... dratted has_changed) has set basis for atom,
                #   so look only for basis (don't try defaults), look for label (N88)
                #   or symbol (N) (in that order; don't want to restrict use of atom
                #   labels to basis set spec), look everywhere (don't just look
                #   in library)
                if requested_basname.lower().endswith('-decon'):
                    bas_recipe = requested_basname, requested_basname[:-6], BasisSet.decontract
                else:
                    bas_recipe = requested_basname, requested_basname, None
                seek['basis'] = [bas_recipe]
                seek['entry'] = [symbol] if symbol == label else [label, symbol]
                seek['path'] = basisPath
                seek['strings'] = '' if basstrings is None else list(basstrings.keys())

            # Search through paths, bases, entries
            for bas in seek['basis']:
                (bastitle, basgbs, postfunc) = bas
                filename = cls.make_filename(basgbs)

                # -- First seek bas string in input file strings
                if filename[:-4] in seek['strings']:
                    index = 'inputblock %s' % (filename[:-4])
                    # Store contents
                    if index not in names:
                        names[index] = basstrings[filename[:-4]].split('\n')
                else:
                    # -- Else seek bas.gbs file in path
                    fullfilename = search_file(filename, seek['path'])
                    if fullfilename is None:
                        # -- Else skip to next bas
                        continue
                    # Store contents so not reloading files
                    index = 'file %s' % (fullfilename)
                    if index not in names:
                        names[index] = parser.load_file(fullfilename)

                lines = names[index]

                for entry in seek['entry']:
                    # Seek entry in lines, else skip to next entry
                    shells, msg = parser.parse(entry, lines)
                    if shells is None:
                        continue

                    # Found!
                    # -- Post-process
                    if postfunc:
                        shells = postfunc(shells)
                        fmsg = 'func {}'.format(postfunc.__name__)
                    else:
                        fmsg = ''
                    # -- Assign to Molecule
                    atom_basis_shell[label][bastitle] = shells
                    mol.set_basis_by_number(at, bastitle, role=role)
                    summary.append("""entry %-10s %s %s %s""" % (entry, msg, index, fmsg))
                    break

                # Break from outer loop if inner loop breaks
                # (classic for/else: the else runs only when no entry matched)
                else:
                    continue
                break

            else:
                # Ne'er found :-(
                text2 = """  Shell Entries: %s\n""" % (seek['entry'])
                text2 += """  Basis Sets: %s\n""" % (seek['basis'])
                text2 += """  File Path: %s\n""" % (', '.join(map(str, seek['path'].split(':'))))
                text2 += """  Input Blocks: %s\n""" % (', '.join(seek['strings']))
                raise BasisSetNotFound('BasisSet::construct: Unable to find a basis set for atom %d for role %s among:\n%s' % \
                    (at + 1, role, text2))

        # Construct the grand BasisSet for mol
        basisset = BasisSet(role, mol, atom_basis_shell)

        # Construct all the one-atom BasisSet-s for mol's CoordEntry-s
        atom_basis_list = []
        for at in range(mol.natom()):
            oneatombasis = BasisSet(basisset, at)
            oneatombasishash = hashlib.sha1(oneatombasis.print_detail(numbersonly=True).encode('utf-8')).hexdigest()
            if return_atomlist:
                oneatombasis.molecule.set_shell_by_number(0, oneatombasishash, role=role)
                atom_basis_list.append(oneatombasis)
            mol.set_shell_by_number(at, oneatombasishash, role=role)
        mol.update_geometry()  # re-evaluate symmetry taking basissets into account
        #TODO fix name
        basisset.name = ' + '.join(names)

        # Summary printing: group atoms that share the same source entry
        tmp = defaultdict(list)
        for at, v in enumerate(summary):
            tmp[v].append(at + 1)
        tmp2 = OrderedDict()
        maxsats = 0
        for item in sorted(tmp.values()):
            for msg, ats in tmp.items():
                if item == ats:
                    # Collapse consecutive atom numbers into "a-b" ranges
                    G = (list(x) for _, x in itertools.groupby(ats, lambda x, c=itertools.count(): next(c) - x))
                    sats = ", ".join("-".join(map(str, (g[0], g[-1])[:len(g)])) for g in G)
                    maxsats = max(maxsats, len(sats))
                    tmp2[sats] = msg
        #text = """   ==> Loading Basis Set <==\n\n"""
        #text += """   Role: %s\n""" % (role)
        #text += """   Basis Set: %s\n""" % (basisset.name)
        text = ''
        for ats, msg in tmp2.items():
            text += """   atoms %s %s\n""" % (ats.ljust(maxsats), msg)

        if return_atomlist:
            return atom_basis_list, text
        else:
            return basisset, text
# <<< Simple Methods for Basic BasisSet Information >>>
    def name(self):
        """Returns the name of this basis set.

        NOTE(review): every constructor assigns the *instance* attribute
        ``self.name`` (a str), which shadows this method, so on real
        instances ``obj.name`` yields the string directly and
        ``obj.name()`` raises TypeError. This accessor appears to be
        unreachable dead code — confirm before relying on it.
        """
        return self.name
def set_name(self, name):
"""Sets the name of this basis set"""
self.name = name
# JET added but I think should fail
#+ def atom_shell_map(self):
#+ return self.atom_shell_map
def nprimitive(self):
"""Number of primitives.
* @return The total number of primitives in all contractions.
"""
return self.PYnprimitive
def max_nprimitive(self):
"""Maximum number of primitives in a shell.
* Examines each shell and find the shell with the maximum number of primitives returns that
* number of primitives.
* @return Maximum number of primitives.
"""
return self.PYmax_nprimitive
def nshell(self):
"""Number of shells.
* @return Number of shells.
"""
return self.n_shells
def nao(self):
"""Number of atomic orbitals (Cartesian).
* @return The number of atomic orbitals (Cartesian orbitals, always).
"""
return self.PYnao
def nbf(self):
"""Number of basis functions (Spherical).
* @return The number of basis functions (Spherical, if has_puream() == true).
"""
return self.PYnbf
def max_am(self):
"""Maximum angular momentum used in the basis set.
* @return Maximum angular momentum.
"""
return self.PYmax_am
def has_puream(self):
"""Spherical harmonics?
* @return true if using spherical harmonics
"""
return self.puream
def max_function_per_shell(self):
"""Compute the maximum number of basis functions contained in a shell.
* @return The max number of basis functions in a shell.
"""
return 2 * self.PYmax_am + 1 if self.puream else (self.PYmax_am + 1) * (self.PYmax_am + 2) / 2
    def molecule(self):
        """Returns the Molecule this basis is defined for.

        NOTE(review): the constructors assign the *instance* attribute
        ``self.molecule`` (a Molecule), which shadows this method, so on
        real instances ``obj.molecule`` is the object itself and
        ``obj.molecule()`` raises TypeError. Appears to be unreachable
        dead code — confirm before relying on it.
        """
        return self.molecule
    def shell_to_ao_function(self, i):
        """Given a shell what is its first AO function
        * @param i Shell number
        * @return The function number for the first function for the i'th shell.
        """
        return self.shell_first_ao[i]
    def shell_to_center(self, i):
        """Given a shell what is its atomic center
        * @param i Shell number
        * @return The atomic center for the i'th shell.
        """
        return self.shell_center[i]
    def shell_to_basis_function(self, i):
        """Given a shell what is its first basis function (spherical) function
        * @param i Shell number
        * @return The function number for the first function for the i'th shell.
        """
        return self.shell_first_basis_function[i]
    def function_to_shell(self, i):
        """Given a function number what shell does it correspond to.
        NOTE(review): the instance attribute ``self.function_to_shell`` (a
        sequence set elsewhere) shadows this method once assigned; if it is
        never assigned, this line indexes the bound method and raises.
        """
        return self.function_to_shell[i]
    def function_to_center(self, i):
        """Given a function what is its atomic center
        * @param i Function number
        * @return The atomic center for the i'th function.
        """
        return self.function_center[i]
    def ao_to_shell(self, i):
        """Given a Cartesian function (AO) number what shell does it correspond to.
        NOTE(review): same method/attribute shadowing as ``function_to_shell``.
        """
        return self.ao_to_shell[i]
def shell(self, si, center=None):
"""Return the si'th Gaussian shell on center
* @param i Shell number
* @return A shared pointer to the GaussianShell object for the i'th shell.
"""
if center is not None:
si += self.center_to_shell[center]
if si < 0 or si > self.nshell():
text = """BasisSet::shell(si = %d), requested a shell out-of-bound.\n Max shell size: %d\n Name: %s\n""" % \
(si, self.nshell(), self.name())
raise ValidationError("BasisSet::shell: requested shell is out-of-bounds:\n%s" % (text))
return self.shells[si]
    def nshell_on_center(self, i):
        """Return the number of shells on a given center.
        * @param i Atomic-center index.
        """
        return self.center_to_nshell[i]
    def shell_on_center(self, center, shell):
        """Return the overall shell number for *shell* (local to *center*)."""
        return self.center_to_shell[center] + shell
# <<< Methods for Printing >>>
def print_by_level(self, out=None, level=2):
"""Print basis set information according to the level of detail in print_level
@param out The file stream to use for printing. Defaults to outfile.
@param print_level: defaults to 2
* < 1: Nothing
* 1: Brief summary
* 2: Summary and contraction details
* > 2: Full details
"""
if level < 1:
return
elif level == 1:
text = self.pyprint(out=None)
elif level == 2:
text = self.print_summary(out=None)
elif level > 2:
text = self.print_detail(out=None)
if out is None:
print(text)
else:
with open(out, mode='w') as handle:
handle.write(text)
def pyprint(self, out=None):
"""Print the basis set.
* @param out The file stream to use for printing. Defaults to outfile.
"""
text = ''
text += """ Basis Set: %s\n""" % (self.name)
text += """ Number of shells: %d\n""" % (self.nshell())
text += """ Number of basis function: %d\n""" % (self.nbf())
text += """ Number of Cartesian functions: %d\n""" % (self.nao())
text += """ Spherical Harmonics?: %s\n""" % ('true' if self.has_puream() else 'false')
text += """ Max angular momentum: %d\n\n""" % (self.max_am())
#text += """ Source:\n%s\n""" % (self.source()) # TODO
if out is None:
return text
else:
with open(outfile, mode='w') as handle:
handle.write(text)
    def print_summary(self, out=None):
        """Prints a short string summarizing the basis set.
        * @param out Destination file name; when None the text is returned.
        * @return Summary text when *out* is None, else None (writes file).
        """
        text = ''
        text += """ -AO BASIS SET INFORMATION:\n"""
        text += """ Name = %s\n""" % (self.name)
        text += """ Total number of shells = %d\n""" % (self.nshell())
        text += """ Number of primitives = %d\n""" % (self.nprimitive())
        text += """ Number of AO = %d\n""" % (self.nao())
        text += """ Number of SO = %d\n""" % (self.nbf())
        text += """ Maximum AM = %d\n""" % (self.max_am())
        text += """ Spherical Harmonics = %s\n""" % ('TRUE' if self.puream else 'FALSE')
        text += """\n"""
        text += """ -Contraction Scheme:\n"""
        text += """ Atom Type All Primitives // Shells:\n"""
        text += """ ------ ------ --------------------------\n"""
        # One line per atom: tally primitives and shells per angular momentum.
        for A in range(self.molecule.natom()):
            nprims = [0] * (self.PYmax_am + 1)
            nunique = [0] * (self.PYmax_am + 1)
            nshells = [0] * (self.PYmax_am + 1)
            amtypes = [None] * (self.PYmax_am + 1)
            text += """ %4d """ % (A + 1)
            text += """%2s """ % (self.molecule.symbol(A))
            first_shell = self.center_to_shell[A]
            n_shell = self.center_to_nshell[A]
            for Q in range(n_shell):
                shell = self.shells[Q + first_shell]
                nshells[shell.am()] += 1
                nunique[shell.am()] += shell.nprimitive()
                nprims[shell.am()] += shell.nprimitive()
                amtypes[shell.am()] = shell.amchar()
            # All Primitives
            for l in range(self.PYmax_am + 1):
                if nprims[l] == 0:
                    continue
                text += """%d%c """ % (nprims[l], amtypes[l])
            # Shells
            text += """// """
            for l in range(self.PYmax_am + 1):
                if nshells[l] == 0:
                    continue
                text += """%d%c """ % (nshells[l], amtypes[l])
            text += """\n"""
        text += """\n"""
        if out is None:
            return text
        else:
            with open(out, mode='w') as handle:
                handle.write(text)
    def print_detail(self, out=None, numbersonly=False):
        """Prints a detailed PSI3-style summary of the basis (per-atom).
        * @param out Destination file name; when None the text is returned.
        * @param numbersonly Skip the summary header and atom labels,
          emitting only the shell blocks.
        """
        text = ''
        if not numbersonly:
            text += self.print_summary(out=None)
            text += """ ==> AO Basis Functions <==\n"""
            text += '\n'
            text += """ [ %s ]\n""" % (self.name)
            text += """ spherical\n""" if self.has_puream() else """ cartesian\n"""
            text += """ ****\n"""
        # Only symmetry-unique atoms are printed; each gets its shells
        # rendered by GaussianShell.pyprint.
        for uA in range(self.molecule.nunique()):
            A = self.molecule.unique(uA)
            if not numbersonly:
                text += """ %2s %3d\n""" % (self.molecule.symbol(A), A + 1)
            first_shell = self.center_to_shell[A]
            n_shell = self.center_to_nshell[A]
            for Q in range(n_shell):
                text += self.shells[Q + first_shell].pyprint(outfile=None)
            text += """ ****\n"""
        text += """\n"""
        if out is None:
            return text
        else:
            with open(out, mode='w') as handle:
                handle.write(text)
    def export_for_libmints(self, role):
        """From complete BasisSet object, returns array where
        triplets of elements are each unique atom label, the hash
        of the string shells entry in gbs format and the
        shells entry in gbs format for that label. This packaging is
        intended for return to libmints BasisSet::pyconstruct for
        instantiation of a libmints BasisSet clone of *self*.
        * @param role Key passed through to ``atom.shell(key=role)``.
        """
        basstrings = []
        tally = []
        for A in range(self.molecule.natom()):
            # Emit each distinct atom label only once.
            if self.molecule.label(A) not in tally:
                label = self.molecule.label(A)
                first_shell = self.center_to_shell[A]
                n_shell = self.center_to_nshell[A]
                basstrings.append(label)
                basstrings.append(self.molecule.atoms[A].shell(key=role))
                text = """ %s 0\n""" % (label)
                for Q in range(n_shell):
                    text += self.shells[Q + first_shell].pyprint(outfile=None)
                text += """ ****\n"""
                basstrings.append(text)
        # NOTE(review): atoms already seen are never added to ``tally`` —
        # looks like ``tally.append(label)`` was intended inside the branch;
        # as written the dedup check can never trigger. Confirm with callers.
        return basstrings
    def print_detail_gamess(self, out=None, numbersonly=False):
        """Prints a GAMESS-format summary of the basis (per-atom).
        * @param out Destination file name; when None the text is returned.
        * @param numbersonly Skip header and element labels.
        NOTE(review): relies on the module-level ``z2element`` table
        (atomic number -> element name), defined elsewhere in this file.
        """
        text = ''
        if not numbersonly:
            text += self.print_summary(out=None)
            text += """ ==> AO Basis Functions <==\n"""
            text += '\n'
            text += """ [ %s ]\n""" % (self.name)
            text += """ spherical\n""" if self.has_puream() else """ cartesian\n"""
            text += """ ****\n"""
        for uA in range(self.molecule.nunique()):
            A = self.molecule.unique(uA)
            if not numbersonly:
                text += """%s\n""" % (z2element[self.molecule.Z(A)])
            first_shell = self.center_to_shell[A]
            n_shell = self.center_to_nshell[A]
            for Q in range(n_shell):
                text += self.shells[Q + first_shell].pyprint_gamess(outfile=None)
            #text += """ ****\n"""
        text += """\n"""
        if out is None:
            return text
        else:
            with open(out, mode='w') as handle:
                handle.write(text)
    def print_detail_cfour(self, out=None):
        """Returns a string in CFOUR-style of the basis (per-atom)
        * Format from http://slater.chemie.uni-mainz.de/cfour/index.php?n=Main.OldFormatOfAnEntryInTheGENBASFile
        * @param out Destination file name; when None the text is returned.
        """
        text = ''
        for uA in range(self.molecule.nunique()):
            A = self.molecule.unique(uA)
            text += """%s:P4_%d\n""" % (self.molecule.symbol(A), A + 1)
            text += """Psi4 basis %s for element %s atom %d\n\n""" % \
                (self.name.upper(), self.molecule.symbol(A), A + 1)
            first_shell = self.center_to_shell[A]
            n_shell = self.center_to_nshell[A]
            # Highest angular momentum present on this center.
            max_am_center = 0
            for Q in range(n_shell):
                max_am_center = self.shells[Q + first_shell].am() if \
                    self.shells[Q + first_shell].am() > max_am_center else max_am_center
            # Bucket the (center-local) shell indices by angular momentum.
            shell_per_am = [[] for i in range(max_am_center + 1)]
            for Q in range(n_shell):
                shell_per_am[self.shells[Q + first_shell].am()].append(Q)
            # Write number of shells in the basis set
            text += """%3d\n""" % (max_am_center + 1)
            # Write angular momentum for each shell
            for am in range(max_am_center + 1):
                text += """%5d""" % (am)
            text += '\n'
            # Write number of contracted basis functions for each shell
            for am in range(max_am_center + 1):
                text += """%5d""" % (len(shell_per_am[am]))
            text += '\n'
            exp_per_am = [[] for i in range(max_am_center + 1)]
            coef_per_am = [[] for i in range(max_am_center + 1)]
            for am in range(max_am_center + 1):
                # Collect unique exponents among all functions
                for Q in range(len(shell_per_am[am])):
                    for K in range(self.shells[shell_per_am[am][Q] + first_shell].nprimitive()):
                        if self.shells[shell_per_am[am][Q] + first_shell].exp(K) not in exp_per_am[am]:
                            exp_per_am[am].append(self.shells[shell_per_am[am][Q] + first_shell].exp(K))
                # Collect coefficients for each exp among all functions, zero otherwise
                # (coef_per_am[am] is laid out as Q-major: index Q*len(exps)+ep)
                for Q in range(len(shell_per_am[am])):
                    K = 0
                    for ep in range(len(exp_per_am[am])):
                        if abs(exp_per_am[am][ep] - self.shells[shell_per_am[am][Q] + first_shell].exp(K)) < 1.0e-8:
                            coef_per_am[am].append(self.shells[shell_per_am[am][Q] + first_shell].original_coef(K))
                            if (K + 1) != self.shells[shell_per_am[am][Q] + first_shell].nprimitive():
                                K += 1
                        else:
                            coef_per_am[am].append(0.0)
            # Write number of exponents for each shell
            for am in range(max_am_center + 1):
                text += """%5d""" % (len(exp_per_am[am]))
            text += '\n\n'
            for am in range(max_am_center + 1):
                # Write exponents for each shell (wrap every 5 values)
                for ep in range(len(exp_per_am[am])):
                    text += """%14.7f""" % (exp_per_am[am][ep])
                    if ((ep + 1) % 5 == 0) or ((ep + 1) == len(exp_per_am[am])):
                        text += '\n'
                text += '\n'
                # Write contraction coefficients for each shell
                for ep in range(len(exp_per_am[am])):
                    for bf in range(len(shell_per_am[am])):
                        text += """%10.7f """ % (coef_per_am[am][bf * len(exp_per_am[am]) + ep])
                    text += '\n'
                text += '\n'
        if out is None:
            return text
        else:
            with open(out, mode='w') as handle:
                handle.write(text)
    # <<< Misc. Methods >>>
    def refresh(self):
        """Refresh internal basis set data. Useful if someone has pushed
        to shells. Pushing to shells happens in the BasisSetParsers, so
        the parsers will call refresh(). This function is now defunct.
        * @raise FeatureNotImplemented always (kept for interface parity).
        """
        raise FeatureNotImplemented('BasisSet::refresh')
@staticmethod
def make_filename(name):
"""Converts basis set name to a compatible filename.
* @param basisname Basis name
* @return Compatible file name.
"""
# Modify the name of the basis set to generate a filename: STO-3G -> sto-3g
basisname = name
# First make it lower case
basisname = basisname.lower()
# Replace all '(' with '_'
basisname = basisname.replace('(', '_')
# Replace all ')' with '_'
basisname = basisname.replace(')', '_')
# Replace all ',' with '_'
basisname = basisname.replace(',', '_')
# Replace all '*' with 's'
basisname = basisname.replace('*', 's')
# Replace all '+' with 'p'
basisname = basisname.replace('+', 'p')
# Add file extension
basisname += '.gbs'
return basisname
    @staticmethod
    def decontract(shells):
        """Procedure applied to list to GaussianShell-s *shells* that returns
        another list of shells, one for every AM and exponent pair in the input
        list. Decontracts the shells.
        * @param shells Iterable of GaussianShell-like objects.
        * @return List of single-primitive ShellInfo objects, one per unique
          (angular momentum, exponent) pair.
        """
        # vector of uncontracted shells to return
        shell_list = []
        # map of AM to a vector of exponents for duplicate basis functions check
        exp_map = defaultdict(list)
        for shell in shells:
            am = shell.am()
            pure = shell.is_pure()
            nc = shell.ncenter()
            center = shell.center
            start = shell.start
            for prim in range(shell.nprimitive()):
                exp = shell.exp(prim)
                # linear scan with a loose tolerance so near-equal exponents
                # are treated as duplicates
                unique = True
                for _exp in exp_map[am]:
                    if abs(exp - _exp) < 1.0e-6:
                        unique = False
                if unique:
                    us = ShellInfo(am, [1.0], [exp],
                                   'Pure' if pure else 'Cartesian',
                                   nc, center, start, 'Unnormalized')
                    shell_list.append(us)
                    exp_map[am].append(exp)
        return shell_list
    # <<< Methods not Implemented >>>
    def zero_so_basis_set(cls, factory):
        """ **NYI** Returns an empty SO basis set object.
        * Returns an SOBasis object that actually has a single s-function
        * at the origin with an exponent of 0.0 and contraction of 1.0.
        * @return A new empty SOBasis object.
        NOTE(review): first parameter is named ``cls`` but there is no
        @classmethod decorator — presumably intentional for a stub; confirm
        before ever implementing.
        """
        raise FeatureNotImplemented('BasisSet::zero_so_basis_set')  # FINAL
    @staticmethod
    def test_basis_set(max_am):
        """Returns a shell-labeled test basis set object
        * @param max_am maximum angular momentum to build
        * @return pair containing shell labels and four-center
        * test basis for use in benchmarking
        * See libmints/benchmark.cc for details
        The libmints version seems not to have been updated along with the classes.
        """
        raise FeatureNotImplemented('BasisSet::test_basis_set')
    def get_ao_sorted_shell(self, i):
        """Returns the value of the sorted shell list. Defunct"""
        raise FeatureNotImplemented('BasisSet::get_ao_sorted_shell')
    def get_ao_sorted_list(self):
        """Returns the vector of sorted shell list. Defunct"""
        raise FeatureNotImplemented('BasisSet::get_ao_sorted_list')
def compute_phi(self, phi_ao, x, y, z):
"""Returns the values of the basis functions at a point"""
phi_ao = [0.0] * self.nao()
ao = 0
for ns in range(self.nshell()):
shell = self.shells[ns]
am = shell.am()
nprim = shell.nprimitive()
a = shell.exps()
c = shell.coefs()
xyz = shell.center()
dx = x - xyz[0]
dy = y - xyz[1]
dz = z - xyz[2]
rr = dx * dx + dy * dy + dz * dz
cexpr = 0
for np in range(nprim):
cexpr += c[np] * math.exp(-a[np] * rr)
for l in range(INT_NCART(am)):
components = exp_ao[am][l]
phi_ao[ao + l] += pow(dx, components[0]) * \
pow(dy, components[1]) * \
pow(dz, components[2]) * \
cexpr
ao += INT_NCART(am)
    def concatenate(self, b):
        """Concatenates two basis sets together into a new basis without
        reordering anything. Unless you know what you're doing, you should
        use the '+' operator instead of this method. Appears defunct.
        * @param b Other BasisSet (unused; stub).
        """
        raise FeatureNotImplemented('BasisSet::concatenate')
    def add(self, b):
        """Adds this plus another basis set and returns the result.
        Equivalent to the '+' operator. Appears defunct.
        * @param b Other BasisSet (unused; stub).
        """
        raise FeatureNotImplemented('BasisSet::add')
    @staticmethod
    def shell_sorter_ncenter(d1, d2):
        # cmp-style predicate: True when d1's center precedes d2's.
        # NOTE(review): not directly usable as a sort ``key``; would need
        # functools.cmp_to_key semantics — confirm how callers use it.
        return d1.ncenter() < d2.ncenter()
    @staticmethod
    def shell_sorter_am(d1, d2):
        # cmp-style predicate: True when d1 has lower angular momentum.
        return d1.am() < d2.am()
|
mhlechner/psi4
|
psi4/driver/qcdb/libmintsbasisset.py
|
Python
|
gpl-2.0
| 58,094
|
[
"CFOUR",
"Gaussian",
"Psi4"
] |
2f34be413512a902aed8038c04ed9bca9f069fb941c143455525301e35dbc82d
|
"""Abstract classes for :class:`fixture.base.Fixture` descendants that load / unload data
See :ref:`Using LoadableFixture<using-loadable-fixture>` for examples.
"""
# from __future__ import with_statement
__all__ = ['LoadableFixture', 'EnvLoadableFixture', 'DBLoadableFixture', 'DeferredStoredObject']
import sys, types
from fixture.base import Fixture
from fixture.util import ObjRegistry, _mklog
from fixture.style import OriginalStyle
from fixture.dataset import Ref, dataset_registry, DataRow, is_rowlike
from fixture.exc import UninitializedError, LoadError, UnloadError, StorageMediaNotFound
import logging
log = _mklog("fixture.loadable")
treelog = _mklog("fixture.loadable.tree")
class StorageMediumAdapter(object):
    """Common interface for working with storable objects.

    Subclasses adapt a concrete storage backend (ORM mapper, table, ...)
    to the clear/save protocol the loaders drive. (Python 2 codebase.)
    """
    def __init__(self, medium, dataset):
        # medium: the backend object rows are saved through
        # dataset: the DataSet this adapter stores rows for
        self.medium = medium
        self.dataset = dataset
        self.transaction = None
    def __getattr__(self, name):
        # NOTE(review): ``self.obj`` is never assigned in this class — any
        # missed attribute lookup will recurse through __getattr__ looking
        # for 'obj'. Presumably subclasses set ``self.obj`` (or this was
        # meant to be ``self.medium``) — confirm before relying on it.
        return getattr(self.obj, name)
    def __repr__(self):
        return "%s at %s for %s" % (
                self.__class__.__name__, hex(id(self)), self.medium)
    def clear(self, obj):
        """Must clear the stored object.
        """
        raise NotImplementedError
    def clearall(self):
        """Must clear all stored objects.
        """
        log.info("CLEARING stored objects for %s", self.dataset)
        for obj in self.dataset.meta._stored_objects:
            try:
                self.clear(obj)
            except Exception, e:
                # re-raise as UnloadError with the original traceback (py2 syntax)
                etype, val, tb = sys.exc_info()
                raise UnloadError(etype, val, self.dataset,
                                    stored_object=obj), None, tb
    def save(self, row, column_vals):
        """Given a DataRow, must save it somehow.
        column_vals is an iterable of (column_name, column_value)
        """
        raise NotImplementedError
    def visit_loader(self, loader):
        """A chance to visit the LoadableFixture object.
        By default it does nothing.
        """
        pass
class LoadQueue(ObjRegistry):
    """Keeps track of what class instances were loaded.

    "level" is used like so:
    The lower the level, the lower that object is on the foreign key chain.
    As the level increases, this means more foreign objects depend on the
    local object. Thus, objects need to be unloaded starting at the lowest
    level and working up. Also, since objects can appear multiple times in
    foreign key chains, the queue only acknowledges the object at its
    highest level, since this will ensure all dependencies get unloaded
    before it.
    """
    def __init__(self):
        ObjRegistry.__init__(self)
        # level -> [registry ids loaded at that level]
        self.tree = {}
        # registry id -> highest level it was seen at
        self.limit = {}
    def __repr__(self):
        return "<%s at %s>" % (
            self.__class__.__name__, hex(id(self)))
    def _pushid(self, id, level):
        # Record *id* at *level*, keeping only its highest level.
        if id in self.limit:
            # only store the object at its highest level:
            if level > self.limit[id]:
                self.tree[self.limit[id]].remove(id)
                del self.limit[id]
            else:
                return
        self.tree.setdefault(level, []).append(id)
        self.limit[id] = level
    def clear(self):
        """clear internal registry"""
        ObjRegistry.clear(self)
        # this is an attempt to free up refs to database connections:
        self.tree = {}
        self.limit = {}
    def register(self, obj, level):
        """register this object as "loaded" at level
        """
        id = ObjRegistry.register(self, obj)
        self._pushid(id, level)
        return id
    def referenced(self, obj, level):
        """tell the queue that this object was referenced again at level.
        """
        id = self.id(obj)
        self._pushid(id, level)
    def to_unload(self):
        """yields objects in an order suitable for unloading (lowest level first).
        """
        # Fix: the original did ``level_nums = self.tree.keys();
        # level_nums.sort()`` — that breaks on Python 3 (dict views have no
        # .sort()) and mutated a throwaway list. sorted() behaves the same
        # on both Python 2 and 3.
        treelog.info("*** unload order ***")
        for level in sorted(self.tree):
            unload_queue = self.tree[level]
            verbose_obj = []
            for id in unload_queue:
                obj = self.registry[id]
                verbose_obj.append(obj.__class__.__name__)
                yield obj
            treelog.info("%s. %s", level, verbose_obj)
class LoadableFixture(Fixture):
    """
    Knows how to load data into something useful.

    This is an abstract class and cannot be used directly. You can use a
    LoadableFixture that already knows how to load into a specific medium,
    such as SQLAlchemyFixture, or create your own to load DataSet objects
    into custom storage media. (Python 2 codebase.)

    Keyword Arguments:
    dataclass
        class to instantiate with datasets (defaults to that of Fixture)
    style
        a Style object to translate names with (defaults to NamedDataStyle)
    medium
        optional LoadableFixture.StorageMediumAdapter to store DataSet
        objects with
    """
    style = OriginalStyle()
    dataclass = Fixture.dataclass
    def __init__(self, style=None, medium=None, **kw):
        Fixture.__init__(self, loader=self, **kw)
        if style:
            self.style = style
        if medium:
            self.Medium = medium
        # LoadQueue instance once begin() runs; None means "nothing loaded yet"
        self.loaded = None
    # class-level aliases so subclasses/callers can reach the collaborators
    StorageMediumAdapter = StorageMediumAdapter
    Medium = StorageMediumAdapter
    StorageMediaNotFound = StorageMediaNotFound
    LoadQueue = LoadQueue
    def attach_storage_medium(self, ds):
        """attach a :class:`StorageMediumAdapter` to DataSet"""
        raise NotImplementedError
    def begin(self, unloading=False):
        """begin loading"""
        if not unloading:
            self.loaded = self.LoadQueue()
    def commit(self):
        """commit load transaction"""
        raise NotImplementedError
    def load(self, data):
        """load data"""
        def loader():
            for ds in data:
                self.load_dataset(ds)
        self.wrap_in_transaction(loader, unloading=False)
    def load_dataset(self, ds, level=1):
        """load this dataset and all its dependent datasets.
        level is essentially the order of processing (going from dataset to
        dependent datasets). Child datasets are always loaded before the
        parent. The level is important for visualizing the chain of
        dependencies : 0 is the bottom, and thus should be the first set of
        objects unloaded
        """
        is_parent = level==1
        levsep = is_parent and "/--------" or "|__.."
        treelog.info(
            "%s%s%s (%s)", level * ' ', levsep, ds.__class__.__name__,
            (is_parent and "parent" or level))
        # depth-first: referenced (child) datasets are loaded before this one
        for ref_ds in ds.meta.references:
            r = ref_ds.shared_instance(default_refclass=self.dataclass)
            new_level = level+1
            self.load_dataset(r, level=new_level)
        self.attach_storage_medium(ds)
        if ds in self.loaded:
            # keep track of its order but don't actually load it...
            self.loaded.referenced(ds, level)
            return
        log.info("LOADING rows in %s", ds)
        ds.meta.storage_medium.visit_loader(self)
        registered = False
        for key, row in ds:
            try:
                self.resolve_row_references(ds, row)
                if not isinstance(row, DataRow):
                    row = row(ds)
                # generator closes over the current ``row``; consumed
                # immediately by save(), so late binding is not an issue here
                def column_vals():
                    for c in row.columns():
                        yield (c, self.resolve_stored_object(getattr(row, c)))
                obj = ds.meta.storage_medium.save(row, column_vals())
                ds.meta._stored_objects.store(key, obj)
                # save the instance in place of the class...
                ds._setdata(key, row)
                if not registered:
                    self.loaded.register(ds, level)
                    registered = True
            except Exception, e:
                # wrap any storage failure as LoadError, keeping the traceback (py2)
                etype, val, tb = sys.exc_info()
                raise LoadError(etype, val, ds, key=key, row=row), None, tb
    def resolve_row_references(self, current_dataset, row):
        """resolve this DataRow object's referenced values.
        """
        def resolved_rowlike(rowlike):
            key = rowlike.__name__
            # a self-reference within the same DataSet cannot be resolved
            # yet — defer it until the loader has stored the row
            if rowlike._dataset is type(current_dataset):
                return DeferredStoredObject(rowlike._dataset, key)
            loaded_ds = self.loaded[rowlike._dataset]
            return loaded_ds.meta._stored_objects.get_object(key)
        def resolve_stored_object(candidate):
            if is_rowlike(candidate):
                return resolved_rowlike(candidate)
            else:
                # then it is the stored object itself. this would happen if
                # there is a reciprocal foreign key (i.e. organization has a
                # parent organization)
                return candidate
        for name in row.columns():
            val = getattr(row, name)
            if type(val) in (types.ListType, types.TupleType):
                # i.e. categories = [python, ruby]
                setattr(row, name, map(resolve_stored_object, val))
            elif is_rowlike(val):
                # i.e. category = python
                setattr(row, name, resolved_rowlike(val))
            elif isinstance(val, Ref.Value):
                # i.e. category_id = python.id.
                ref = val.ref
                # now the ref will return the attribute from a stored object
                # when __get__ is invoked
                ref.dataset_obj = self.loaded[ref.dataset_class]
    def rollback(self):
        """rollback load transaction"""
        raise NotImplementedError
    def then_finally(self, unloading=False):
        """called in a finally block after load transaction has begun"""
        pass
    def unload(self):
        """unload data"""
        if self.loaded is None:
            raise UninitializedError(
                "Cannot unload data because it has not yet been loaded in this "
                "process. Call data.setup() before data.teardown()")
        def unloader():
            for dataset in self.loaded.to_unload():
                self.unload_dataset(dataset)
            self.loaded.clear()
            dataset_registry.clear()
        self.wrap_in_transaction(unloader, unloading=True)
    def unload_dataset(self, dataset):
        """unload data stored for this dataset"""
        dataset.meta.storage_medium.clearall()
    def wrap_in_transaction(self, routine, unloading=False):
        """call routine in a load transaction"""
        self.begin(unloading=unloading)
        try:
            try:
                routine()
            except:
                self.rollback()
                raise
            else:
                self.commit()
        finally:
            self.then_finally(unloading=unloading)
class EnvLoadableFixture(LoadableFixture):
    """An abstract fixture that can resolve DataSet objects from an env.
    Keyword "env" should be a dict or a module if not None.
    According to the style rules, the env will be used to find objects by name.
    """
    def __init__(self, env=None, **kw):
        LoadableFixture.__init__(self, **kw)
        self.env = env
    def attach_storage_medium(self, ds):
        """Lookup a storage medium in the ``env`` and attach it to a DataSet.
        A storage medium is looked up by name. If a specific name has not been declared in the DataSet
        then it will be guessed using the :meth:`Style.guess_storable_name <fixture.style.Style.guess_storable_name>` method.
        Once a name is found (typically the name of a DataSet class, say, EmployeeData) then it is looked up
        in the ``env`` which is expected to be a dict or module like object.
        The method first tries ``env.get('EmployeeData')`` then ``getattr(env, 'EmployeeData')``.
        The return value is the storage medium (i.e. a data mapper for the Employees table)
        Note that a :mod:`style <fixture.style>` might translate a name to maintain a consistent
        naming scheme between DataSet classes and data mappers.
        """
        if ds.meta.storage_medium is not None:
            # already attached...
            return
        storable = ds.meta.storable
        if not storable:
            if not ds.meta.storable_name:
                ds.meta.storable_name = self.style.guess_storable_name(
                    ds.__class__.__name__)
            # dict-like env first...
            if hasattr(self.env, 'get'):
                storable = self.env.get(ds.meta.storable_name, None)
            if not storable:
                # ...then module-like attribute access
                if hasattr(self.env, ds.meta.storable_name):
                    try:
                        storable = getattr(self.env, ds.meta.storable_name)
                    except AttributeError:
                        pass
            if not storable:
                repr_env = repr(type(self.env))
                if hasattr(self.env, '__module__'):
                    repr_env = "%s from '%s'" % (repr_env, self.env.__module__)
                raise self.StorageMediaNotFound(
                    "could not find %s '%s' for "
                    "dataset %s in self.env (%s)" % (
                        self.Medium, ds.meta.storable_name, ds, repr_env))
        if storable == ds.__class__:
            raise ValueError(
                "cannot use %s %s as a storable object of itself! "
                "(perhaps your style object was not configured right?)" % (
                    ds.__class__.__name__, ds.__class__))
        ds.meta.storage_medium = self.Medium(storable, ds)
    def resolve_stored_object(self, column_val):
        # a DeferredStoredObject is a same-dataset self-reference recorded
        # during resolve_row_references; resolve it now that rows are stored
        if type(column_val)==DeferredStoredObject:
            return column_val.get_stored_object_from_loader(self)
        else:
            return column_val
class DBLoadableFixture(EnvLoadableFixture):
    """
    An abstract fixture that can load a DataSet into a database like thing.
    More specifically, one that forces its implementation to run atomically
    (within a begin / commit / rollback block).
    """
    def __init__(self, dsn=None, **kw):
        EnvLoadableFixture.__init__(self, **kw)
        # dsn: connection string for the database-like backend
        self.dsn = dsn
        self.transaction = None
    def begin(self, unloading=False):
        """begin loading data"""
        EnvLoadableFixture.begin(self, unloading=unloading)
        self.transaction = self.create_transaction()
    def commit(self):
        """call transaction.commit() on transaction returned by :meth:`DBLoadableFixture.create_transaction`"""
        self.transaction.commit()
    def create_transaction(self):
        """must return a transaction object that implements commit() and rollback()
        .. note:: transaction.begin() will not be called. If that is necessary then call begin before returning the object.
        """
        raise NotImplementedError
    def rollback(self):
        """call transaction.rollback() on transaction returned by :meth:`DBLoadableFixture.create_transaction`"""
        self.transaction.rollback()
class DeferredStoredObject(object):
    """Placeholder for a stored DataSet row that cannot be resolved yet.

    When one row refers to another row of the *same* DataSet (for example
    ``jenny.father = adam``), the referenced object has not been stored at
    the moment the reference is resolved. This class records the
    (dataset, key) pair so the actual stored object can be looked up later,
    by the storage medium / loader, as close to use time as possible.
    """
    def __init__(self, dataset, key):
        # the DataSet class and row key identifying the object to fetch later
        self.dataset = dataset
        self.key = key
    def get_stored_object_from_loader(self, loader):
        """Resolve and return the stored object via *loader*'s load registry."""
        dataset_in_loader = loader.loaded[self.dataset]
        return dataset_in_loader.meta._stored_objects.get_object(self.key)
# Run the module's doctests when executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
|
patrickod/fixture
|
fixture/loadable/loadable.py
|
Python
|
lgpl-2.1
| 16,777
|
[
"VisIt"
] |
6637f85ac3c633d0ce19065982375108c9d4b2b92b2bc65ee0c7d556d1462e3b
|
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Execute computations asynchronously using threads or processes."""
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
import sys
from concurrent.futures._base import (FIRST_COMPLETED,
FIRST_EXCEPTION,
ALL_COMPLETED,
CancelledError,
TimeoutError,
Future,
Executor,
wait,
as_completed)
# ProcessPoolExecutor is skipped on 'uwp' — presumably because the UWP
# sandbox cannot spawn subprocesses (TODO confirm); ThreadPoolExecutor is
# always available.
if sys.platform != 'uwp':
    from concurrent.futures.process import ProcessPoolExecutor
from concurrent.futures.thread import ThreadPoolExecutor
|
ms-iot/python
|
cpython/Lib/concurrent/futures/__init__.py
|
Python
|
bsd-3-clause
| 842
|
[
"Brian"
] |
3482ef1b68eb20484ec35eb9a99c76f416637de5a301472c98692b3cb010ab90
|
import lmfit
import numpy as np
import json
import functools
import operator
import suspect.basis
# this is the underlying function for the GaussianPeak model class
def gaussian(in_data, amplitude, frequency, phase, fwhm):
    # Scale the unit Gaussian lineshape from suspect.basis by *amplitude*,
    # inheriting MRS metadata (dt etc.) from *in_data*.
    return amplitude * in_data.inherit(suspect.basis.gaussian(in_data.time_axis(), frequency, phase, fwhm))
# this is the underlying function for the GlobalPhase model class
def phase_shift(in_data, phase0, phase1):
    # Build an all-ones spectrum carrying in_data's metadata, then apply the
    # zero- and first-order phase so it can multiply other spectra.
    return in_data.spectrum().inherit(np.ones_like(in_data)).adjust_phase(phase0, phase1)
# this is the underlying function for combining the models together
def apply_in_freq_domain(model, phase_shift):
    # Multiply in the frequency domain (phase is multiplicative there),
    # then transform back to the time domain FID.
    return (model.spectrum() * phase_shift).fid()
class GaussianPeak(lmfit.Model):
    """
    Class to represent a Gaussian peak for fitting.
    The Gaussian peak is parameterised by 4 values: amplitude, frequency,
    phase, and FWHM (full width at half maximum).
    Each parameter can be specified in three different ways:
    1. Passing a numeric value sets the initial guess for that parameter
    2. Passing a string of a number fixes that parameter to that value
    3. Passing a dictionary allows setting any of the constraints supported
    by the underlying LMFit parameter: value, min, max, vary, and expr
    By default the phase of the peak will be fixed at 0 and the amplitude
    and FWHM will be constrained to be bigger than 0 and 1Hz respectively.
    Parameters
    ----------
    name
        The name of the peak
    amplitude
        The amplitude (area) of the peak
    frequency
        The frequency of the peak in Hertz
    phase
        The phase of the peak in radians
    fwhm
        The full width at half maximum of the peak in Hertz
    """
    def __init__(self, name, amplitude=1, frequency=0, phase="0", fwhm=20):
        # Snapshot the argument values before any new locals are created;
        # locals() is order-sensitive, so this must stay the first statement.
        lcls = locals()
        params = {p: lcls[p] for p in ["amplitude", "phase", "frequency", "fwhm"]}
        super().__init__(gaussian, prefix="{}_".format(name))
        # Fix: the loop variable was also called ``name``, shadowing the
        # peak-name argument; renamed to keep the two concepts distinct.
        for param_name, value in params.items():
            if isinstance(value, str):
                # string value => fix the parameter at that number
                self.set_param_hint(param_name, value=float(value), vary=False)
            elif isinstance(value, dict):
                # dict => pass arbitrary lmfit constraints straight through
                self.set_param_hint(param_name, **value)
            else:
                # numeric => initial guess only
                self.set_param_hint(param_name, value=value)
        # Default physical constraints unless the caller supplied their own.
        if "min" not in self.param_hints["amplitude"]:
            self.set_param_hint("amplitude", min=0)
        if "min" not in self.param_hints["fwhm"]:
            self.set_param_hint("fwhm", min=1)
class Model:
    """
    A model of an MRS FID signal which can be fitted to data.
    This model is created by passing a set
    of individual peak models, to which it then appends a phase model. By
    default the first order phase is constrained to be 0.
    Parameters
    ----------
    peak_models
        The descriptions of the peaks making up the model.
    phase0
        The estimated zero order phase in radians.
    phase1
        The estimated first order phase in radians per Hz.
    """
    def __init__(self, peak_models, phase0=0, phase1="0"):
        phase_model = lmfit.model.Model(phase_shift)
        phase_model.set_param_hint("phase0", value=0)
        phase_model.set_param_hint("phase1", value=0, min=0, max=16e-3)
        params = {
            "phase0": phase0,
            "phase1": phase1
        }
        # Same three-way spec as GaussianPeak: string fixes the value,
        # dict passes constraints through, number is an initial guess.
        for name, value in params.items():
            if isinstance(value, str):
                phase_model.set_param_hint(name, value=float(value), vary=False)
            elif isinstance(value, dict):
                phase_model.set_param_hint(name, **value)
            else:
                phase_model.set_param_hint(name, value=value)
        self.composite_model = lmfit.model.CompositeModel(peak_models,
                                                          phase_model,
                                                          apply_in_freq_domain)
    def fit(self, data, baseline_points=4):
        """
        Perform a fit of the model to an FID.
        Parameters
        ----------
        data
            The time domain data to be fitted.
        baseline_points
            The first baseline_points of the FID will be ignored in the fit.
        Returns
        -------
        ModelResult
        """
        params = self.composite_model.make_params()
        # Fix: was ``dtype=np.float`` — that alias was deprecated in
        # NumPy 1.20 and removed in 1.24; the builtin ``float`` is the
        # documented replacement and behaves identically.
        weights = np.ones_like(data, dtype=float)
        # zero weight == exclude the distorted first points from the fit
        weights[:baseline_points] = 0
        return self.composite_model.fit(data,
                                        params=params,
                                        in_data=data,
                                        weights=weights)
    @classmethod
    def load(cls, filename):
        """Create a model from a JSON file (see :meth:`from_dict`)."""
        with open(filename) as fin:
            model_dict = json.load(fin)
        return cls.from_dict(model_dict)
    @classmethod
    def from_dict(cls, model_dict):
        """
        Create a model from a dict.
        Parameters
        ----------
        model_dict
            dict describing the model. Keys "phase0"/"phase1" configure the
            phase model; every other key defines a GaussianPeak.
        Returns
        -------
        Model
            The specified model ready for fitting.
        """
        phase0 = model_dict.get("phase0", 0)
        phase1 = model_dict.get("phase1", "0")
        peak_params = (GaussianPeak(k, **v) for (k, v) in model_dict.items()
                       if k not in ["phase0", "phase1"])
        # lmfit models support '+'; reduce sums all peaks into one model
        peak_models = functools.reduce(operator.add, peak_params)
        return cls(peak_models, phase0, phase1)
|
openmrslab/suspect
|
suspect/fitting/singlet.py
|
Python
|
mit
| 5,453
|
[
"Gaussian"
] |
52040d0afb62140ebb0c1fa299c36c01dae37e5c7f050b3396c4b1bf87d5d8ae
|
#!/bin/env python
""" Reset failed requests and operations therein """
__RCSID__ = "$Id: $"
import sys
from DIRAC.Core.Base import Script
maxReset = 100
Script.registerSwitch( '', 'Job=', ' jobID: reset requests for jobID' )
Script.registerSwitch( '', 'Failed', ' reset Failed requests' )
Script.registerSwitch( '', 'Maximum=', ' max number of requests to reset' )
Script.setUsageMessage( '\n'.join( [ __doc__,
'Usage:',
' %s [option|cfgfile] [requestName|requestID]' % Script.scriptName,
'Arguments:',
' requestName: a request name' ] ) )
# # execution
if __name__ == "__main__":
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
import DIRAC
resetFailed = False
requestName = ''
job = None
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
reqClient = ReqClient()
for switch in Script.getUnprocessedSwitches():
if switch[0] == 'Failed':
resetFailed = True
elif switch[0] == 'Maximum':
try:
maxReset = int( switch[1] )
except:
pass
elif switch[0] == 'Job':
try:
job = int( switch[1] )
except:
print "Invalid jobID", switch[1]
if not job:
args = Script.getPositionalArgs()
if len( args ) == 1:
requestName = args[0]
else:
from DIRAC.Interfaces.API.Dirac import Dirac
dirac = Dirac()
res = dirac.attributes( job )
if not res['OK']:
print "Error getting job parameters", res['Message']
else:
jobName = res['Value'].get( 'JobName' )
if not jobName:
print 'Job %d not found' % job
else:
requestName = jobname + '_job_%d' % job
requests = []
if requestName:
requests = [requestName]
elif resetFailed:
res = reqClient.getRequestNamesList( ['Failed'], maxReset );
if not res['OK']:
print "Error", res['Message'];
elif res['Value']:
requests = res['Value']
if not requests:
print "No requests to reset"
Script.showHelp()
else:
reset = 0
notReset = 0
freq = 10
if len( requests ) > freq:
print "Resetting now %d requests (. each %d requests)" % ( len( requests ), freq )
else:
freq = 0
for reqName in requests:
reqName = reqName[0]
toReset = True
if len( requests ) < maxReset:
req = reqClient.peekRequest( reqName ).get( 'Value' )
if not req:
continue
for op in req:
if op.Status == 'Failed':
if not op.Type.startswith( 'Remove' ):
for f in op:
if f.Status == 'Failed' and f.Error == 'No such file or directory':
toReset = False
notReset += 1
break
break
if toReset:
ret = reqClient.resetFailedRequest( reqName )
if not ret['OK']:
print "Error", ret['Message']
else:
if freq and ( reset % freq ) == 0:
sys.stdout.write( '.' )
sys.stdout.flush()
reset += 1
if reset:
print "\nReset", reset, 'Requests'
if notReset:
print "Not reset (File doesn't exist) %d requests" % notReset
|
avedaee/DIRAC
|
RequestManagementSystem/scripts/dirac-dms-reset-request.py
|
Python
|
gpl-3.0
| 3,325
|
[
"DIRAC"
] |
d3b49ba5b05dec50012dcfe18be641bc681c3c6d5ce58eb1e042b8ddd1559d17
|
# -*- coding: utf-8 -*-
#
# You can specify the sources you want to read in this file. All sources must
# be placed inside of
# sources = [
# SOURCE1
# SOURCE2
# ...
# ]
# Each source should look like
# {
# 'setting_name': 'setting_value',
# ...
# },
#
# For output* templating syntax please read
# http://www.simple-is-better.org/template/pyratemp.html
#
# Available functions:
# * http://www.simple-is-better.org/template/pyratemp.html#expressions
# * ftime(date, format)
# For format of strftime method, please read
# http://docs.python.org/library/time.html#time.strftime
# * surl(url) - Shorten url
#
# All commented settings are the defualt values, if you need to customize, then
# remove the prefixing '#' character.
sources = [
    {
        # Twitter: Statuses of you and whom you follow
        'type': 'twitter',
        #src_name': 'Twitter',
        'username': 'username',
        # Get your keys here: http://dev.twitter.com/apps/new
        'consumer_key': 'YOUR_KEY', # Double Check :)
        'consumer_secret': 'YOUR_SECRET', # Double Check :)
        #interval': 90,
        # status.tweet_link - URL of Tweet
        #'output': '@!ansi.fgreen!@@!ftime(status["created_at"], "%H:%M:%S")!@@!ansi.freset!@ [@!src_name!@] @!ansi.fyellow!@@!status["user"]["screen_name"]!@@!ansi.freset!@: @!unescape(status["text"])!@ @!ansi.fmagenta!@@!surl(status["tweet_link"])!@@!ansi.freset!@'
    },
    {
        'type': 'twittersearch',
        #'src_name': 'TwitterSearch',
        # You can make one easier at http://search.twitter.com/advanced
        'q': 'the search term',
        # How many returned result in one query, upto 100
        #'rpp': 15,
        # A valid ISO 639-1 code (http://en.wikipedia.org/wiki/ISO_639-1)
        #'lang': 'en',
        #'interval': 60,
    },
    {
        # FriendFeed Home Realtime - Only activiies after run, no session data will be stored
        # Item structure can be found at http://code.google.com/p/friendfeed-api/wiki/ApiDocumentation#Reading_FriendFeed_Feeds
        'type': 'friendfeed',
        #src_name': 'FriendFeed',
        'nickname': 'nickname',
        'remote_key': 'secret',
        #'interval': 60,
        # Available object: entry, ansi, src_name
        # entry["_link"] - URL of entry
        #'output': '@!ansi.fgreen!@@!ftime(entry["updated"], "%H:%M:%S")!@@!ansi.freset!@ [@!src_name!@] @!ansi.fyellow!@@!entry["user"]["nickname"]!@@!ansi.freset!@:<!--(if "room" in entry)--> @!ansi.fiyellow!@[@!entry["room"]["name"]!@]@!ansi.freset!@<!--(end)--> @!ansi.fcyan!@@!entry["title"]!@@!ansi.freset!@ @!ansi.fmagenta!@@!surl(entry["_link"])!@@!ansi.freset!@',
        # Available object: entry, like, ansi, src_name
        #'output_like': '@!ansi.fgreen!@@!ftime(like["date"], "%H:%M:%S")!@@!ansi.freset!@ [@!src_name!@] @!ansi.fyellow!@@!like["user"]["nickname"]!@@!ansi.freset!@ @!ansi.fired!@♥@!ansi.freset!@ @!ansi.fcyan!@@!entry["title"]!@@!ansi.freset!@ @!ansi.fmagenta!@@!surl(entry["_link"])!@@!ansi.freset!@',
        # Available object: entry, comment, ansi, src_name
        #'output_comment': '@!ansi.fgreen!@@!ftime(comment["date"], "%H:%M:%S")!@@!ansi.freset!@ [@!src_name!@] @!ansi.fyellow!@@!comment["user"]["nickname"]!@@!ansi.freset!@ ✎ @!ansi.fcyan!@@!entry["title"]!@@!ansi.freset!@: @!comment["body"]!@ @!ansi.fmagenta!@@!surl(entry["_link"])!@@!ansi.freset!@',
        #'show_like': True,
        #'show_comment': True,
        # You may have set hidding some people's item, you can decide if you
        # still want to see them.
        #'show_hidden': False,
    },
    {
        # Feed: Normal feed
        'type': 'feed',
        #src_name': 'Feed',
        'feed': 'http://example.com/feed',
        #'interval': 60,
        #'output': '@!ansi.fgreen!@@!ftime(entry["updated"], "%H:%M:%S")!@@!ansi.freset!@ [@!src_name!@] @!entry["title"]!@ @!ansi.fmagenta!@@!surl(entry.link)!@@!ansi.freset!@',
    },
    {
        # GMail: Mails in inbox
        'type': 'gmail',
        #'src_name': 'Gmail',
        'email': 'email@gmail.com',
        'password': 'secret',
        # use 'all' for all emails, or it will be just unread emails in inbox folder
        #'label': '',
        #'interval': 60,
        #'output': '@!ansi.fgreen!@@!ftime(entry["updated"], "%H:%M:%S")!@@!ansi.freset!@ @!ansi.fred!@[@!src_name!@]@!ansi.freset!@ @!ansi.fyellow!@@!entry["author"]!@@!ansi.freset!@: @!entry["title"]!@ @!ansi.fmagenta!@@!surl(entry["link"])!@@!ansi.freset!@',
    },
    {
        # Google Reader: Items of subscriptions
        'type': 'greader',
        #'src_name': 'GReader',
        'email': 'email@gmail.com',
        'password': 'secret',
        #'interval': 60,
        #'output': '@!ansi.fgreen!@@!ftime(entry["updated"], "%H:%M:%S")!@@!ansi.freset!@ [@!src_name!@] @!ansi.fyellow!@@!entry["source"]["title"]!@@!ansi.freset!@: @!entry["title"]!@ @!ansi.fmagenta!@@!surl(entry["link"])!@@!ansi.freset!@',
    },
    {
        # YouTube: Items of subscriptions
        'type': 'youtube',
        #'src_name': 'YouTube',
        'username': 'username',
        #'interval': 60,
        #'output': '@!ansi.fgreen!@@!ftime(entry["updated"], "%H:%M:%S")!@@!ansi.freset!@ [@!src_name!@] @!entry["title"]!@ @!ansi.fmagenta!@@!surl(entry.link)!@@!ansi.freset!@',
    },
    {
        # Weather.com
        # Weather.com has very restricted License Agreement (for cli program),
        # if you don't care about that, please do not use my license key.
        # You find it in clis.py, replace it with yours.
        # Note that you can only use three weather sources (Weather.com's XOAP License Aggrement)
        'type': 'weather',
        #'src_name': 'Weather',
        # Search for locid, visit http://xoap.weather.com/search/search?where=[locationname]
        # For example http://xoap.weather.com/search/search?where=taipei
        # Returns
        # <search ver="3.0">
        # <loc id="TWXX0021" type="1">Taipei, Taiwan</loc>
        # </search>
        # Where TWXX0021 is the locid
        'locid': '***Read above***',
        # Set of units. s for Standard or m for Metric
        #'unit': 'm',
        # Update interval in minutes, must be 25 or greater
        #'interval': 30,
        # There are four promotion links in output (Weather.com's XOAP License Aggrement), please do not remove them.
        #'output': '@!ansi.fgreen!@@!ftime(weather["cc"]["lsup"], "%H:%M:%S")!@@!ansi.freset!@ @!ansi.fred!@[@!src_name!@]@!ansi.freset!@ @!ansi.fyellow!@@!weather["cc"]["obst"]!@@!ansi.freset!@ Temperature: @!weather["cc"]["tmp"]!@°@!weather["head"]["ut"]!@ Feels like: @!weather["cc"]["flik"]!@°@!weather["head"]["ut"]!@ Conditions: @!weather["cc"]["t"]!@ Wind: <!--(if weather["cc"]["wind"]["s"] == "calm")-->calm<!--(else)-->@!weather["cc"]["wind"]["s"]!@@!weather["head"]["us"]!@ (@!int(float(weather["cc"]["wind"]["s"]) * 0.6214)!@mph) (@!weather["cc"]["wind"]["t"]!@)<!--(end)--> (Provided by weather.com; @!weather["lnks"]["link"][0]["t"]!@: @!surl(weather["lnks"]["link"][0]["l"])!@ @!weather["lnks"]["link"][1]["t"]!@: @!surl(weather["lnks"]["link"][1]["l"])!@ @!weather["lnks"]["link"][2]["t"]!@: @!surl(weather["lnks"]["link"][2]["l"])!@ @!weather["lnks"]["link"][3]["t"]!@: @!surl(weather["lnks"]["link"][3]["l"])!@)',
    },  # FIX: this closing brace was missing, which made the whole file a SyntaxError
    {
        # PunBB 1.2: Special for PunBB 1.2's feed
        'type': 'punbb12',
        #src_name': 'Feed',
        'feed': 'http://example.com/extern.php?action=active&type=RSS',
        #'interval': 60,
        #'output': '@!ansi.fgreen!@@!ftime(entry["updated"], "%H:%M:%S")!@@!ansi.freset!@ [@!src_name!@] @!entry["title"]!@ @!ansi.fmagenta!@@!surl(entry.link)!@@!ansi.freset!@',
    },
    {
        # Tail: tail -F
        'type': 'tail',
        #src_name': 'Tail',
        # The file you want to tail
        'file': '~/test.txt',
        #'output': '@!ansi.fgreen!@@!ftime(entry["updated"], "%H:%M:%S")!@@!ansi.freset!@ [@!src_name!@] @!entry["title"]!@ @!ansi.fmagenta!@@!surl(entry.link)!@@!ansi.freset!@',
        # How many last lines to be print when firstly runs
        #'last_lines': 0,
    },
]

# The setting of local url shortening server
#server = {
# 'name': 'localhost',
# 'port': 8080,
# }
# FIX: removed a stray '}' that followed the commented-out server block; it
# matched no opening brace and broke parsing of this configuration file.
|
livibetter/clis
|
samples/clis.cfg.py
|
Python
|
mit
| 8,297
|
[
"VisIt"
] |
da22c3bae89c34b9806142c6d39ac48e3708efc2cb62995b75bde893870eedc9
|
import numpy as np
from numpy.linalg import inv
import os, os.path
### IPR specific stuff ###
class FortranError(Exception):
    '''Raised when Fortran output runs numbers together without spaces
    between fields, making the file unparseable.'''


class GulpError(Exception):
    '''GULP runtime error.'''


class VaspError(Exception):
    # BUG FIX: docstring was copy-pasted from GulpError ("gulp runtime error").
    '''VASP runtime error.'''
### IPR specific stuff ###
class Data:
    """One training snapshot: a structure (cell + atoms) together with its
    computed energy, forces and stresses."""

    def __init__(self):
        '''
        Data objects must have an input, and output attributes.
        inputs:
        cell = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
        atoms = [ ('A', [0,0,0]), ('B', [0.5,0.5,0.5]) ]
        outputs:
        energy = float()
        forces = [[0, 0, 0], [0, 0, 0]]
        stresses = [0, 0, 0, 0, 0, 0]
        '''
        self.cell = [[]]
        self.atoms = []
        self.energy = 0.0
        self.stresses = [0, 0, 0, 0, 0, 0]
        self.forces = [[]]
        # BUG FIX: the readers below wrote to 'coords', which __init__ never
        # created; keep it as an alias of 'atoms' for old callers.
        self.coords = self.atoms

    @staticmethod
    def read_path(search_path):
        """Walk search_path and collect every snapshot found, from VASP
        OUTCAR files and from pre-split snapshot directories."""
        data = []
        for (path, dirs, files) in os.walk(search_path):
            for fname in files:
                if 'OUTCAR' in fname:
                    data += Data.read_outcar(path + '/' + fname)
            # BUG FIX: '<' is a *strict* subset test and wrongly rejected
            # directories containing exactly these four files; '<=' is the
            # intended check.
            if set(['energy', 'stresses', 'forces', 'POSCAR']) <= set(files):
                data.append(Data.read_snapshot(path))
        return data

    @property
    def inputs(self):
        """Model inputs: cell matrix and (species, fractional coords) list."""
        return {'cell': self.cell,
                'atoms': self.atoms}

    @property
    def outputs(self):
        """Reference outputs the fitted potential should reproduce."""
        return {'energy': self.energy,
                'forces': self.forces,
                'stresses': self.stresses}

    @staticmethod
    def read_snapshot(snapdir):
        """Assemble a snapshot from a directory holding POSCAR, energy,
        forces and stresses files.

        Assumes a standard POSCAR layout: comment, scaling factor, three
        lattice vectors, optional species line (VASP 5), counts line,
        Direct/Cartesian line, coordinates — TODO confirm against the
        project's snapshot generator.
        """
        # Test that the path really is a snapshot directory.
        if not (os.path.exists(snapdir + '/POSCAR') and
                os.path.exists(snapdir + '/energy') and
                os.path.exists(snapdir + '/forces') and
                os.path.exists(snapdir + '/stresses')):
            raise DataError
        snapshot = Data()
        snapshot.energy = float(open(snapdir + '/energy').read())
        snapshot.stresses = [float(f) for f in
                             open(snapdir + '/stresses').read().split()]
        poscar = open(snapdir + '/POSCAR').readlines()
        # BUG FIX: the lattice occupies exactly three lines (2-4); the old
        # slice [2:6] produced a 4x3 matrix that np.linalg.inv rejects.
        snapshot.cell = [[float(f) for f in vec.split()]
                         for vec in poscar[2:5]]
        ## test vasp4/5, direct/cart
        species_line = poscar[5].lower()
        if species_line[0].isalpha():
            # Has a species line -> VASP 5; counts are on the next line.
            # BUG FIX: the suffix-stripping comprehension iterated over the
            # whole POSCAR instead of over the species symbols.
            atom_types = [d.split('_')[0] for d in poscar[5].split()]
            atom_counts = [int(f) for f in poscar[6].split()]
            dline = poscar[7].lower()
            atoms = poscar[8:8 + sum(atom_counts)]
        else:
            # No species line -> VASP 4; symbols come from a side file.
            if not os.path.exists(snapdir + '/atom_types'):
                raise DataError
            atom_types = open(snapdir + '/atom_types').read()
            atom_types = atom_types.strip().split()
            # BUG FIX: counts were read from an undefined variable ('dline')
            # and iterated character by character.
            atom_counts = [int(f) for f in poscar[5].split()]
            dline = poscar[6].lower()
            atoms = poscar[7:7 + sum(atom_counts)]
        atom_array = []
        for type_, count in zip(atom_types, atom_counts):
            atom_array += [type_] * count
        # 'Direct' means fractional coordinates; anything else is Cartesian.
        cart = not dline.startswith('d')
        inv_cell = inv(snapshot.cell)
        for elt, atom in zip(atom_array, atoms):
            coord = [float(f) for f in atom.split()]
            if cart:
                # BUG FIX: 'dot' was an undefined name; use np.dot (same
                # convention as read_outcar).
                coord = np.dot(inv_cell, coord)
            # BUG FIX: append to 'atoms' (exposed by 'inputs'); 'coords'
            # aliases the same list.
            snapshot.atoms.append((elt, list(coord)))
        forces = open(snapdir + '/forces').readlines()
        if not len(forces) == len(snapshot.atoms):
            raise DataError
        # BUG FIX: each line must be split into fields before float();
        # iterating a string yields single characters.
        snapshot.forces = [[float(f) for f in line.split()] for line in forces]
        return snapshot

    @staticmethod
    def read_outcar(outcar):
        """Parse every ionic step of a VASP OUTCAR into Data snapshots."""
        data = open(outcar).readlines()
        snapshots = []
        atom_types = []
        atom_counts = []
        atom_array = []
        snapshot = Data()
        for n, line in enumerate(data):
            if 'POTCAR:' in line:
                temp = line.split()[2]
                # Strip pseudopotential decorations such as 'Fe_pv' or 'H1.25'.
                for c in ['.', '_', '1']:
                    if c in temp:
                        temp = temp[0:temp.find(c)]
                atom_types += [temp]
            if 'ions per type' in line:
                # POTCAR names appear twice in an OUTCAR; keep one copy.
                # BUG FIX: integer division ('//') so the slice index stays
                # an int under Python 3.
                atom_types = atom_types[:len(atom_types) // 2]
                atom_counts = [int(f) for f in line.split()[4:]]
                for type_, count in zip(atom_types, atom_counts):
                    atom_array += [type_] * count
            if 'direct lattice vectors' in line:
                cell = []
                for i in range(3):
                    temp = data[n + 1 + i].split()
                    cell += [[float(temp[0]), float(temp[1]), float(temp[2])]]
                inv_cell = inv(cell)
                snapshot.cell = cell
            if 'FREE ENERGIE OF THE ION-ELECTRON SYSTEM' in line:
                snapshot.energy = float(data[n + 4].split()[6])
            if 'STRESS in cart' in line:
                for iline in range(20):
                    nline = data[n + iline]
                    if 'Total' in nline:
                        snapshot.stresses = [float(f) for f in
                                             nline.split()[1:]]
                        break
            if 'POSITION ' in line:
                forces = []
                atoms = []
                for iatom, elt in enumerate(atom_array):
                    temp = data[n + 2 + iatom].split()
                    forces += [[float(f) for f in temp[3:6]]]
                    atoms += [(elt,
                               np.dot(inv_cell, [float(f) for f in temp[0:3]])
                               )]
                snapshot.forces = forces
                # BUG FIX: populate 'atoms' (read by 'inputs'); keep 'coords'
                # pointing at the same list for backward compatibility.
                snapshot.atoms = atoms
                snapshot.coords = atoms
                snapshots.append(snapshot)
                snapshot = Data()
        return snapshots
|
wolverton-research-group/fitpot
|
fitpot/data.py
|
Python
|
mit
| 6,057
|
[
"GULP"
] |
5d9e690a1e95047f4bc4a02d4f34243338a370ca4925665c1fc003d6e11d0400
|
################################################################################
# Copyright (C) 2011-2013 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
import numpy as np
import matplotlib.pyplot as plt
import h5py
import tempfile
import bayespy.plot as bpplt
from bayespy.utils import misc
from bayespy.utils import random
from bayespy.inference.vmp import nodes
from bayespy.inference.vmp.vmp import VB
def pca_model(M, N, D):
    """
    Construct a Bayesian PCA model with an ARD prior.

    Parameters
    ----------
    M, N : int
        Number of rows / columns of the observed data matrix.
    D : int
        Number of latent components; ARD can prune unused ones.

    Returns
    -------
    tuple
        The (Y, WX, W, X, tau, alpha) nodes of the model.
    """
    # One ARD precision per latent dimension.
    alpha = nodes.Gamma(1e-2, 1e-2, plates=(D,), name='alpha')
    # Loading vectors, one per data row, with the ARD prior on their scale.
    W = nodes.Gaussian(np.zeros(D),
                       alpha.as_diagonal_wishart(),
                       name="W",
                       plates=(M, 1))
    # Latent state vectors, one per data column.
    X = nodes.Gaussian(np.zeros(D),
                       np.identity(D),
                       name="X",
                       plates=(1, N))
    # Noiseless reconstruction of the data.
    WX = nodes.Dot(W, X, name="WX")
    # Scalar observation-noise precision.
    tau = nodes.Gamma(1e-2, 1e-2, name="tau", plates=())
    # Noisy observations of WX.
    Y = nodes.GaussianARD(WX, tau, name="Y", plates=(M, N))
    return (Y, WX, W, X, tau, alpha)
@bpplt.interactive
def run(M=10, N=100, D_y=3, D=5):
    """Demonstrate saving/loading VB inference state for the PCA model.

    Generates synthetic low-rank data, fits the model with VMP while
    writing and re-reading the state from an HDF5 file, then plots the
    reconstruction and the node masks.
    """
    seed = 45
    print('seed =', seed)
    np.random.seed(seed)
    # Check HDF5 version.
    if h5py.version.hdf5_version_tuple < (1,8,7):
        print("WARNING! Your HDF5 version is %s. HDF5 versions <1.8.7 are not "
              "able to save empty arrays, thus you may experience problems if "
              "you for instance try to save before running any iteration steps."
              % str(h5py.version.hdf5_version_tuple))
    # Generate data: rank-D_y signal plus Gaussian noise.
    w = np.random.normal(0, 1, size=(M,1,D_y))
    x = np.random.normal(0, 1, size=(1,N,D_y))
    f = misc.sum_product(w, x, axes_to_sum=[-1])
    y = f + np.random.normal(0, 0.5, size=(M,N))
    # Construct model
    (Y, WX, W, X, tau, alpha) = pca_model(M, N, D)
    # Data with missing values
    mask = random.mask(M, N, p=0.9) # randomly missing
    mask[:,20:40] = False # gap missing
    y[~mask] = np.nan
    Y.observe(y, mask=mask)
    # Construct inference machine
    Q = VB(Y, W, X, tau, alpha, autosave_iterations=5)
    # Initialize some nodes randomly
    X.initialize_from_value(X.random())
    W.initialize_from_value(W.random())
    # Save the state into a HDF5 file.
    # NOTE(review): NamedTemporaryFile(...).name is race-prone, and the
    # suffix lacks a leading dot; acceptable for a demo.
    filename = tempfile.NamedTemporaryFile(suffix='hdf5').name
    Q.update(X, W, alpha, tau, repeat=1)
    Q.save(filename=filename)
    # Inference loop.
    Q.update(X, W, alpha, tau, repeat=10)
    # Reload the state from the HDF5 file
    Q.load(filename=filename)
    # Inference loop again.
    Q.update(X, W, alpha, tau, repeat=10)
    # NOTE: Saving and loading requires that you have the model
    # constructed. "Save" does not store the model structure nor does "load"
    # read it. They are just used for reading and writing the contents of the
    # nodes. Thus, if you want to load, you first need to construct the same
    # model that was used for saving and then use load to set the states of the
    # nodes.
    plt.clf()
    # Plot the posterior mean of WX with 2-sigma error bars against the
    # noiseless truth and the noisy observations.
    WX_params = WX.get_parameters()
    fh = WX_params[0] * np.ones(y.shape)
    err_fh = 2*np.sqrt(WX_params[1] + 1/tau.get_moments()[0]) * np.ones(y.shape)
    for m in range(M):
        plt.subplot(M,1,m+1)
        #errorplot(y, error=None, x=None, lower=None, upper=None):
        bpplt.errorplot(fh[m], x=np.arange(N), error=err_fh[m])
        plt.plot(np.arange(N), f[m], 'g')
        plt.plot(np.arange(N), y[m], 'r+')
    plt.figure()
    Q.plot_iteration_by_nodes()
    # Show which entries of each node are observed/masked.
    plt.figure()
    plt.subplot(2,2,1)
    bpplt.binary_matrix(W.mask)
    plt.subplot(2,2,2)
    bpplt.binary_matrix(X.mask)
    plt.subplot(2,2,3)
    #bpplt.binary_matrix(WX.get_mask())
    plt.subplot(2,2,4)
    bpplt.binary_matrix(Y.mask)
# Run the demo and display the resulting figures when executed as a script.
if __name__ == '__main__':
    run()
    plt.show()
|
SalemAmeen/bayespy
|
bayespy/demos/saving.py
|
Python
|
mit
| 4,028
|
[
"Gaussian"
] |
38dd10e1f179c0ae029e5dcddf6122c41f717064d42981fd3d0d0ca4c61093b3
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License
"""
Module for implementing a CTRL file object class for the Stuttgart
LMTO-ASA code. It will primarily be used to generate a pymatgen
Structure object in the pymatgen.electronic_structure.cohp.py module.
"""
import re
import numpy as np
from monty.io import zopen
from pymatgen.core.structure import Structure
from pymatgen.core.units import Ry_to_eV, bohr_to_angstrom
from pymatgen.electronic_structure.core import Spin
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.num import round_to_sigfigs
__author__ = "Marco Esters"
__copyright__ = "Copyright 2017, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Marco Esters"
__email__ = "esters@uoregon.edu"
__date__ = "Nov 30, 2017"
class LMTOCtrl:
    """
    Class for parsing CTRL files from the Stuttgart LMTO-ASA code.
    Currently, only HEADER, VERS and the structure can be used.
    """

    def __init__(self, structure, header=None, version="LMASA-47"):
        """
        Args:
            structure: The structure as a pymatgen Structure object.
            header: The header for the CTRL file.
                Defaults to None.
            version: The LMTO version that is used for the VERS category.
                Defaults to the newest version (4.7).
        """
        self.structure = structure
        self.header = header
        self.version = version

    def __eq__(self, other):
        # Two CTRL objects are equal when they render to the same file text.
        return self.get_string() == other.get_string()

    def __repr__(self):
        """
        Representation of the CTRL file is as a string.
        """
        return self.get_string()

    def __str__(self):
        """
        String representation of the CTRL file.
        """
        return self.get_string()

    def get_string(self, sigfigs=8):
        """
        Generates the string representation of the CTRL file. This is
        the minimal CTRL file necessary to execute lmhart.run.
        """
        ctrl_dict = self.as_dict()
        lines = [] if "HEADER" not in ctrl_dict else ["HEADER".ljust(10) + self.header]
        if "VERS" in ctrl_dict:
            lines.append("VERS".ljust(10) + self.version)
        lines.append("STRUC".ljust(10) + "ALAT=" + str(round(ctrl_dict["ALAT"], sigfigs)))
        # Lattice vectors: category keyword on the first row only, the
        # continuation rows are padded with spaces.
        for l, latt in enumerate(ctrl_dict["PLAT"]):
            if l == 0:
                line = "PLAT=".rjust(15)
            else:
                line = " ".ljust(15)
            line += " ".join([str(round(v, sigfigs)) for v in latt])
            lines.append(line)
        for cat in ["CLASS", "SITE"]:
            for a, atoms in enumerate(ctrl_dict[cat]):
                if a == 0:
                    line = [cat.ljust(9)]
                else:
                    line = [" ".ljust(9)]
                for token, val in sorted(atoms.items()):
                    if token == "POS":
                        line.append("POS=" + " ".join([str(round(p, sigfigs)) for p in val]))
                    else:
                        line.append(token + "=" + str(val))
                line = " ".join(line)
                lines.append(line)
        return "\n".join(lines) + "\n"

    def as_dict(self):
        """
        Returns the CTRL as a dictionary. "SITE" and "CLASS" are of
        the form {'CATEGORY': {'TOKEN': value}}, the rest is of the
        form 'TOKEN'/'CATEGORY': value. It gets the conventional standard
        structure because primitive cells use the conventional
        a-lattice parameter as the scaling factor and not the a-lattice
        parameter of the primitive cell.
        """
        ctrl_dict = {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
        }
        if self.header is not None:
            ctrl_dict["HEADER"] = self.header
        if self.version is not None:
            ctrl_dict["VERS"] = self.version
        sga = SpacegroupAnalyzer(self.structure)
        alat = sga.get_conventional_standard_structure().lattice.a
        plat = self.structure.lattice.matrix / alat
        """
        The following is to find the classes (atoms that are not symmetry
        equivalent, and create labels. Note that LMTO only attaches
        numbers with the second atom of the same species, e.g. "Bi", "Bi1",
        "Bi2", etc.
        """
        eq_atoms = sga.get_symmetry_dataset()["equivalent_atoms"]
        ineq_sites_index = list(set(eq_atoms))
        sites = []
        classes = []
        num_atoms = {}
        for s, site in enumerate(self.structure.sites):
            atom = site.specie
            label_index = ineq_sites_index.index(eq_atoms[s])
            if atom.symbol in num_atoms:
                if label_index + 1 > sum(num_atoms.values()):
                    num_atoms[atom.symbol] += 1
                    atom_label = atom.symbol + str(num_atoms[atom.symbol] - 1)
                    classes.append({"ATOM": atom_label, "Z": atom.Z})
            else:
                num_atoms[atom.symbol] = 1
                classes.append({"ATOM": atom.symbol, "Z": atom.Z})
            sites.append({"ATOM": classes[label_index]["ATOM"], "POS": site.coords / alat})
        ctrl_dict.update(
            {
                # LMTO works in Bohr radii, pymatgen in Angstrom.
                "ALAT": alat / bohr_to_angstrom,
                "PLAT": plat,
                "CLASS": classes,
                "SITE": sites,
            }
        )
        return ctrl_dict

    def write_file(self, filename="CTRL", **kwargs):
        """
        Writes a CTRL file with structure, HEADER, and VERS that can be
        used as input for lmhart.run.
        """
        with zopen(filename, "wt") as f:
            f.write(self.get_string(**kwargs))

    @classmethod
    def from_file(cls, filename="CTRL", **kwargs):
        """
        Creates a CTRL file object from an existing file.

        Args:
            filename: The name of the CTRL file. Defaults to 'CTRL'.

        Returns:
            An LMTOCtrl object.
        """
        with zopen(filename, "rt") as f:
            contents = f.read()
        return LMTOCtrl.from_string(contents, **kwargs)

    @classmethod
    def from_string(cls, data, sigfigs=8):
        """
        Creates a CTRL file object from a string. This will mostly be
        used to read an LMTOCtrl object from a CTRL file. Empty spheres
        are ignored.

        Args:
            data: String representation of the CTRL file.

        Returns:
            An LMTOCtrl object.
        """
        lines = data.split("\n")[:-1]
        struc_lines = {
            "HEADER": [],
            "VERS": [],
            "SYMGRP": [],
            "STRUC": [],
            "CLASS": [],
            "SITE": [],
        }
        # Group the raw lines by category; indented lines are continuations
        # of the most recently seen category.
        for line in lines:
            if line != "" and not line.isspace():
                if not line[0].isspace():
                    cat = line.split()[0]
                if cat in struc_lines:
                    struc_lines[cat].append(line)
                else:
                    pass
        for cat in struc_lines:
            struc_lines[cat] = " ".join(struc_lines[cat]).replace("= ", "=")
        structure_tokens = {"ALAT": None, "PLAT": [], "CLASS": [], "SITE": []}
        for cat in ["STRUC", "CLASS", "SITE"]:
            fields = struc_lines[cat].split("=")  # pylint: disable=E1101
            for f, field in enumerate(fields):
                token = field.split()[-1]
                if token == "ALAT":
                    alat = round(float(fields[f + 1].split()[0]), sigfigs)
                    structure_tokens["ALAT"] = alat
                elif token == "ATOM":
                    atom = fields[f + 1].split()[0]
                    # Labels matching E[0-9]* are empty spheres and skipped.
                    if not bool(re.match("E[0-9]*$", atom)):
                        if cat == "CLASS":
                            structure_tokens["CLASS"].append(atom)
                        else:
                            structure_tokens["SITE"].append({"ATOM": atom})
                    else:
                        pass
                elif token in ["PLAT", "POS"]:
                    try:
                        arr = np.array([round(float(i), sigfigs) for i in fields[f + 1].split()])
                    except ValueError:
                        # The last entry may be the next category keyword;
                        # drop it and retry.
                        arr = np.array([round(float(i), sigfigs) for i in fields[f + 1].split()[:-1]])
                    if token == "PLAT":
                        structure_tokens["PLAT"] = arr.reshape([3, 3])
                    elif not bool(re.match("E[0-9]*$", atom)):
                        structure_tokens["SITE"][-1]["POS"] = arr
                    else:
                        pass
                else:
                    pass
        try:
            spcgrp_index = struc_lines["SYMGRP"].index("SPCGRP")
            spcgrp = struc_lines["SYMGRP"][spcgrp_index : spcgrp_index + 12]
            structure_tokens["SPCGRP"] = spcgrp.split("=")[1].split()[0]
        except ValueError:
            # No SPCGRP token present; the space group stays undefined.
            pass
        for token in ["HEADER", "VERS"]:
            try:
                value = re.split(token + r"\s*", struc_lines[token])[1]
                structure_tokens[token] = value.strip()
            except IndexError:
                pass
        return LMTOCtrl.from_dict(structure_tokens)

    @classmethod
    def from_dict(cls, d):
        """
        Creates a CTRL file object from a dictionary. The dictionary
        must contain the items "ALAT", PLAT" and "SITE".

        Valid dictionary items are:
            ALAT: the a-lattice parameter
            PLAT: (3x3) array for the lattice vectors
            SITE: list of dictionaries: {'ATOM': class label,
                                         'POS': (3x1) array of fractional
                                                coordinates}
            CLASS (optional): list of unique atom labels as str
            SPCGRP (optional): space group symbol (str) or number (int)
            HEADER (optional): HEADER text as a str
            VERS (optional): LMTO version as a str

        Args:
            d: The CTRL file as a dictionary.

        Returns:
            An LMTOCtrl object.
        """
        for cat in ["HEADER", "VERS"]:
            if cat not in d:
                d[cat] = None
        # Convert from LMTO units (Bohr, alat-scaled) back to Angstrom.
        alat = d["ALAT"] * bohr_to_angstrom
        plat = d["PLAT"] * alat
        species = []
        positions = []
        for site in d["SITE"]:
            species.append(re.split("[0-9*]", site["ATOM"])[0])
            positions.append(site["POS"] * alat)
        # Only check if the structure is to be generated from the space
        # group if the number of sites is the same as the number of classes.
        # If lattice and the spacegroup don't match, assume it's primitive.
        if "CLASS" in d and "SPCGRP" in d and len(d["SITE"]) == len(d["CLASS"]):
            try:
                structure = Structure.from_spacegroup(d["SPCGRP"], plat, species, positions, coords_are_cartesian=True)
            except ValueError:
                structure = Structure(
                    plat,
                    species,
                    positions,
                    coords_are_cartesian=True,
                    to_unit_cell=True,
                )
        else:
            structure = Structure(plat, species, positions, coords_are_cartesian=True, to_unit_cell=True)
        return cls(structure, header=d["HEADER"], version=d["VERS"])
class LMTOCopl:
    """
    Class for reading COPL files, which contain COHP data.

    .. attribute: cohp_data

        Dict that contains the COHP data of the form:
        {bond: {"COHP": {Spin.up: cohps, Spin.down:cohps},
                "ICOHP": {Spin.up: icohps, Spin.down: icohps},
                "length": bond length}

    .. attribute: efermi

        The Fermi energy in Ry or eV.

    .. attribute: energies

        Sequence of energies in Ry or eV.

    .. attribute: is_spin_polarized

        Boolean to indicate if the calculation is spin polarized.
    """

    def __init__(self, filename="COPL", to_eV=False):
        """
        Args:
            filename: filename of the COPL file. Defaults to "COPL".
            to_eV: LMTO-ASA gives energies in Ry. To convert energies into
                eV, set to True. Defaults to False for energies in Ry.
        """
        # COPL files have an extra trailing blank line
        with zopen(filename, "rt") as f:
            contents = f.read().split("\n")[:-1]
        # The parameters line is the second line in a COPL file. It
        # contains all parameters that are needed to map the file.
        parameters = contents[1].split()
        num_bonds = int(parameters[0])
        if int(parameters[1]) == 2:
            spins = [Spin.up, Spin.down]
            self.is_spin_polarized = True
        else:
            spins = [Spin.up]
            self.is_spin_polarized = False
        # The COHP data start in row num_bonds + 3; transpose so that each
        # column of the file becomes one row of `data`.
        data = np.array([np.array(row.split(), dtype=float) for row in contents[num_bonds + 2 :]]).transpose()
        if to_eV:
            # LMTO energies have 5 sig figs
            self.energies = np.array(
                [round_to_sigfigs(energy, 5) for energy in data[0] * Ry_to_eV],
                dtype=float,
            )
            self.efermi = round_to_sigfigs(float(parameters[-1]) * Ry_to_eV, 5)
        else:
            self.energies = data[0]
            self.efermi = float(parameters[-1])
        cohp_data = {}
        for bond in range(num_bonds):
            label, length, sites = self._get_bond_data(contents[2 + bond])
            # COHP and ICOHP columns alternate per bond; the spin-down block
            # follows the complete spin-up block.
            cohp = {spin: data[2 * (bond + s * num_bonds) + 1] for s, spin in enumerate(spins)}
            if to_eV:
                icohp = {
                    spin: np.array([round_to_sigfigs(i, 5) for i in data[2 * (bond + s * num_bonds) + 2] * Ry_to_eV])
                    for s, spin in enumerate(spins)
                }
            else:
                icohp = {spin: data[2 * (bond + s * num_bonds) + 2] for s, spin in enumerate(spins)}
            # This takes care of duplicate labels
            if label in cohp_data:
                i = 1
                lab = "%s-%d" % (label, i)
                while lab in cohp_data:
                    i += 1
                    lab = "%s-%d" % (label, i)
                label = lab
            cohp_data[label] = {
                "COHP": cohp,
                "ICOHP": icohp,
                "length": length,
                "sites": sites,
            }
        self.cohp_data = cohp_data

    @staticmethod
    def _get_bond_data(line):
        """
        Subroutine to extract bond label, site indices, and length from
        a COPL header line. The site indices are zero-based, so they
        can be easily used with a Structure object.

        Example header line: Fe-1/Fe-1-tr(-1,-1,-1) : 2.482 Ang.

        Args:
            line: line in the COHPCAR header describing the bond.

        Returns:
            The bond label, the bond length and a tuple of the site
            indices.
        """
        line = line.split()
        length = float(line[2])
        # Replacing "/" with "-" makes splitting easier
        sites = line[0].replace("/", "-").split("-")
        # Elements 1 and 3 of the split are the one-based site numbers;
        # elements 0 and 2 carry the species symbols.
        site_indices = tuple(int(ind) - 1 for ind in sites[1:4:2])
        species = tuple(re.split(r"\d+", spec)[0] for spec in sites[0:3:2])
        label = "%s%d-%s%d" % (
            species[0],
            site_indices[0] + 1,
            species[1],
            site_indices[1] + 1,
        )
        return label, length, site_indices
|
vorwerkc/pymatgen
|
pymatgen/io/lmto.py
|
Python
|
mit
| 15,527
|
[
"pymatgen"
] |
0258c11c6f524d7ccdd78a16c744d9ecfa4e0ce3423355d82439bcc504d21b03
|
'''
Run an assimilation cycle using the Kalman Filter; for each assimilation window:

1. observations are obtained (simulated from the truth plus a random realization of R);
2. an analysis is computed;
3. the analysis is integrated using the model to produce the forecast;
4. covariance matrices are propagated using the Kalman Filter equations;
5. the forecast and covariance is then used for the next assimilation window.

Observation, forecast and model errors statistics need to be provided:

- correlation model
- correlation length
- bias (0 by default)
- variance (constant on the domain)

The script plots the truth and forecast trajectories as well as the forecast and analysis variances evolution in time.
'''
from sys import stdout

import numpy as np
from numpy import pi
import matplotlib.pyplot as plt

from DM93 import AdvectionDiffusionModel
from DM93 import Covariance, Uncorrelated, Foar, Soar, Gaussian

#====================================================================
#===| setup and configuration |======================================

# config.py is expected to define (at least): grid, U, dt, nu, km, h.
# NOTE(review): execfile is Python 2 only.
execfile('config.py')

doAssimilate = True

# -- observation errors (R): uncorrelated, unbiased
obsLc = None
obsCorr = Uncorrelated(grid)
obsBias = 0.
obsVar = 0.1

# -- forecast errors (B): SOAR correlation model
fctLc = grid.L/20.
fctCorr = Soar(grid, fctLc)
fctBias = 0.
fctVar = 2.

# -- model errors (Q): Gaussian correlation model
modLc = grid.L/50.
modCorr = Gaussian(grid, modLc)
modBias = 0.
modVar = 0.01

# -- initial truth state: Gaussian bump of amplitude ampl
ampl = 10.
truIc = ampl * np.exp(-grid.x**2/(grid.L/6.)**2)

# -- model
model = AdvectionDiffusionModel(grid, U, dt=dt, nu=nu)

# -- integration: number of assimilation windows
nDt = 10

#====================================================================
#===| computations |=================================================

# -- memory allocation
times = np.array([i*dt for i in xrange(nDt+1)])
truTraj = np.empty(shape=(nDt+1, grid.J))   # truth
obsTraj = np.empty(shape=(nDt+1, grid.J))   # observations
anlTraj = np.empty(shape=(nDt+1, grid.J))   # analyses
fctTraj = np.empty(shape=(nDt+1, grid.J))   # forecasts
fctVarTraj = np.empty(nDt+1)                # forecast variance (first grid point)
anlVarTraj = np.empty(nDt+1)                # analysis variance (first grid point)

# -- initial covariances
B = Covariance(grid, fctCorr.matrix * fctVar)   # forecast (background) errors
R = Covariance(grid, obsCorr.matrix * obsVar)   # observation errors
Q = Covariance(grid, modCorr.matrix * modVar)   # model errors

# -- integration
xt = truIc
# initial background: truth plus a random realization of B
xbIc = xt + B.random(bias=fctBias)
xb=xbIc
A = B
for i in xrange(nDt+1):
    stdout.write('..%d'%i)
    stdout.flush()

    # -- observations: truth plus a random realization of R
    y = xt + R.random(bias=obsBias)

    if doAssimilate:

        # -- analysis using OI (Kalman gain K = B (B+R)^-1)
        SInv = np.linalg.inv(B.matrix+R.matrix)
        K = B.matrix.dot(SInv)
        xa = xb + K.dot(y-xb)

        # -- Kalman Filter covariance propagation:
        #    forecast B from the previous analysis A, then analysis A = (I-K)B
        B = Covariance(grid, model(A.matrix) + Q.matrix )
        A = Covariance(grid, (np.eye(grid.J) - K).dot(B.matrix) )

    else:
        xa = xb

    # -- propagating analysis (-> next background) and truth
    xb = model(xa)
    xt = model(xt) + Q.random(bias=modBias)

    # -- recording states
    truTraj[i] = xt
    obsTraj[i] = y
    anlTraj[i] = xa
    fctTraj[i] = xb
    fctVarTraj[i] = B.variance[0]
    anlVarTraj[i] = A.variance[0]

#====================================================================
#===| plots |========================================================

nTimeTicks = 5

fig = plt.figure(figsize=(8, 10))
fig.subplots_adjust(wspace=0.3, top=0.84)
truAx = plt.subplot(311)
fctAx = plt.subplot(312)
varAx = plt.subplot(313)

# common colour scale for the truth and forecast panels
vmin = min((truTraj.min(), fctTraj.min()))
vmax = max((truTraj.max(), fctTraj.max()))

truAx.matshow(truTraj.T, origin='lower', vmin=vmin, vmax=vmax)
fctAx.matshow(fctTraj.T, origin='lower', vmin=vmin, vmax=vmax)
truAx.set_title('Truth')
fctAx.set_title('Forecasts')

gridTicksLabel, gridTicks, indexes = grid.ticks(units=km)
for axe in (truAx, fctAx):
    axe.set_aspect('auto')
    axe.xaxis.set_ticks_position('bottom')
    axe.set_xticks(())
    axe.set_ylabel(r'$x$ [km]')
    axe.set_yticks(indexes)
    axe.set_yticklabels(gridTicksLabel)

# variance evolution: constant reference lines for sigma_o^2 and sigma_q^2
varAx.plot( times/h, obsVar*np.ones(len(times)),
            linestyle='--', color='g', label=r'$\sigma_o^2$')
if modVar > 0:
    varAx.plot( times/h, modVar*np.ones(len(times)),
                linestyle='--', color='m', label=r'$\sigma_q^2$')
varAx.plot(times/h, fctVarTraj, color='b', label=r'$\sigma_f^2$')
varAx.plot(times/h, anlVarTraj, color='r', label=r'$\sigma_a^2$')
varAx.set_yscale('log')
varAx.set_xlabel(r'$t$ [hours]')

maxVar = max((obsVar, modVar, fctVarTraj.max(), anlVarTraj.max()))
if modVar > 0:
    minVar = min((obsVar, modVar, fctVarTraj.min(), anlVarTraj.min()))
else:
    minVar = min((obsVar, fctVarTraj.min(), anlVarTraj.min()))
varAx.set_ylim(0.8*minVar, 1.2*maxVar)
varAx.set_xticks(times[::nDt/nTimeTicks]/h)
varAx.legend(loc='upper right')
varAx.set_title('Forecast variance')

fig.suptitle( r'$\sigma_q^2=%.0e,\ \sigma_b^2=%.0e,\ \sigma_o^2=%.0e$'%(
                modVar, fctVar, obsVar),
              fontsize=16
            )
plt.show()
|
martndj/DaleyMenard1993
|
kalmanFilter.py
|
Python
|
gpl-3.0
| 4,907
|
[
"Gaussian"
] |
3bba166893eab9c52c00681f103062e7fc69555bfa178b7964be279788b977e7
|
# (C) British Crown Copyright 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
import iris.tests as tests
import iris
import numpy as np
import PIL.Image
@tests.skip_gdal
@tests.skip_data
class TestGeoTiffExport(tests.IrisTest):
    """Round-trip tests for iris.experimental.raster.export_geotiff."""

    def check_tiff_header(self, geotiff_fh, reference_filename):
        """
        Checks the given tiff file handle's metadata matches the
        reference file contents.
        """
        im = PIL.Image.open(geotiff_fh)
        # Render the TIFF tags one per line (quoting unicode values) so
        # the header can be diffed against a stored text reference.
        tiff_header = '\n'.join(str((tag, val))
                                if not isinstance(val, unicode)
                                else "(%s, '%s')" % (tag, val)
                                for tag, val in sorted(im.tag.items()))
        reference_path = tests.get_result_path(reference_filename)
        self._check_same(tiff_header, reference_path, reference_filename,
                         type_comparison_name='Tiff header')

    def check_tiff(self, cube, tif_header):
        """
        Export *cube* to a temporary GeoTIFF, then verify both the file
        header (against the *tif_header* reference) and the pixel data.
        """
        import iris.experimental.raster
        with self.temp_filename('.tif') as temp_filename:
            iris.experimental.raster.export_geotiff(cube, temp_filename)

            # Check the metadata is correct.
            with open(temp_filename) as fh:
                self.check_tiff_header(fh, ('experimental', 'raster',
                                            tif_header))

            # Ensure that north is at the top then check the data is correct.
            coord_y = cube.coord(axis='Y', dim_coords=True)
            data = cube.data
            if np.diff(coord_y.bounds[0]) > 0:
                data = cube.data[::-1, :]
            im = PIL.Image.open(temp_filename)
            im_data = np.array(im)
            # Currently we only support writing 32-bit tiff, when comparing
            # the data ensure that it is also 32-bit
            np.testing.assert_array_equal(im_data,
                                          data.astype(np.float32))

    def test_unmasked(self):
        # Export plain (unmasked) data; also verify that inverting the
        # latitude ordering still yields an identical, north-up GeoTIFF.
        tif_header = 'SMALL_total_column_co2.nc.tif_header.txt'
        fin = tests.get_data_path(('NetCDF', 'global', 'xyt',
                                   'SMALL_total_column_co2.nc'))
        cube = iris.load_cube(fin)[0]

        # PIL doesn't support float64
        cube.data = cube.data.astype('f4')

        # Ensure longitude values are continuous and monotonically increasing,
        # and discard the 'half cells' at the top and bottom of the UM output
        # by extracting a subset.
        east = iris.Constraint(longitude=lambda cell: cell < 180)
        non_edge = iris.Constraint(latitude=lambda cell: -90 < cell < 90)
        cube = cube.extract(east & non_edge)
        cube.coord('longitude').guess_bounds()
        cube.coord('latitude').guess_bounds()
        self.check_tiff(cube, tif_header)

        # Check again with the latitude coordinate (and the corresponding
        # cube.data) inverted. The output should be the same as before.
        coord = cube.coord('latitude')
        coord.points = coord.points[::-1]
        coord.bounds = None
        coord.guess_bounds()
        cube.data = cube.data[::-1, :]
        self.check_tiff(cube, tif_header)

    def test_masked(self):
        # Export with a masked array; compares against a separate
        # reference header (.ma.) recording the nodata handling.
        tif_header = 'SMALL_total_column_co2.nc.ma.tif_header.txt'
        fin = tests.get_data_path(('NetCDF', 'global', 'xyt',
                                   'SMALL_total_column_co2.nc'))
        cube = iris.load_cube(fin)[0]

        # PIL doesn't support float64
        cube.data = cube.data.astype('f4')

        # Repeat the same data extract as above
        east = iris.Constraint(longitude=lambda cell: cell < 180)
        non_edge = iris.Constraint(latitude=lambda cell: -90 < cell < 90)
        cube = cube.extract(east & non_edge)
        cube.coord('longitude').guess_bounds()
        cube.coord('latitude').guess_bounds()

        # Mask some of the data
        cube.data = np.ma.masked_where(cube.data <= 380, cube.data)
        self.check_tiff(cube, tif_header)


if __name__ == "__main__":
    tests.main()
|
scollis/iris
|
lib/iris/tests/experimental/test_raster.py
|
Python
|
gpl-3.0
| 4,636
|
[
"NetCDF"
] |
2d08e97e8583805161d44f8afda0cca5375ae7638d125b952bfd60258f5fc5c5
|
""" Agent to extend the number of tasks given the Transformation definition
"""
from DIRAC import S_OK, gLogger
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
__RCSID__ = "$Id$"
AGENT_NAME = 'Transformation/MCExtensionAgent'
class MCExtensionAgent( AgentModule ):
  """ Agent that extends 'extendable' (MC) transformations with new tasks
      until their MaxNumberOfTasks target is reached, subject to a maximum
      failure rate, a per-iteration cap and a waiting-jobs limit.
  """

  def __init__( self, *args, **kwargs ):
    ''' c'tor
    '''
    AgentModule.__init__( self, *args, **kwargs )

    self.transClient = TransformationClient()

    # Transformation types to consider: explicit agent option first,
    # otherwise the Operations helper defaults.
    agentTSTypes = self.am_getOption( 'TransformationTypes', [] )
    if agentTSTypes:
      self.transformationTypes = sorted( agentTSTypes )
    else:
      self.transformationTypes = sorted( Operations().getValue( 'Transformations/ExtendableTransfTypes',
                                                                ['MCSimulation', 'Simulation'] ) )
    self.maxIterationTasks = self.am_getOption( 'TasksPerIteration', 50 )  # cap per agent cycle
    self.maxFailRate = self.am_getOption( 'MaxFailureRate', 30 )           # percent
    self.maxWaitingJobs = self.am_getOption( 'MaxWaitingJobs', 1000 )

  #############################################################################
  def initialize( self ):
    '''Sets defaults and logs the effective configuration.
    '''
    gLogger.info( "Will consider the following transformation types: %s" % str( self.transformationTypes ) )
    gLogger.info( "Will create a maximum of %s tasks per iteration" % self.maxIterationTasks )
    gLogger.info( "Will not submit tasks for transformations with failure rate greater than %s%%" % ( self.maxFailRate ) )
    gLogger.info( "Will not submit tasks for transformations with more than %d waiting jobs" % self.maxWaitingJobs )

    return S_OK()

  #############################################################################
  def execute( self ):
    ''' The MCExtensionAgent execution method: try to extend every Active
        transformation of the configured types.
    '''

    self.enableFlag = self.am_getOption( 'EnableFlag', 'True' )
    if not self.enableFlag == 'True':
      self.log.info( 'MCExtensionAgent is disabled by configuration option EnableFlag' )
      return S_OK( 'Disabled via CS flag' )

    # Obtain the Active transformations of the eligible types and try to
    # extend each one up to its MaxNumberOfTasks.
    res = self.transClient.getTransformations( {'Status':'Active', 'Type':self.transformationTypes} )
    if res['OK']:
      for transDict in res['Value']:
        transID = transDict['TransformationID']
        maxTasks = transDict['MaxNumberOfTasks']
        self.extendTransformation( transID, maxTasks )
    return S_OK()

  def extendTransformation( self, transID, maxTasks ):
    ''' Extend a single transformation towards maxTasks, respecting the
        configured limits. Returns S_OK/S_ERROR.
    '''
    gLogger.info( "Considering extension of transformation %d" % transID )
    # Get the current count of tasks submitted for this transformation
    res = self.transClient.getTransformationTaskStats( transID )
    if not res['OK']:
      if res['Message'] != 'No records found':
        gLogger.error( "Failed to get task statistics", "%s %s" % ( transID, res['Message'] ) )
        return res
      else:
        # No tasks yet: treat as an empty statistics dictionary.
        statusDict = {}
    else:
      statusDict = res['Value']
    gLogger.verbose( "Current task count for transformation %d" % transID )
    for status in sorted( statusDict.keys() ):
      statusCount = statusDict[status]
      gLogger.verbose( "%s : %s" % ( status.ljust( 20 ), str( statusCount ).rjust( 8 ) ) )
    # Determine the number of tasks to be created
    numberOfTasks = self._calculateTaskNumber( maxTasks, statusDict )
    if not numberOfTasks:
      gLogger.info( "No tasks required for transformation %d" % transID )
      return S_OK()
    # Extend the transformation by the determined number of tasks
    res = self.transClient.extendTransformation( transID, numberOfTasks )
    if not res['OK']:
      gLogger.error( "Failed to extend transformation", "%s %s" % ( transID, res['Message'] ) )
      return res
    gLogger.info( "Successfully extended transformation %d by %d tasks" % ( transID, numberOfTasks ) )
    return S_OK()

  def _calculateTaskNumber( self, maxTasks, statusDict ):
    ''' Utility function: decide how many new tasks to create.

        Returns 0 when the failure rate exceeds maxFailRate, when the
        'Done' target is already met, or when too many jobs are waiting;
        otherwise the shortfall, capped at maxIterationTasks.
    '''
    done = statusDict.get( 'Done', 0 )
    failed = statusDict.get( 'Failed', 0 )
    waiting = statusDict.get( 'Waiting', 0 )
    total = statusDict.get( 'TotalCreated', 0 )
    # If the failure rate is higher than acceptable
    if ( total != 0 ) and ( ( 100.0 * float( failed ) / float( total ) ) > self.maxFailRate ):
      return 0
    # If we already have enough completed jobs
    if done >= maxTasks:
      return 0
    if waiting > self.maxWaitingJobs:
      return 0
    # Clamp at 0: the already-created non-failed tasks can exceed the
    # target, and a negative count must never reach extendTransformation.
    numberOfTasks = max( 0, maxTasks - ( total - failed ) )
    if numberOfTasks > self.maxIterationTasks:
      numberOfTasks = self.maxIterationTasks
    return numberOfTasks
|
chaen/DIRAC
|
TransformationSystem/Agent/MCExtensionAgent.py
|
Python
|
gpl-3.0
| 4,845
|
[
"DIRAC"
] |
274516cd93a20d2c0e8d64fc4cf3fe039fe1d2965e3e92ec864947b098c12d2b
|
from sys import argv
import pylab as plt
from ase.dft import STM
from gpaw import restart
filename = argv[1]
# Optional second argument: average tip height z above the surface (Angstrom).
if len(argv) > 2:
    z = float(argv[2])
else:
    z = 2.5

# Reload atoms and calculator state from the GPAW restart file (no log output).
atoms, calc = restart(filename, txt=None)

stm = STM(atoms, symmetries=[0, 1, 2])
# Average tunneling current at height z above the surface.
c = stm.get_averaged_current(z)

# Get 2d array of constant current heights:
h = stm.scan(c)
print u'Min: %.2f Ang, Max: %.2f Ang' % (h.min(), h.max())

plt.contourf(h, 40)
plt.hot()
plt.colorbar()
plt.show()
|
qsnake/gpaw
|
doc/exercises/stm/stm.py
|
Python
|
gpl-3.0
| 469
|
[
"ASE",
"GPAW"
] |
450ee736f3e42c0e9a81e2c8f167aa09a5259485d222c7758f718a4ca6e43caf
|
import common, chess, copy
# patterns/cycles/pw
def check(problem, board, solution):
    """Detect patterns, cycles and Platzwechsel themes in a solution tree
    and return them as a {keyword: True} dictionary."""
    found = {}
    # Build per-piece trajectory trees, then scan them for geometric
    # patterns and cycles.
    builder = TrajectoriesBuilder()
    builder.visit(solution, [])
    traverse_trajectories([], builder.result, found)
    # Scan raw move sequences for Platzwechsel (place exchange).
    traverse_for_platzwechsel([], solution.siblings, found)
    return found
def traverse_for_platzwechsel(head, tail, retval):
    """
    Depth-first walk over the solution tree looking for Platzwechsel
    (two pieces exchanging squares). *head* is the move sequence so far,
    *tail* the sibling nodes still to explore, *retval* the keyword dict.
    """
    if len(head) > 2:
        # upon each move in the solution tree:
        # 1) trace piece route to the arrival square (route)
        # 2) find all who left from the arrival square (candidates)
        # 3) for each of them check if they crossed the route
        route, candidates = [], []
        for i in xrange(len(head) - 1):
            if head[i].departing_piece_id == head[-1].departing_piece_id:
                route.append(head[i].dep[1])
            if head[i].dep[1] == head[-1].arr[1] and head[i].departing_piece_id != head[-1].departing_piece_id:
                candidates.append(head[i].departing_piece_id)
            if head[i].departing_piece_id in candidates and head[i].arr[1] in route:
                retval['Platzwechsel'] = True
                retval['Platzwechsel('+get_piece_name(head[-1].dep[0])+'/'+ get_piece_name(head[i].dep[0]) +')'] = True
    for node in tail:
        new_head = copy.copy(head)
        # Null moves are skipped but their subtrees are still explored.
        if isinstance(node, chess.MoveNode) and not isinstance(node.move, chess.NullMove):
            new_head.append(node.move)
        traverse_for_platzwechsel(new_head, node.siblings, retval)
# rundlauf: cycle of length > 3 w/o repeated squares, all squares are not on a single line
# switchback: any other cycle
def traverse_trajectories(head, tail, retval):
    """
    Depth-first walk over per-piece trajectory trees (TNode nodes);
    records geometric patterns (star/cross/wheel/...) and cycles
    (Rundlauf / Switchback) in *retval*.
    """
    # patterns
    if len(head) > 0:
        if len(head[-1].branches) > 3:
            patterns = get_patterns(head[-1].square)
            for (name, squares) in patterns.items():
                # pattern matched when every pattern square is a branch
                if len(squares) == len([y for y in squares if y in [x.square for x in head[-1].branches]]):
                    retval[name] = True
                    retval[name+'('+get_piece_name(head[-1].piece)+')'] = True # todo albino/p-ny
    # cycles
    if len(head) > 2:
        # search the head backwards to find its last element
        # NOTE(review): 'prev' is assigned but never used — verify intent.
        prev = -1
        for i in xrange(len(head)-2, -1, -1):
            if head[i].square == head[-1].square:
                rl = len(head) - 1 - i > 1 # cycle length > 2
                if rl: # all cycle elements are different
                    rl = common.all_different([x.square for x in head[i+1:len(head)-2]])
                if rl: # all cycle elements are not on the same line
                    for j in xrange(i+1, len(head)-2):
                        if not chess.LUT.att['q'][head[i].square][head[j].square]:
                            break
                    else:
                        rl = False
                if rl:
                    retval['Rundlauf'] = True
                    retval['Rundlauf('+get_piece_name(head[-1].piece)+')'] = True
                else:
                    retval['Switchback'] = True
                    retval['Switchback('+get_piece_name(head[-1].piece)+')'] = True
    for tnode in tail:
        new_head = copy.copy(head)
        new_head.append(tnode)
        traverse_trajectories(new_head, tnode.branches, retval)
class TNode:
    """A node in a piece trajectory tree: one square visited by a piece."""

    def __init__(self, square, id, piece):
        # branches: child nodes, i.e. squares the piece moved to next
        self.square, self.id, self.piece, self.branches = square, id, piece, []

    def dump(self, level):
        # Debug print of the subtree, indented by recursion depth.
        for i in xrange(level):
            print "  ",
        print '->', self.piece + chess.to_xy(self.square)
        for tn in self.branches:
            tn.dump(level+1)
class TrajectoriesBuilder():
    """
    Builds per-piece trajectory trees (TNode) from a solution tree.
    After visit(), *result* holds one root TNode per piece that moved.
    """

    def __init__(self):
        self.result = []

    def visit(self, solution_node, level):
        """
        Recursive walk of the solution tree; *level* tracks, for each
        piece, its current TNode along the current line of play.
        """
        if isinstance(solution_node, chess.MoveNode) and not isinstance(solution_node.move, chess.NullMove):
            # looking for the piece in the result
            for tnode in self.result:
                if tnode.id == solution_node.move.departing_piece_id and tnode.piece == solution_node.move.dep[0]:
                    # if it is not in level
                    # NOTE(review): this for/else always runs the else branch
                    # (the loop body never breaks) — likely 'break' was
                    # intended instead of 'pass'; verify against history.
                    for tnode2 in level:
                        if tnode2.id == tnode.id and tnode2.piece == tnode.piece:
                            pass
                    else:
                        level.append(tnode)
                    break # ok, it's there
            else:
                # first time this piece moves: create its trajectory root
                new_tnode = TNode(solution_node.move.dep[1], solution_node.move.departing_piece_id, solution_node.move.dep[0])
                self.result.append(new_tnode)
                level.append(new_tnode)
            # looking for the piece in the level
            for i in xrange(len(level)):
                if level[i].id == solution_node.move.departing_piece_id and level[i].piece == solution_node.move.dep[0]:
                    # extend the trajectory with the arrival square and make
                    # the new node the piece's current node in this line
                    new_tnode = TNode(solution_node.move.arr[1], level[i].id, level[i].piece)
                    level[i].branches.append(new_tnode)
                    new_level = []
                    for j in xrange(len(level)):
                        if i <> j:
                            new_level.append(level[j])
                        else:
                            new_level.append(new_tnode)
                    level = new_level
                    break
        for node in solution_node.siblings:
            self.visit(node, level)
def get_patterns(square):
    """Return {pattern name: list of destination squares} for each
    geometric piece pattern centred on *square*; patterns that would
    leave the board are omitted."""
    offsets_by_name = {
        'Star': [(1, 1), (1, -1), (-1, 1), (-1, -1)],
        'Big star': [(2, 2), (2, -2), (-2, 2), (-2, -2)],
        'Cross': [(0, 1), (0, -1), (-1, 0), (1, 0)],
        'Big cross': [(0, 2), (0, -2), (-2, 0), (2, 0)],
        'Wheel': [(1, 2), (2, 1), (2, -1), (1, -2), (-1, -2), (-2, -1), (-2, 1), (-1, 2)],
        'Albino': [(-1, -1), (1, -1), (0, -1), (0, -2)],
        'Pickaninny': [(-1, 1), (1, 1), (0, 1), (0, 2)],
    }
    result = {}
    cx, cy = chess.LUT.to_xy(square)
    for name, offsets in offsets_by_name.items():
        fits = True
        squares = []
        for dx, dy in offsets:
            px, py = cx + dx, cy + dy
            if chess.LUT.oob(px, py):
                # one square off the board disqualifies the whole pattern
                fits = False
                break
            squares.append(chess.LUT.from_xy(px, py))
        if fits:
            result[name] = squares
    return result
def get_piece_name(piece):
    """Return the colour-prefixed piece name, e.g. 'Q' -> 'WQ', 'q' -> 'BQ'
    (lowercase input denotes a black piece)."""
    colour = "B" if piece == piece.lower() else "W"
    return colour + piece.upper()
|
tectronics/olive-gui
|
legacy/trajectories.py
|
Python
|
gpl-3.0
| 6,236
|
[
"VisIt"
] |
f9dbc1cd0d8403b60423661ad100ea2ae8071bf1cebf5fb2db7a206a1da3334d
|
# Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Mirror of :mod:`iris.tests.unit.fileformats.netcdf.test_Saver`, but with lazy arrays."""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests # isort:skip
from dask import array as da
from iris.coords import AuxCoord
from iris.fileformats.netcdf import Saver
from iris.tests import stock
from iris.tests.unit.fileformats.netcdf import test_Saver
class LazyMixin(tests.IrisTest):
    """Mixin that re-runs the mirrored test cases with dask (lazy) arrays."""

    # Array library handed to the shared test bodies instead of numpy.
    array_lib = da

    def result_path(self, basename=None, ext=""):
        """Redirect result lookups to the test_Saver reference files."""
        # Precisely mirroring the tests in test_Saver, so use those CDL's.
        original = super().result_path(basename, ext)
        return original.replace("Saver__lazy", "Saver")
class Test_write(LazyMixin, test_Saver.Test_write):
    # Re-run Test_write with lazy arrays (see LazyMixin in this module).
    pass


class Test__create_cf_bounds(test_Saver.Test__create_cf_bounds):
    @staticmethod
    def climatology_3d():
        """As stock.climatology_3d(), but with a lazy 'time' coordinate."""
        cube = stock.climatology_3d()
        aux_coord = AuxCoord.from_coord(cube.coord("time"))
        lazy_coord = aux_coord.copy(
            aux_coord.lazy_points(), aux_coord.lazy_bounds()
        )
        cube.replace_coord(lazy_coord)
        return cube


# The remaining classes re-run the corresponding test_Saver cases with
# lazy arrays mixed in; they need no bodies of their own.
class Test_write__valid_x_cube_attributes(
    LazyMixin, test_Saver.Test_write__valid_x_cube_attributes
):
    pass


class Test_write__valid_x_coord_attributes(
    LazyMixin, test_Saver.Test_write__valid_x_coord_attributes
):
    pass


class Test_write_fill_value(LazyMixin, test_Saver.Test_write_fill_value):
    pass


class Test_check_attribute_compliance__valid_range(
    LazyMixin, test_Saver.Test_check_attribute_compliance__valid_range
):
    pass


class Test_check_attribute_compliance__valid_min(
    LazyMixin, test_Saver.Test_check_attribute_compliance__valid_min
):
    pass


class Test_check_attribute_compliance__valid_max(
    LazyMixin, test_Saver.Test_check_attribute_compliance__valid_max
):
    pass


class Test_check_attribute_compliance__exception_handling(
    LazyMixin, test_Saver.Test_check_attribute_compliance__exception_handling
):
    pass


class Test__create_cf_cell_measure_variable(
    LazyMixin, test_Saver.Test__create_cf_cell_measure_variable
):
    pass
class TestStreamed(tests.IrisTest):
    """Check which save paths stream lazy data through dask.array.store."""

    def setUp(self):
        self.cube = stock.simple_2d()
        # Spy on dask.array.store to detect streamed (lazy) saves.
        self.store_watch = self.patch("dask.array.store")

    def save_common(self, cube_to_save):
        # Helper: write the cube to a throwaway NetCDF file.
        with self.temp_filename(".nc") as nc_path:
            with Saver(nc_path, "NETCDF4") as saver:
                saver.write(cube_to_save)

    def test_realised_not_streamed(self):
        # Realised (in-memory) data must be written directly, not streamed.
        self.save_common(self.cube)
        self.assertFalse(self.store_watch.called)

    def test_lazy_streamed_data(self):
        # Lazy cube data must be streamed via dask.array.store.
        self.cube.data = self.cube.lazy_data()
        self.save_common(self.cube)
        self.assertTrue(self.store_watch.called)

    def test_lazy_streamed_coord(self):
        # Lazy coordinate points/bounds must also be streamed.
        aux_coord = AuxCoord.from_coord(self.cube.coords()[0])
        lazy_coord = aux_coord.copy(
            aux_coord.lazy_points(), aux_coord.lazy_bounds()
        )
        self.cube.replace_coord(lazy_coord)
        self.save_common(self.cube)
        self.assertTrue(self.store_watch.called)

    def test_lazy_streamed_bounds(self):
        # Lazy bounds alone (realised points) must still trigger streaming.
        aux_coord = AuxCoord.from_coord(self.cube.coords()[0])
        lazy_coord = aux_coord.copy(aux_coord.points, aux_coord.lazy_bounds())
        self.cube.replace_coord(lazy_coord)
        self.save_common(self.cube)
        self.assertTrue(self.store_watch.called)


if __name__ == "__main__":
    tests.main()
|
SciTools/iris
|
lib/iris/tests/unit/fileformats/netcdf/test_Saver__lazy.py
|
Python
|
lgpl-3.0
| 3,694
|
[
"NetCDF"
] |
403a88425d09b928cdd7838b590e49a5916cc29f8f1d9a4cac52c9369cd987f7
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-wms-job-reschedule
# Author : Stuart Paterson
########################################################################
"""
Reschedule the given DIRAC job
"""
from __future__ import print_function
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ... JobID ...' % Script.scriptName,
'Arguments:',
' JobID: DIRAC Job ID' ] ) )
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()

if len( args ) < 1:
  Script.showHelp()

from DIRAC.Interfaces.API.Dirac import Dirac, parseArguments
dirac = Dirac()
exitCode = 0
errorList = []

# Reschedule all requested jobs in a single service call.
result = dirac.rescheduleJob( parseArguments( args ) )
if result['OK']:
  print('Rescheduled job %s' % ','.join([str(j) for j in result['Value']]))
else:
  # Record the failure against every requested job ID. (The previous
  # code referenced an undefined name 'j' here, raising a NameError
  # whenever the reschedule call failed.)
  for job in parseArguments( args ):
    errorList.append( ( job, result['Message'] ) )
  print(result['Message'])
  exitCode = 2

for error in errorList:
  print("ERROR %s: %s" % error)

DIRAC.exit( exitCode )
|
chaen/DIRAC
|
Interfaces/scripts/dirac-wms-job-reschedule.py
|
Python
|
gpl-3.0
| 1,297
|
[
"DIRAC"
] |
697fac207899a654599828d52e076343a346a65cb9e2061ea8b5f106b1a1e876
|
# -----------------------------------------------------------------------
#
# < pyetree_concert.py >
#
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
#
# File Name : pyetree_concert.py
#
# Author : Josef Grosch
#
# Date : 23 Jun 2016
#
# Modification : Some
#
# Application :
#
# Description :
#
# Notes :
#
# Functions :
#
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
#
# Copyright
#
# Copyright (c) 2015 - 2016 Moose River LLC.
# < jgrosch@gmail.com >
#
# All Rights Reserved
#
# Deadicated to my brother Jerry Garcia,
# who passed from this life on August 9, 1995.
# Happy trails to you, Jerry
#
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
#
# License
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
#
# GPG Key
#
# pub 4096R/2C38BBFA 2016-05-21 [expires: 2017-05-21]
# Key fingerprint = A855 3BF0 544B 3B4E F06D 2B90 8DDC FDDA 2C38 BBFA
# uid Josef Grosch <jgrosch@gmail.com>
# sub 4096R/CC2D1F80 2016-05-21 [expires: 2017-05-21]
#
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
#
# Contact Information
#
# Moose River LLC.
# P.O. Box 9403
# Berkeley, Ca. 94709
#
# http://www.mooseriver.com
#
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
#
# Import
#
# -----------------------------------------------------------------------
import os, sys
"""
"""
#--start constants--
__author__ = "Josef Grosch"
__copyright__ = "Copyright 2015 - 2016 Moose River, LLC."
__license__ = "None"
__version__ = "0.1"
__maintainer__ = "Josef Grosch"
__email__ = "jgrosch@gmail.com"
__status__ = "Development"
#--end constants--
# -----------------------------------------------------------------------
#
# Class Concert
#
# -----------------------------------------------------------------------
class Concert:
    """Represents a single concert entry in the pyetree database."""

    # -----------------------------------------------------------------------
    #
    # __init__
    #
    # -----------------------------------------------------------------------
    def __init__(self):
        """
        Initializes this class with the following variables

        debug = False
        loaded = False
        table_name = 'pyetree'
        """
        # The docstring promised these attributes but the original body
        # never set them; initialize them as documented.
        self.debug = False            # verbose/debug mode flag
        self.loaded = False           # whether concert data has been loaded
        self.table_name = 'pyetree'   # backing database table name
    #
    # End of __init__
    #
# -----------------------------------------------------------------------
#
# < End of pyetree_concert.py >
#
# -----------------------------------------------------------------------
|
josefgrosch/Pyetree
|
pyetree/PyetreeConcert.py
|
Python
|
apache-2.0
| 3,926
|
[
"MOOSE"
] |
55dbf252d52cc780fd099b89969f10aee4e99fd4112f37dfc16996f83c992ab2
|
from django.contrib import admin
# Register your models here.
from .models import (
Collaborator,
Harvest,
Route,
Visit,
)
from pmm.admin import PersonAdmin
from core.admin import ContentAdmin
@admin.register(Collaborator)
class CollaboratorAdmin(PersonAdmin):
    # Extend the shared person admin with route-assignment columns.
    list_display = PersonAdmin.list_display + ('route', 'route_captain',)
    list_filter = PersonAdmin.list_filter + ('route', 'route_captain',)
    # NOTE(review): 'route' is a relation and 'route_captain' appears
    # non-textual; Django search_fields expects text lookups (e.g.
    # 'route__name') — verify these entries work as intended.
    search_fields = PersonAdmin.search_fields + ('route', 'route_captain',)


@admin.register(Harvest)
class HarvestAdmin(PersonAdmin):
    # Person admin plus route and discard status for harvest records.
    list_display = PersonAdmin.list_display + ('route', 'discarded',)
    list_filter = PersonAdmin.list_filter + ('route', 'discarded',)
    # NOTE(review): same search_fields concern as CollaboratorAdmin above.
    search_fields = PersonAdmin.search_fields + ('route', 'discarded',)


@admin.register(Route)
class RouteAdmin(ContentAdmin):
    # The shared content admin behaviour is sufficient for routes.
    pass


@admin.register(Visit)
class VisitAdmin(admin.ModelAdmin):
    # Plain ModelAdmin listing visits by collaborator, harvest and date.
    list_display = ('pk', 'collaborator', 'harvest', 'date',)
    list_filter = ('collaborator', 'harvest', 'date',)
    list_display_links = ['pk', 'collaborator']
    ordering = ('pk', 'collaborator', 'harvest', 'date',)
    search_fields = ('collaborator', 'harvest', 'date',)
|
sauli6692/ibc-server
|
rte/admin.py
|
Python
|
mit
| 1,179
|
[
"VisIt"
] |
3b919b80e4d9ee716bae8a35a33b4e3e615e6af7add46beec5946a9866e3d986
|
'''
Created on Jul 21, 2011
@author: mkiyer
'''
import logging
import os
import sys
from chimerascan import pysam
from chimerascan.lib import config
from chimerascan.lib.chimera import DiscordantTags, DISCORDANT_TAG_NAME, \
OrientationTags, ORIENTATION_TAG_NAME, DiscordantRead
from chimerascan.lib.gene_to_genome import build_tid_gene_map
from chimerascan.lib.batch_sort import batch_sort
def parse_pairs(bamfh):
    """
    Yield consecutive (read1, read2) tuples from an alignment stream.

    Assumes mates are adjacent in the input (read-name sorted/paired BAM);
    a trailing unpaired read is silently dropped.
    """
    bam_iter = iter(bamfh)
    while True:
        try:
            # next() builtin instead of the Python-2-only .next() method,
            # so the generator works on both Python 2.6+ and Python 3.
            r1 = next(bam_iter)
            r2 = next(bam_iter)
        except StopIteration:
            # Returning (rather than letting StopIteration escape) is also
            # required for correctness under PEP 479 (Python 3.7+).
            return
        yield r1, r2
def parse_gene_discordant_reads(bamfh):
    """
    Yield (5' read, 3' read) tuples for pairs where both mates align
    discordantly to transcripts (gene-gene chimeras only).
    """
    for mate1, mate2 in parse_pairs(bamfh):
        # TODO:
        # for now we are only going to deal with gene-gene
        # chimeras and leave other chimeras for study at a
        # later time
        tag1 = mate1.opt(DISCORDANT_TAG_NAME)
        tag2 = mate2.opt(DISCORDANT_TAG_NAME)
        if (tag1 != DiscordantTags.DISCORDANT_GENE or
            tag2 != DiscordantTags.DISCORDANT_GENE):
            continue
        # organize key in 5' to 3' order using the orientation tags;
        # exactly one mate must be the 5' end.
        orient1 = mate1.opt(ORIENTATION_TAG_NAME)
        orient2 = mate2.opt(ORIENTATION_TAG_NAME)
        assert orient1 != orient2
        if orient1 == OrientationTags.FIVEPRIME:
            yield mate1, mate2
        else:
            yield mate2, mate1
def discordant_reads_to_bedpe(index_dir, input_bam_file, output_file):
    """
    Convert gene-gene discordant read pairs from a BAM file into a
    BEDPE-style text file (one tab-separated line per pair, in
    transcript coordinates).
    """
    # open BAM alignment file
    bamfh = pysam.Samfile(input_bam_file, "rb")
    # build a lookup table to get genomic intervals from transcripts
    logging.debug("Reading gene information")
    gene_file = os.path.join(index_dir, config.GENE_FEATURE_FILE)
    tid_gene_map = build_tid_gene_map(bamfh, gene_file,
                                      rname_prefix=config.GENE_REF_PREFIX)
    outfh = open(output_file, "w")
    logging.debug("Converting BAM to BEDPE format")
    for r5p, r3p in parse_gene_discordant_reads(bamfh):
        # store pertinent read information in lightweight structure called
        # DiscordantRead object. this departs from SAM format into a
        # custom read format
        dr5p = DiscordantRead.from_read(r5p)
        dr3p = DiscordantRead.from_read(r3p)
        # get gene information
        tx5p = tid_gene_map[r5p.rname]
        tx3p = tid_gene_map[r3p.rname]
        # assemble one BEDPE record
        fields = [tx5p.tx_name, r5p.pos, r5p.aend,
                  tx3p.tx_name, r3p.pos, r3p.aend,
                  r5p.qname,  # read name
                  0,          # score
                  tx5p.strand, tx3p.strand,  # strand 1, strand 2
                  ]
        fields.append('|'.join(map(str, dr5p.to_list())))
        fields.append('|'.join(map(str, dr3p.to_list())))
        # equivalent to the Python 2 'print >>outfh' statement
        outfh.write('\t'.join(map(str, fields)) + '\n')
    outfh.close()
def sort_bedpe(input_file, output_file, tmp_dir):
    """External-sort a BEDPE file by (chrom1, chrom2, start1, start2)."""
    def pair_key(line):
        # sort BEDPE file by paired chromosome/position columns
        cols = line.strip().split('\t')
        return (cols[0], cols[3], cols[1], cols[4])
    batch_sort(input=input_file,
               output=output_file,
               key=pair_key,
               buffer_size=32000,
               tempdirs=[tmp_dir])
def main():
    """Command-line entry point: <index> <pairs.bam> <out.bedpe>."""
    from optparse import OptionParser
    logging.basicConfig(level=logging.DEBUG,
                        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    parser = OptionParser("usage: %prog [options] <index> <pairs.bam> <out.bedpe>")
    options, args = parser.parse_args()
    index_dir, input_bam_file, output_file = args[0], args[1], args[2]
    return discordant_reads_to_bedpe(index_dir,
                                     input_bam_file,
                                     output_file)


if __name__ == '__main__':
    sys.exit(main())
|
genome-vendor/chimerascan
|
chimerascan/pipeline/discordant_reads_to_bedpe.py
|
Python
|
gpl-3.0
| 3,908
|
[
"pysam"
] |
2d8c73adb13d9f361a0530c992189d629f6377184b5b2b9810c5ce3be886c910
|
#!/usr/bin/python
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import sys
import os
import glob
import re
DriverPath = ''
InsertPath = '/../../../'
# Optional single command-line argument: the driver directory to work from.
if (len(sys.argv) == 2):
    DriverPath = sys.argv[1] + '/'
    sys.path.insert(0, os.path.abspath(os.getcwd()))
def pts(category, pyfile):
    """Print a progress line naming the module being auto-documented."""
    banner = 'Auto-documenting %s module %s' % (category, pyfile)
    print(banner)
# Available psi variables in psi4/driver/qcdb/cfour.py
# Scans cfour.py for `psivar['...'] = ...` assignments and emits an RST
# page listing each distinct psi variable, then registers that page in the
# by-module index if it is not already listed.
fdriver = open('source/autodir_psivariables/module__cfour.rst', 'w')
fdriver.write('\n\n')
psivars = []
for pyfile in glob.glob(DriverPath + '../../psi4/driver/qcdb/cfour.py'):
    filename = os.path.split(pyfile)[1]
    basename = os.path.splitext(filename)[0]
    div = '=' * len(basename)
    if basename not in []:
        pts('psi variables', basename)
        # Anchor + heading for the module's psivar section.
        fdriver.write('.. _`apdx:%s_psivar`:\n\n' % (basename.lower()))
        fdriver.write('\n%s\n%s\n\n' % (basename.upper(), '"' * len(basename)))
        fdriver.write('.. hlist::\n :columns: 1\n\n')
        f = open(pyfile)
        contents = f.readlines()
        f.close()
        # Collect every distinct key assigned into the psivar dict.
        for line in contents:
            mobj = re.search(r"""^\s*psivar\[\'(.*)\'\]\s*=""", line)
            if mobj:
                if mobj.group(1) not in psivars:
                    psivars.append(mobj.group(1))
        # One :psivar: role per variable; link target is the name with
        # spaces squashed out.
        for pv in sorted(psivars):
            pvsquashed = pv.replace(' ', '')
            fdriver.write(' * :psivar:`%s <%s>`\n\n' % (pv, pvsquashed))
        fdriver.write('\n')
fdriver.close()
# Append the new page to the by-module index only if it is not already there
# (the for/else runs the else block when no line mentioned module__cfour).
for line in open('source/autodoc_psivariables_bymodule.rst'):
    if 'module__cfour' in line:
        break
else:
    fdriver = open('source/autodoc_psivariables_bymodule.rst', 'a')
    fdriver.write(' autodir_psivariables/module__cfour\n\n')
    fdriver.close()
|
CDSherrill/psi4
|
doc/sphinxman/document_cfour.py
|
Python
|
lgpl-3.0
| 2,585
|
[
"CFOUR",
"Psi4"
] |
b8ac6f269354fe2143f79e145e89a97ce322a073551386a52d67556e65fc58d5
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import numpy as np
import espressomd
import espressomd.lb
from scipy.optimize import curve_fit
# Lattice-Boltzmann discretisation and fluid parameters shared by the tests.
AGRID = .5      # lattice spacing
N_CELLS = 12    # LB cells per box edge (box length = AGRID * N_CELLS)
TAU = 0.002     # LB/MD time step
SEED = 1        # RNG seed for the thermalized LB fluid
DENS = 2.4      # fluid mass density
VISC = 1.8      # kinematic viscosity
KT = 0.8        # thermal energy
class TestLBPressureACF:
    """Tests that the thermalized LB pressure auto correlation function
    is consistent with the chosen viscosity
    """

    system = espressomd.System(box_l=[AGRID * N_CELLS] * 3)
    system.time_step = TAU
    system.cell_system.skin = 0

    def tearDown(self):
        # Detach the LB fluid and thermostat so each test starts clean.
        self.system.actors.clear()
        self.system.thermostat.turn_off()

    def test(self):
        """Sample the LB pressure tensor and check (a) symmetry of the
        off-diagonal elements, (b) that their averages vanish within
        tolerance, and (c) that the Green-Kubo integral of the stress ACF
        reproduces the input dynamic viscosity VISC * DENS."""
        # setup
        system = self.system
        lb = self.lb_class(agrid=AGRID, dens=DENS, visc=VISC,
                           tau=TAU, kT=KT, seed=SEED)
        system.actors.add(lb)
        system.thermostat.set_lb(LB_fluid=lb, seed=2)

        # Warmup
        system.integrator.run(500)

        # sampling
        steps = 50000
        p_global = np.zeros((steps, 3, 3))
        p_node = np.zeros((steps, 3, 3))

        node = lb[0, 0, 0]

        for i in range(steps):
            p_node[i] = node.pressure_tensor
            p_global[i] = lb.pressure_tensor

            system.integrator.run(2)

        # Test that <sigma_[i!=j]> ~=0 and sigma_[ij]=sigma_[ji]
        tol_global = 4 / np.sqrt(steps)
        tol_node = tol_global * np.sqrt(N_CELLS**3)

        # check single node
        for i in range(3):
            for j in range(i + 1, 3):
                avg_ij = np.average(p_node[:, i, j])
                # BUG FIX: compare against the transposed element (j, i);
                # the original averaged (i, j) twice, so the symmetry
                # assertion always passed trivially.
                avg_ji = np.average(p_node[:, j, i])
                self.assertEqual(avg_ij, avg_ji)
                # BUG FIX: the mean of a zero-mean signal may be negative;
                # bound its magnitude, not its signed value.
                self.assertLess(np.abs(avg_ij), tol_node)

        # check system-wide pressure
        for i in range(3):
            for j in range(i + 1, 3):
                avg_ij = np.average(p_global[:, i, j])
                avg_ji = np.average(p_global[:, j, i])
                self.assertEqual(avg_ij, avg_ji)
                self.assertLess(np.abs(avg_ij), tol_global)

        # Check that stress auto correlatin matches dynamic viscosity
        # eta = V/kT integral(stress acf)
        all_viscs = []
        for i in range(3):
            for j in range(i + 1, 3):

                # Calculate acf
                tmp = np.correlate(
                    p_global[:, i, j], p_global[:, i, j], mode="full")
                acf = tmp[len(tmp) // 2:] / steps

                # integrate first part numerically, fit exponential to tail
                t_max_fit = 50 * TAU
                ts = np.arange(0, t_max_fit, 2 * TAU)
                numeric_integral = np.trapz(acf[:len(ts)], dx=2 * TAU)

                # fit tail
                def f(x, a, b): return a * np.exp(-b * x)

                # BUG FIX: curve_fit expects (f, xdata, ydata) -- times
                # first, ACF values second; the original had them swapped.
                (a, b), _ = curve_fit(f, ts, acf[:len(ts)])
                # Analytic integral of a*exp(-b t) from ts[-1] to infinity.
                tail = f(ts[-1], a, b) / b

                integral = numeric_integral + tail

                measured_visc = integral * system.volume() / KT

                self.assertAlmostEqual(
                    measured_visc, VISC * DENS, delta=VISC * DENS * .15)
                all_viscs.append(measured_visc)

        # Check average over xy, xz and yz against tighter limit
        self.assertAlmostEqual(np.average(all_viscs),
                               VISC * DENS, delta=VISC * DENS * .07)
# DISABLE CPU TEST UNTIL #3804 IS SOLVED
# class TestLBPressureACFCPU(TestLBPressureACF, ut.TestCase):
#
# def setUp(self):
# self.lb_class = espressomd.lb.LBFluid
#
#
@utx.skipIfMissingGPU()
class TestLBPressureACFGPU(TestLBPressureACF, ut.TestCase):
    """GPU variant of the pressure-ACF test; skipped when no GPU is present."""

    def setUp(self):
        # Select the GPU LB implementation for the shared test body.
        self.lb_class = espressomd.lb.LBFluidGPU

# Run the test suite when executed as a script.
if __name__ == "__main__":
    ut.main()
|
KaiSzuttor/espresso
|
testsuite/python/lb_pressure_tensor_acf.py
|
Python
|
gpl-3.0
| 4,387
|
[
"ESPResSo"
] |
d9ea69d0309f2ab312ba3d579b141efbc334a6443ac45a959969f1f835e683fb
|
from __future__ import print_function, absolute_import
import os
import sys
import json
import shutil
import subprocess
import tempfile
import warnings
from setuptools import Extension
from distutils.dep_util import newer_group
from distutils.errors import DistutilsExecError, DistutilsSetupError
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler, get_config_vars
from setuptools.command.build_ext import build_ext as _build_ext
################################################################################
# Detection of compiler capabilities
################################################################################
class CompilerDetection(object):
    """Probe the active C compiler for OpenMP, SSE3, SSE4.1 and NEON support.

    Detection is lazy: construct the object cheaply, then call initialize()
    once before reading any of the `compiler_args_*` / `compiler_libraries_*`
    / `define_macros_*` attributes it populates.
    """
    # Necessary for OSX. See https://github.com/mdtraj/mdtraj/issues/576
    # The problem is that distutils.sysconfig.customize_compiler()
    # is necessary to properly invoke the correct compiler for this class
    # (otherwise the CC env variable isn't respected). Unfortunately,
    # distutils.sysconfig.customize_compiler() DIES on OSX unless some
    # appropriate initialization routines have been called. This line
    # has a side effect of calling those initialzation routes, and is therefor
    # necessary for OSX, even though we don't use the result.
    _DONT_REMOVE_ME = get_config_vars()

    def __init__(self, disable_openmp):
        # disable_openmp: when True, skip OpenMP probing entirely.
        self.disable_openmp = disable_openmp
        self._is_initialized = False

    def initialize(self):
        """Run all feature probes and populate the compiler-flag attributes.

        Idempotent: subsequent calls return immediately.
        """
        if self._is_initialized:
            return
        cc = new_compiler()
        customize_compiler(cc)
        self.msvc = cc.compiler_type == 'msvc'
        self._print_compiler_version(cc)
        if self.disable_openmp:
            self.openmp_enabled = False
        else:
            self.openmp_enabled, openmp_needs_gomp = self._detect_openmp()
        # MSVC is assumed to support SSE3/SSE4.1 and not NEON without probing.
        self.sse3_enabled = self._detect_sse3() if not self.msvc else True
        self.sse41_enabled = self._detect_sse41() if not self.msvc else True
        self.neon_enabled = self._detect_neon() if not self.msvc else False
        self.compiler_args_sse2 = ['-msse2'] if not self.msvc else ['/arch:SSE2']
        self.compiler_args_sse3 = ['-mssse3'] if (self.sse3_enabled and not self.msvc) else []
        self.compiler_args_neon = []
        self.compiler_args_warn = ['-Wno-unused-function', '-Wno-unreachable-code', '-Wno-sign-compare'] if not self.msvc else []
        # On ARM/NEON targets the x86 SSE flags would be rejected: drop them.
        if self.neon_enabled:
            self.compiler_args_sse2 = []
            self.compiler_args_sse3 = []
        self.compiler_args_sse41, self.define_macros_sse41 = [], []
        if self.sse41_enabled:
            self.define_macros_sse41 = [('__SSE4__', 1), ('__SSE4_1__', 1)]
            if not self.msvc:
                self.compiler_args_sse41 = ['-msse4']
        if self.openmp_enabled:
            self.compiler_libraries_openmp = []
            if self.msvc:
                self.compiler_args_openmp = ['/openmp']
            else:
                self.compiler_args_openmp = ['-fopenmp']
                # Some toolchains need an explicit link against libgomp.
                if openmp_needs_gomp:
                    self.compiler_libraries_openmp = ['gomp']
        else:
            self.compiler_libraries_openmp = []
            self.compiler_args_openmp = []
        if self.msvc:
            self.compiler_args_opt = ['/O2']
        else:
            self.compiler_args_opt = ['-O3', '-funroll-loops']
        print()
        self._is_initialized = True

    def _print_compiler_version(self, cc):
        """Best-effort print of the compiler's version banner; never raises."""
        print("C compiler:")
        try:
            if self.msvc:
                if not cc.initialized:
                    cc.initialize()
                cc.spawn([cc.cc])
            else:
                cc.spawn([cc.compiler[0]] + ['-v'])
        except DistutilsExecError:
            pass

    def hasfunction(self, funcname, include=None, libraries=None, extra_postargs=None):
        """Return True if a snippet calling *funcname* compiles and links.

        The probe is generated as a small script and run in a subprocess
        inside a temp directory, so compiler noise does not pollute our
        stdout/stderr and no artifacts are left behind.
        """
        # running in a separate subshell lets us prevent unwanted stdout/stderr
        part1 = '''
from __future__ import print_function
import os
import json
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler, get_config_vars
FUNCNAME = json.loads('%(funcname)s')
INCLUDE = json.loads('%(include)s')
LIBRARIES = json.loads('%(libraries)s')
EXTRA_POSTARGS = json.loads('%(extra_postargs)s')
''' % {
            'funcname': json.dumps(funcname),
            'include': json.dumps(include),
            'libraries': json.dumps(libraries or []),
            'extra_postargs': json.dumps(extra_postargs)}
        part2 = '''
get_config_vars() # DON'T REMOVE ME
cc = new_compiler()
customize_compiler(cc)
for library in LIBRARIES:
    cc.add_library(library)
status = 0
try:
    with open('func.c', 'w') as f:
        if INCLUDE is not None:
            f.write('#include %s\\n' % INCLUDE)
        f.write('int main(void) {\\n')
        f.write(' %s;\\n' % FUNCNAME)
        f.write('}\\n')
    objects = cc.compile(['func.c'], output_dir='.',
                         extra_postargs=EXTRA_POSTARGS)
    cc.link_executable(objects, 'a.out')
except Exception as e:
    status = 1
exit(status)
'''
        tmpdir = tempfile.mkdtemp(prefix='hasfunction-')
        try:
            curdir = os.path.abspath(os.curdir)
            os.chdir(tmpdir)
            with open('script.py', 'w') as f:
                f.write(part1 + part2)
            proc = subprocess.Popen(
                [sys.executable, 'script.py'],
                stderr=subprocess.PIPE, stdout=subprocess.PIPE)
            proc.communicate()
            status = proc.wait()
        finally:
            os.chdir(curdir)
            shutil.rmtree(tmpdir)
        # Exit status 0 means the snippet compiled and linked.
        return status == 0

    def _print_support_start(self, feature):
        print('Attempting to autodetect {0:6} support...'.format(feature), end=' ')

    def _print_support_end(self, feature, status):
        if status is True:
            print('Compiler supports {0}'.format(feature))
        else:
            print('Did not detect {0} support'.format(feature))

    def _detect_openmp(self):
        """Probe OpenMP; returns (supported, needs_explicit_libgomp)."""
        self._print_support_start('OpenMP')
        extra_postargs = ['/openmp'] if self.msvc else ['-fopenmp']
        args = dict(extra_postargs=extra_postargs, include='<omp.h>')
        hasopenmp = self.hasfunction('omp_get_num_threads()', **args)
        needs_gomp = False
        if not hasopenmp:
            # Retry with an explicit -lgomp; success implies we must link it.
            hasopenmp = self.hasfunction('omp_get_num_threads()', libraries=['gomp'], **args)
            needs_gomp = hasopenmp
        self._print_support_end('OpenMP', hasopenmp)
        return hasopenmp, needs_gomp

    def _detect_sse3(self):
        "Does this compiler support SSE3 intrinsics?"
        self._print_support_start('SSE3')
        result = self.hasfunction('__m128 v; _mm_hadd_ps(v,v)',
                                  include='<pmmintrin.h>',
                                  extra_postargs=['-msse3'])
        self._print_support_end('SSE3', result)
        return result

    def _detect_sse41(self):
        "Does this compiler support SSE4.1 intrinsics?"
        self._print_support_start('SSE4.1')
        result = self.hasfunction( '__m128 v; _mm_round_ps(v,0x00)',
                                  include='<smmintrin.h>',
                                  extra_postargs=['-msse4'])
        self._print_support_end('SSE4.1', result)
        return result

    def _detect_neon(self):
        """Does this compiler support NEON intrinsics (ARM64)
        """
        self._print_support_start("NEON")
        result = self.hasfunction("int16x4_t acc = vdup_n_s16(0);", include="<arm_neon.h>")
        self._print_support_end("NEON", result)
        return result
################################################################################
# Writing version control information to the module
################################################################################
def git_version():
    """Return the git revision of the current checkout as a string.

    Falls back to 'Unknown' when git cannot be executed.
    Copied from numpy's setup.py.
    """
    def _run_git(cmd):
        # Build a minimal environment: keep only what the subprocess needs
        # to locate git, and force the C locale so output is predictable.
        env = {}
        for name in ('SYSTEMROOT', 'PATH'):
            value = os.environ.get(name)
            if value is not None:
                env[name] = value
        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)
        return proc.communicate()[0]

    try:
        revision = _run_git(['git', 'rev-parse', 'HEAD']).strip().decode('ascii')
    except OSError:
        revision = 'Unknown'
    return revision
def write_version_py(version, isreleased, filename):
    """Write a generated version module to *filename*.

    Records version, short_version, full_version, git_revision and the
    release flag.  The git revision is read from the working tree when a
    '.git' directory exists; unreleased builds get '+<revision>' appended
    to full_version.
    """
    cnt = """
# This file is generated in setup.py at build time.
version = '{version}'
short_version = '{short_version}'
full_version = '{full_version}'
git_revision = '{git_revision}'
release = {release}
"""
    # git_revision
    git_revision = git_version() if os.path.exists('.git') else 'Unknown'

    # short_version, full_version
    short_version = version
    if isreleased:
        full_version = version
    else:
        full_version = ("{version}+{git_revision}"
                        .format(version=version, git_revision=git_revision))

    with open(filename, 'w') as f:
        f.write(cnt.format(version=version,
                           short_version=short_version,
                           full_version=full_version,
                           git_revision=git_revision,
                           release=isreleased))
class StaticLibrary(Extension):
    """An Extension that is built as a static library instead of a module.

    Recognized specially by the custom build_ext below.  Accepts one extra
    keyword, ``export_include``: header files to copy next to the built
    library.
    """
    def __init__(self, *args, **kwargs):
        # Pop our extra kwarg before delegating, since Extension rejects
        # unknown keywords.
        self.export_include = kwargs.pop('export_include', [])
        Extension.__init__(self, *args, **kwargs)
class build_ext(_build_ext):
    """build_ext that knows how to build StaticLibrary pseudo-extensions."""

    def build_extension(self, ext):
        # Route StaticLibrary instances to our static-archive path; everything
        # else is built by the stock implementation.
        if isinstance(ext, StaticLibrary):
            self.build_static_extension(ext)
        else:
            _build_ext.build_extension(self, ext)

    def copy_extensions_to_source(self):
        """Copy built extensions into the source tree (develop mode), but
        exclude StaticLibrary entries — those are copied separately by
        build_static_extension."""
        _extensions = self.extensions
        self.extensions = [e for e in _extensions if not isinstance(e, StaticLibrary)]
        _build_ext.copy_extensions_to_source(self)
        self.extensions = _extensions

    def build_static_extension(self, ext):
        """Compile *ext*'s sources and archive them into a static library,
        copying the result (and any export_include headers) into both the
        build directory and the source tree."""
        from distutils import log
        sources = ext.sources
        if sources is None or not isinstance(sources, (list, tuple)):
            raise DistutilsSetupError(
                ("in 'ext_modules' option (extension '%s'), " +
                 "'sources' must be present and must be " +
                 "a list of source filenames") % ext.name)
        sources = list(sources)
        ext_path = self.get_ext_fullpath(ext.name)
        depends = sources + ext.depends
        # Skip the build entirely when nothing is newer than the output.
        if not (self.force or newer_group(depends, ext_path, 'newer')):
            log.debug("skipping '%s' extension (up-to-date)", ext.name)
            return
        else:
            log.info("building '%s' extension", ext.name)
        extra_args = ext.extra_compile_args or []
        macros = ext.define_macros[:]
        for undef in ext.undef_macros:
            macros.append((undef,))
        objects = self.compiler.compile(sources,
                                        output_dir=self.build_temp,
                                        macros=macros,
                                        include_dirs=ext.include_dirs,
                                        debug=self.debug,
                                        extra_postargs=extra_args,
                                        depends=ext.depends)
        self._built_objects = objects[:]
        if ext.extra_objects:
            objects.extend(ext.extra_objects)
        extra_args = ext.extra_link_args or []
        language = ext.language or self.compiler.detect_language(sources)
        libname = os.path.basename(ext_path).split(os.extsep)[0]
        output_dir = os.path.dirname(ext_path)
        # Archivers that use the 'lib<name>' convention get the prefix
        # stripped so create_static_lib doesn't double it.
        if (self.compiler.static_lib_format.startswith('lib') and
            libname.startswith('lib')):
            libname = libname[3:]
        # 1. copy to build directory
        # 1. copy to src tree for develop mode
        import re
        # NOTE(review): assumes output_dir looks like 'build/.../mdtraj/...';
        # re.match returns None otherwise and this raises AttributeError —
        # TODO confirm against actual build layouts.
        src_tree_output_dir = re.match('build.*(mdtraj.*)', output_dir).group(1)
        if not os.path.exists(src_tree_output_dir):
            os.makedirs(src_tree_output_dir)
        if not os.path.exists(output_dir):
            # necessary for windows
            os.makedirs(output_dir)
        assert os.path.isdir(src_tree_output_dir)
        self.compiler.create_static_lib(objects,
                                        output_libname=libname,
                                        output_dir=output_dir,
                                        target_lang=language)
        lib_path = self.compiler.library_filename(libname, output_dir=output_dir)
        shutil.copy(lib_path, src_tree_output_dir)
        # Ship the exported headers next to the library in both locations.
        for item in ext.export_include:
            shutil.copy(item, src_tree_output_dir)
            shutil.copy(item, output_dir)

    def get_ext_filename(self, ext_name):
        """Return the output filename, substituting the platform's static
        library suffix (.lib / .a) for StaticLibrary entries."""
        filename = _build_ext.get_ext_filename(self, ext_name)
        try:
            exts = [e for e in self.extensions if ext_name in {e.name, e.name.split('.')[-1]}]
            ext = exts[0]
            if isinstance(ext, StaticLibrary):
                if new_compiler().compiler_type == 'msvc':
                    return filename.split('.')[0] + '.lib'
                else:
                    return filename.split('.')[0] + '.a'
        except Exception as e:
            # No matching extension found: fall back to the default name.
            pass
        return filename
def parse_setuppy_commands():
    """Check the commands and respond appropriately.

    Return a boolean value for whether or not to run the build or not (avoid
    parsing Cython and template files if False).

    Adopted from scipy setup
    """
    args = sys.argv[1:]
    if not args:
        # User forgot to give an argument probably, let setuptools handle that.
        return True

    # Pure-metadata queries never need a build.
    info_commands = {'--help-commands', '--name', '--version', '-V',
                     '--fullname', '--author', '--author-email',
                     '--maintainer', '--maintainer-email', '--contact',
                     '--contact-email', '--url', '--license', '--description',
                     '--long-description', '--platforms', '--classifiers',
                     '--keywords', '--provides', '--requires', '--obsoletes'}
    if any(arg in info_commands for arg in args):
        return False

    # Note that 'alias', 'saveopts' and 'setopt' commands also seem to work
    # fine as they are, but are usually used together with one of the commands
    # below and not standalone.  Hence they're not added to good_commands.
    good_commands = {'develop', 'sdist', 'build', 'build_ext', 'build_py',
                     'build_clib', 'build_scripts', 'bdist_wheel', 'bdist_rpm',
                     'bdist_wininst', 'bdist_msi', 'bdist_mpkg',
                     'build_sphinx'}
    if any(arg in good_commands for arg in args):
        return True

    # The following commands are supported, but we need to show more
    # useful messages to the user
    if 'install' in args:
        return True
    if '--help' in args or '-h' in sys.argv[1]:
        return False

    # Commands that do more than print info, but also don't need Cython and
    # template parsing.
    if any(arg in ('egg_info', 'install_egg_info', 'rotate') for arg in args):
        return False

    # If we got here, we didn't detect what setup.py command was given
    warnings.warn("Unrecognized setuptools command ('{}'), proceeding with "
                  "generating Cython sources and expanding templates".format(
                      ' '.join(sys.argv[1:])))
    return True
|
gph82/mdtraj
|
basesetup.py
|
Python
|
lgpl-2.1
| 15,842
|
[
"MDTraj"
] |
45f005df053aa3b9ff1f215e8c96497e6d3b30403a77efdb46660ef3d2b55397
|
#!/usr/bin/python
"""
Classes to read and process MedLine XML record files, for use in processing modules.
They can handle the XML format used by PubMed and MedLine services, for example as returned by eutils online services
>>> xml_records = urllib.urlopen('http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id='+list_of_pmids+'&retmode=xml').read()
When used, an object is created as a global repository, from which records (also objects) can be queried and extracted. These record-objects have properties like title, authors, abstracts that return their string values.
Somewhat long loading times can be shortened later by serializing objects using cPickle module
USAGE:
>>> from BioReader import *
>>> data = DataContainer('AllAbstracts.xml','pubmed')
>>> data.howmany # len(data.dictRecords.keys())
>>> data.keys # data.dictRecords.keys()
>>> record = data.Read('7024555')
>>> record.title
u'The birA gene of Escherichia coli encodes a biotin holoenzyme synthetase.'
record +
- B{.title}
- B{.pmid}
- B{.Abs} I{(abstracts)}
- B{.year}
- B{.journal}
- B{.auth} I{(list of authors)}
- B{.m} I{(list of MeSH keywords, descriptors and qualifiers)}
- B{.MD} I{(MesH Descriptors)}
- B{.MQ} I{(MesH Qualifiers, if any)}
- B{.MDMay} I{(list of Mayor MesH Descriptors, if any)}
- B{.MQMay} I{(list of Mayor MesH Qualifiers, if any)}
- B{.paper} I{(full text flat file if exists in user-defined repository
The Search method inside the DataContainer class is not working well, and should be rewritten using better XML techniques and methods
A class (CreateXML) has been added recently to create the pubmed XML file from a list of PubMed ids. Has not been fully integrated with the data container class
Another class should be able to query keywords directly to pubmed, to either get the pubmed ids or the xml directly, using either BioPython's PubMed modules or directly through Eutil's facilities
"""
#__docformat__ = 'epytext en'
# General info
__version__ = '5.0'
__author__ = 'Carlos Rodriguez'
__url__ = 'http://www.cnio.es'
__license__ = 'GNU'
import os
import re
import string
import time
import urllib

from xml.dom.minidom import parseString
class BioReader:
    """
    Class BioReader for BioMedical files
    """
    def __init__(self, string, path=None):
        """
        Initialize class with XML string and returns record data and body of text objects.
        >>> single_record = BioReader(record)
        >>> single_record.title
        u'The birA gene of Escherichia coli encodes a biotin holoenzyme synthetase.'
        >>> single_record.pmid
        u'7024555'
        single_record +
            - B{.title}
            - B{.pmid}
            - B{.Abs} I{(abstracts)}
            - B{.year}
            - B{.journal}
            - B{.auth} I{(list of authors)}
            - B{.m} I{(list of MeSH keywords, descriptors and qualifiers)}
            - B{.MD} I{(MesH Descriptors)}
            - B{.MQ} I{(MesH Qualifiers, if any)}
            - B{.MDMay} I{(list of Mayor MesH Descriptors, if any)}
            - B{.MQMay} I{(list of Mayor MesH Qualifiers, if any)}
            - B{.paper} I{(full text flat file if exists in user-defined repository [see notes below])}
        If we use a repository with full text papers (with pmid+<pmidnumber>+txt format),
        we can use the following, after specifying it in the Data Container we instantiated:
        >>> data.Repository("/repositorio/Regulontxt/")
        >>> record = data.dictRecords['9209026']
        >>> single_record = BioReader(record,data.repository)# or directly inputing path, if it was not done\\
        through the DataContainer class: single_record = BioReader(record,'/path/to/repository/')
        >>> single_record.paper
        'Aerobic Regulation of the sucABCD Genes of Escherichia coli,Which Encode \xef\xbf\xbd-Ketoglutarate Dehydrogenase andSuccinyl Coenzyme A Synthetase: Roles of ArcA,Fnr, and the Upstream sdhCDAB Promoter\n.....'
        """
        # Regex used throughout to strip XML tags from serialized nodes.
        self.tags = re.compile("<.*?>")
        self.parsed = parseString(string)
        self.document = self.parsed.documentElement
        self.pmid = self.document.getElementsByTagName("PMID")[0].firstChild.data
        self.year = self.document.getElementsByTagName("DateCreated")[0].getElementsByTagName("Year")[0].firstChild.data
        self.journal = self.document.getElementsByTagName("MedlineJournalInfo")[0].getElementsByTagName("MedlineTA")[0].firstChild.data
        self.testAbs = self.document.getElementsByTagName("Abstract")
        # Full text is only looked up when a repository path is supplied.
        if path != None:
            self.path = path
            self.paper = self.GetFullPaper()
        else:
            self.path = None
            self.paper = None
        # NOTE(review): self.year is re-derived here, preferring PubDate over
        # the DateCreated value assigned above.
        try:
            self.year = self.document.getElementsByTagName("PubDate")[0].getElementsByTagName("Year")[0].firstChild.data
        except IndexError:
            self.year = self.document.getElementsByTagName("DateCreated")[0].getElementsByTagName("Year")[0].firstChild.data
        try:
            self.Abs = self.document.getElementsByTagName("Abstract")[0].getElementsByTagName("AbstractText")[0].firstChild.data
        except IndexError:
            # Records without an abstract get a placeholder.
            self.Abs = "n/a"
        self.title = self.document.getElementsByTagName("ArticleTitle")[0].firstChild.data
        try:
            self.authorsList = self.document.getElementsByTagName("AuthorList")[0].getElementsByTagName("Author")
            self.Lista = [self.authorize(y.childNodes) for y in self.authorsList]
            s = ""
            for x in self.Lista:
                s = s + x + "\n"
            self.auth = s
        except AttributeError:
            self.auth = " "
        except IndexError:
            self.auth = " "
        try:
            self.meshes = self.document.getElementsByTagName("MeshHeadingList")[0].getElementsByTagName("MeshHeading")
            self.ListaMs = [self.Meshes(z.childNodes) for z in self.meshes]
            self.MD = []
            self.MQ = []
            self.MDMay = []
            self.MQMay = []
            # Accumulate descriptor/qualifier lists across all MeshHeading
            # nodes (prepending, so order is reversed w.r.t. the document).
            for z in self.meshes:
                MD,MQ,MDMay,MQMay = self.MeshKeys(z)
                self.MD = MD + self.MD
                self.MQ = MQ + self.MQ
                self.MDMay = MDMay + self.MDMay
                self.MQMay = MQMay + self.MQMay
            self.m = ""
            for x in self.ListaMs:
                self.m = x+" \n "+self.m
            #self.p = None
        except IndexError:
            # No MeshHeadingList in this record.
            self.m = "n/a"
            self.meshes = "n/a"
            self.MQ = None
            self.MD = None
            self.MDMay = None
            self.MQMay = None
            #self.p = None
        #from DataContainer import repository
        #self.authors = string.join( self.Lista )#[self.authorize(x)+"\n" for x in self.Lista]
    def __repr__(self):
        return "<BioReader record instance: pmid: "+self.pmid+" title: "+self.title+" abstract: "+self.Abs+">"
    def authorize(self, node):
        """Flatten an Author node list into a plain, tag-free string.

        NOTE(review): identical implementation to Meshes() below.
        """
        s = ""
        for z in node:
            f = z.toxml()
            f = re.sub(self.tags,"",f)
            f = re.sub("\n","",f)
            f = re.sub("\t"," ",f)
            f = re.sub(" ","",f)
            s = s + f+" "
        return s
    def Meshes(self, node):
        """Flatten a MeshHeading node list into a plain, tag-free string."""
        s = ""
        for z in node:
            f = z.toxml()
            f = re.sub(self.tags,"",f)
            f = re.sub("\n","",f)
            f = re.sub("\t"," ",f)
            f = re.sub(" ","",f)
            s = s + f+" "
        return s
    def MeshKeys(self,node):
        """
        Create sets of MesH Keywords, separating qualifiers and descriptors, as well as //
        MajorTopics for each one. returns Lists.
        """
        listDescriptors = node.getElementsByTagName("DescriptorName")
        listQualifiers = node.getElementsByTagName("QualifierName")
        MD = [x.firstChild.data for x in listDescriptors]
        MQ = [x.firstChild.data for x in listQualifiers]
        MQMay = [q.firstChild.data for q in listQualifiers if (q.getAttribute("MajorTopicYN") == "Y")]
        MDMay = [q.firstChild.data for q in listDescriptors if (q.getAttribute("MajorTopicYN") == "Y")]
        return MD,MQ,MDMay,MQMay
    def GetFullPaper(self):
        """
        Gets the full paper from the path of an (optional) repository.
        The full papers must have the following format:
        pmid+<pmidnumber>+.txt (last extension optional)
        """
        pmidList = os.listdir(self.path)
        # Detect the repository's naming convention from the first entry:
        # either 'pmid<NNN>.txt' or bare 'pmid<NNN>'.
        if pmidList[0][-4:] == '.txt':
            pmidList = [x[4:-4] for x in pmidList]
            formato = 1
        else:
            pmidList = [x[4:] for x in pmidList]
            formato = None
        if self.pmid in pmidList:
            if formato:
                self.paper = open(self.path+"pmid"+self.pmid+".txt").read()
                return self.paper
            else:
                self.paper = open(self.path+"pmid"+self.pmid).read()
                return self.paper
        else:
            # Not in the repository: record and (implicitly) return None.
            self.paper = None
class DataContainer:
"""
Data container for Pubmed and Medline XML files.
The instance creates a dictionary object (dictRecords) of PMIDs,
referenced to string of record, which BioReader class can parse.
The method C{Read} creates a queryable object for each record assoicated with a PMID:
>>> from BioReader import *
>>> data = DataContainer('AllAbs.xml','pubmed')
>>> data.dictRecords.keys()[23]
>>> u'7024555'
>>> data.howmany
>>> 14350
1) Method One
>>> record = data.Read('7024555')
>>> record.title
u'The birA gene of Escherichia coli encodes a biotin holoenzyme synthetase.'
record +
- B{.title}
- B{.pmid}
- B{.Abs} I{(abstracts)}
- B{.year}
- B{.journal}
- B{.auth} I{(list of authors)}
- B{.m} I{(list of MeSH keywords, descriptors and qualifiers)}
- B{.MD} I{(MesH Descriptors)}
- B{.MQ} I{(MesH Qualifiers, if any)}
- B{.MDMay} I{(list of Mayor MesH Descriptors, if any)}
- B{.MQMay} I{(list of Mayor MesH Qualifiers, if any)}
- B{.paper} I{(full text flat file if exists in user-defined repository [see notes below])}
If we use a repository with full text papers
(with pmid+<pmidnumber>+txt format (extension optional),
we can use the following, after specifying it in the DataContainer we instantiated:
>>> data.Repository("/repositorio/Regulontxt/")
>>> record.paper
'Aerobic Regulation of the sucABCD Genes of Escherichia coli, Which Encode \xef\xbf\xbd-Ketoglutarate Dehydrogenase andSuccinyl Coenzyme A Synthetase: Roles of ArcA,Fnr, and the Upstream sdhCDAB Promoter\n.....
2) Method two
>>> record = data.dictRecords['7024555']
>>> single_record = BioReader(record)
>>> single_record.title
>>> u'The birA gene of Escherichia coli encodes a biotin holoenzyme synthetase.' etc ...
(See L{BioReader})
"""
def __init__(self,file,format="medline"):
"""
Initializes class and returns record data and body of text objects
"""
import time
tinicial = time.time()
self.file = file
whole = open(self.file).read()
if format.lower() == "medline":
self.rerecord = re.compile(r'\<MedlineCitation Owner="NLM" Status="MEDLINE"\>'r'(?P<record>.+?)'r'\</MedlineCitation\>',re.DOTALL)
elif format.lower() == "pubmed":
self.rerecord = re.compile(r'\<PubmedArticle\>'r'(?P<record>.+?)'r'\</PubmedArticle\>',re.DOTALL)
else:
print "Unrecognized format"
self.RecordsList = re.findall(self.rerecord,whole)
whole = ""
self.RecordsList = ["<PubmedArticle>"+x.rstrip()+"</PubmedArticle>" for x in self.RecordsList]
self.dictRecords = self.Createdict()
self.RecordsList = []
self.howmany = len(self.dictRecords.keys())
self.keys = self.dictRecords.keys()
tfinal = time.time()
self.repository = None
print "finished loading at ",time.ctime(tfinal)
print "loaded in", tfinal-tinicial," seconds, or",((tfinal-tinicial)/60)," minutes"
def __repr__(self):
return "<BioReader Data Container Instance: source filename: "+self.file+" \nnumber of files: "+str(self.howmany)+">"
def Repository(self,repository):
"""
Establish path to a full text repository, in case you want to use that variable in the BioReader
"""
self.repository = repository
return self.repository
def Createdict(self):
"""
Creates a dictionary with pmid number indexing record xml string
"""
i = 0
dictRecords = {}
for p in self.RecordsList:
r = BioReader(p)
dictRecords[r.pmid] = self.RecordsList[i]
i += 1
return dictRecords
def Read(self,pmid):
if self.repository:
self.record = BioReader(self.dictRecords[pmid],self.repository)
else:
self.record = BioReader(self.dictRecords[pmid])
return self.record
def Search(self,cadena,where=None):
"""
This method is not working. Needs to be redone to comply with more up-to-date XML search methods
Searches for "cadena" string inside the selected field, and returns a list of pmid where it was found.
If not "where" field is provided, will search in all of the record.
You can search in the following fields:
- title
- year
- journal
- auth or authors
- 'abs' or 'Abs' or 'abstract'
- paper or "full" (if full-text repository has been defined)
- pmid
With defined field search is very slow but much more accurate. See for comparison:
>>> buscados = data.Search("Richard")
Searched in 0.110424995422 seconds, or 0.00184041659037 minutes
Found a total of 75 hits for your query, in all fields
>>> buscados = data.Search("Richard","auth")
Searched in 66.342936039 seconds, or 1.10571560065 minutes
Found a total of 75 hits for your query, in the auth field
"""
tinicial = time.time()
resultlist = []
if where:
for cadapmid in self.dictRecords.keys():
d = self.Read(cadapmid)
if where == 'title':
tosearch = d.title
elif where == 'year':
tosearch = d.year
elif where == 'journal':
tosearch = d.journal
elif where == ('auth' or 'authors'):
tosearch = d.auth
elif where == ('m' or 'mesh'):
tosearch = d.m
elif where == ('abs' or 'Abs' or 'abstract'):
tosearch = d.Abs
elif where == ('paper' or 'full'):
tosearch = d.paper
if self.repository:
pass
else:
print "No full text repository has been defined...."
return None
elif where == 'pmid':
tosearch = d.pmid
hit = re.search(cadena,tosearch)
if hit:
resultlist.append(d.pmid)
else:
pass
if len(resultlist)!= 0:
tfinal = time.time()
print "Searched in", tfinal-tinicial," seconds, or",((tfinal-tinicial)/60)," minutes"
print "Found a total of ",str(len(resultlist))," hits for your query, in the ",where," field"
return resultlist
else:
print "Searched in", tfinal-tinicial," seconds, or",((tfinal-tinicial)/60)," minutes"
print "Query not found"
return None
else:
tosearch = ''
for cadapmid in self.dictRecords.keys():
tosearch = self.dictRecords[cadapmid]
hit = re.search(cadena,tosearch)
if hit:
resultlist.append(cadapmid)
else:
pass
if len(resultlist)!= 0:
tfinal = time.time()
print "Searched in", tfinal-tinicial," seconds, or",((tfinal-tinicial)/60)," minutes"
print "Found a total of ",str(len(resultlist))," hits for your query, in all fields"
return resultlist
else:
tfinal = time.time()
print "Searched in", tfinal-tinicial," seconds, or",((tfinal-tinicial)/60)," minutes"
print "Query not found"
return None
class CreateXML:
"""
Class to generate PubMed XMLs from a list of ids (one per line), to use with BioRea.
downloads in 100 batch.
Usage:
outputfile = "NuevosPDFRegulon.xml"
inputfile = "/home/crodrigp/listaNuevos.txt"
>>> XMLCreator = CreateXML()
>>> XMLCreator.GenerateFile(inputfile,outputfile)
>>> parseableString = XMLCreator.Generate2String(inputfile)
or
>>> XMLString = XMLCreator.Generate2String()
"""
def __init__(self):
#global urllib,time,string,random
import urllib,time,string,random
def getXml(self,s):
pedir = urllib.urlopen("http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id="+s+"&retmode=xml")
stringxml = pedir.read()
self.salida.write(stringxml[:-20]+"\n")
def getXmlString(self,s):
pedir = urllib.urlopen("http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id="+s+"&retmode=xml")
stringxml = pedir.read()
return stringxml[:-20]+"\n"
def listastring(self,list):
suso = string.join(list,",")
return suso
def GenerateFile(self,inputfile,outputfile):
self.outputfile = outputfile
self.inputfile = inputfile
self.salida = open(self.outputfile,"w")
self.listaR = open(self.inputfile).readlines()
self.listafin = [x.rstrip() for x in self.listaR]
self.listacorr = []
while self.listafin != []:
if len(self.listafin) < 100:
cientos = self.listafin[:]
#self.listafin = []
else:
cientos = self.listafin[:100]
print "new length self.listacorr", len(self.listafin)
if len(self.listafin) <= 0:
break
else:
#time.sleep(120)
nueva = self.listastring(cientos)
self.getXml(nueva)
for c in cientos:
print c
self.listafin.remove(c)
self.salida.close()
def Generate2String(self,inputfile):
self.inputfile = inputfile
self.listaR = open(self.inputfile).readlines()
self.AllXML = ''
self.listafin = [x.rstrip() for x in self.listaR]
self.listacorr = []
while self.listafin != []:
if len(self.listafin) < 100:
cientos = self.listafin[:]
#self.listafin = []
else:
cientos = self.listafin[:100]
print "new length self.listacorr", len(self.listafin)
if len(self.listafin) <= 0:
break
else:
time.sleep(120)
nueva = self.listastring(cientos)
newX = self.getXmlString(nueva)
self.AllXML = self.AllXML + newX
for c in cientos:
print c
self.listafin.remove(c)
return self.AllXML
|
hectormartinez/rougexstem
|
taln2016/icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk_contrib/bioreader/bioreader.py
|
Python
|
apache-2.0
| 20,328
|
[
"Biopython"
] |
557d06b35c734a239a91fc4718e7e3705208fe1479a63d2a0cbb8cfb18d89093
|
from pypuf.simulation.arbiter_based.ltfarray import LTFArray
from numpy.random import RandomState
from pypuf.learner.base import Learner
from pypuf.learner.liu.partition import getChallenge
from pypuf.learner.liu.chebyshev import findCenter
from pypuf.learner.liu.chebyshev2 import findCenter2
from pypuf.learner.liu.simplex import AdjustedSimplexAlg
from numpy import full, double, ones, sum, sign, sqrt, minimum
from pypuf import tools
from sys import stderr
class PolytopeAlgorithm(Learner):
    # Learner that recovers LTF-array (arbiter PUF) weights geometrically:
    # each known challenge/response pair constrains the weight vector to a
    # half-space, the current guess is the (Chebyshev) center of the
    # resulting polytope, and each iteration queries the challenge whose
    # hyperplane passes closest to that center.

    def __init__(self,orig_LTFArray, t_set, n, k, transformation=LTFArray.transform_id, combiner=LTFArray.combiner_xor, weights_mu=0, weights_sigma=1, weights_prng=RandomState()):
        """
        Initialize a LTF Array Polytope Learner for the specified LTF Array.

        :param orig_LTFArray: the simulated LTF array used as the response
            oracle during learning
        :param t_set: The training set, i.e. a data structure containing challenge response pairs
        :param n: Input length
        :param k: Number of parallel LTFs in the LTF Array
        :param transformation: Input transformation used by the LTF Array
        :param combiner: Combiner Function used by the LTF Array (Note that not all combiner functions are supported by this class.)
        :param weights_mu: mean of the Gaussian that is used to choose the initial model
        :param weights_sigma: standard deviation of the Gaussian that is used to choose the initial model
        :param weights_prng: PRNG to draw the initial model from. Defaults to fresh `numpy.random.RandomState` instance.
            NOTE(review): as a default argument this RandomState is created
            once at import time and shared by all calls that do not pass
            their own PRNG.
        """
        self.orig_LTFArray=orig_LTFArray
        self.iteration_count = 0
        self.__training_set = t_set
        self.n = n
        self.k = k
        self.weights_mu = weights_mu
        self.weights_sigma = weights_sigma
        self.weights_prng = weights_prng
        self.iteration_limit = 10000                # hard cap on learning iterations
        self.convergence_decimals = 3
        self.sign_combined_model_responses = None
        self.sigmoid_derivative = full(self.training_set.N, None, double)
        self.min_distance = 1                       # best approximation distance observed so far
        self.transformation = transformation
        self.combiner = combiner
        self.transformed_challenges = self.transformation(self.training_set.challenges, k)

        assert self.n == len(self.training_set.challenges[0])

    @property
    def training_set(self):
        # The training set handed in at construction time.
        return self.__training_set

    @training_set.setter
    def training_set(self, val):
        self.__training_set = val

    def learn(self):
        """
        Run the polytope learning loop and return a model LTFArray.

        Stops as soon as the approximated distance to the original array
        drops below 1% or the iteration limit is reached.
        """
        model = LTFArray(
            weight_array=LTFArray.normal_weights(self.n, self.k, self.weights_mu, self.weights_sigma, self.weights_prng),
            transform=self.transformation,
            combiner=self.combiner,
        )
        self.iteration_count = 0
        # Seed the constraint set with the all-ones challenge and its true
        # response.  NOTE(review): `weight_array*challenges` relies on numpy
        # broadcasting over the one-element list; presumably written for
        # k == 1 -- confirm behavior for k > 1.
        challenges=[]
        responses=[]
        challenges.append(ones(self.n))
        responses.append(self.__signum(sum(self.orig_LTFArray.weight_array*challenges)))
        while self.iteration_count < self.iteration_limit:
            self.__updateModel(model)
            stderr.write('\riter %5i \n' % (self.iteration_count))
            self.iteration_count += 1
            # current guess: center (and inradius) of the constraint polytope
            [center,radius] = self.__chebyshev_center(challenges, responses)
            stderr.write("radius ")
            stderr.write("%f\n"%radius)
            stderr.write("distance ")
            model.weight_array=[center]
            # approximate distance between original and model on up to 10000
            # random challenges (see pypuf.tools.approx_dist)
            distance = tools.approx_dist(self.orig_LTFArray, model, min(10000, 2 ** model.n))
            self.min_distance = min(distance, self.min_distance)
            if (distance < 0.01):
                break
            minAccuracy=abs(radius*sqrt(model.n))
            stderr.write("%f\n"%distance)
            # query the challenge closest to the current center and add the
            # oracle's answer as a new constraint
            newC=self.__closest_challenge(center, minAccuracy);
            challenges.append(newC)
            responses.append(self.__signum(sum(newC*self.orig_LTFArray.weight_array)))
        return model

    def __chebyshev_center(self,challenges,responses):
        # Delegate to the LP-based center solver.  Alternative solvers
        # (AdjustedSimplexAlg, findCenter2) were present as commented-out
        # experiments; see version control history.
        [cNormal,rNormal] =findCenter(challenges,responses)
        return [cNormal,rNormal]

    def __closest_challenge(self, center, minAccuracy):
        # Search (via the partition helper) for the challenge vector closest
        # to the real-valued center, within the given accuracy.
        return getChallenge(center, minAccuracy)

    def __signum(self,x):
        # Like numpy.sign but maps 0 to -1, so responses are always +-1.
        if sign(x)!=0:
            return sign(x)
        else:
            return -1

    def __updateModel(self,model):
        # Evaluate the current model on the training challenges and cache the
        # signs of its combined responses.
        model_responses = model.ltf_eval(self.transformed_challenges)
        combined_model_responses = self.combiner(model_responses)
        self.sign_combined_model_responses = sign(combined_model_responses)
        # clip the response magnitude -- presumably to keep a later
        # sigmoid-style computation numerically stable; confirm downstream use
        MAX_RESPONSE_ABS_VALUE = 50
        combined_model_responses = sign(combined_model_responses) * \
            minimum(
                full(len(combined_model_responses), MAX_RESPONSE_ABS_VALUE, double),
                abs(combined_model_responses)
            )
|
nicostephan/pypuf
|
pypuf/learner/liu/polytope_algorithm.py
|
Python
|
gpl-3.0
| 5,502
|
[
"Gaussian"
] |
1e6dccf66152fca0e1c7fe05be32e79df2e8e02a96d659d95134b06a0cb7c3b3
|
# -*- coding: utf-8 -*-
"""
This is a deprecated backend. :mod:`~xrt.backends.raycing` is much more
functional. Module :mod:`~xrt.backends.shadow` works with shadow input files,
starts the ray-tracing and gets its output.
.. warning::
Shadow3 is not supported.
.. warning::
xrtQook and xrtGlow do not work with this backend. A beamline created in
Shadow cannot be visualized by xrtGlow.
Description of shadow
---------------------
... can be found in the manual pages of your shadow distribution. In connection
with using shadow in xrt, it is important to understand the naming of
the output beam files and the naming of parameters in the setup files.
Preparation for a shadow run
----------------------------
.. note:: on shadow under Windows Vista and 7:
Under Windows Vista and 7 shadow does not work out of the box because of
``epath`` (a part of shadow) reporting an error. There is a workaround
consisting of simply stopping the Windows’ Error Reporting Service.
Create a folder where you will store your ray-tracing script and the output
images. Make it as Python's working directory. Create there a sub-folder
``tmp0``. Put there your shadow project file along with all necessary data
files (reflectivities, crystal parameters etc.). Run shadow and make sure it
correctly produces output files you want to accumulate (like ``star.01``).
Now you need to generate two command files that run shadow source and shadow
trace. These are system-specific and also differ for different shadow sources.
Under Windows, this can be done as follows: set the working directory of
shadowVUI as your ``tmp0``, run Source in shadowVUI and rename the produced
``shadowvui.bat`` to ``shadow-source.bat``; then run Trace and rename the
produced ``shadowvui.bat`` to ``shadow-trace.bat``.
Try to run the generated command files in order to check their validity.
If you want to use multi-threading then copy ``tmp0`` to ``tmp1``, ``tmp2``
etc. (in total as many directories as you have threads).
.. _scriptingShadow:
Scripting in python
-------------------
The simplest script consists of 4 lines::
import xrt.runner as xrtr
import xrt.plotter as xrtp
plot1 = xrtp.XYCPlot('star.01')
xrtr.run_ray_tracing(plot1, repeats=40, updateEvery=2, threads=1)
"""
__author__ = "Konstantin Klementiev, Roman Chernikov"
__date__ = "10 Apr 2015"
import os
import time
import numpy as np
import subprocess
# import psyco #!!!psyco speeds it up!!!
# psyco.full()
_sourceAsciiFile = 'start.00'
# _DEBUG = True
def read_input(fileName, vtype, *getlines):
    """
    reads a shadow text input file (like ``start.NN``) which consists of lines
    ``field = value``.

    Parameters:
        *fileName*: str

        *vtype*: {int|str|float}
            Type of the returned value.

        *getlines*: list of strings
            Names of the fields to read (case-insensitive).

    Returns:
        *results*: list
            a list of values which correspond to the list *getlines* if
            successful, otherwise -1.

    Raises:
        Exception if none of the requested fields is present.

    Example:
        >>> fPolar = read_input('start.00', int, 'f_polar')[0]
    """
    lines = []
    try:
        # 'rU' mode was removed in Python 3.11+; universal newlines are the
        # default in text mode, so plain 'r' is equivalent.  The context
        # manager replaces the manual try/finally close.
        with open(fileName, 'r') as f:
            for line in f:
                split = line.split()
                if split:  # skip blank lines, which would break indexing below
                    lines.append(split)
    except IOError:
        print("The file ", fileName, " does not exist or corrupt!")
        return -1
    results = []
    for el in getlines:
        for line in lines:
            if line[0].lower() == el.lower():
                # a line looks like "field = value": token 2 is the value
                results.append(vtype(line[2]))
                break
    if len(results) == 0:
        raise Exception(
            "The parameter(s) %s cannot be found in %s" % (getlines, fileName))
    return results
def modify_input(fileNameList, *editlines):
    """
    modifies a shadow text input file (like ``start.NN``) which consists of
    lines ``field = value``.

    Parameters:
        *fileNameList*: str or list of str
            A list of file names is useful when executing shadow in parallel in
            several directories.

        *editlines*: list of tuples of strings (field, value).

    Returns:
        0 if successful, otherwise -1.

    Example:
        >>> modify_input('start.00',('istar1',str(seed))) #change seed
    """
    if isinstance(fileNameList, str):
        locFileNameList = [fileNameList, ]
    elif isinstance(fileNameList, (tuple, list)):
        locFileNameList = fileNameList
    else:
        print(type(fileNameList))
        print("Wrong fileNameList parameter!")
        return -1
    for fileName in locFileNameList:
        lines = []
        try:
            # 'rU' mode was removed in Python 3.11+; plain text mode already
            # gives universal newlines.  Context managers replace the manual
            # try/finally closes.
            with open(fileName, 'r') as f:
                for line in f:
                    lines.append(line.split())
        except IOError:
            print("The file ", fileName, " does not exist or corrupt!")
            return -1
        for el in editlines:
            for line in lines:
                # guard against blank lines (empty token lists)
                if line and line[0].lower() == el[0].lower():
                    line[2] = el[1]
                    if len(line) > 2:  # otherwise trapped by "none specified"
                        del line[3:]
                    break
        with open(fileName, 'w') as f:
            for line in lines:
                f.write(' '.join(line) + '\n')
    return 0
def modify_xsh_input(fileNameList, *editlines):
    """
    modifies a shadow xsh text input file (like ``xsh_nphoton_tmp.inp``) which
    consist of lines of values, one value per line.

    Parameters:
        *fileNameList*: str or list of str
            A list of file names is useful when executing shadow in parallel in
            several directories.

        *editlines*: list of tuples (*fieldNo*: int, *value*: str)
            *fieldNo* is zero-based index of the modified parameter.

    Returns:
        0 if successful, otherwise -1.

    Example:
        >>> modify_xsh_input('xsh_nphoton_tmp.inp', (2, energyRange[0]),
            (3, energyRange[1]))
    """
    if isinstance(fileNameList, str):
        locFileNameList = [fileNameList, ]
    elif isinstance(fileNameList, (tuple, list)):
        locFileNameList = fileNameList
    else:
        print("Wrong fileNameList parameter!")
        return -1
    for fileName in locFileNameList:
        lines = []
        tryAgain = True
        f = None
        while tryAgain:
            # several attempts instead of file locking, which is
            # easier because is not system dependent.
            # NOTE(review): `lines` is initialized outside this retry loop,
            # so after a `continue` the re-read appends the file contents a
            # second time -- verify whether a retry can be reached with a
            # non-empty `lines`.
            try:
                # NOTE(review): 'rU' mode was removed in Python 3.11+
                f = open(fileName, 'rU')
                for line in f:
                    lines.append(line)
            except IOError:
                print("The file ", fileName, " does not exist or corrupt!")
                return -1
            finally:
                if f:
                    f.close()
            try:
                # overwrite the requested zero-based line numbers
                for el in editlines:
                    lines[el[0]] = str(el[1]) + '\n'
            except:
                # e.g. IndexError when the file was read truncated; retry
                time.sleep(0.1)
                continue
            f = None
            try:
                f = open(fileName, 'w')
                for line in lines:
                    f.write(line)
            except:
                time.sleep(0.1)
                continue
            finally:
                if f:
                    f.close()
            f = None
            try:
                # verify the write produced a readable, non-empty file
                f = open(fileName, 'rU')
                fsize = os.path.getsize(fileName)
                if fsize <= 0:
                    raise IOError()
            except IOError:
                time.sleep(0.1)
                continue
            else:
                tryAgain = False
            finally:
                if f:
                    f.close()
    return 0
def read_bin_file(binFileName, _f_polar, _blockNRays, lostRayFlag=1):
    """
    Reads a binary shadow output file (like ``star.NN``, ``mirr.NN`` or
    ``screen.NNMM``).

    Parameters:
        *binFileName*: str

        *f_polar*: int
            determines the number of columns stored in binFileName.

        *_blockNRays*: int
            expected number of rays per block; a mismatch triggers a re-read
            (the file may still be in the process of being written).

        *lostRayFlag*: 1=only good, 0=only lost, 2=all rays

    Returns:
        *d*: ndarray, shape(*NumberOfRays*, *NumberOfColumns*)
            *NumberOfRays* is determined by *lostRayFlag* field.
            *NumberOfColumns* is determined by the field *f_polar* in the
            source input file.

        *intensity*: ndarray, shape(*NumberOfRays*,)
            The array of intensity for each ray.
    """
    # number of stored columns per ray depends on the polarization flag
    if _f_polar == 1:
        shadowColums = 19
    else:
        shadowColums = 14
    tryAgain = True
    while tryAgain:
        # several attempts instead of file locking, which is
        # easier because is not system dependent.
        f = None
        try:
            f = open(binFileName, 'rb')
            # header =
            np.fromfile(f, dtype=np.uint32, count=6)
            # shadow writes rays in a weird way (see putrays.pro).
            # It writes a dummy structure tmp = byte([12,0,0,0]) twice in
            # every write operator (why?):
            # writeu,Unit,tmp,a.ncol,a.npoint,0L,tmp ;write the header information
            # writeu,Unit,tmp,ray,tmp
            # The array of rays (25000x14) or (25000x19) appears to start at the offset
            # of 24 bytes but the file size is 4 bytes less (why?).
            # Therefore I add one zero field at the end in order to be able to reshape.
            d = np.fromfile(f, dtype=np.float64)
        except IOError:
            print("The file ", binFileName, " does not exist or corrupt!")
            return -1
        finally:
            if f:
                f.close()
        try:
            # pad with one zero (see comment above), reshape to rays-x-columns
            # and retry if the file does not yet hold a whole block
            d = np.concatenate((d, [0, ]))
            d = d.reshape(-1, shadowColums)
            locNrays = d.shape[0]
            if locNrays != _blockNRays:
                raise ValueError()
        except ValueError:
            time.sleep(0.1)
            continue
        else:
            tryAgain = False
    # column 9 is the good/lost flag: 1 = good, negative = lost
    if lostRayFlag == 1:
        d = d[d[:, 9] == 1]
    elif lostRayFlag == 0:
        d = d[d[:, 9] < 0]
    # energy in shadow is in cm^-1:
    d[:, 10] /= 50676.89778440964400  # 1/ch constant [eV cm]^-1
    # intensity = |E|^2 from columns 6-8 (plus 15-17 for the second
    # polarization component when f_polar is set)
    intensity = np.square(d[:, 6]) + np.square(d[:, 7]) + np.square(d[:, 8])
    if _f_polar == 1:
        intensity += \
            np.square(d[:, 15]) + np.square(d[:, 16]) + np.square(d[:, 17])
    # it returns a matrix with the columns listed in class XYCPlot
    return d, intensity, locNrays
def check_shadow_dirs(processes, cwd):
    """
    Assures that tmp0, tmp1 etc. exist
    """
    ## one working directory per process: cwd/tmp0 ... cwd/tmp{processes-1}
    expected = [cwd + os.sep + 'tmp' + str(i) for i in range(processes)]
    missing = [d for d in expected if not os.path.exists(d)]
    if missing:
        if len(missing) == 1:
            raise Exception("directory %s must exist!" % missing[0])
        raise Exception("directories %s must exist!" % missing)
def init_shadow(processes, cwd, energyRange):
    """
    Initializes the work of shadow: determines the source type, the number of
    columns and the number of rays.
    """
    srcFile = 'tmp0' + os.sep + _sourceAsciiFile
    ## f_wiggler selects the source type, which determines the input files used
    fWiggler = read_input(srcFile, int, 'f_wiggler')
    if fWiggler == -1:
        return
    _fWiggler = fWiggler[0]
    ## f_polar determines the number of stored columns
    fPolar = read_input(srcFile, int, 'f_polar')
    if fPolar == -1:
        return
    _fPolar = fPolar[0]
    ## npoint is the number of rays per shadow run, usually 25000
    blockNRays = read_input(srcFile, int, 'npoint')
    if blockNRays == -1:
        return
    _blockNRays = blockNRays[0]
    ## propagate the energy range into every process directory
    for iprocess in range(processes):
        init_process(cwd + os.sep + 'tmp' + str(iprocess),
                     energyRange, _fWiggler)
    return _fWiggler, _fPolar, _blockNRays
def init_process(runDir, energyRange, _fWiggler):
    """
    Sets the energy range.
    """
    if energyRange is None:
        return
    eMin, eMax = energyRange[0], energyRange[1]
    ## wiggler (1) and undulator (2) sources keep their energy limits in
    ## extra xsh input files at source-specific line positions
    if _fWiggler == 1:
        if modify_xsh_input(runDir + os.sep + 'xsh_nphoton_tmp.inp',
                            (2, eMin), (3, eMax)) == -1:
            return
    elif _fWiggler == 2:
        if modify_xsh_input(runDir + os.sep + 'xsh_undul_set_tmp.inp',
                            (7, eMin), (8, eMax)) == -1:
            return
    ## every source type also stores ph1/ph2 in the main start file
    if modify_input(runDir + os.sep + _sourceAsciiFile,
                    ('ph1', str(eMin)),
                    ('ph2', str(eMax))) == -1:
        return
def files_in_tmp_subdirs(fileName, processes=1):
    """
    Creates and returns a list of full file names of copies of a given file
    located in the process directories. This list is needed for reading and
    writing to several versions of one file (one for each process) in one go.
    Useful in user's scripts.

    Example:
        >>> start01 = shadow.files_in_tmp_subdirs('start.01', processes=4)
        >>> shadow.modify_input(start01, ('THICK(1)', str(thick * 1e-4)))
    """
    return ['tmp' + str(iprocess) + os.sep + fileName
            for iprocess in range(processes)]
def run_process(args, _fWiggler, runDir):
    """
    Changes the seed for shadow Source and runs shadow.

    Parameters
    ----------
    *args*: str
        What to run: 'source' or 'trace'

    *_fWiggler*: int
        Source type; non-zero sources keep their seed in an extra xsh file.

    *runDir*: str
        The process directory (tmp0, tmp1, ...) to execute in.

    Returns the subprocess return code, -1 if the command could not be
    executed, or -10 on an unsupported OS.
    """
    if args == 'source':
        # odd seed, as shadow requires; randint's upper bound is exclusive,
        # matching the old inclusive random_integers(50, 5e4-1) range
        seed = 2 * np.random.randint(50, int(5e4)) + 1
        modify_input(runDir + os.sep + _sourceAsciiFile,
                     ('istar1', str(seed)))  # change seed
        if _fWiggler != 0:
            # wiggler/undulator sources keep their own copy of the seed
            modify_xsh_input(
                runDir + os.sep + 'xsh_input_source_tmp.inp', (3, seed))
    if os.name == 'nt':
        genStr = ''.join(['shadow-', args, '.bat'])
        close_fds = False
    elif os.name == 'posix':
        genStr = ''.join(['./', 'shadow-', args, '.sh'])
        close_fds = True
    else:
        print("not supported OS")
        return -10
    # Initialized before the try block: if subprocess.call itself raises,
    # the original code hit a NameError on `return retcode`.
    retcode = -1
    outFile = os.path.join(runDir, ''.join(['output_', args, '.log']))
    errFile = os.path.join(runDir, ''.join(['error_', args, '.log']))
    try:
        # context managers guarantee the log handles are flushed and closed
        with open(outFile, "w") as outptr, open(errFile, "w") as errptr:
            retcode = subprocess.call(genStr, shell=True, close_fds=close_fds,
                                      stdout=outptr, stderr=errptr, cwd=runDir)
        if retcode != 0:
            # the log handle was write-only (the original errptr.read()
            # raised); re-open the closed file to fetch the message
            with open(errFile) as errlog:
                errData = errlog.read()
            raise Exception("Error executing command: " + repr(errData))
    except Exception as inst:
        print(inst.args, ", ignored...")
    return retcode
def get_output(plot, _fPolar, _blockNRays, dir=None):
    """
    Returns the plotting arrays from the shadow output.
    """
    ## resolve the beam file relative to the process directory, if given
    fName = ''.join([dir, os.sep, plot.beam]) if dir else plot.beam
    raysArray, intensity, locNrays = read_bin_file(
        fName, _fPolar, _blockNRays, plot.rayFlag)

    def _axis_values(axis):
        ## an integer means "take this shadow column"; anything else is
        ## treated as ready-made data
        if isinstance(axis.data, int):
            return raysArray[:, axis.data] * axis.factor
        return axis.data * axis.factor

    x = _axis_values(plot.xaxis)
    y = _axis_values(plot.yaxis)
    cData = _axis_values(plot.caxis)
    locNraysNeeded = raysArray.shape[0]
    return x, y, intensity, cData, locNrays, locNraysNeeded
|
kklmn/xrt
|
xrt/backends/shadow.py
|
Python
|
mit
| 16,156
|
[
"CRYSTAL"
] |
075973981f72a44d673125f7cdcc262e7ab36969a56982a80041b20517279b04
|
import numpy as np
from cora.signal import corr21cm
from cora.foreground import galaxy
def test_corr_signal():
    """Test that the signal power spectrum is being calculated correctly.

    Correct here is referenced to a specific version believed to have no errors.
    """
    signal = corr21cm.Corr21cm()

    ells = np.arange(1000)
    cl_equal = signal.angular_powerspectrum(ells, 800.0, 800.0)
    assert len(cl_equal) == 1000
    # Calculated for commit 02f4d1cd3f402d
    assert np.allclose(cl_equal.sum(), 1.5963772205823096e-09, rtol=1e-7)

    freqs = np.linspace(400.0, 800.0, 64)
    cl_cross = signal.angular_powerspectrum(
        ells[:, None, None], freqs[None, :, None], freqs[None, None, :]
    )
    assert cl_cross.shape == (1000, 64, 64)
    # Reference values calculated for commit 02f4d1cd3f402d
    assert np.allclose(cl_cross[400, 40, 40], 8.986790805379046e-13, rtol=1e-7)  # l=400, fi=40, fj=40
    assert np.allclose(cl_cross[200, 10, 40], 1.1939298801340165e-18, rtol=1e-7)  # l=200, fi=10, fj=40
def test_corr_foreground():
    """Test that the foreground power spectrum is being calculated correctly.

    Correct here is referenced to a specific version believed to have no errors.
    """
    foreground = galaxy.FullSkySynchrotron()

    ells = np.arange(1000)
    cl_equal = foreground.angular_powerspectrum(ells, 800.0, 800.0)
    assert len(cl_equal) == 1000
    # Calculated for commit 02f4d1cd3f402d
    assert np.allclose(cl_equal.sum(), 75.47681191093129, rtol=1e-7)

    freqs = np.linspace(400.0, 800.0, 64)
    cl_cross = foreground.angular_powerspectrum(
        ells[:, None, None], freqs[None, :, None], freqs[None, None, :]
    )
    assert cl_cross.shape == (1000, 64, 64)
    # Reference values calculated for commit 02f4d1cd3f402d
    assert np.allclose(cl_cross[400, 40, 40], 9.690708728692975e-06, rtol=1e-7)   # l=400, fi=40, fj=40
    assert np.allclose(cl_cross[200, 10, 40], 0.00017630767166797886, rtol=1e-7)  # l=200, fi=10, fj=40
|
radiocosmology/cora
|
tests/test_corr.py
|
Python
|
mit
| 1,914
|
[
"Galaxy"
] |
fe0d9b60748e407463d21658daf1b732b1f724470d266a121a46f683fc157828
|
# -*- coding: utf-8 -*-
import numpy as np
from shapely.geometry.polygon import Polygon
import datetime
import netCDF4 as nc
import itertools
import geojson
from shapely.ops import cascaded_union
from shapely.geometry.multipolygon import MultiPolygon, MultiPolygonAdapter
from shapely import prepared, wkt
from shapely.geometry.geo import asShape
import time, sys
from multiprocessing import Process, Queue, Lock
from math import sqrt
from util.helpers import get_temp_path
from util.toshp import OpenClimateShp
dtime = 0
class OcgDataset(object):
"""
Wraps and netCDF4-python Dataset object providing extraction methods by
spatial and temporal queries.
dataset -- netCDF4-python Dataset object
**kwds -- arguments for the names of spatial and temporal dimensions.
rowbnds_name
colbnds_name
time_name
time_units
calendar
"""
    def __init__(self,dataset,**kwds):
        # `dataset` is a path/URL of a netCDF file; keep it around so the
        # handle can be re-opened later (see multiReset).
        self.url = dataset
        self.dataset = nc.Dataset(dataset,'r')
        # when True the handle is closed right after construction and
        # re-opened for each read -- used when the object is shared between
        # worker processes
        self.multiReset = kwds.get('multiReset') or False
        ## extract the names of the spatiotemporal variables/dimensions from
        ## the keyword arguments.
        self.rowbnds_name = kwds.get('rowbnds_name') or 'bounds_latitude'
        self.colbnds_name = kwds.get('colbnds_name') or 'bounds_longitude'
        self.time_name = kwds.get('time_name') or 'time'
        self.time_units = kwds.get('time_units') or 'days since 1950-01-01 00:00:00'
        self.calendar = kwds.get('calendar') or 'proleptic_gregorian'
        self.level_name = kwds.get('level_name') or 'levels'
        ## extract the row and column bounds from the dataset
        self.row_bnds = self.dataset.variables[self.rowbnds_name][:]
        self.col_bnds = self.dataset.variables[self.colbnds_name][:]
        ## convert the time vector to datetime objects
        self.timevec = nc.netcdftime.num2date(self.dataset.variables[self.time_name][:],
                                              self.time_units,
                                              self.calendar)
        ## these are base numpy arrays used by spatial operations.
        ## four numpy arrays one for each bounding coordinate of a polygon
        self.min_col,self.min_row = np.meshgrid(self.col_bnds[:,0],self.row_bnds[:,0])
        self.max_col,self.max_row = np.meshgrid(self.col_bnds[:,1],self.row_bnds[:,1])
        ## these are the original indices of the row and columns. they are
        ## referenced after the spatial subset to retrieve data from the dataset
        self.real_col,self.real_row = np.meshgrid(np.arange(0,len(self.col_bnds)),
                                                  np.arange(0,len(self.row_bnds)))
        if self.multiReset:
            print 'closed'
            self.dataset.close()
    def _itr_array_(self,a):
        "Yield every (row, col) index pair of the 2-d ndarray `a`, row-major."
        ix = a.shape[0]
        jx = a.shape[1]
        for ii,jj in itertools.product(xrange(ix),xrange(jx)):
            yield ii,jj
    def _contains_(self,grid,lower,upper):
        # Elementwise open-interval test: True where lower < grid < upper.
        # The boolean multiply acts as a logical AND.
        s1 = grid > lower
        s2 = grid < upper
        return(s1*s2)
    def _set_overlay_(self,polygon=None,clip=False):
        """
        Perform spatial operations.

        polygon=None -- shapely polygon object
        clip=False -- set to True to perform an intersection

        Populates self._igrid (cell geometries), self._jgrid (cell
        centroids), self._weights (area fractions) and self._mask.
        """
        print('overlay...')
        ## holds polygon objects
        self._igrid = np.empty(self.min_row.shape,dtype=object)
        ## hold point objects
        self._jgrid = np.empty(self.min_row.shape,dtype=object)
        ## holds weights for area weighting in the case of a dissolve
        self._weights = np.zeros(self.min_row.shape)
        ## initial subsetting to avoid iterating over all polygons unless abso-
        ## lutely necessary
        if polygon is not None:
            emin_col,emin_row,emax_col,emax_row = polygon.envelope.bounds
            smin_col = self._contains_(self.min_col,emin_col,emax_col)
            smax_col = self._contains_(self.max_col,emin_col,emax_col)
            smin_row = self._contains_(self.min_row,emin_row,emax_row)
            smax_row = self._contains_(self.max_row,emin_row,emax_row)
            include = np.any((smin_col,smax_col),axis=0)*np.any((smin_row,smax_row),axis=0)
        else:
            include = np.empty(self.min_row.shape,dtype=bool)
            include[:,:] = True
        ## (a commented-out np.vectorize-based variant of this overlay,
        ##  using _make_poly_array_/_intersection_, was removed here as dead
        ##  code; see version control history)
        ## loop for each spatial grid element
        if polygon:
            # prepared geometry makes the repeated intersects() tests fast
            prepared_polygon = prepared.prep(polygon)
        for ii,jj in self._itr_array_(include):
            if not include[ii,jj]: continue
            ## create the polygon
            g = self._make_poly_((self.min_row[ii,jj],self.max_row[ii,jj]),
                                 (self.min_col[ii,jj],self.max_col[ii,jj]))
            ## add the polygon if it intersects the aoi of if all data is being
            ## returned.
            if polygon:
                if not prepared_polygon.intersects(g): continue
            ## get the area before the intersection
            prearea = g.area
            ## full intersection in the case of a clip and an aoi is passed
            if clip is True and polygon is not None:
                ng = g.intersection(polygon)
            ## otherwise, just keep the geometry
            else:
                ng = g
            ## calculate the weight
            w = ng.area/prearea
            ## a polygon can have a true intersects but actually not overlap
            ## i.e. shares a border.
            if w > 0:
                self._igrid[ii,jj] = ng
                self._weights[ii,jj] = w
                self._jgrid[ii,jj] = (g.centroid.x,g.centroid.y)
        ## the mask is used as a subset
        self._mask = self._weights > 0
    def _make_poly_(self,rtup,ctup):
        """
        Build the rectangular polygon for one grid cell.

        rtup = (row min, row max)
        ctup = (col min, col max)
        """
        # corners are supplied as (x=col, y=row) pairs
        return Polygon(((ctup[0],rtup[0]),
                        (ctup[0],rtup[1]),
                        (ctup[1],rtup[1]),
                        (ctup[1],rtup[0])))
    @staticmethod
    def _make_poly_array_(include,min_row,min_col,max_row,max_col,polygon=None):
        # Build a closed grid-cell polygon when `include` is True, optionally
        # keeping it only if it intersects `polygon`; returns None otherwise.
        # NOTE(review): appears to be referenced only from commented-out
        # vectorized code in _set_overlay_ -- verify before removing.
        ret = None
        if include:
            poly = Polygon(((min_col,min_row),
                            (max_col,min_row),
                            (max_col,max_row),
                            (min_col,max_row),
                            (min_col,min_row)))
            if polygon != None:
                if polygon.intersects(poly):
                    ret = poly
            else:
                ret = poly
        return(ret)
    @staticmethod
    def _intersection_(polygon,target):
        # Intersect `target` with `polygon`; returns None for empty results
        # or when `target` itself is None (a cell excluded by the overlay).
        ret = None
        if target != None:
            ppp = target.intersection(polygon)
            if not ppp.is_empty:
                ret = ppp
        return(ret)
def _get_numpy_data_(self,var_name,polygon=None,time_range=None,clip=False,levels = [0],lock=None):
"""
var_name -- NC variable to extract from
polygon=None -- shapely polygon object
time_range=None -- [lower datetime, upper datetime]
clip=False -- set to True to perform a full intersection
"""
print('getting numpy data...')
## perform the spatial operations
self._set_overlay_(polygon=polygon,clip=clip)
def _u(arg):
"Pulls unique values and generates an evenly spaced array."
un = np.unique(arg)
return(np.arange(un.min(),un.max()+1))
def _sub(arg):
"Subset an array."
return arg[self._idxrow.min():self._idxrow.max()+1,
self._idxcol.min():self._idxcol.max()+1]
## get the time indices
if time_range is not None:
self._idxtime = np.arange(
0,
len(self.timevec))[(self.timevec>=time_range[0])*
(self.timevec<=time_range[1])]
else:
self._idxtime = np.arange(0,len(self.timevec))
## reference the original (world) coordinates of the netCDF when selecting
## the spatial subset.
self._idxrow = _u(self.real_row[self._mask])
self._idxcol = _u(self.real_col[self._mask])
## subset our reference arrays in a similar manner
self._mask = _sub(self._mask)
self._weights = _sub(self._weights)
self._igrid = _sub(self._igrid)
self._jgrid = _sub(self._jgrid)
## hit the dataset and extract the block
npd = None
narg = time.clock()
while not(lock.acquire(False)):
time.sleep(.1)
if self.multiReset:
self.dataset = nc.Dataset(dataset,'r')
##check if data is 3 or 4 dimensions
dimShape = len(self.dataset.variables[var_name].dimensions)
if dimShape == 3:
npd = self.dataset.variables[var_name][self._idxtime,self._idxrow,self._idxcol]
elif dimShape == 4:
self.levels = self.dataset.variables[self.level_name][:]
npd = self.dataset.variables[var_name][self._idxtime,levels,self._idxrow,self._idxcol]
#print npd.shape
#print self._weights
if self.multiReset:
self.dataset.close()
lock.release()
print "dtime: ", time.clock()-narg
## add in an extra dummy dimension in the case of one time layer
if len(npd.shape) == 2:
npd = npd.reshape(1,npd.shape[0],npd.shape[1])
print('numpy extraction done.')
return(npd)
def _is_masked_(self,arg):
"Ensures proper formating of masked data."
if isinstance(arg,np.ma.MaskedArray):
return None
else:
return arg
def extract_elements(self,*args,**kwds):
    """
    Merges the geometries and extracted attributes into a GeoJson-like dictionary
    list.

    First positional argument is a multiprocessing queue the results are put
    on; remaining positional args/kwds are forwarded to _get_numpy_data_.

    var_name -- NC variable to extract from
    dissolve=False -- set to True to merge geometries and calculate an
        area-weighted average
    polygon=None -- shapely polygon object
    time_range=None -- [lower datetime, upper datetime]
    clip=False -- set to True to perform a full intersection

    NOTE(review): results are communicated only via the queue; the method
    returns None. Uses Python 2 idioms (ids.next(), xrange) -- confirm the
    target interpreter.
    """
    print('extracting elements...')
    ## dissolve argument is unique to extract_elements
    if 'dissolve' in kwds:
        dissolve = kwds.pop('dissolve')
    else:
        dissolve = False
    if 'levels' in kwds:
        levels = kwds.get('levels')
    if 'parentPoly' in kwds:
        parent = kwds.pop('parentPoly')
    else:
        parent = None
    clip = kwds.get('clip')
    ## extract numpy data from the nc file
    q=args[0]
    npd = self._get_numpy_data_(*args[1:],**kwds)
    ##check which flavor of climate data we are dealing with
    ## 3 dims = (time, row, col); 4 dims = (time, level, row, col)
    ocgShape = len(npd.shape)
    ## will hold feature dictionaries
    features = []
    ## partial pixels
    recombine = {}
    ## the unique identified iterator
    ids = self._itr_id_()
    if dissolve:
        ## one feature is created for each unique time
        for kk in range(len(self._idxtime)):
            ## check if this is the first iteration. approach assumes that
            ## masked values are homogenous through the time layers. this
            ## avoids multiple union operations on the geometries. i.e.
            ##    time 1 = masked, time 2 = masked, time 3 = masked
            ##    vs.
            ##    time 1 = 0.5, time 2 = masked, time 3 = 0.46
            if kk == 0:
                ## on the first iteration:
                ##    1. make the unioned geometry
                ##    2. weight the data according to area
                ## reference layer for the masked data
                lyr = None
                if ocgShape==3:
                    lyr = npd[kk,:,:]
                elif ocgShape==4:
                    lyr = npd[kk,0,:,:]
                ## select values with spatial overlap and not masked
                if hasattr(lyr,'mask'):
                    select = self._mask*np.invert(lyr.mask)
                else:
                    select = self._mask
                #print self._mask
                ## select those geometries
                geoms = self._igrid[select]
                ## union the geometries
                unioned = cascaded_union([p for p in geoms])
                ## select the weight subset and normalize to unity
                sub_weights = self._weights*select
                #print sub_weights
                #print sub_weights.sum()
                #print unioned.area
                self._weights = sub_weights/sub_weights.sum()
                ## apply the weighting
                weighted = npd*self._weights
                #print (npd*sub_weights).sum()
                #print select.sum()
                #weighted = npd/sub_weights.sum()*sub_weights
            ## generate the feature
            if ocgShape==3:
                feature = dict(
                    id=ids.next(),
                    geometry=unioned,
                    properties=dict({VAR:float(weighted[kk,:,:].sum()),
                                     'timestamp':self.timevec[self._idxtime[kk]]}))
            elif ocgShape==4:
                feature = dict(
                    id=ids.next(),
                    geometry=unioned,
                    properties=dict({VAR:list(float(weighted[kk,x,:,:].sum()) for x in xrange(len(levels))),
                                     'timestamp':self.timevec[self._idxtime[kk]],
                                     'levels':list(x for x in self.levels[levels])}))
            #q.put(feature)
            ## record the summed weight so a parent process can recombine
            ## subdivided polygons into an area-weighted average later
            if not(parent == None) and dissolve:
                feature['weight']=sub_weights.sum()
            features.append(feature)
    else:
        ctr = None
        ## loop for each feature. no dissolving.
        for ii,jj in self._itr_array_(self._mask):
            ## if the data is included, add the feature
            if self._mask[ii,jj] == True:
                #if the geometry has a fraction of a pixel, the other factions could be handled by a different thread
                #these must be recombined later, or if it's not clipped there will be duplicates to filter out
                if self._weights[ii,jj] < 1 or not clip:
                    #tag the location this data value is at so it can be compared later
                    ctr = self._jgrid[ii,jj]
                    recombine[ctr] = []
                ## extract the data and convert any mask values
                if ocgShape == 3:
                    data = [self._is_masked_(da) for da in npd[:,ii,jj]]
                    for kk in range(len(data)):
                        ## do not add the feature if the value is a NoneType
                        if data[kk] == None: continue
                        feature = dict(
                            id=ids.next(),
                            geometry=self._igrid[ii,jj],
                            properties=dict({VAR:float(data[kk]),
                                             'timestamp':self.timevec[self._idxtime[kk]]}))
                        #if the data point covers a partial pixel or isn't clipped add it to the recombine set, otherwise leave it alone
                        if self._weights[ii,jj] < 1 or not clip:
                            recombine[ctr].append(feature)
                        else:
                            features.append(feature)
                elif ocgShape == 4:
                    if self._weights[ii,jj] < 1 or not clip:
                        ctr = self._jgrid[ii,jj]
                        recombine[ctr] = []
                    data = [self._is_masked_(da) for da in npd[:,:,ii,jj]]
                    for kk in range(len(data)):
                        ## do not add the feature if the value is a NoneType
                        if data[kk] == None: continue
                        feature = dict(
                            id=ids.next(),
                            geometry=self._igrid[ii,jj],
                            properties=dict({VAR:list(float(data[kk][x]) for x in xrange(len(levels))),
                                             'timestamp':self.timevec[self._idxtime[kk]],
                                             'levels':list(x for x in self.levels[levels])}))
                        #q.put(feature)
                        if self._weights[ii,jj] < 1 or not clip:
                            recombine[ctr].append(feature)
                        else:
                            features.append(feature)
    print('extraction complete.')
    ## hand the results back to the consumer via the queue; the payload shape
    ## depends on whether a parent polygon id was supplied
    if not(parent == None) and dissolve:
        q.put((parent,features))
    else:
        q.put((features,recombine))
    return
    #sys.exit(0)
    #return(features)
def _itr_id_(self,start=1):
while True:
try:
yield start
finally:
start += 1
def as_geojson(elements):
    """Serialize feature dictionaries into a GeoJSON FeatureCollection string.

    Note: mutates each element in place by stringifying its timestamp
    property (datetime objects are not JSON-serializable).
    """
    feature_objs = []
    for element in elements:
        props = element['properties']
        props['timestamp'] = str(props['timestamp'])
        feature_objs.append(geojson.Feature(**element))
    collection = geojson.FeatureCollection(feature_objs)
    return geojson.dumps(collection)
def as_shp(elements, path=None):
    """Write feature dictionaries to an ESRI shapefile and return its path.

    If no path is given, a temporary .shp path is generated.
    """
    out_path = path if path is not None else get_temp_path(suffix='.shp')
    writer = OpenClimateShp(out_path, elements)
    writer.write()
    return out_path
def multipolygon_operation(dataset,var,polygons,time_range=None,clip=None,dissolve=None,levels = None,ocgOpts=None):
    """Serially extract features for each polygon from `dataset`.

    NOTE(review): extract_elements in this module now pushes its results onto
    a queue (its first positional argument) and returns None, so
    `elements += ncp.extract_elements(...)` here looks broken after the
    multicore refactor -- confirm before using this single-core path.
    """
    elements = []
    ncp = OcgDataset(dataset,**ocgOpts)
    for ii,polygon in enumerate(polygons):
        print(ii)
        elements += ncp.extract_elements(var,
                                         polygon=polygon,
                                         time_range=time_range,
                                         clip=clip,
                                         dissolve=dissolve,
                                         levels = levels)
    print(repr(len(elements)))
    return(elements)
def multipolygon_multicore_operation(dataset,var,polygons,time_range=None,clip=None,dissolve=None,levels = None,ocgOpts=None,subdivide=False,subres='detect'):
    """Extract features for each polygon using one worker Process per
    (sub)polygon, then merge the per-worker results.

    subdivide -- when True, each polygon is gridded into subpolygons that are
        processed in parallel and recombined afterwards.
    subres -- subgrid resolution; 'detect' uses half the square root of the
        polygon envelope area (roughly 4 subpolygons).

    NOTE(review): Python 2 code (print statements, xrange, iteritems in this
    file); workers communicate via the shared Queue `q`.
    """
    elements = []
    ret = []
    q = Queue()
    l = Lock()
    pl = []
    ## local (non-http) datasets are re-opened per worker
    if not('http:' in dataset or 'www.' in dataset):
        if ocgOpts == None:
            ocgOpts = {}
        ocgOpts['multiReset'] = True
    ncp = OcgDataset(dataset,**ocgOpts)
    #print ncp.row_bnds.min(),ncp.row_bnds.max()
    #print ncp.col_bnds.min(),ncp.col_bnds.max()
    #sys.exit()
    #create a polygon covering the whole area so that the job can be split
    if polygons == [None]:
        polygons = [Polygon(((ncp.col_bnds.min(),ncp.row_bnds.min()),(ncp.col_bnds.max(),ncp.row_bnds.min()),(ncp.col_bnds.max(),ncp.row_bnds.max()),(ncp.col_bnds.min(),ncp.row_bnds.max())))]
    for ii,polygon in enumerate(polygons):
        print(ii)
        #if polygons have been specified and subdivide is True, each polygon will be subdivided
        #into a gread with resolution of subres. If subres is undefined the resolution is half the square root of the area of the polygons envelope, or approximately 4 subpolygons
        if subdivide and not(polygons == None):
            #figure out the resolution and subdivide
            if subres == 'detect':
                subpolys = make_shapely_grid(polygon,sqrt(polygon.envelope.area)/2.0,clip=clip)
            else:
                subpolys = make_shapely_grid(polygon,subres,clip=clip)
            #generate threads for each subpolygon
            for poly in subpolys:
                #print poly.intersection(polygon).wkt
                p = Process(target = ncp.extract_elements,
                            args = (
                                q,
                                var,),
                            kwargs= {
                                'lock':l,
                                'polygon':poly,
                                'time_range':time_range,
                                'clip':clip,
                                'dissolve':dissolve,
                                'levels' : levels,
                                'parentPoly':ii})
                p.start()
                pl.append(p)
        #if no polygons are specified only 1 thread will be created
        else:
            p = Process(target = ncp.extract_elements,
                        args = (
                            q,
                            var,),
                        kwargs= {
                            'lock':l,
                            'polygon':polygon,
                            'time_range':time_range,
                            'clip':clip,
                            'dissolve':dissolve,
                            'levels' : levels})
            p.start()
            pl.append(p)
    #for p in pl:
        #p.join()
    #consumer thread loop, the main process will grab any feature lists added by the
    #processing threads and continues until those threads have terminated.
    a=True
    while a:
        a=False
        #check if any threads are still active
        for p in pl:
            a = a or p.is_alive()
        #remove anything from the queue if present
        while not q.empty():
            ret.append(q.get())
        #give the threads some time to process more stuff
        time.sleep(.1)
    #The subdivided geometry must be recombined into the original polygons
    if subdivide and dissolve:
        groups = {}
        #form groups of elements based on which polygon they belong to
        #(workers tagged their payload with the parent polygon index)
        for x in ret:
            if not x[0] in groups:
                groups[x[0]] = []
            groups[x[0]].append(x[1])
        #print '>',groups.keys()
        #print groups
        #for each group, recombine the geometry and average the data points
        for x in groups.keys():
            group = groups[x]
            #recombine the geometry using the first time period
            total = cascaded_union([y[0]['geometry'] for y in group])
            #form subgroups consisting of subpolygons that cover the same time period
            subgroups = [[g[t] for g in group] for t in xrange(len(group[0]))]
            ta = sum([y['weight'] for y in subgroups[0]])
            #print ta
            #average the data values and form new features
            #(weights were attached by extract_elements when dissolving)
            for subgroup in subgroups:
                if not(levels == None):
                    avg = [sum([y['properties'][var][z]*(y['weight']/ta) for y in subgroup]) for z in xrange(len(levels))]
                    elements.append( dict(
                        id=subgroup[0]['id'],
                        geometry=total,
                        properties=dict({VAR: avg,
                                         'timestamp':subgroup[0]['properties']['timestamp'],
                                         'levels': subgroup[0]['properties']['levels']})))
                    print total.area
                    print avg
                else:
                    avg = sum([y['properties'][var]*(y['weight']/ta) for y in subgroup])
                    elements.append( dict(
                        id=subgroup[0]['id'],
                        geometry=total,
                        properties=dict({VAR:avg,
                                         'timestamp':subgroup[0]['properties']['timestamp']})))
    else:
        ## no dissolve: concatenate full-pixel features and merge the
        ## partial-pixel ("recombine") features that straddle subpolygons
        recombine = []
        for x in ret:
            elements.extend(x[0])
            recombine.append(x[1])
        keylist = []
        for x in recombine:
            keylist.extend(x.keys())
        keylist = set(keylist)
        #print keylist
        #print len(keylist)
        for key in keylist:
            cur = []
            for x in recombine:
                if key in x:
                    cur.append(x[key])
            if len(cur)==1:
                elements.extend(cur[0])
            else:
                if clip:
                    elements.extend(cur[0])
                else:
                    geo = cascaded_union([x[0]['geometry'] for x in cur])
                    for x in cur[0]:
                        x['geometry'] = geo
                    elements.append(x)
    print len(elements)
    return(elements)
def make_shapely_grid(poly,res,as_numpy=False,clip=True):
    """
    Return a list or NumPy matrix of shapely Polygon objects.

    poly -- shapely Polygon to discretize
    res -- target grid resolution in the same units as |poly|
    as_numpy -- when True, return the 2-d object array instead of a flat list
    clip -- when True, intersect each grid cell with |poly| (cells with no
        overlap come back as None from make_poly_array)
    """
    ## ensure we have a floating point resolution
    res = float(res)
    ## check that the target polygon is a valid geometry
    assert(poly.is_valid)
    ## vectorize the polygon creation
    vfunc_poly = np.vectorize(make_poly_array)#,otypes=[np.object])
    ## prepare the geometry for faster spatial relationship checking. throws a
    ## a warning so leaving out for now.
    # prepped = prep(poly)
    ## extract bounding coordinates of the polygon
    min_x,min_y,max_x,max_y = poly.envelope.bounds
    ## convert to matrices of cell-origin coordinates
    X,Y = np.meshgrid(np.arange(min_x,max_x,res),
                      np.arange(min_y,max_y,res))
    #print X,Y
    ## shift by the resolution to get each cell's opposite corner
    pmin_x = X
    pmax_x = X + res
    pmin_y = Y
    pmax_y = Y + res
    ## make the 2-d array; the intersection polygon is broadcast as a scalar
    if clip:
        poly_array = vfunc_poly(pmin_y,pmin_x,pmax_y,pmax_x,poly)
    else:
        poly_array = vfunc_poly(pmin_y,pmin_x,pmax_y,pmax_x)
    #print poly_array
    #sys.exit()
    ## format according to configuration arguments
    if as_numpy:
        ret = poly_array
    else:
        ret = list(poly_array.reshape(-1))
    return(ret)
def make_poly_array(min_row, min_col, max_row, max_col, polyint=None):
    """Build one rectangular grid-cell Polygon from its bounding rows/cols.

    When an intersection polygon `polyint` is supplied, the cell is clipped
    against it; cells with no overlap yield None.
    """
    corners = ((min_col, min_row),
               (max_col, min_row),
               (max_col, max_row),
               (min_col, max_row),
               (min_col, min_row))
    cell = Polygon(corners)
    if polyint is None:
        return cell
    if not polyint.intersects(cell):
        return None
    return polyint.intersection(cell)
if __name__ == '__main__':
    ## Ad-hoc benchmark/driver: extract a variable over test polygons and
    ## write GeoJSON + shapefile outputs while timing the run.
    ## NOTE(review): Python 2 file (bare print statement at the end).
    narg = time.time()
    ## all
    # POLYINT = Polygon(((-99,39),(-94,38),(-94,40),(-100,39)))
    ## great lakes
    #POLYINT = Polygon(((-90.35,40.55),(-83,43),(-80.80,49.87),(-90.35,49.87)))
    #POLYINT = Polygon(((-90,30),(-70,30),(-70,50),(-90,50)))
    #POLYINT = Polygon(((-90,40),(-80,40),(-80,50),(-90,50)))
    #POLYINT = Polygon(((-130,18),(-60,18),(-60,98),(-130,98)))
    POLYINT = Polygon(((0,0),(0,10),(10,10),(10,0)))
    ## return all data
    #POLYINT = None
    ## two areas
    #POLYINT = [wkt.loads('POLYGON ((-85.324076923076916 44.028020242914977,-84.280765182186229 44.16008502024291,-84.003429149797569 43.301663967611333,-83.607234817813762 42.91867611336032,-84.227939271255053 42.060255060728736,-84.941089068825903 41.307485829959511,-85.931574898785414 41.624441295546553,-85.588206477732783 43.011121457489871,-85.324076923076916 44.028020242914977))'),
    #wkt.loads('POLYGON ((-89.24640080971659 46.061817813765174,-88.942651821862341 46.378773279352224,-88.454012145748976 46.431599190283393,-87.952165991902831 46.11464372469635,-88.163469635627521 45.190190283400803,-88.889825910931165 44.503453441295541,-88.770967611336033 43.552587044534405,-88.942651821862341 42.786611336032379,-89.774659919028338 42.760198380566798,-90.038789473684204 43.777097165991897,-89.735040485829956 45.097744939271251,-89.24640080971659 46.061817813765174))')]
    ## watersheds
    # path = '/home/bkoziol/git/OpenClimateGIS/bin/geojson/watersheds_4326.geojson'
    ## select = ['HURON']
    # select = []
    # with open(path,'r') as f:
    #     data = ''.join(f.readlines())
    ## data2 = f.read()
    # gj = geojson.loads(data)
    # POLYINT = []
    # for feature in gj['features']:
    #     if select:
    #         prop = feature['properties']
    #         if prop['HUCNAME'] in select:
    #             pass
    #         else:
    #             continue
    #     geom = asShape(feature['geometry'])
    #     if not isinstance(geom,MultiPolygonAdapter):
    #         geom = [geom]
    #     for polygon in geom:
    #         POLYINT.append(polygon)
    # NC = '/home/reid/Desktop/ncconv/pcmdi.ipcc4.bccr_bcm2_0.1pctto2x.run1.monthly.cl_A1_1.nc'
    NC = '/home/bkoziol/git/OpenClimateGIS/bin/climate_data/wcrp_cmip3/pcmdi.ipcc4.bccr_bcm2_0.1pctto2x.run1.monthly.cl_A1_1.nc'
    #NC = '/home/bkoziol/git/OpenClimateGIS/bin/climate_data/maurer/bccr_bcm2_0.1.sresa1b.monthly.Prcp.1950.nc'
    #NC = '/home/reid/Desktop/ncconv/bccr_bcm2_0.1.sresa1b.monthly.Prcp.1950.nc'
    #NC = 'http://hydra.fsl.noaa.gov/thredds/dodsC/oc_gis_downscaling.bccr_bcm2.sresa1b.Prcp.Prcp.1.aggregation.1'
    # TEMPORAL = [datetime.datetime(1950,2,1),datetime.datetime(1950,4,30)]
    #TEMPORAL = [datetime.datetime(1950,2,1),datetime.datetime(1950,3,1)]
    TEMPORAL = [datetime.datetime(1960,3,16),datetime.datetime(1961,3,16)] #time range for multi-level file
    DISSOLVE = False
    CLIP = True
    VAR = 'cl'
    #VAR = 'Prcp'
    #kwds={}
    ## dataset-specific NC dimension/variable names
    kwds = {
        'rowbnds_name': 'lat_bnds',
        'colbnds_name': 'lon_bnds',
        'time_units': 'days since 1800-1-1 00:00:0.0',
        #'time_units': 'days since 1950-1-1 0:0:0.0',
        'level_name': 'lev'
    }
    LEVELS = [x for x in range(0,1)]
    #LEVELS = [x for x in range(0,10)]
    ## open the dataset for reading
    dataset = NC#nc.Dataset(NC,'r')
    ## make iterable if only a single polygon requested
    if type(POLYINT) not in (list,tuple): POLYINT = [POLYINT]
    ## convenience function for multiple polygons
    elements = multipolygon_multicore_operation(dataset,
                                                VAR,
                                                POLYINT,
                                                time_range=TEMPORAL,
                                                clip=CLIP,
                                                dissolve=DISSOLVE,
                                                levels = LEVELS,
                                                ocgOpts=kwds,
                                                subdivide=True,
                                                #subres = 360
                                                )
    # out = as_shp(elements)
    dtime = time.time()
    out = as_geojson(elements)
    with open('/tmp/out_M2.json','w') as f:
        f.write(out)
    out_shp = as_shp(elements)
    print(out_shp)
    ## timing report: total wall time, serialization time, extraction time
    dtime = time.time()-dtime
    blarg = time.time()
    print blarg-narg,dtime,blarg-narg-dtime
|
mapping/OpenClimateGIS
|
src/openclimategis/util/ncconv/experimental/OLD_experimental/in_memory_oo_multi2.py
|
Python
|
bsd-3-clause
| 34,083
|
[
"NetCDF"
] |
470a251c581d0fa210f9fe13b78a310a5ce96d71fc48297ac36404b2dc63d2c2
|
##
## See COPYING file distributed along with the ncanda-data-integration package
## for the copyright and license terms
##
"""
A collection of utility functions used by aseba_prep.py and aseba_reformat.py
"""
from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import range
from past.utils import old_div
import pandas as pd
import re
def process_demographics_file(filename):
    """
    Subset and rename columns in a demographics file from an NCANDA release.

    Return with standard release index (subject, arm, visit) set in the
    DataFrame.
    """
    demo = pd.read_csv(filename)
    keep = ['subject', 'arm', 'visit', 'participant_id', 'visit_age', 'sex']
    demo = demo[keep]
    # Keep the SID both as index level and as its own column.
    demo['mri_xnat_sid'] = demo['subject']
    renames = {'participant_id': 'study_id', 'visit_age': 'age'}
    demo = demo.rename(columns=renames)
    return demo.set_index(['subject', 'arm', 'visit'])
def get_year_set(year_int):
    """
    Given an integer year, get Redcap event names up to and including the year.

    Redcap event names are formatted as in redcap_event_name returned by the
    Redcap API (i.e. "X_visit_arm_1").

    Integer-based year: 0 = baseline, 1 = followup_1y, ...

    Only allows for full-year standard-arm events.
    """
    stems = ["baseline"] + ["%dy" % i for i in range(1, 10)]
    full_names = ["{}_visit_arm_1".format(stem) for stem in stems]
    return full_names[:year_int + 1]
def load_redcap_summary(file, index=True):
    """
    Load a release file. Optionally, set its primary keys as pandas indices.

    All values are loaded as strings (dtype=object).
    """
    keys = ['subject', 'arm', 'visit'] if index else None
    return pd.read_csv(file, index_col=keys, dtype=object, low_memory=False)
def load_redcap_summaries(files):
    """
    Given a list of release files, return their horizontal concatenation.
    """
    frames = [load_redcap_summary(path) for path in files]
    return pd.concat(frames, axis=1)
def get_id_lookup_from_demographics_file(demographics_df):
    """
    Extract a lookup (Redcap ID -> NCANDA SID) from a demographics DataFrame.

    Expects a demographics_df outputted by `process_demographics_file`.
    """
    flat = demographics_df.reset_index().set_index('study_id')
    return flat.to_dict().get('mri_xnat_sid')
def api_result_to_release_format(api_df, id_lookup_dict=None, verbose=False):
    """
    Reindex a PyCAP API result to an NCANDA release format.

    REDCap API, when used with PyCAP, returns results as a DataFrame indexed
    by NCANDA ID (study_id - X-00000-Y-0) and combined event + arm
    (redcap_event_name). Release files, on the other hand, are typically
    indexed by XNAT ID (NCANDA_S0?????; mri_xnat_id in Redcap).

    This function will:

    1. Convert Redcap IDs to NCANDA SIDs using id_lookup_dict (as generated
       by `get_id_lookup_from_demographics_file`) or the `mri_xnat_sid`
       column (if present in api_df),
    2. Drop Redcap IDs that cannot be converted in that way,
    3. Separate event and arm to individual columns and make their names
       release-compatible,
    4. Return DataFrame indexed by release primary keys (subject, arm, visit).
    """
    out = api_df.copy(deep=True)
    out.reset_index(inplace=True)

    # 1. Derive the NCANDA SID ("subject") column.
    if id_lookup_dict:
        out['subject'] = out['study_id'].map(id_lookup_dict)
    elif 'mri_xnat_sid' in out.columns:
        out['subject'] = out['mri_xnat_sid']
    else:
        raise IndexError("You must supply id_lookup_dict, or api_df has to "
                         "have the mri_xnat_sid column")

    # 2. Drop rows whose Redcap ID has no corresponding SID.
    missing = out['subject'].isnull()
    if verbose:
        dropped = out.loc[missing, 'study_id'].tolist()
        print ("Dropping study IDs without corresponding NCANDA SID: " +
               ", ".join(dropped))
    out = out[~missing]

    # 3. Split "<event>_arm_<n>" into separate visit and arm columns.
    out[['visit', 'arm']] = (out['redcap_event_name']
                             .str.extract(r'^(\w+)_(arm_\d+)$'))

    def _visit_name(event):
        """
        If possible, convert Redcap event name to NCANDA release visit name.

        If conversion fails, return the original string. Intended to be
        passed to pd.Series.map.
        """
        # NOTE: Only accounts for full Arm 1 events
        hit = re.search(r'^(baseline|\dy)', event)
        if not hit:
            return event
        if re.match(r'^\d', hit.group(1)):
            return "followup_" + hit.group(1)
        return hit.group(1)

    out['visit'] = out['visit'].map(_visit_name)

    def _arm_name(arm):
        """
        If possible, convert Redcap arm name to NCANDA release arm name.

        If conversion fails, return the original string. Intended to be
        passed to pd.Series.map.
        """
        names = {'arm_1': 'standard',
                 'arm_2': 'recovery',
                 'arm_3': 'sleep',
                 'arm_4': 'maltreated'}
        return names.get(arm, arm)

    out['arm'] = out['arm'].map(_arm_name)

    # 4. Apply the release primary-key index.
    return out.set_index(['subject', 'arm', 'visit'])
def cbc_colname_sorter(colname):
    """
    Extract a machine-sortable number from CBCL columns.

    Luckily, section doesn't matter - question numbers increase monotonically,
    so all that's necessary is to extract them. An extra wrinkle is question
    56, which has letter-numbered parts, so we'll make that a decimal and add
    it to the extracted number; this should result in correct sorting.

    Returns None when the column name has no trailing question number.

    Intended to be passed to sort as a key function.
    """
    match = re.search(r'(\d+)([a-h]?)$', colname)
    if not match:
        return None
    number = float(match.group(1))
    letter = match.group(2)
    # Bug fix: the previous old_div((ord(letter) - 96), 100) performed Py2
    # floor division on two ints, so every lettered part collapsed to 0 and
    # e.g. '56a' and '56b' sorted as equal. True division keeps the decimal.
    if letter:
        fraction = (ord(letter) - 96) / 100
    else:
        fraction = 0.0
    return number + fraction
|
sibis-platform/ncanda-data-integration
|
scripts/reporting/aseba_utils.py
|
Python
|
bsd-3-clause
| 5,978
|
[
"VisIt"
] |
3d1d152c468062d067bf81094ab70ed6caed6befa6ed11d792e3bba57984694b
|
#!/usr/bin/env python
VERSION='1.00'
import os,sys,time
import optparse
from RecoLuminosity.LumiDB import pileupParser
from RecoLuminosity.LumiDB import selectionParser
from math import exp
from math import sqrt
def parseInputFile(inputfilename):
    '''
    Parse a Lumi/Pileup JSON file with pileupParser.

    output ({run:[ls:[inlumi, meanint]]})
    '''
    # Bug fix: the file handle was previously opened and never closed.
    selectf = open(inputfilename, 'r')
    try:
        inputfilecontent = selectf.read()
    finally:
        selectf.close()
    p = pileupParser.pileupParser(inputfilecontent)
    # p=inputFilesetParser.inputFilesetParser(inputfilename)
    runlsbyfile = p.runsandls()
    return runlsbyfile
def MyErf(input):
    """Polynomial approximation of the error function erf(input).

    Uses the Abramowitz and Stegun rational approximation (equations
    7.1.25-28); odd symmetry is applied for negative arguments. The
    arithmetic is kept term-for-term identical to preserve bit-exact
    results.
    """
    mag = abs(input)
    p = 0.47047
    b1 = 0.3480242
    b2 = -0.0958798
    b3 = 0.7478556
    damp = 1.0/(1.0+p*mag)
    approx = 1.0 - (b1*damp + b2*damp*damp + b3*damp*damp*damp)*exp(-1.0*mag*mag)
    # Alternate Erf approximation (eq. 7.1.27), kept for reference:
    #A1 = 0.278393
    #A2 = 0.230389
    #A3 = 0.000972
    #A4 = 0.078108
    #term = 1.0+ A1*X+ A2*X*X+ A3*X*X*X+ A4*X*X*X*X
    #denom = term*term*term*term
    #dErf = 1.0 - 1.0/denom
    return -1.0*approx if input < 0 else approx
def fillPileupHistogram (lumiInfo, calcOption, hist, minbXsec, Nbins):
    '''
    Fill `hist` with the pileup distribution of one lumisection.

    lumiinfo:[intlumi per LS, mean interactions ]

    intlumi is the deadtime corrected average integraged lumi per lumisection

    calcOption 'true' fills the reconstituted interaction distribution
    directly; any other value convolutes it with a Poisson to obtain the
    observed number of interactions. NOTE(review): Python 2 code (print
    statements); requires ROOT in scope for the Poisson branch.
    '''
    LSintLumi = lumiInfo[0]
    RMSInt = lumiInfo[1]*minbXsec
    AveNumInt = lumiInfo[2]*minbXsec
    #coeff = 0
    #if RMSInt > 0:
    #    coeff = 1.0/RMSInt/sqrt(6.283185)
    #expon = 2.0*RMSInt*RMSInt
    Sqrt2 = sqrt(2)
    ##Nbins = hist.GetXaxis().GetNbins()
    ProbFromRMS = []
    BinWidth = hist.GetBinWidth(1)
    # First, re-constitute lumi distribution for this LS from RMS:
    if RMSInt > 0:
        # Integrate a Gaussian over each bin via the difference of erf values
        # at the bin edges (more accurate than sampling the bin center).
        AreaLnew = -10.
        AreaL = 0
        for obs in range (Nbins):
            #Old Gaussian normalization; inaccurate for small rms and large bins
            #val = hist.GetBinCenter(obs+1)
            #prob = coeff*exp(-1.0*(val-AveNumInt)*(val-AveNumInt)/expon)
            #ProbFromRMS.append(prob)
            left = hist.GetBinLowEdge(obs+1)
            right = left+BinWidth
            argR = (AveNumInt-right)/Sqrt2/RMSInt
            AreaR = MyErf(argR)
            if AreaLnew<-5.:
                argL = (AveNumInt-left)/Sqrt2/RMSInt
                AreaL = MyErf(argL)
            else:
                AreaL = AreaLnew
            # save R bin value for L next time
            AreaLnew = AreaR
            NewProb = (AreaL-AreaR)*0.5
            ProbFromRMS.append(NewProb)
            #print left, right, argL, argR, AreaL, AreaR, NewProb
    else:
        # Zero RMS: all probability in the single bin containing the mean.
        # NOTE(review): FindBin returns Nbins+1 for overflow and obs == Nbins
        # would index one past the end of ProbFromRMS -- confirm the guard.
        obs = hist.FindBin(AveNumInt)
        for bin in range (Nbins):
            ProbFromRMS.append(0.0)
        if obs<Nbins+1:
            ProbFromRMS[obs] = 1.0
        if AveNumInt < 1.0E-5:
            ProbFromRMS[obs] = 0. # just ignore zero values
    if calcOption == 'true':  # Just put distribution into histogram
        if RMSInt > 0:
            totalProb = 0
            for obs in range (Nbins):
                prob = ProbFromRMS[obs]
                val = hist.GetBinCenter(obs+1)
                #print obs, val, RMSInt,coeff,expon,prob
                totalProb += prob
                hist.Fill (val, prob * LSintLumi)
            if 1.0-totalProb > 0.01:
                print "Significant probability density outside of your histogram"
                print "Consider using a higher value of --maxPileupBin"
                print "Mean %f, RMS %f, Integrated probability %f" % (AveNumInt,RMSInt,totalProb)
                # hist.Fill (val, (1 - totalProb) * LSintLumi)
        else:
            hist.Fill(AveNumInt,LSintLumi)
    else: # have to convolute with a poisson distribution to get observed Nint
        totalProb = 0
        Peak = 0
        BinWidth = hist.GetBinWidth(1)
        for obs in range (Nbins):
            Peak = hist.GetBinCenter(obs+1)
            RMSWeight = ProbFromRMS[obs]
            for bin in range (Nbins):
                val = hist.GetBinCenter(bin+1)-0.5*BinWidth
                prob = ROOT.TMath.Poisson (val, Peak)
                totalProb += prob
                hist.Fill (val, prob * LSintLumi * RMSWeight)
        if 1.0-totalProb > 0.01:
            print "Significant probability density outside of your histogram"
            print "Consider using a higher value of --maxPileupBin"
    return hist
##############################
## ######################## ##
## ## ################## ## ##
## ## ## Main Program ## ## ##
## ## ################## ## ##
## ######################## ##
##############################
if __name__ == '__main__':
    ## Command-line driver: parse options, reconstitute the per-LS pileup
    ## distribution for the selected runs/lumisections, and write the summed
    ## TH1D to a ROOT file. NOTE(review): Python 2-only syntax throughout
    ## (print statements, `except Exception , e`, `raise RuntimeError, ...`).
    parser = optparse.OptionParser ("Usage: %prog [--options] output.root",
                                    description = "Script to estimate pileup distribution using xing instantaneous luminosity information and minimum bias cross section. Output is TH1D stored in root file")
    #
    # parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),description = "Pileup Lumi Calculation",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    CalculationModeChoices = ['truth', 'observed']
    #
    # parse arguments
    #
    #
    # basic arguments
    #
    #parser.add_argument('action',choices=allowedActions,
    #                    help='command actions')
    parser.add_option('-o',dest='outputfile',action='store',
                      default='PileupCalc.root',
                      help='output root file')
    parser.add_option('-i',dest='inputfile',action='store',
                      help='Input Run/LS file for your analysis in JSON format (required)')
    parser.add_option('--inputLumiJSON',dest='inputLumiJSON',action='store',
                      help='Input Lumi/Pileup file in JSON format (required)')
    parser.add_option('--calcMode',dest='calcMode',action='store',
                      help='Calculate either True ="true" or Observed="observed" distributions')
    parser.add_option('--minBiasXsec',dest='minBiasXsec',action='store',
                      type=float,
                      default=73500,
                      help='Minimum bias cross section assumed (in microbbarn), default %default microbarn')
    parser.add_option('--maxPileupBin',dest='maxPileupBin',action='store',
                      type=int,
                      default=25,
                      help='Maximum value of pileup histogram, default %default')
    parser.add_option('--numPileupBins',dest='numPileupBins',action='store',
                      type=int,
                      default=1000,
                      help='number of bins in pileup histogram, default %default')
    parser.add_option('--pileupHistName',dest='pileupHistName',action='store',
                      default='pileup',
                      help='name of pileup histogram, default %default')
    parser.add_option('--verbose',dest='verbose',action='store_true',help='verbose mode for printing' )
    # parse arguments
    try:
        (options, args) = parser.parse_args()
    except Exception , e:
        print e
    if not args:
        parser.print_usage()
        sys.exit()
    if len (args) != 1:
        parser.print_usage()
        raise RuntimeError, "Exactly one output file must be given"
    output = args[0]
    # options=parser.parse_args()
    if options.verbose:
        print 'General configuration'
        print '\toutputfile: ',options.outputfile
        print '\tAction: ',options.calcMode, 'luminosity distribution will be calculated'
        print '\tinput selection file: ',options.inputfile
        print '\tMinBiasXsec: ',options.minBiasXsec
        print '\tmaxPileupBin: ',options.maxPileupBin
        print '\tnumPileupBins: ',options.numPileupBins
    ## ROOT is imported lazily so --help works without a ROOT installation
    import ROOT
    pileupHist = ROOT.TH1D (options.pileupHistName,options.pileupHistName,
                            options.numPileupBins,
                            0., options.maxPileupBin)
    nbins = options.numPileupBins
    upper = options.maxPileupBin
    inpf = open (options.inputfile, 'r')
    inputfilecontent = inpf.read()
    inputRange = selectionParser.selectionParser (inputfilecontent).runsandls()
    #inputRange=inputFilesetParser.inputFilesetParser(options.inputfile)
    if options.calcMode in ['true','observed']:
        inputPileupRange=parseInputFile(options.inputLumiJSON)
        # now, we have to find the information for the input runs and LumiSections
        # in the Lumi/Pileup list. First, loop over inputs
        for (run, lslist) in sorted (inputRange.iteritems() ):
            # now, look for matching run, then match lumi sections
            # print "searching for run %d" % (run)
            if run in inputPileupRange.keys():
                #print run
                LSPUlist = inputPileupRange[run]
                # print "LSPUlist", LSPUlist
                for LSnumber in lslist:
                    if LSnumber in LSPUlist.keys():
                        #print "found LS %d" % (LSnumber)
                        lumiInfo = LSPUlist[LSnumber]
                        # print lumiInfo
                        fillPileupHistogram (lumiInfo, options.calcMode,
                                             pileupHist, options.minBiasXsec, nbins)
                    else: # trouble
                        print "Run %d, LumiSection %d not found in Lumi/Pileup input file. Check your files!" \
                              % (run,LSnumber)
            else: # trouble
                print "Run %d not found in Lumi/Pileup input file. Check your files!" % (run)
            # print run
            # print lslist
        histFile = ROOT.TFile.Open (output, 'recreate')
        if not histFile:
            raise RuntimeError, \
                "Could not open '%s' as an output root file" % output
        pileupHist.Write()
        #for hist in histList:
        #    hist.Write()
        histFile.Close()
        sys.exit()
    else:
        print "must specify a pileup calculation mode via --calcMode true or --calcMode observed"
        sys.exit()
|
iamjakob/lumiCalc
|
LumiDB/scripts/pileupCalc.py
|
Python
|
apache-2.0
| 10,205
|
[
"Gaussian"
] |
758325ee272346eace50f27327f9d437da704fe2a68f67c2512e65445abce5b5
|
# Copyright 2001 by Iddo Friedberg. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
from Bio import FSSP
from Bio.FSSP import FSSPTools
import sys
import os
# import pickle
## Regression-test script: read an FSSP file, exercise FSSPTools, and write
## the observed values to stdout for comparison against expected output.
test_file = os.path.join('FSSP', '1cnv.fssp')
f = sys.stdout
f.write("\nRead in %s\n" % os.path.basename(test_file))
handle = open(test_file)
head_rec, sum_rec, align_rec = FSSP.read_fssp(handle)
handle.close()
f.write("...1cnv.fssp read\n")
## dump the parsed header attributes
for i in ["author", "compnd", "database", "header", "nalign",
          "pdbid", "seqlength", "source"]:
    f.write('head_rec.%s %s\n' % (i, str(getattr(head_rec, i))))
f.write("\nlen(sum_rec) = %d; head_rec.nalign = %d\n" %
        (len(sum_rec), head_rec.nalign))
f.write("The above two numbers should be the same\n")
f.write("\nCreate a multiple alignment instance using Bio.Align\n")
alignment = FSSPTools.mult_align(sum_rec, align_rec)
f.write("...Done\n")
# Percent ID filtering takes too long.. remove from test.
# f.write("\nFilter in percent ID's >= 15%\n")
# sum_ge_15, align_ge_15 = FSSPTools.filter(sum_rec, align_rec, 'pID', 15,100)
# f.write("\nnumber of records filtered in: %d\n" % len(sum_ge_15))
# k = sorted(sum_ge_15)
# f.write("\nRecords filtered in %s\n" % k)
# Pickling takes too long.. remove from test.
# f.write("\nLet's Pickle this\n")
# dump_file = os.path.join('FSSP', 'mydump.pik')
# pickle.dump((head_rec, sum_rec, align_rec),open(dump_file, 'w'))
## keep only a named subset of chains and dump the filtered records
f.write("\nFilter by name\n")
name_list = ['2hvm0', '1hvq0', '1nar0', '2ebn0']
f.write("\nname list %s\n" % str(name_list))
sum_newnames, align_newnames = FSSPTools.name_filter(sum_rec, align_rec,
                                                     name_list)
for key in sorted(sum_newnames):
    f.write("%s : %s\n" % (key, sum_newnames[key]))
new_dict = align_newnames['0P168'].pos_align_dict
for key in sorted(new_dict):
    f.write("%s : %s\n" % (key, new_dict[key]))
|
updownlife/multipleK
|
dependencies/biopython-1.65/Tests/test_FSSP.py
|
Python
|
gpl-2.0
| 2,016
|
[
"Biopython"
] |
8871ebad586ee094c49bde78ec604327d478418c48b46123680f62f270c3238e
|
"""
Instantiate the global Configuration Object
gConfig is used everywhere within DIRAC to access Configuration data
"""
from DIRAC.ConfigurationSystem.private.ConfigurationClient import ConfigurationClient
#: Global gConfig object of type :class:`~DIRAC.ConfigurationSystem.private.ConfigurationClient.ConfigurationClient`
gConfig = ConfigurationClient()
def getConfig():
    """
    Accessor for the process-wide configuration singleton.

    :returns: gConfig
    :rtype: ~DIRAC.ConfigurationSystem.private.ConfigurationClient.ConfigurationClient
    """
    return gConfig
|
DIRACGrid/DIRAC
|
src/DIRAC/ConfigurationSystem/Client/Config.py
|
Python
|
gpl-3.0
| 524
|
[
"DIRAC"
] |
6fc501c4a489dd95d5a480c65573d1f7974fb38440a4488bb6a74dddf0e8db69
|
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: huerta@embl.de
#
#
# #END_LICENSE#############################################################
from __future__ import absolute_import
from __future__ import print_function
import random
import copy
import itertools
from collections import deque
from hashlib import md5
from functools import cmp_to_key
import six
from six.moves import (cPickle, map, range, zip)
from ..parser.newick import read_newick, write_newick
from .. import utils
# the following imports are necessary to set fixed styles and faces
try:
from ..treeview.main import NodeStyle, _FaceAreas, FaceContainer, FACE_POSITIONS
from ..treeview.faces import Face
except ImportError:
TREEVIEW = False
else:
TREEVIEW = True
# Public API of this module.
__all__ = ["Tree", "TreeNode"]

# Module-level default values used elsewhere in this module.
DEFAULT_COMPACT = False
DEFAULT_SHOWINTERNAL = False
DEFAULT_DIST = 1.0
DEFAULT_SUPPORT = 1.0
DEFAULT_NAME = ""
class TreeError(Exception):
    """Raised when a TreeNode operation cannot be completed."""

    def __init__(self, value=''):
        # Keep the offending value/message for later inspection.
        self.value = value

    def __str__(self):
        return repr(self.value)
class TreeNode(object):
"""
TreeNode (Tree) class is used to store a tree structure. A tree
consists of a collection of TreeNode instances connected in a
hierarchical way. Trees can be loaded from the New Hampshire Newick
format (newick).
:argument newick: Path to the file containing the tree or, alternatively,
the text string containing the same information.
:argument 0 format: subnewick format
.. table::
====== ==============================================
FORMAT DESCRIPTION
====== ==============================================
0 flexible with support values
1 flexible with internal node names
2 all branches + leaf names + internal supports
3 all branches + all names
4 leaf branches + leaf names
5 internal and leaf branches + leaf names
6 internal branches + leaf names
7 leaf branches + all names
8 all names
9 leaf names
100 topology only
====== ==============================================
:returns: a tree node object which represents the base of the tree.
** Examples: **
::
t1 = Tree() # creates an empty tree
t2 = Tree('(A:1,(B:1,(C:1,D:1):0.5):0.5);')
t3 = Tree('/home/user/myNewickFile.txt')
"""
def _get_dist(self):
return self._dist
def _set_dist(self, value):
try:
self._dist = float(value)
except ValueError:
raise TreeError('node dist must be a float number')
def _get_support(self):
return self._support
def _set_support(self, value):
try:
self._support = float(value)
except ValueError:
raise TreeError('node support must be a float number')
def _get_up(self):
return self._up
def _set_up(self, value):
if type(value) == type(self) or value is None:
self._up = value
else:
raise TreeError("bad node_up type")
def _get_children(self):
return self._children
def _set_children(self, value):
if type(value) == list and \
len(set([type(n)==type(self) for n in value]))<2:
self._children = value
else:
raise TreeError("Incorrect children type")
def _get_style(self):
if self._img_style is None:
self._set_style(None)
return self._img_style
    def _set_style(self, value):
        # Setter backing ``img_style``: delegates validation/defaulting to
        # set_style() (defined further down in this class).
        self.set_style(value)
#: Branch length distance to parent node. Default = 0.0
img_style = property(fget=_get_style, fset=_set_style)
#: Branch length distance to parent node. Default = 0.0
dist = property(fget=_get_dist, fset=_set_dist)
#: Branch support for current node
support = property(fget=_get_support, fset=_set_support)
#: Pointer to parent node
up = property(fget=_get_up, fset=_set_up)
#: A list of children nodes
children = property(fget=_get_children, fset=_set_children)
def _set_face_areas(self, value):
if isinstance(value, _FaceAreas):
self._faces = value
else:
raise ValueError("[%s] is not a valid FaceAreas instance" %type(value))
def _get_face_areas(self):
if not hasattr(self, "_faces"):
self._faces = _FaceAreas()
return self._faces
faces = property(fget=_get_face_areas, \
fset=_set_face_areas)
def __init__(self, newick=None, format=0, dist=None, support=None,
name=None):
self._children = []
self._up = None
self._dist = DEFAULT_DIST
self._support = DEFAULT_SUPPORT
self._img_style = None
self.features = set([])
# Add basic features
self.features.update(["dist", "support", "name"])
if dist is not None:
self.dist = dist
if support is not None:
self.support = support
self.name = name if name is not None else DEFAULT_NAME
# Initialize tree
if newick is not None:
self._dist = 0.0
read_newick(newick, root_node = self, format=format)
    def __nonzero__(self):
        # Python 2 truthiness hook; without it bool() would fall back to
        # __len__ and an empty (leaf) node would evaluate as False.
        return True
    def __bool__(self):
        """
        Python3's equivalent of __nonzero__
        If this is not defined bool(class_instance) will call
        __len__ in python3
        """
        return True
def __repr__(self):
return "Tree node '%s' (%s)" %(self.name, hex(self.__hash__()))
def __and__(self, value):
""" This allows to execute tree&'A' to obtain the descendant node
whose name is A"""
value=str(value)
try:
first_match = next(self.iter_search_nodes(name=value))
return first_match
except StopIteration:
raise TreeError("Node not found")
def __add__(self, value):
""" This allows to sum two trees."""
# Should a make the sum with two copies of the original trees?
if type(value) == self.__class__:
new_root = self.__class__()
new_root.add_child(self)
new_root.add_child(value)
return new_root
else:
raise TreeError("Invalid node type")
def __str__(self):
""" Print tree in newick format. """
return self.get_ascii(compact=DEFAULT_COMPACT, \
show_internal=DEFAULT_SHOWINTERNAL)
def __contains__(self, item):
""" Check if item belongs to this node. The 'item' argument must
be a node instance or its associated name."""
if isinstance(item, self.__class__):
return item in set(self.get_descendants())
elif type(item)==str:
return item in set([n.name for n in self.traverse()])
def __len__(self):
"""Node len returns number of children."""
return len(self.get_leaves())
    def __iter__(self):
        """Iterating over a node yields its leaf nodes (not all nodes)."""
        return self.iter_leaves()
def add_feature(self, pr_name, pr_value):
"""
Add or update a node's feature.
"""
setattr(self, pr_name, pr_value)
self.features.add(pr_name)
def add_features(self, **features):
"""
Add or update several features. """
for fname, fvalue in six.iteritems(features):
setattr(self, fname, fvalue)
self.features.add(fname)
def del_feature(self, pr_name):
"""
Permanently deletes a node's feature.
"""
if hasattr(self, pr_name):
delattr(self, pr_name)
self.features.remove(pr_name)
# Topology management
def add_child(self, child=None, name=None, dist=None, support=None):
"""
Adds a new child to this node. If child node is not suplied
as an argument, a new node instance will be created.
:argument None child: the node instance to be added as a child.
:argument None name: the name that will be given to the child.
:argument None dist: the distance from the node to the child.
:argument None support': the support value of child partition.
:returns: The child node instance
"""
if child is None:
child = self.__class__()
if name is not None:
child.name = name
if dist is not None:
child.dist = dist
if support is not None:
child.support = support
self.children.append(child)
child.up = self
return child
def remove_child(self, child):
"""
Removes a child from this node (parent and child
nodes still exit but are no longer connected).
"""
try:
self.children.remove(child)
except ValueError as e:
raise TreeError("child not found")
else:
child.up = None
return child
def add_sister(self, sister=None, name=None, dist=None):
"""
Adds a sister to this node. If sister node is not supplied
as an argument, a new TreeNode instance will be created and
returned.
"""
if self.up == None:
raise TreeError("A parent node is required to add a sister")
else:
return self.up.add_child(child=sister, name=name, dist=dist)
def remove_sister(self, sister=None):
"""
Removes a sister node. It has the same effect as
**`TreeNode.up.remove_child(sister)`**
If a sister node is not supplied, the first sister will be deleted
and returned.
:argument sister: A node instance
:return: The node removed
"""
sisters = self.get_sisters()
if len(sisters) > 0:
if sister is None:
sister = sisters.pop(0)
return self.up.remove_child(sister)
    def delete(self, prevent_nondicotomic=True, preserve_branch_length=False):
        """
        Deletes this node from the tree structure, transferring its
        children to the next available parent (the node "disappears" but
        its subtree is preserved).

        :param True prevent_nondicotomic: When True (default), delete is
            applied recursively so no single-child internal node is left
            behind.
        :param False preserve_branch_length: If True, the branch length of
            the deleted node is transferred (summed up) so original
            distances among the remaining nodes are kept.
        """
        parent = self.up
        if parent:
            if preserve_branch_length:
                # with a single child, push our branch length down to it;
                # with several, push it up to the parent instead
                if len(self.children) == 1:
                    self.children[0].dist += self.dist
                elif len(self.children) > 1:
                    parent.dist += self.dist
            # re-hang all children from the parent, then disconnect self
            for ch in self.children:
                parent.add_child(ch)
            parent.remove_child(self)
        # Avoids parents with only one child
        if prevent_nondicotomic and parent and\
           len(parent.children) < 2:
            parent.delete(prevent_nondicotomic=False,
                          preserve_branch_length=preserve_branch_length)
def detach(self):
"""
Detachs this node (and all its descendants) from its parent
and returns the referent to itself.
Detached node conserves all its structure of descendants, and can
be attached to another node through the 'add_child' function. This
mechanism can be seen as a cut and paste.
"""
if self.up:
self.up.children.remove(self)
self.up = None
return self
    def prune(self, nodes, preserve_branch_length=False):
        """Prunes the topology of this node so that only the requested
        leaves/internal nodes are conserved. The minimum set of extra
        nodes needed to keep the topological relationships among the
        requested nodes is retained; the root node is always kept.

        :var nodes: a list of node names or node objects to retain.
        :param False preserve_branch_length: If True, branch lengths of
            deleted nodes are transferred (summed up) to their parent's
            branch, keeping original distances among the surviving nodes.

        **Example:**
        ::

          t1 = Tree('(((((A,B)C)D,E)F,G)H,(I,J)K)root;', format=1)
          t1.prune(['A', 'B'])   # keeps root, A and B only
        """
        def cmp_nodes(x, y):
            # if several nodes are in the same path of two kept nodes,
            # only one should be maintained. This prioritize internal
            # nodes that are already in the to_keep list and then
            # deeper nodes (closer to the leaves).
            if n2depth[x] > n2depth[y]:
                return -1
            elif n2depth[x] < n2depth[y]:
                return 1
            else:
                return 0
        to_keep = set(_translate_nodes(self, *nodes))
        start, node2path = self.get_common_ancestor(to_keep, get_path=True)
        to_keep.add(self)
        # Calculate which kept nodes are visiting the same nodes in
        # their path to the common ancestor.
        n2count = {}
        n2depth = {}
        for seed, path in six.iteritems(node2path):
            for visited_node in path:
                if visited_node not in n2depth:
                    depth = visited_node.get_distance(start, topology_only=True)
                    n2depth[visited_node] = depth
                if visited_node is not seed:
                    n2count.setdefault(visited_node, set()).add(seed)
        # if several internal nodes are in the path of exactly the same kept
        # nodes, only one (the deepest) should be maintain.
        visitors2nodes = {}
        for node, visitors in six.iteritems(n2count):
            # keep nodes connection at least two other nodes
            if len(visitors)>1:
                visitor_key = frozenset(visitors)
                visitors2nodes.setdefault(visitor_key, set()).add(node)
        # NOTE: the loop variable below shadows the *nodes* argument; the
        # argument is no longer needed at this point, but beware when editing.
        for visitors, nodes in six.iteritems(visitors2nodes):
            if not (to_keep & nodes):
                sorted_nodes = sorted(nodes, key=cmp_to_key(cmp_nodes))
                to_keep.add(sorted_nodes[0])
        # finally remove every node not marked for keeping, bottom-up
        for n in self.get_descendants('postorder'):
            if n not in to_keep:
                if preserve_branch_length:
                    if len(n.children) == 1:
                        n.children[0].dist += n.dist
                    elif len(n.children) > 1 and n.up:
                        n.up.dist += n.dist
                n.delete(prevent_nondicotomic=False)
def swap_children(self):
"""
Swaps current children order.
"""
if len(self.children)>1:
self.children.reverse()
# #####################
# Tree traversing
# #####################
def get_children(self):
"""
Returns an independent list of node's children.
"""
return [ch for ch in self.children]
def get_sisters(self):
"""
Returns an indepent list of sister nodes.
"""
if self.up!=None:
return [ch for ch in self.up.children if ch!=self]
else:
return []
def iter_leaves(self, is_leaf_fn=None):
"""
Returns an iterator over the leaves under this node.
:argument None is_leaf_fn: See :func:`TreeNode.traverse` for
documentation.
"""
for n in self.traverse(strategy="preorder", is_leaf_fn=is_leaf_fn):
if not is_leaf_fn:
if n.is_leaf():
yield n
else:
if is_leaf_fn(n):
yield n
def get_leaves(self, is_leaf_fn=None):
"""
Returns the list of terminal nodes (leaves) under this node.
:argument None is_leaf_fn: See :func:`TreeNode.traverse` for
documentation.
"""
return [n for n in self.iter_leaves(is_leaf_fn=is_leaf_fn)]
def iter_leaf_names(self, is_leaf_fn=None):
"""
Returns an iterator over the leaf names under this node.
:argument None is_leaf_fn: See :func:`TreeNode.traverse` for
documentation.
"""
for n in self.iter_leaves(is_leaf_fn=is_leaf_fn):
yield n.name
def get_leaf_names(self, is_leaf_fn=None):
"""
Returns the list of terminal node names under the current
node.
:argument None is_leaf_fn: See :func:`TreeNode.traverse` for
documentation.
"""
return [name for name in self.iter_leaf_names(is_leaf_fn=is_leaf_fn)]
def iter_descendants(self, strategy="levelorder", is_leaf_fn=None):
"""
Returns an iterator over all descendant nodes.
:argument None is_leaf_fn: See :func:`TreeNode.traverse` for
documentation.
"""
for n in self.traverse(strategy=strategy, is_leaf_fn=is_leaf_fn):
if n is not self:
yield n
def get_descendants(self, strategy="levelorder", is_leaf_fn=None):
"""
Returns a list of all (leaves and internal) descendant nodes.
:argument None is_leaf_fn: See :func:`TreeNode.traverse` for
documentation.
"""
return [n for n in self.iter_descendants(strategy=strategy, \
is_leaf_fn=is_leaf_fn)]
def traverse(self, strategy="levelorder", is_leaf_fn=None):
"""
Returns an iterator to traverse the tree structure under this
node.
:argument "levelorder" strategy: set the way in which tree
will be traversed. Possible values are: "preorder" (first
parent and then children) 'postorder' (first children and
the parent) and "levelorder" (nodes are visited in order
from root to leaves)
:argument None is_leaf_fn: If supplied, ``is_leaf_fn``
function will be used to interrogate nodes about if they
are terminal or internal. ``is_leaf_fn`` function should
receive a node instance as first argument and return True
or False. Use this argument to traverse a tree by
dynamically collapsing internal nodes matching
``is_leaf_fn``.
"""
if strategy=="preorder":
return self._iter_descendants_preorder(is_leaf_fn=is_leaf_fn)
elif strategy=="levelorder":
return self._iter_descendants_levelorder(is_leaf_fn=is_leaf_fn)
elif strategy=="postorder":
return self._iter_descendants_postorder(is_leaf_fn=is_leaf_fn)
    def iter_prepostorder(self, is_leaf_fn=None):
        """
        Iterate over all nodes in a tree yielding every node in both
        pre and post order. Each iteration returns a postorder flag
        (True if node is being visited in postorder) and a node
        instance.
        """
        to_visit = [self]
        if is_leaf_fn is not None:
            _leaf = is_leaf_fn
        else:
            _leaf = self.__class__.is_leaf
        while to_visit:
            node = to_visit.pop(-1)
            try:
                # Internal nodes are re-queued as a [1, node] marker list;
                # indexing succeeds only on the marker, signalling that the
                # node's subtree has been fully visited (postorder pass).
                node = node[1]
            except TypeError:
                # PREORDER ACTIONS
                yield (False, node)
                if not _leaf(node):
                    # ADD CHILDREN
                    to_visit.extend(reversed(node.children + [[1, node]]))
            else:
                #POSTORDER ACTIONS
                yield (True, node)
    def _iter_descendants_postorder(self, is_leaf_fn=None):
        """Iterate over this node and all its descendants in postorder
        (children are yielded before their parent)."""
        to_visit = [self]
        if is_leaf_fn is not None:
            _leaf = is_leaf_fn
        else:
            _leaf = self.__class__.is_leaf
        while to_visit:
            node = to_visit.pop(-1)
            try:
                # a [1, node] marker means the node's subtree is already done
                node = node[1]
            except TypeError:
                # PREORDER ACTIONS
                if not _leaf(node):
                    # ADD CHILDREN
                    to_visit.extend(reversed(node.children + [[1, node]]))
                else:
                    yield node
            else:
                #POSTORDER ACTIONS
                yield node
def _iter_descendants_levelorder(self, is_leaf_fn=None):
"""
Iterate over all desdecendant nodes.
"""
tovisit = deque([self])
while len(tovisit)>0:
node = tovisit.popleft()
yield node
if not is_leaf_fn or not is_leaf_fn(node):
tovisit.extend(node.children)
def _iter_descendants_preorder(self, is_leaf_fn=None):
"""
Iterator over all descendant nodes.
"""
to_visit = deque()
node = self
while node is not None:
yield node
if not is_leaf_fn or not is_leaf_fn(node):
to_visit.extendleft(reversed(node.children))
try:
node = to_visit.popleft()
except:
node = None
def iter_ancestors(self):
'''versionadded: 2.2
Iterates over the list of all ancestor nodes from current node
to the current tree root.
'''
node = self
while node.up is not None:
yield node.up
node = node.up
def get_ancestors(self):
'''versionadded: 2.2
Returns the list of all ancestor nodes from current node to
the current tree root.
'''
return [n for n in self.iter_ancestors()]
    def describe(self):
        """
        Prints general information about this node and its connections
        (leaf count, node count, rooting, most distant node).
        """
        # a root with exactly two children is considered a rooted tree
        if len(self.get_tree_root().children)==2:
            rooting = "Yes"
        elif len(self.get_tree_root().children)>2:
            rooting = "No"
        else:
            rooting = "No children"
        max_node, max_dist = self.get_farthest_leaf()
        # cached_content maps each node to the set of leaves under it
        cached_content = self.get_cached_content()
        print("Number of leaf nodes:\t%d" % len(cached_content[self]))
        print("Total number of nodes:\t%d" % len(cached_content))
        print("Rooted:\t%s" %rooting)
        print("Most distant node:\t%s" %max_node.name)
        print("Max. distance:\t%f" %max_dist)
    def write(self, features=None, outfile=None, format=0, is_leaf_fn=None,
              format_root_node=False, dist_formatter=None, support_formatter=None,
              name_formatter=None):
        """
        Returns the newick representation of current node. Several
        arguments control the way in which extra data is shown for
        every node:

        :argument features: a list of feature names to be exported
          using the Extended Newick Format (i.e. features=["name",
          "dist"]). Use an empty list to export all available features
          in each node (features=[])
        :argument outfile: writes the output to the given file and
          returns None; otherwise the newick string is returned.
        :argument format: defines the newick standard used to encode the
          tree. See tutorial for details.
        :argument False format_root_node: If True, it allows features
          and branch information from root node to be exported as a
          part of the newick text string. For newick compatibility
          reasons, this is False by default.
        :argument is_leaf_fn: See :func:`TreeNode.traverse` for
          documentation.

        **Example:**
        ::

          t.write(features=["species","name"], format=1)
        """
        nw = write_newick(self, features=features,
                          format=format,
                          is_leaf_fn=is_leaf_fn,
                          format_root_node=format_root_node,
                          dist_formatter=dist_formatter,
                          support_formatter=support_formatter,
                          name_formatter=name_formatter)
        if outfile is not None:
            with open(outfile, "w") as OUT:
                OUT.write(nw)
        else:
            return nw
def get_tree_root(self):
"""
Returns the absolute root node of current tree structure.
"""
root = self
while root.up is not None:
root = root.up
return root
    def get_common_ancestor(self, *target_nodes, **kargs):
        """
        Returns the first common ancestor between this node and a given
        list of 'target_nodes'. If only one target is given, the common
        ancestor of that node and *self* is returned.

        :keyword get_path: if True, also return a dict mapping each
            target node to the set of nodes on its path to the root.
        :raises TreeError: if the nodes do not belong to the same tree.

        **Examples:**
        ::

          t = tree.Tree("(((A:0.1, B:0.01):0.001, C:0.0001):1.0[&&NHX:name=common], (D:0.00001):0.000001):2.0[&&NHX:name=root];")
          A = t.get_descendants_by_name("A")[0]
          C = t.get_descendants_by_name("C")[0]
          common = A.get_common_ancestor(C)
          print(common.name)
        """
        get_path = kargs.get("get_path", False)
        # allow passing a single collection of targets instead of varargs
        if len(target_nodes) == 1 and type(target_nodes[0]) \
                in set([set, tuple, list, frozenset]):
            target_nodes = target_nodes[0]
        # Convert node names into node instances
        target_nodes = _translate_nodes(self, *target_nodes)
        # If only one node is provided, use self as the second target
        if type(target_nodes) != list:
            target_nodes = [target_nodes, self]
        n2path = {}
        reference = []
        ref_node = None
        # record, for every target, the set of nodes on its root path; the
        # first target's path (kept in order) is used as the reference
        for n in target_nodes:
            current = n
            while current:
                n2path.setdefault(n, set()).add(current)
                if not ref_node:
                    reference.append(current)
                current = current.up
            if not ref_node:
                ref_node = n
        common = None
        # the first reference node present in every other target's path is
        # the lowest common ancestor
        for n in reference:
            broken = False
            for node, path in six.iteritems(n2path):
                if node is not ref_node and n not in path:
                    broken = True
                    break
            if not broken:
                common = n
                break
        if not common:
            raise TreeError("Nodes are not connected!")
        if get_path:
            return common, n2path
        else:
            return common
def iter_search_nodes(self, **conditions):
"""
Search nodes in an interative way. Matches are being yield as
they are being found. This avoids to scan the full tree
topology before returning the first matches. Useful when
dealing with huge trees.
"""
for n in self.traverse():
conditions_passed = 0
for key, value in six.iteritems(conditions):
if hasattr(n, key) and getattr(n, key) == value:
conditions_passed +=1
if conditions_passed == len(conditions):
yield n
def search_nodes(self, **conditions):
"""
Returns the list of nodes matching a given set of conditions.
**Example:**
::
tree.search_nodes(dist=0.0, name="human")
"""
matching_nodes = []
for n in self.iter_search_nodes(**conditions):
matching_nodes.append(n)
return matching_nodes
def get_leaves_by_name(self, name):
"""
Returns a list of leaf nodes matching a given name.
"""
return self.search_nodes(name=name, children=[])
def is_leaf(self):
"""
Return True if current node is a leaf.
"""
return len(self.children) == 0
def is_root(self):
"""
Returns True if current node has no parent
"""
if self.up is None:
return True
else:
return False
# ###########################
# Distance related functions
# ###########################
    def get_distance(self, target, target2=None, topology_only=False):
        """
        Returns the distance between two nodes. If only one target is
        specified, it returns the distance between the target and the
        current node.

        :argument target: a node within the same tree structure.
        :argument target2: a node within the same tree structure. If
          not specified, current node is used as target2.
        :argument False topology_only: If set to True, distance will
          refer to the number of nodes between target and target2.

        :returns: branch length distance between target and
          target2. If topology_only flag is True, returns the number
          of nodes between target and target2.
        """
        if target2 is None:
            target2 = self
            root = self.get_tree_root()
        else:
            # is target node under current node?
            root = self
        target, target2 = _translate_nodes(root, target, target2)
        ancestor = root.get_common_ancestor(target, target2)
        # sum branch lengths from each endpoint up to the common ancestor
        dist = 0.0
        for n in [target2, target]:
            current = n
            while current != ancestor:
                if topology_only:
                    # count intermediate nodes, excluding target itself
                    if current!=target:
                        dist += 1
                else:
                    dist += current.dist
                current = current.up
        return dist
    def get_farthest_node(self, topology_only=False):
        """
        Returns the node's farthest descendant or ancestor node, and the
        distance to it.

        :argument False topology_only: If set to True, distance
          between nodes will be referred to the number of nodes
          between them. In other words, topological distance will be
          used instead of branch length distances.

        :return: A tuple containing the farthest node referred to the
          current node and the distance to it.
        """
        # Init farthest node to current farthest leaf
        farthest_node, farthest_dist = self.get_farthest_leaf(topology_only=topology_only)
        # then climb towards the root, checking the deepest leaf hanging
        # from every sibling subtree along the way
        prev = self
        cdist = 0.0 if topology_only else prev.dist
        current = prev.up
        while current is not None:
            for ch in current.children:
                if ch != prev:
                    if not ch.is_leaf():
                        fnode, fdist = ch.get_farthest_leaf(topology_only=topology_only)
                    else:
                        fnode = ch
                        fdist = 0
                    if topology_only:
                        fdist += 1.0
                    else:
                        fdist += ch.dist
                    if cdist+fdist > farthest_dist:
                        farthest_dist = cdist + fdist
                        farthest_node = fnode
            prev = current
            if topology_only:
                cdist += 1
            else:
                cdist += prev.dist
            current = prev.up
        return farthest_node, farthest_dist
    def _get_farthest_and_closest_leaves(self, topology_only=False, is_leaf_fn=None):
        """Return ``(closest_leaf, min_dist, farthest_leaf, max_dist)``
        among the leaves under this node, in a single pre/post-order pass.
        """
        # if called from a leaf node, no necessary to compute
        if (is_leaf_fn and is_leaf_fn(self)) or self.is_leaf():
            return self, 0.0, self, 0.0
        min_dist = None
        min_node = None
        max_dist = None
        max_node = None
        # d tracks the accumulated distance from self to the node currently
        # being visited; it grows on preorder visits and shrinks on postorder
        d = 0.0
        for post, n in self.iter_prepostorder(is_leaf_fn=is_leaf_fn):
            if n is self:
                continue
            if post:
                d -= n.dist if not topology_only else 1.0
            else:
                if (is_leaf_fn and is_leaf_fn(n)) or n.is_leaf():
                    total_d = d + n.dist if not topology_only else d
                    if min_dist is None or total_d < min_dist:
                        min_dist = total_d
                        min_node = n
                    if max_dist is None or total_d > max_dist:
                        max_dist = total_d
                        max_node = n
                else:
                    d += n.dist if not topology_only else 1.0
        return min_node, min_dist, max_node, max_dist
def get_farthest_leaf(self, topology_only=False, is_leaf_fn=None):
"""
Returns node's farthest descendant node (which is always a leaf), and the
distance to it.
:argument False topology_only: If set to True, distance
between nodes will be referred to the number of nodes
between them. In other words, topological distance will be
used instead of branch length distances.
:return: A tuple containing the farthest leaf referred to the
current node and the distance to it.
"""
min_node, min_dist, max_node, max_dist = self._get_farthest_and_closest_leaves(
topology_only=topology_only, is_leaf_fn=is_leaf_fn)
return max_node, max_dist
def get_closest_leaf(self, topology_only=False, is_leaf_fn=None):
"""Returns node's closest descendant leaf and the distance to
it.
:argument False topology_only: If set to True, distance
between nodes will be referred to the number of nodes
between them. In other words, topological distance will be
used instead of branch length distances.
:return: A tuple containing the closest leaf referred to the
current node and the distance to it.
"""
min_node, min_dist, max_node, max_dist = self._get_farthest_and_closest_leaves(
topology_only=topology_only, is_leaf_fn=is_leaf_fn)
return min_node, min_dist
    def get_midpoint_outgroup(self):
        """
        Returns the node that divides the current tree into two
        distance-balanced partitions (i.e. the midpoint rooting point).
        """
        # Gets the farthest node to the current root
        root = self.get_tree_root()
        nA, r2A_dist = root.get_farthest_leaf()
        # nA..nB is the longest path in the tree; its midpoint is the target
        nB, A2B_dist = nA.get_farthest_node()
        outgroup = nA
        middist = A2B_dist / 2.0
        cdist = 0
        current = nA
        while current is not None:
            cdist += current.dist
            if cdist > (middist): # stop climbing once the midpoint is passed
                break
            else:
                current = current.up
        return current
def populate(self, size, names_library=None, reuse_names=False,
random_branches=False, branch_range=(0,1),
support_range=(0,1)):
"""
Generates a random topology by populating current node.
:argument None names_library: If provided, names library
(list, set, dict, etc.) will be used to name nodes.
:argument False reuse_names: If True, node names will not be
necessarily unique, which makes the process a bit more
efficient.
:argument False random_branches: If True, branch distances and support
values will be randomized.
:argument (0,1) branch_range: If random_branches is True, this
range of values will be used to generate random distances.
:argument (0,1) support_range: If random_branches is True,
this range of values will be used to generate random branch
support values.
"""
NewNode = self.__class__
if len(self.children) > 1:
connector = NewNode()
for ch in self.get_children():
ch.detach()
connector.add_child(child = ch)
root = NewNode()
self.add_child(child = connector)
self.add_child(child = root)
else:
root = self
next_deq = deque([root])
for i in range(size-1):
if random.randint(0, 1):
p = next_deq.pop()
else:
p = next_deq.popleft()
c1 = p.add_child()
c2 = p.add_child()
next_deq.extend([c1, c2])
if random_branches:
c1.dist = random.uniform(*branch_range)
c2.dist = random.uniform(*branch_range)
c1.support = random.uniform(*branch_range)
c2.support = random.uniform(*branch_range)
else:
c1.dist = 1.0
c2.dist = 1.0
c1.support = 1.0
c2.support = 1.0
# next contains leaf nodes
charset = "abcdefghijklmnopqrstuvwxyz"
if names_library:
names_library = deque(names_library)
else:
avail_names = itertools.combinations_with_replacement(charset, 10)
for n in next_deq:
if names_library:
if reuse_names:
tname = random.sample(names_library, 1)[0]
else:
tname = names_library.pop()
else:
tname = ''.join(next(avail_names))
n.name = tname
    def set_outgroup(self, outgroup):
        """
        Sets a descendant node as the outgroup of a tree. This function
        can be used to root a tree or even an internal node.

        :argument outgroup: a node instance (or node name) within the
            same tree structure that will be used as a basal node.
        :raises TreeError: if *outgroup* is this node itself.
        """
        outgroup = _translate_nodes(self, outgroup)
        if self == outgroup:
            raise TreeError("Cannot set myself as outgroup")
        parent_outgroup = outgroup.up
        # Detects (sub)tree root
        n = outgroup
        while n.up is not self:
            n = n.up
        # If outgroup is a child from root, but with more than one
        # sister nodes, creates a new node to group them
        self.children.remove(n)
        if len(self.children) != 1:
            down_branch_connector = self.__class__()
            down_branch_connector.dist = 0.0
            down_branch_connector.support = n.support
            for ch in self.get_children():
                down_branch_connector.children.append(ch)
                ch.up = down_branch_connector
                self.children.remove(ch)
        else:
            down_branch_connector = self.children[0]
        # Connects down branch to myself or to outgroup
        # (Spanish variable names below: quien_va_ser_padre = "who will
        # become the parent", quien_va_ser_hijo = "who will become the
        # child", quien_fue_padre = "who used to be the parent".)
        quien_va_ser_padre = parent_outgroup
        if quien_va_ser_padre is not self:
            # Parent-child swapping: walk up to the root, reversing the
            # direction of every edge and shifting dist/support values one
            # step so they stay attached to the correct branch.
            quien_va_ser_hijo = quien_va_ser_padre.up
            quien_fue_padre = None
            buffered_dist = quien_va_ser_padre.dist
            buffered_support = quien_va_ser_padre.support
            while quien_va_ser_hijo is not self:
                quien_va_ser_padre.children.append(quien_va_ser_hijo)
                quien_va_ser_hijo.children.remove(quien_va_ser_padre)
                buffered_dist2 = quien_va_ser_hijo.dist
                buffered_support2 = quien_va_ser_hijo.support
                quien_va_ser_hijo.dist = buffered_dist
                quien_va_ser_hijo.support = buffered_support
                buffered_dist = buffered_dist2
                buffered_support = buffered_support2
                quien_va_ser_padre.up = quien_fue_padre
                quien_fue_padre = quien_va_ser_padre
                quien_va_ser_padre = quien_va_ser_hijo
                quien_va_ser_hijo = quien_va_ser_padre.up
            quien_va_ser_padre.children.append(down_branch_connector)
            down_branch_connector.up = quien_va_ser_padre
            quien_va_ser_padre.up = quien_fue_padre
            down_branch_connector.dist += buffered_dist
            outgroup2 = parent_outgroup
            parent_outgroup.children.remove(outgroup)
            outgroup2.dist = 0
        else:
            outgroup2 = down_branch_connector
        outgroup.up = self
        outgroup2.up = self
        # outgroup is always the first children. Some function my
        # trust on this fact, so do no change this.
        self.children = [outgroup,outgroup2]
        # split the joined branch evenly between the two root children
        middist = (outgroup2.dist + outgroup.dist)/2
        outgroup.dist = middist
        outgroup2.dist = middist
        outgroup2.support = outgroup.support
def unroot(self):
"""
Unroots current node. This function is expected to be used on
the absolute tree root node, but it can be also be applied to
any other internal node. It will convert a split into a
multifurcation.
"""
if len(self.children)==2:
if not self.children[0].is_leaf():
self.children[0].delete()
elif not self.children[1].is_leaf():
self.children[1].delete()
else:
raise TreeError("Cannot unroot a tree with only two leaves")
    def show(self, layout=None, tree_style=None, name="ETE"):
        """
        Starts an interactive GUI session to visualize this node's
        structure using the given layout function and TreeStyle.

        :param layout: a layout function or a valid layout function name.
        :param tree_style: a TreeStyle instance with image properties.
        :param name: window title.
        """
        # imported lazily: the Qt-based treeview stack is optional
        from ..treeview import drawer
        drawer.show_tree(self, layout=layout,
                         tree_style=tree_style, win_name=name)
    def render(self, file_name, layout=None, w=None, h=None, \
               tree_style=None, units="px", dpi=90):
        """
        Renders the node structure as an image.

        :var file_name: path to the output image file. valid
          extensions are .SVG, .PDF, .PNG. The special value
          '%%return' returns the image object instead of writing it.
        :var layout: a layout function or a valid layout function name
        :var tree_style: a `TreeStyle` instance containing the image
          properties

        :var px units: "px": pixels, "mm": millimeters, "in": inches
        :var None h: height of the image in :attr:`units`
        :var None w: width of the image in :attr:`units`
        :var 90 dpi: dots per inch (default 90).
        """
        # imported lazily: the Qt-based treeview stack is optional
        from ..treeview import drawer
        if file_name == '%%return':
            return drawer.get_img(self, w=w, h=h,
                                  layout=layout, tree_style=tree_style,
                                  units=units, dpi=dpi)
        else:
            return drawer.render_tree(self, file_name, w=w, h=h,
                                      layout=layout, tree_style=tree_style,
                                      units=units, dpi=dpi)
def copy(self, method="cpickle"):
    """.. versionadded: 2.1

    Returns a copy of the current node.

    :var cpickle method: Protocol used to copy the node
    structure. The following values are accepted:

       - "newick": Tree topology, node names, branch lengths and
         branch support values will be copied by as represented in
         the newick string (copy by newick string serialisation).

       - "newick-extended": Tree topology and all node features
         will be copied based on the extended newick format
         representation. Only node features will be copied, thus
         excluding other node attributes. As this method is also
         based on newick serialisation, features will be converted
         into text strings when making the copy.

       - "cpickle": The whole node structure and its content is
         cloned based on cPickle object serialisation (slower, but
         recommended for full tree copying)

       - "deepcopy": The whole node structure and its content is
         copied based on the standard "copy" Python functionality
         (this is the slowest method but it allows to copy complex
         objects even if attributes point to lambda functions,
         etc.)

    :raises TreeError: if `method` is not one of the above.
    """
    method = method.lower()
    if method == "newick":
        new_node = self.__class__(self.write(features=["name"], format_root_node=True))
    elif method == "newick-extended":
        # NOTE(review): a previous version serialized the tree twice here;
        # the first write() call discarded its result (dead code) and has
        # been removed.
        new_node = self.__class__(self.write(features=[]))
    elif method == "deepcopy":
        # Temporarily detach from the parent so the copy does not drag the
        # whole ancestor structure along.
        parent = self.up
        self.up = None
        new_node = copy.deepcopy(self)
        self.up = parent
    elif method == "cpickle":
        parent = self.up
        self.up = None
        new_node = six.moves.cPickle.loads(six.moves.cPickle.dumps(self, 2))
        self.up = parent
    else:
        raise TreeError("Invalid copy method")

    return new_node
def _asciiArt(self, char1='-', show_internal=True, compact=False, attributes=None):
    """
    Returns the ASCII representation of the tree.

    Code based on the PyCogent GPL project.

    :param char1: character drawn on the branch leading to this node.
    :param show_internal: if True, labels of internal nodes are printed.
    :param compact: if True, no blank spacer line is left between tips.
    :param attributes: node attributes joined (comma-separated) to build
        each label; defaults to ["name"].
    :returns: a tuple (lines, mid) where `lines` is a list of strings and
        `mid` is the row index where the parent branch should attach.
    """
    if not attributes:
        attributes = ["name"]
    # Label: join the requested attributes that exist on this node.
    node_name = ', '.join(map(str, [getattr(self, v) for v in attributes if hasattr(self, v)]))

    # Column width reserved for branch characters / internal labels.
    LEN = max(3, len(node_name) if not self.children or show_internal else 3)
    PAD = ' ' * LEN
    PA = ' ' * (LEN-1)
    if not self.is_leaf():
        mids = []
        result = []
        for c in self.children:
            # Pick the connector character depending on the child position.
            if len(self.children) == 1:
                char2 = '/'
            elif c is self.children[0]:
                char2 = '/'
            elif c is self.children[-1]:
                char2 = '\\'
            else:
                char2 = '-'
            (clines, mid) = c._asciiArt(char2, show_internal, compact, attributes)
            mids.append(mid+len(result))
            result.extend(clines)
            if not compact:
                result.append('')
        if not compact:
            result.pop()
        # lo/hi: rows of the first and last child attachment points.
        (lo, hi, end) = (mids[0], mids[-1], len(result))
        prefixes = [PAD] * (lo+1) + [PA+'|'] * (hi-lo-1) + [PAD] * (end-hi)
        mid = int((lo + hi) / 2)
        # Draw the horizontal branch entering this node at row `mid`.
        prefixes[mid] = char1 + '-'*(LEN-2) + prefixes[mid][-1]
        result = [p+l for (p,l) in zip(prefixes, result)]
        if show_internal:
            stem = result[mid]
            # Overwrite part of the stem with this node's label.
            result[mid] = stem[0] + node_name + stem[len(node_name)+1:]
        return (result, mid)
    else:
        return ([char1 + '-' + node_name], 0)
def get_ascii(self, show_internal=True, compact=False, attributes=None):
    """
    Return a string containing an ascii drawing of the tree.

    :argument show_internal: includes internal edge names.
    :argument compact: use exactly one line per tip.
    :param attributes: A list of node attributes to shown in the
        ASCII representation.
    """
    lines, _ = self._asciiArt(show_internal=show_internal,
                              compact=compact, attributes=attributes)
    return '\n' + '\n'.join(lines)
def ladderize(self, direction=0):
    """
    .. versionadded: 2.1

    Sort the branches of the tree (swapping children nodes) according
    to the size of each partition. With ``direction=0`` the smallest
    partitions come first; with ``direction=1`` the order is reversed.

    :returns: the number of leaves under this node (used internally by
        the recursion).
    """
    if self.is_leaf():
        return 1

    child_sizes = {}
    for child in self.get_children():
        child_sizes[child] = child.ladderize(direction=direction)
    self.children.sort(key=lambda node: child_sizes[node])
    if direction == 1:
        self.children.reverse()
    return sum(child_sizes.values())
def sort_descendants(self, attr="name"):
    """
    .. versionadded: 2.1

    Sort the branches of the tree (swapping children) based on the
    node attribute given by `attr` ("name" by default). After sorting,
    trees containing the same set of labels will always present their
    branches in the same order, which is useful to obtain a canonical
    topology. Note that if duplicated attribute values are present,
    extra criteria would be needed to make the order fully
    deterministic.
    """
    # Cache, for every internal node, the list of `attr` values under it.
    node2content = self.get_cached_content(store_attr=attr, container_type=list)

    for n in self.traverse():
        if not n.is_leaf():
            # Order children by the sorted string representation of the
            # attribute values found below each of them.
            n.children.sort(key=lambda x: str(sorted(node2content[x])))
def get_cached_content(self, store_attr=None, container_type=set, _store=None):
    """
    .. versionadded: 2.2

    Return a dictionary mapping every node under this one to the
    preloaded content (leaf nodes, or one of their attributes) found
    below it. The dictionary is intended to work as a cache for
    operations that would otherwise require many tree traversals.

    :param None store_attr: node attribute to be cached (i.e. name,
        distance, etc.). When None, the leaf node instances themselves
        are cached.
    :param container_type: container class used per node (set or list).
    :param _store: (internal use)
    """
    store = {} if _store is None else _store

    # Post-order: fill the cache for every subtree first.
    for child in self.children:
        child.get_cached_content(store_attr=store_attr,
                                 container_type=container_type,
                                 _store=store)

    if not self.children:
        # Leaf: cache the node itself or the requested attribute value.
        leaf_value = self if store_attr is None else getattr(self, store_attr)
        store[self] = container_type([leaf_value])
    else:
        merged = container_type()
        for child in self.children:
            if type(merged) == list:
                merged.extend(store[child])
            if type(merged) == set:
                merged.update(store[child])
        store[self] = merged
    return store
def robinson_foulds(self, t2, attr_t1="name", attr_t2="name",
                    unrooted_trees=False, expand_polytomies=False,
                    polytomy_size_limit=5, skip_large_polytomies=False,
                    correct_by_polytomy_size=False, min_support_t1=0.0,
                    min_support_t2=0.0):
    """
    .. versionadded: 2.2

    Returns the Robinson-Foulds symmetric distance between current
    tree and a different tree instance.

    :param t2: reference tree

    :param name attr_t1: Compare trees using a custom node
                         attribute as a node name.

    :param name attr_t2: Compare trees using a custom node
                         attribute as a node name in target tree.

    :param False unrooted_trees: If True, consider trees as unrooted.

    :param False expand_polytomies: If True, all polytomies in the reference
       and target tree will be expanded into all possible binary
       trees. Robinson-foulds distance will be calculated between all
       tree combinations and the minimum value will be returned.
       See also, :func:`NodeTree.expand_polytomy`.

    :returns: a 7-element list
       (rf, rf_max, common_attrs, edges_t1, edges_t2,
       discarded_edges_t1, discarded_edges_t2)
    """
    ref_t = self
    target_t = t2
    if not unrooted_trees and (len(ref_t.children) > 2 or len(target_t.children) > 2):
        raise TreeError("Unrooted tree found! You may want to activate the unrooted_trees flag.")

    if expand_polytomies and correct_by_polytomy_size:
        raise TreeError("expand_polytomies and correct_by_polytomy_size are mutually exclusive.")

    if expand_polytomies and unrooted_trees:
        raise TreeError("expand_polytomies and unrooted_trees arguments cannot be enabled at the same time")

    # Only leaf labels present in BOTH trees are compared.
    attrs_t1 = set([getattr(n, attr_t1) for n in ref_t.iter_leaves() if hasattr(n, attr_t1)])
    attrs_t2 = set([getattr(n, attr_t2) for n in target_t.iter_leaves() if hasattr(n, attr_t2)])
    common_attrs = attrs_t1 & attrs_t2
    # release mem
    attrs_t1, attrs_t2 = None, None

    # Check for duplicated items (is it necessary? can we optimize? what's the impact in performance?')
    size1 = len([True for n in ref_t.iter_leaves() if getattr(n, attr_t1, None) in common_attrs])
    size2 = len([True for n in target_t.iter_leaves() if getattr(n, attr_t2, None) in common_attrs])
    if size1 > len(common_attrs):
        raise TreeError('Duplicated items found in source tree')
    if size2 > len(common_attrs):
        raise TreeError('Duplicated items found in reference tree')

    if expand_polytomies:
        # Compare every binary resolution of each tree and keep the best.
        ref_trees = [Tree(nw) for nw in
                     ref_t.expand_polytomies(map_attr=attr_t1,
                                             polytomy_size_limit=polytomy_size_limit,
                                             skip_large_polytomies=skip_large_polytomies)]
        target_trees = [Tree(nw) for nw in
                        target_t.expand_polytomies(map_attr=attr_t2,
                                                   polytomy_size_limit=polytomy_size_limit,
                                                   skip_large_polytomies=skip_large_polytomies)]
        attr_t1, attr_t2 = "name", "name"
    else:
        ref_trees = [ref_t]
        target_trees = [target_t]

    polytomy_correction = 0
    if correct_by_polytomy_size:
        # One tree (at most) may contain polytomies; subtract the number of
        # extra splits they would have contributed.
        corr1 = sum([0]+[len(n.children) - 2 for n in ref_t.traverse() if len(n.children) > 2])
        corr2 = sum([0]+[len(n.children) - 2 for n in target_t.traverse() if len(n.children) > 2])
        if corr1 and corr2:
            raise TreeError("Both trees contain polytomies! Try expand_polytomies=True instead")
        else:
            polytomy_correction = max([corr1, corr2])

    min_comparison = None
    for t1 in ref_trees:
        t1_content = t1.get_cached_content()
        t1_leaves = t1_content[t1]
        if unrooted_trees:
            # Unrooted: each edge is represented by the pair of leaf-label
            # tuples on both sides of the bipartition.
            edges1 = set([
                    tuple(sorted([tuple(sorted([getattr(n, attr_t1) for n in content if hasattr(n, attr_t1) and getattr(n, attr_t1) in common_attrs])),
                                  tuple(sorted([getattr(n, attr_t1) for n in t1_leaves-content if hasattr(n, attr_t1) and getattr(n, attr_t1) in common_attrs]))]))
                    for content in six.itervalues(t1_content)])
            edges1.discard(((),()))
        else:
            # Rooted: each edge is represented only by the labels below it.
            edges1 = set([
                    tuple(sorted([getattr(n, attr_t1) for n in content if hasattr(n, attr_t1) and getattr(n, attr_t1) in common_attrs]))
                    for content in six.itervalues(t1_content)])
            edges1.discard(())

        if min_support_t1:
            # Map each edge representation to its branch support value.
            support_t1 = dict([
                    (tuple(sorted([getattr(n, attr_t1) for n in content if hasattr(n, attr_t1) and getattr(n, attr_t1) in common_attrs])), branch.support)
                    for branch, content in six.iteritems(t1_content)])

        for t2 in target_trees:
            t2_content = t2.get_cached_content()
            t2_leaves = t2_content[t2]
            if unrooted_trees:
                edges2 = set([
                        tuple(sorted([
                                    tuple(sorted([getattr(n, attr_t2) for n in content if hasattr(n, attr_t2) and getattr(n, attr_t2) in common_attrs])),
                                    tuple(sorted([getattr(n, attr_t2) for n in t2_leaves-content if hasattr(n, attr_t2) and getattr(n, attr_t2) in common_attrs]))]))
                        for content in six.itervalues(t2_content)])
                edges2.discard(((),()))
            else:
                edges2 = set([
                        tuple(sorted([getattr(n, attr_t2) for n in content if hasattr(n, attr_t2) and getattr(n, attr_t2) in common_attrs]))
                        for content in six.itervalues(t2_content)])
                edges2.discard(())

            if min_support_t2:
                support_t2 = dict([
                        (tuple(sorted(([getattr(n, attr_t2) for n in content if hasattr(n, attr_t2) and getattr(n, attr_t2) in common_attrs]))), branch.support)
                        for branch, content in six.iteritems(t2_content)])

            # if a support value is passed as a constraint, discard lowly supported branches from the analysis
            discard_t1, discard_t2 = set(), set()
            if min_support_t1 and unrooted_trees:
                discard_t1 = set([p for p in edges1 if support_t1.get(p[0], support_t1.get(p[1], 999999999)) < min_support_t1])
            elif min_support_t1:
                discard_t1 = set([p for p in edges1 if support_t1[p] < min_support_t1])

            if min_support_t2 and unrooted_trees:
                discard_t2 = set([p for p in edges2 if support_t2.get(p[0], support_t2.get(p[1], 999999999)) < min_support_t2])
            elif min_support_t2:
                discard_t2 = set([p for p in edges2 if support_t2[p] < min_support_t2])

            #rf = len(edges1 ^ edges2) - (len(discard_t1) + len(discard_t2)) - polytomy_correction # poly_corr is 0 if the flag is not enabled
            #rf = len((edges1-discard_t1) ^ (edges2-discard_t2)) - polytomy_correction

            # the two root edges are never counted here, as they are always
            # present in both trees because of the common attr filters
            rf = len(((edges1 ^ edges2) - discard_t2) - discard_t1) - polytomy_correction

            if unrooted_trees:
                # thought this may work, but it does not, still I don't see why
                #max_parts = (len(common_attrs)*2) - 6 - len(discard_t1) - len(discard_t2)
                max_parts = (len([p for p in edges1 - discard_t1 if len(p[0])>1 and len(p[1])>1]) +
                             len([p for p in edges2 - discard_t2 if len(p[0])>1 and len(p[1])>1]))
            else:
                # thought this may work, but it does not, still I don't see why
                #max_parts = (len(common_attrs)*2) - 4 - len(discard_t1) - len(discard_t2)

                # Otherwise we need to count the actual number of valid
                # partitions in each tree -2 is to avoid counting the root
                # partition of the two trees (only needed in rooted trees)
                max_parts = (len([p for p in edges1 - discard_t1 if len(p)>1]) +
                             len([p for p in edges2 - discard_t2 if len(p)>1])) - 2

            # Keep the comparison with the smallest RF distance seen so far.
            if not min_comparison or min_comparison[0] > rf:
                min_comparison = [rf, max_parts, common_attrs, edges1, edges2, discard_t1, discard_t2]

    return min_comparison
def compare(self, ref_tree, use_collateral=False, min_support_source=0.0, min_support_ref=0.0,
            has_duplications=False, expand_polytomies=False, unrooted=False,
            max_treeko_splits_to_be_artifact=1000, ref_tree_attr='name', source_tree_attr='name'):
    """compare this tree with another using robinson foulds symmetric difference
    and number of shared edges. Trees of different sizes and with duplicated
    items allowed.

    :param ref_tree: reference tree to compare against.
    :param use_collateral: NOTE(review): accepted but never read in this
        implementation (only referenced in commented-out code below).
    :param min_support_source: discard source branches below this support.
    :param min_support_ref: discard reference branches below this support.
    :param has_duplications: if True, the source tree is split into
        speciation subtrees (TreeKO-style) and each is compared separately.
    :param expand_polytomies: forwarded to :func:`robinson_foulds`.
    :param unrooted: treat both trees as unrooted.
    :param max_treeko_splits_to_be_artifact: skip the analysis when the
        source tree splits into at least this many speciation subtrees.
    :param ref_tree_attr: node attribute used as label in the reference tree.
    :param source_tree_attr: node attribute used as label in this tree.

    returns: a Python dictionary with results
    """
    source_tree = self

    def _safe_div(a, b):
        # 0/x -> 0.0, also protecting the 0/0 case.
        if a != 0:
            return a / float(b)
        else: return 0.0

    def _compare(src_tree, ref_tree):
        # calculate partitions and rf distances
        rf, maxrf, common, ref_p, src_p, ref_disc, src_disc = ref_tree.robinson_foulds(src_tree,
                                                                                       expand_polytomies=expand_polytomies,
                                                                                       unrooted_trees=unrooted,
                                                                                       attr_t1=ref_tree_attr,
                                                                                       attr_t2=source_tree_attr,
                                                                                       min_support_t2=min_support_source,
                                                                                       min_support_t1=min_support_ref)

        # if trees share leaves, count their distances
        if len(common) > 0 and src_p and ref_p:
            if unrooted:
                valid_ref_edges = set([p for p in (ref_p - ref_disc) if len(p[0])>1 and len(p[1])>0])
                valid_src_edges = set([p for p in (src_p - src_disc) if len(p[0])>1 and len(p[1])>0])
                common_edges = valid_ref_edges & valid_src_edges
            else:
                valid_ref_edges = set([p for p in (ref_p - ref_disc) if len(p)>1])
                valid_src_edges = set([p for p in (src_p - src_disc) if len(p)>1])
                common_edges = valid_ref_edges & valid_src_edges
        else:
            valid_ref_edges = set()
            valid_src_edges = set()
            common_edges = set()

            # # % of ref edges found in tree
            # ref_found.append(float(len(p2 & p1)) / reftree_edges)

            # # valid edges in target, discard also leaves
            # p2bis = set([p for p in (p2-d2) if len(p[0])>1 and len(p[1])>1])
            # if p2bis:
            #     incompatible_target_branches = float(len((p2-d2) - p1))
            #     target_found.append(1 - (incompatible_target_branches / (len(p2-d2))))

        return rf, maxrf, len(common), valid_ref_edges, valid_src_edges, common_edges

    # NOTE(review): computed but never used below — kept for compatibility.
    total_valid_ref_edges = len([n for n in ref_tree.traverse() if n.children and n.support > min_support_ref])
    result = {}
    if has_duplications:
        # NOTE(review): orig_target_size and common_names are also unused.
        orig_target_size = len(source_tree)
        ntrees, ndups, sp_trees = source_tree.get_speciation_trees(
            autodetect_duplications=True, newick_only=True,
            target_attr=source_tree_attr, map_features=[source_tree_attr, "support"])

        if ntrees < max_treeko_splits_to_be_artifact:
            all_rf = []
            ref_found = []
            src_found = []
            tree_sizes = []
            all_max_rf = []
            common_names = 0

            for subtree_nw in sp_trees:
                #if seedid and not use_collateral and (seedid not in subtree_nw):
                #    continue
                subtree = source_tree.__class__(subtree_nw, sp_naming_function = source_tree._speciesFunction)
                if not subtree.children:
                    continue

                # only necessary if rf function is going to filter by support
                # value. It slows downs the analysis, obviously, as it has to
                # find the support for each node in the treeko tree from the
                # original one.
                if min_support_source > 0:
                    subtree_content = subtree.get_cached_content(store_attr='name')
                    for n in subtree.traverse():
                        if n.children:
                            n.support = source_tree.get_common_ancestor(subtree_content[n]).support

                total_rf, max_rf, ncommon, valid_ref_edges, valid_src_edges, common_edges = _compare(subtree, ref_tree)

                all_rf.append(total_rf)
                all_max_rf.append(max_rf)
                tree_sizes.append(ncommon)

                if unrooted:
                    ref_found_in_src = len(common_edges)/float(len(valid_ref_edges)) if valid_ref_edges else None
                    src_found_in_ref = len(common_edges)/float(len(valid_src_edges)) if valid_src_edges else None
                else:
                    # in rooted trees, we want to discount the root edge
                    # from the percentage of congruence. Otherwise we will never see a 0%
                    # congruence for totally different trees
                    ref_found_in_src = (len(common_edges)-1)/float(len(valid_ref_edges)-1) if len(valid_ref_edges)>1 else None
                    src_found_in_ref = (len(common_edges)-1)/float(len(valid_src_edges)-1) if len(valid_src_edges)>1 else None

                if ref_found_in_src is not None:
                    ref_found.append(ref_found_in_src)

                if src_found_in_ref is not None:
                    src_found.append(src_found_in_ref)

            if all_rf:
                # Treeko speciation distance
                alld = [_safe_div(all_rf[i], float(all_max_rf[i])) for i in range(len(all_rf))]
                a = sum([alld[i] * tree_sizes[i] for i in range(len(all_rf))])
                b = float(sum(tree_sizes))
                treeko_d = a/b if a else 0.0
                result["treeko_dist"] = treeko_d

                result["rf"] = utils.mean(all_rf)
                result["max_rf"] = max(all_max_rf)
                result["effective_tree_size"] = utils.mean(tree_sizes)
                result["norm_rf"] = utils.mean([_safe_div(all_rf[i], float(all_max_rf[i])) for i in range(len(all_rf))])
                result["ref_edges_in_source"] = utils.mean(ref_found)
                result["source_edges_in_ref"] = utils.mean(src_found)
                result["source_subtrees"] = len(all_rf)
                result["common_edges"] = set()
                result["source_edges"] = set()
                result["ref_edges"] = set()
    else:
        total_rf, max_rf, ncommon, valid_ref_edges, valid_src_edges, common_edges = _compare(source_tree, ref_tree)

        result["rf"] = float(total_rf) if max_rf else "NA"
        result["max_rf"] = float(max_rf)
        if unrooted:
            result["ref_edges_in_source"] = len(common_edges)/float(len(valid_ref_edges)) if valid_ref_edges else "NA"
            result["source_edges_in_ref"] = len(common_edges)/float(len(valid_src_edges)) if valid_src_edges else "NA"
        else:
            # in rooted trees, we want to discount the root edge from the
            # percentage of congruence. Otherwise we will never see a 0%
            # congruence for totally different trees
            result["ref_edges_in_source"] = (len(common_edges)-1)/float(len(valid_ref_edges)-1) if len(valid_ref_edges)>1 else "NA"
            result["source_edges_in_ref"] = (len(common_edges)-1)/float(len(valid_src_edges)-1) if len(valid_src_edges)>1 else "NA"

        result["effective_tree_size"] = ncommon
        result["norm_rf"] = total_rf/float(max_rf) if max_rf else "NA"
        result["treeko_dist"] = "NA"
        result["source_subtrees"] = 1
        result["common_edges"] = common_edges
        result["source_edges"] = valid_src_edges
        result["ref_edges"] = valid_ref_edges

    return result
def _diff(self, t2, output='topology', attr_t1='name', attr_t2='name', color=True):
    """
    .. versionadded:: 2.3

    Show or return the difference between two tree topologies.

    :param t2: the target tree to compare against.
    :param [raw|table|topology|diffs|diffs_tab] output: Output type
    :param attr_t1: node attribute used as label in this tree.
    :param attr_t2: node attribute used as label in the target tree.
    :param color: use ANSI colors in the "topology" report.
    :returns: the raw difftable when `output` is not one of the known
        report types; otherwise the report is printed and None returned.
    """
    from ..tools import ete_diff
    difftable = ete_diff.treediff(self, t2, attr1=attr_t1, attr2=attr_t2)
    if output == "topology":
        ete_diff.show_difftable_topo(difftable, attr_t1, attr_t2, usecolor=color)
    elif output == "diffs":
        ete_diff.show_difftable(difftable)
    elif output == "diffs_tab":
        ete_diff.show_difftable_tab(difftable)
    elif output == 'table':
        # BUG FIX: robinson_foulds() returns 7 elements; the previous code
        # sliced the result to 2 elements but unpacked 7 names, which raised
        # ValueError unconditionally in this branch.
        rf, rf_max = self.robinson_foulds(t2, attr_t1=attr_t1, attr_t2=attr_t2)[:2]
        ete_diff.show_difftable_summary(difftable, rf, rf_max)
    else:
        return difftable
def iter_edges(self, cached_content = None):
    '''
    .. versionadded:: 2.3

    Iterate over the edges of the tree. Each edge is yielded as a
    tuple of two elements, each containing the set of leaf nodes on
    one side of the edge.
    '''
    if not cached_content:
        cached_content = self.get_cached_content()
    all_leaves = cached_content[self]
    for side1 in six.itervalues(cached_content):
        yield (side1, all_leaves - side1)
def get_edges(self, cached_content = None):
    '''
    .. versionadded:: 2.3

    Return the list of edges of the tree. Each edge is a tuple of two
    elements, each containing the set of leaf nodes on one side of
    the edge.
    '''
    return list(self.iter_edges(cached_content))
def standardize(self, delete_orphan=True, preserve_branch_length=True):
    """
    .. versionadded:: 2.3

    Process the current tree structure to produce a standardized
    topology: nodes with a single child are removed and
    multifurcations are automatically resolved.
    """
    self.resolve_polytomy()

    for node in self.get_descendants():
        if len(node.children) == 1:
            node.delete(prevent_nondicotomic=True,
                        preserve_branch_length=preserve_branch_length)
def get_topology_id(self, attr="name"):
    '''
    .. versionadded:: 2.3

    Return a unique identifier representing the topology of the
    current tree: two trees with the same topology produce the same
    id. If trees are unrooted, make sure that the root node is not
    binary or use the tree.unroot() function before generating the
    topology id.

    This is useful to detect the number of unique topologies over a
    bunch of trees, without requiring full distance methods.

    The id is, by default, calculated based on the terminal node's
    names. Any other node attribute could be used instead.
    '''
    edge_keys = []
    for partition in self.get_edges():
        side_labels = [sorted(getattr(node, attr) for node in side)
                       for side in partition]
        edge_keys.append(sorted(side_labels))
    return md5(str(sorted(edge_keys)).encode('utf-8')).hexdigest()
# def get_partitions(self):
# """
# .. versionadded: 2.1
# It returns the set of all possible partitions under a
# node. Note that current implementation is quite inefficient
# when used in very large trees.
# t = Tree("((a, b), e);")
# partitions = t.get_partitions()
# # Will return:
# # a,b,e
# # a,e
# # b,e
# # a,b
# # e
# # b
# # a
# """
# all_leaves = frozenset(self.get_leaf_names())
# all_partitions = set([all_leaves])
# for n in self.iter_descendants():
# p1 = frozenset(n.get_leaf_names())
# p2 = frozenset(all_leaves - p1)
# all_partitions.add(p1)
# all_partitions.add(p2)
# return all_partitions
def convert_to_ultrametric(self, tree_length=None, strategy='balanced'):
    """
    .. versionadded: 2.1

    Converts a tree into ultrametric topology (all leaves must have
    the same distance to root). Note that, for visual inspection
    of ultrametric trees, node.img_style["size"] should be set to 0.

    :param tree_length: total root-to-leaf distance of the resulting
        tree. If not provided, the current distance to the farthest
        leaf is used.
    :param strategy: "balanced" divides the remaining distance evenly
        over the deepest chain of splits below each node; "fixed"
        assigns the same step to every internal branch and stretches
        terminal branches so every leaf reaches `tree_length`.
    """
    # pre-calculate how many splits remain under each node
    node2max_depth = {}
    for node in self.traverse("postorder"):
        if not node.is_leaf():
            max_depth = max([node2max_depth[c] for c in node.children]) + 1
            node2max_depth[node] = max_depth
        else:
            node2max_depth[node] = 1
    node2dist = {self: 0.0}
    if not tree_length:
        most_distant_leaf, tree_length = self.get_farthest_leaf()
    else:
        tree_length = float(tree_length)

    step = tree_length / node2max_depth[self]
    for node in self.iter_descendants("levelorder"):
        if strategy == "balanced":
            # Spread what remains of tree_length evenly over the deepest
            # chain of splits below this node.
            node.dist = (tree_length - node2dist[node.up]) / node2max_depth[node]
            node2dist[node] = node.dist + node2dist[node.up]
        elif strategy == "fixed":
            if not node.is_leaf():
                node.dist = step
            else:
                # Terminal branches absorb whatever length is left.
                node.dist = tree_length - ((node2dist[node.up]) * step)
            # Here node2dist tracks depth in steps, not actual distance.
            node2dist[node] = node2dist[node.up] + 1
        # NOTE(review): a redundant "node.dist = node.dist" self-assignment
        # that had no effect was removed from the end of this loop.
def check_monophyly(self, values, target_attr, ignore_missing=False,
                    unrooted=False):
    """
    .. versionadded: 2.2

    Returns True if a given target attribute is monophyletic under
    this node for the provided set of values.

    If not all values are represented in the current tree
    structure, a ValueError exception will be raised to warn that
    strict monophyly could never be reached (this behaviour can be
    avoided by enabling the `ignore_missing` flag.

    :param values: a set of values for which monophyly is
        expected.

    :param target_attr: node attribute being used to check
        monophyly (i.e. species for species trees, names for gene
        family trees, or any custom feature present in the tree).

    :param False ignore_missing: Avoid raising an Exception when
        missing attributes are found.

    .. versionchanged: 2.3

    :param False unrooted: If True, tree will be treated as unrooted, thus
        allowing to find monophyly even when current outgroup is spliting a
        monophyletic group.

    :returns: the following tuple
                IsMonophyletic (boolean),
                clade type ('monophyletic', 'paraphyletic' or 'polyphyletic'),
                leaves breaking the monophyly (set)
    """
    if type(values) != set:
        values = set(values)

    # This is the only time I traverse the tree, then I use cached
    # leaf content
    n2leaves = self.get_cached_content()

    # Raise an error if requested attribute values are not even present
    if ignore_missing:
        found_values = set([getattr(n, target_attr) for n in n2leaves[self]])
        missing_values = values - found_values
        # Restrict the check to the values actually present in the tree.
        values = values & found_values

    # Locate leaves matching requested attribute values
    targets = set([leaf for leaf in n2leaves[self]
                   if getattr(leaf, target_attr) in values])
    if not ignore_missing:
        if values - set([getattr(leaf, target_attr) for leaf in targets]):
            raise ValueError('The monophyly of the provided values could never be reached, as not all of them exist in the tree.'
                             ' Please check your target attribute and values, or set the ignore_missing flag to True')

    if unrooted:
        # Find the smallest bipartition side that contains all targets;
        # monophyly holds when that side contains nothing else.
        smallest = None
        for side1, side2 in self.iter_edges(cached_content=n2leaves):
            if targets.issubset(side1) and (not smallest or len(side1) < len(smallest)):
                smallest = side1
            elif targets.issubset(side2) and (not smallest or len(side2) < len(smallest)):
                smallest = side2
            if smallest is not None and len(smallest) == len(targets):
                # Cannot get smaller than the target set itself.
                break
        foreign_leaves = smallest - targets
    else:
        # Check monophyly with get_common_ancestor. Note that this
        # step does not require traversing the tree again because
        # targets are node instances instead of node names, and
        # get_common_ancestor function is smart enough to detect it
        # and avoid unnecessary traversing.
        common = self.get_common_ancestor(targets)
        observed = n2leaves[common]
        foreign_leaves = set([leaf for leaf in observed
                              if getattr(leaf, target_attr) not in values])

    if not foreign_leaves:
        return True, "monophyletic", foreign_leaves
    else:
        # if the requested attribute is not monophyletic in this
        # node, let's differentiate between poly and paraphyly.
        poly_common = self.get_common_ancestor(foreign_leaves)
        # if the common ancestor of all foreign leaves is self
        # contained, we have a paraphyly. Otherwise, polyphyly.
        polyphyletic = [leaf for leaf in poly_common if
                        getattr(leaf, target_attr) in values]
        if polyphyletic:
            return False, "polyphyletic", foreign_leaves
        else:
            return False, "paraphyletic", foreign_leaves
def get_monophyletic(self, values, target_attr):
    """
    .. versionadded:: 2.2

    Yield the nodes matching the provided monophyly criteria. For a
    node to be considered a match, all `target_attr` values within
    the node, and exclusively them, should be grouped.

    :param values: a set of values for which monophyly is expected.

    :param target_attr: node attribute being used to check monophyly
        (i.e. species for species trees, names for gene family trees).
    """
    if type(values) != set:
        values = set(values)

    n2values = self.get_cached_content(store_attr=target_attr)

    def matches(node):
        # A node matches when the attribute values below it are exactly
        # the requested set.
        return n2values[node] == values

    for candidate in self.iter_leaves(is_leaf_fn=matches):
        if matches(candidate):
            yield candidate
def expand_polytomies(self, map_attr="name", polytomy_size_limit=5,
                      skip_large_polytomies=False):
    '''
    .. versionadded:: 2.3

    Given a tree with one or more polytomies, this functions returns the
    list of all trees (in newick format) resulting from the combination of
    all possible solutions of the multifurcated nodes.

    .. warning:

       Please note that the number of of possible binary trees grows
       exponentially with the number and size of polytomies. Using this
       function with large multifurcations is not feasible:

       polytomy size: 3 number of binary trees: 3
       polytomy size: 4 number of binary trees: 15
       polytomy size: 5 number of binary trees: 105
       polytomy size: 6 number of binary trees: 945
       polytomy size: 7 number of binary trees: 10395
       polytomy size: 8 number of binary trees: 135135
       polytomy size: 9 number of binary trees: 2027025

       http://ajmonline.org/2010/darwin.php
    '''
    class TipTuple(tuple):
        # Marker type: a fully-resolved subtree that add_leaf() must not
        # decompose any further.
        pass

    def add_leaf(tree, label):
        # Yield every tree obtained by attaching `label` onto each edge
        # of `tree`.
        yield (label, tree)
        if not isinstance(tree, TipTuple) and isinstance(tree, tuple):
            for left in add_leaf(tree[0], label):
                yield (left, tree[1])
            for right in add_leaf(tree[1], label):
                yield (tree[0], right)

    def enum_unordered(labels):
        # Enumerate all unordered binary trees over `labels`.
        if len(labels) == 1:
            yield labels[0]
        else:
            for tree in enum_unordered(labels[1:]):
                for new_tree in add_leaf(tree, labels[0]):
                    yield new_tree

    n2subtrees = {}
    for n in self.traverse("postorder"):
        if n.is_leaf():
            subtrees = [getattr(n, map_attr)]
        else:
            subtrees = []
            if len(n.children) > polytomy_size_limit:
                if skip_large_polytomies:
                    # Keep the polytomy as-is: freeze each combination of
                    # child solutions without enumerating resolutions.
                    for childtrees in itertools.product(*[n2subtrees[ch] for ch in n.children]):
                        subtrees.append(TipTuple(childtrees))
                else:
                    raise TreeError("Found polytomy larger than current limit: %s" %n)
            else:
                # Combine every solution of every child and enumerate all
                # binary resolutions of this node.
                for childtrees in itertools.product(*[n2subtrees[ch] for ch in n.children]):
                    subtrees.extend([TipTuple(subtree) for subtree in enum_unordered(childtrees)])

        n2subtrees[n] = subtrees
    return ["%s;"%str(nw) for nw in n2subtrees[self]] # tuples are in newick format ^_^
def resolve_polytomy(self, default_dist=0.0, default_support=0.0,
                     recursive=True):
    """
    .. versionadded: 2.2

    Resolve all polytomies under current node by creating an
    arbitrary dicotomic structure among the affected nodes. This
    function randomly modifies current tree topology and should
    only be used for compatibility reasons (i.e. programs
    rejecting multifurcated node in the newick representation).

    :param 0.0 default_dist: artificial branch distance of new
        nodes.

    :param 0.0 default_support: artificial branch support of new
        nodes.

    :param True recursive: Resolve any polytomy under this
        node. When False, only current node will be checked and fixed.
    """
    def _resolve(node):
        # Replace an N-furcation (N > 2) by a "caterpillar" chain of
        # new binary nodes and re-hang the original children on it.
        if len(node.children) > 2:
            children = list(node.children)
            node.children = []
            next_node = root = node
            for i in range(len(children)-2):
                next_node = next_node.add_child()
                next_node.dist = default_dist
                next_node.support = default_support

            next_node = root
            for ch in children:
                next_node.add_child(ch)
                # Descend one level after each attachment, except for the
                # last two children, which share the deepest new node.
                if ch != children[-2]:
                    next_node = next_node.children[0]
    target = [self]
    if recursive:
        target.extend([n for n in self.get_descendants()])
    for n in target:
        _resolve(n)
def add_face(self, face, column, position="branch-right"):
    """
    .. versionadded: 2.1

    Attach a fixed face to the node. Fixed faces are always drawn on
    the node, independently of the layout function.

    :argument face: a Face or inherited instance
    :argument column: An integer number starting from 0
    :argument "branch-right" position: Posible values are:
      "branch-right", "branch-top", "branch-bottom", "float",
      "aligned"
    """
    if not hasattr(self, "_faces"):
        self._faces = _FaceAreas()

    if position not in FACE_POSITIONS:
        raise ValueError("face position not in %s" %FACE_POSITIONS)

    if not isinstance(face, Face):
        raise ValueError("not a Face instance")
    getattr(self._faces, position).add_face(face, column=column)
def set_style(self, node_style):
    """
    .. versionadded: 2.1

    Set 'node_style' as the fixed style for the current node.
    """
    if not TREEVIEW:
        raise ValueError("Treeview module is disabled")
    if node_style is None:
        node_style = NodeStyle()
    if type(node_style) is NodeStyle:
        self._img_style = node_style
    # NOTE(review): values of any other type are silently ignored,
    # matching historical behavior.
@staticmethod
def from_parent_child_table(parent_child_table):
    """Converts a parent-child table into an ETE Tree instance.

    :argument parent_child_table: a list of tuples containing parent-child
       relationsships. For example: [("A", "B", 0.1), ("A", "C", 0.2), ("C",
       "D", 1), ("C", "E", 1.5)]. Where each tuple represents: [parent, child,
       child-parent-dist]

    :returns: A new Tree instance

    :raises ValueError: if the table is empty.

    :example:

    >>> tree = Tree.from_parent_child_table([("A", "B", 0.1), ("A", "C", 0.2), ("C", "D", 1), ("C", "E", 1.5)])
    >>> print(tree)

    """
    if not parent_child_table:
        # An empty table used to crash with a confusing NameError on the
        # final get_tree_root() call; fail early with a clear message.
        raise ValueError("parent_child_table is empty")

    def get_node(nodename, dist=None):
        # Create the node on first sight; later rows may update its dist.
        if nodename not in nodes_by_name:
            nodes_by_name[nodename] = Tree(name=nodename, dist=dist)
        node = nodes_by_name[nodename]
        if dist is not None:
            node.dist = dist
        return node

    nodes_by_name = {}
    for columns in parent_child_table:
        if len(columns) == 3:
            parent_name, child_name, distance = columns
            dist = float(distance)
        else:
            parent_name, child_name = columns
            dist = None
        parent = get_node(parent_name)
        parent.add_child(get_node(child_name, dist=dist))

    # Any node reaches the same root; use the last parent processed.
    return parent.get_tree_root()
@staticmethod
def from_skbio(skbio_tree, map_attributes=None):
    """Converts a scikit-bio TreeNode object into ETE Tree object.

    :argument skbio_tree: a scikit bio TreeNode instance

    :argument None map_attributes: A list of attribute nanes in the
       scikit-bio tree that should be mapped into the ETE tree
       instance. (name, id and branch length are always mapped)

    :returns: A new Tree instance

    :raises ValueError: if `skbio_tree` is not a scikit-bio TreeNode.

    :example:

    >>> tree = Tree.from_skbio(skbioTree, map_attributes=["value"])

    """
    from skbio import TreeNode as skbioTreeNode

    def get_ete_node(skbio_node):
        # Reuse the ETE node if this skbio node was converted already.
        ete_node = all_nodes.get(skbio_node, Tree())
        if skbio_node.length is not None:
            ete_node.dist = float(skbio_node.length)
        ete_node.name = skbio_node.name
        ete_node.add_features(id=skbio_node.id)
        if map_attributes:
            for a in map_attributes:
                ete_node.add_feature(a, getattr(skbio_node, a, None))
        return ete_node

    if not isinstance(skbio_tree, skbioTreeNode):
        # The previous version silently returned None for wrong input.
        raise ValueError("a scikit-bio TreeNode instance is expected")

    all_nodes = {}
    for node in skbio_tree.preorder(include_self=True):
        all_nodes[node] = get_ete_node(node)
        ete_node = all_nodes[node]
        for ch in node.children:
            ete_ch = get_ete_node(ch)
            ete_node.add_child(ete_ch)
            all_nodes[ch] = ete_ch
    # BUG FIX: the previous version returned through the inner loop
    # variable (ete_ch), raising NameError for single-node trees; return
    # the converted root instead.
    return all_nodes[skbio_tree].get_tree_root()
def phonehome(self):
    """Report anonymous usage statistics via the package's ``_ph`` helper.

    NOTE(review): this performs an outbound call through ``_ph.call()``;
    confirm callers invoke it only with user consent.
    """
    from .. import _ph
    _ph.call()
def _translate_nodes(root, *nodes):
name2node = dict([ [n, None] for n in nodes if type(n) is str])
for n in root.traverse():
if n.name in name2node:
if name2node[n.name] is not None:
raise TreeError("Ambiguous node name: "+str(n.name))
else:
name2node[n.name] = n
if None in list(name2node.values()):
notfound = [key for key, value in six.iteritems(name2node) if value is None]
raise ValueError("Node names not found: "+str(notfound))
valid_nodes = []
for n in nodes:
if type(n) is not str:
if type(n) is not root.__class__ :
raise TreeError("Invalid target node: "+str(n))
else:
valid_nodes.append(n)
valid_nodes.extend(list(name2node.values()))
if len(valid_nodes) == 1:
return valid_nodes[0]
else:
return valid_nodes
# Alias: the public name ``Tree`` is the very same class as ``TreeNode``.
#: .. currentmodule:: ete3
Tree = TreeNode
|
karrtikr/ete
|
ete3/coretype/tree.py
|
Python
|
gpl-3.0
| 91,899
|
[
"scikit-bio"
] |
40340d0dc54114da4c26d4e6bd71788c53a669b1c54adc5fc0b2928019d065cc
|
import simtk.openmm as openmm
import simtk.unit as unit
import mdtraj as md
import numpy as np
import copy
import enum
# Labels for the four interaction classes used when assigning hybrid-system
# force terms (see the HybridTopologyFactory docstring for their meaning).
InteractionGroup = enum.Enum("InteractionGroup", ['unique_old', 'unique_new', 'core', 'environment'])

####### LOGGING #############################
# Module-level logger; NOTSET at the root handler, INFO for this module.
import logging
logging.basicConfig(level = logging.NOTSET)
_logger = logging.getLogger("relative")
_logger.setLevel(logging.INFO)
###########################################
class HybridTopologyFactory(object):
    """
    This class generates a hybrid topology based on a perses topology proposal. This class treats atoms
    in the resulting hybrid system as being from one of four classes:

    unique_old_atom : these atoms are not mapped and only present in the old system. Their interactions will be on for
        lambda=0, off for lambda=1
    unique_new_atom : these atoms are not mapped and only present in the new system. Their interactions will be off
        for lambda=0, on for lambda=1
    core_atom : these atoms are mapped, and are part of a residue that is changing. Their interactions will be those
        corresponding to the old system at lambda=0, and those corresponding to the new system at lambda=1
    environment_atom : these atoms are mapped, and are not part of a changing residue. Their interactions are always
        on and are alchemically unmodified.

    Properties
    ----------
    hybrid_system : openmm.System
        The hybrid system for simulation
    new_to_hybrid_atom_map : dict of int : int
        The mapping of new system atoms to hybrid atoms
    old_to_hybrid_atom_map : dict of int : int
        The mapping of old system atoms to hybrid atoms
    hybrid_positions : [n, 3] np.ndarray
        The positions of the hybrid system
    hybrid_topology : mdtraj.Topology
        The topology of the hybrid system
    omm_hybrid_topology : openmm.app.Topology
        The OpenMM topology object corresponding to the hybrid system

    .. warning :: This API is experimental and subject to change.
    """

    # Force names this factory knows how to translate into hybrid forces;
    # any other force in the old/new systems raises ValueError in __init__.
    _known_forces = {'HarmonicBondForce', 'HarmonicAngleForce', 'PeriodicTorsionForce', 'NonbondedForce', 'MonteCarloBarostat'}
def __init__(self,
             topology_proposal,
             current_positions,
             new_positions,
             use_dispersion_correction=False,
             functions=None,
             softcore_alpha=None,
             bond_softening_constant=1.0,
             angle_softening_constant=1.0,
             soften_only_new=False,
             neglected_new_angle_terms=[],
             neglected_old_angle_terms=[],
             softcore_LJ_v2=True,
             softcore_electrostatics=True,
             softcore_LJ_v2_alpha=0.85,
             softcore_electrostatics_alpha=0.3,
             softcore_sigma_Q=1.0,
             interpolate_old_and_new_14s=False,
             omitted_terms=None,
             flatten_torsions=False,
             rmsd_restraint=False,
             impose_virtual_bonds=True,
             endstate=None,
             **kwargs):
    """
    Initialize the Hybrid topology factory.

    Parameters
    ----------
    topology_proposal : perses.rjmc.topology_proposal.TopologyProposal object
        TopologyProposal object rendered by the ProposalEngine
    current_positions : [n,3] np.ndarray of float
        The positions of the "old system"
    new_positions : [m,3] np.ndarray of float
        The positions of the "new system"
    use_dispersion_correction : bool, default False
        Whether to use the long range correction in the custom sterics force. This is very expensive for NCMC.
    functions : dict, default None
        Alchemical functions that determine how each force is scaled with lambda. The keys must be strings with
        names beginning with ``lambda_`` and ending with each of bonds, angles, torsions, sterics, electrostatics.
        If functions is none, then the integrator will need to set each of these and parameter derivatives will be unavailable.
        If functions is not None, all lambdas must be specified.
    softcore_alpha : float, default None
        "alpha" parameter of softcore sterics. If None is provided, value will be set to 0.5
    bond_softening_constant : float
        For bonds between unique atoms and unique-core atoms, soften the force constant at the "dummy" endpoint by this factor.
        If 1.0, do not soften
    angle_softening_constant : float
        For angles between unique atoms and unique-core atoms, soften the force constant at the "dummy" endpoint by this factor.
        If 1.0, do not soften
    neglected_new_angle_terms : list
        list of indices from the HarmonicAngleForce of the new_system for which the geometry engine neglected.
        Hence, these angles must be alchemically grown in for the unique new atoms (forward lambda protocol)
    neglected_old_angle_terms : list
        list of indices from the HarmonicAngleForce of the old_system for which the geometry engine neglected.
        Hence, these angles must be alchemically deleted for the unique old atoms (reverse lambda protocol)
    softcore_LJ_v2 : bool, default True
        implement a new softcore LJ: citation below.
        Gapsys, Vytautas, Daniel Seeliger, and Bert L. de Groot. "New soft-core potential function for molecular dynamics based alchemical free energy calculations." Journal of chemical theory and computation 8.7 (2012): 2373-2382.
    softcore_electrostatics : bool, default True
        softcore electrostatics: citation below.
        Gapsys, Vytautas, Daniel Seeliger, and Bert L. de Groot. "New soft-core potential function for molecular dynamics based alchemical free energy calculations." Journal of chemical theory and computation 8.7 (2012): 2373-2382.
    softcore_LJ_v2_alpha : float, default 0.85
        softcore alpha parameter for LJ v2
    softcore_electrostatics_alpha : float, default 0.3
        softcore alpha parameter for softcore electrostatics.
    softcore_sigma_Q : float, default 1.0
        softcore sigma parameter for softcore electrostatics.
    interpolate_old_and_new_14s : bool, default False
        whether to turn off interactions for new exceptions (not just 1,4s) at lambda = 0 and old exceptions at lambda = 1; if False, they are present in the nonbonded force
    omitted_terms : dict
        dictionary of terms (by new topology index) that must be annealed in over a lambda protocol
    rmsd_restraint : bool, optional, default=False
        If True, impose an RMSD restraint between core heavy atoms and protein CA atoms
    impose_virtual_bonds : bool, optional, default=True
        If True, impose virtual bonds to ensure components of the system are imaged together
    flatten_torsions : bool, default False
        if True, torsion terms involving `unique_new_atoms` will be scaled such that at lambda=0,1, the torsion term is turned off/on respectively
        the opposite is true for `unique_old_atoms`.
    endstate : int
        the lambda endstate to parameterize. should always be None for HybridTopologyFactory, but must be 0 or 1 for the RepartitionedHybridTopologyFactory

    TODO: Document how positions for hybrid system are constructed
    TODO: allow support for annealing in omitted terms
    """
    # NOTE(review): the list defaults above are shared mutable defaults; the
    # code below only reads them (never mutates), so this is benign — confirm
    # before refactoring.
    if endstate == 0 or endstate == 1:
        raise Exception("endstate must be none! Aborting!")
    elif endstate is None:
        _logger.info("*** Generating vanilla HybridTopologyFactory ***")

    _logger.info("Beginning nonbonded method, total particle, barostat, and exceptions retrieval...")
    self._topology_proposal = topology_proposal
    # Deep-copy the endpoint systems so the factory never mutates the proposal.
    self._old_system = copy.deepcopy(topology_proposal.old_system)
    self._new_system = copy.deepcopy(topology_proposal.new_system)
    self._old_to_hybrid_map = {}
    self._new_to_hybrid_map = {}
    self._hybrid_system_forces = dict()
    self._old_positions = current_positions
    self._new_positions = new_positions
    self._soften_only_new = soften_only_new
    self._interpolate_14s = interpolate_old_and_new_14s
    self.omitted_terms = omitted_terms
    self._flatten_torsions = flatten_torsions

    if self._flatten_torsions:
        _logger.info("Flattening torsions of unique new/old at lambda = 0/1")
    if self._interpolate_14s:
        _logger.info("Flattening exceptions of unique new/old at lambda = 0/1")
    if omitted_terms is not None:
        raise Exception(f"annealing of omitted terms is not currently supported. Aborting!")

    # New attributes from the modified geometry engine
    if neglected_old_angle_terms:
        self.neglected_old_angle_terms = neglected_old_angle_terms
    else:
        self.neglected_old_angle_terms = []
    if neglected_new_angle_terms:
        self.neglected_new_angle_terms = neglected_new_angle_terms
    else:
        self.neglected_new_angle_terms = []

    # Softening is only active when the constant differs from 1.0.
    if bond_softening_constant != 1.0:
        self._bond_softening_constant = bond_softening_constant
        self._soften_bonds = True
    else:
        self._soften_bonds = False
    if angle_softening_constant != 1.0:
        self._angle_softening_constant = angle_softening_constant
        self._soften_angles = True
    else:
        self._soften_angles = False

    self._use_dispersion_correction = use_dispersion_correction
    self._softcore_LJ_v2 = softcore_LJ_v2
    if self._softcore_LJ_v2:
        self._softcore_LJ_v2_alpha = softcore_LJ_v2_alpha
        assert self._softcore_LJ_v2_alpha >= 0.0 and self._softcore_LJ_v2_alpha <= 1.0, f"softcore_LJ_v2_alpha: ({self._softcore_LJ_v2_alpha}) is not in [0,1]"
    self._softcore_electrostatics = softcore_electrostatics
    if self._softcore_electrostatics:
        self._softcore_electrostatics_alpha = softcore_electrostatics_alpha
        self._softcore_sigma_Q = softcore_sigma_Q
        assert self._softcore_electrostatics_alpha >= 0.0 and self._softcore_electrostatics_alpha <= 1.0, f"softcore_electrostatics_alpha: ({self._softcore_electrostatics_alpha}) is not in [0,1]"
        assert self._softcore_sigma_Q >= 0.0 and self._softcore_sigma_Q <= 1.0, f"softcore_sigma_Q : {self._softcore_sigma_Q} is not in [0, 1]"

    if softcore_alpha is None:
        self.softcore_alpha = 0.5
    else:
        # TODO: Check that softcore_alpha is in a valid range
        self.softcore_alpha = softcore_alpha

    if functions:
        self._functions = functions
        self._has_functions = True
    else:
        self._has_functions = False

    # Prepare dicts of forces, which will be useful later
    # TODO: Store this as self._system_forces[name], name in ('old', 'new', 'hybrid') for compactness
    self._old_system_forces = {type(force).__name__ : force for force in self._old_system.getForces()}
    self._new_system_forces = {type(force).__name__ : force for force in self._new_system.getForces()}
    _logger.info(f"Old system forces: {self._old_system_forces.keys()}")
    _logger.info(f"New system forces: {self._new_system_forces.keys()}")

    # Check that there are no unknown forces in the new and old systems:
    for system_name in ('old', 'new'):
        force_names = getattr(self, '_{}_system_forces'.format(system_name)).keys()
        unknown_forces = set(force_names) - set(self._known_forces)
        if len(unknown_forces) > 0:
            raise ValueError(f"Unknown forces {unknown_forces} encountered in {system_name} system")
    _logger.info("No unknown forces.")

    # Get and store the nonbonded method from the system:
    self._nonbonded_method = self._old_system_forces['NonbondedForce'].getNonbondedMethod()
    _logger.info(f"Nonbonded method to be used (i.e. from old system): {self._nonbonded_method}")

    # Start by creating an empty system. This will become the hybrid system.
    self._hybrid_system = openmm.System()

    # Begin by copying all particles in the old system to the hybrid system. Note that this does not copy the
    # interactions. It does, however, copy the particle masses. In general, hybrid index and old index should be
    # the same.
    # TODO: Refactor this into self._add_particles()
    _logger.info("Adding and mapping old atoms to hybrid system...")
    for particle_idx in range(self._topology_proposal.n_atoms_old):
        particle_mass_old = self._old_system.getParticleMass(particle_idx)
        if particle_idx in self._topology_proposal.old_to_new_atom_map.keys():
            particle_index_in_new_system = self._topology_proposal.old_to_new_atom_map[particle_idx]
            particle_mass_new = self._new_system.getParticleMass(particle_index_in_new_system)
            particle_mass = (particle_mass_old + particle_mass_new) / 2 # Take the average of the masses if the atom is mapped
        else:
            particle_mass = particle_mass_old
        hybrid_idx = self._hybrid_system.addParticle(particle_mass)
        self._old_to_hybrid_map[particle_idx] = hybrid_idx
        # If the particle index in question is mapped, make sure to add it to the new to hybrid map as well.
        if particle_idx in self._topology_proposal.old_to_new_atom_map.keys():
            self._new_to_hybrid_map[particle_index_in_new_system] = hybrid_idx

    # Next, add the remaining unique atoms from the new system to the hybrid system and map accordingly.
    # As before, this does not copy interactions, only particle indices and masses.
    _logger.info("Adding and mapping new atoms to hybrid system...")
    for particle_idx in self._topology_proposal.unique_new_atoms:
        particle_mass = self._new_system.getParticleMass(particle_idx)
        hybrid_idx = self._hybrid_system.addParticle(particle_mass)
        self._new_to_hybrid_map[particle_idx] = hybrid_idx

    # Check that if there is a barostat in the original system, it is added to the hybrid.
    # We copy the barostat from the old system.
    if "MonteCarloBarostat" in self._old_system_forces.keys():
        barostat = copy.deepcopy(self._old_system_forces["MonteCarloBarostat"])
        self._hybrid_system.addForce(barostat)
        _logger.info("Added MonteCarloBarostat.")
    else:
        _logger.info("No MonteCarloBarostat added.")

    # Copy over the box vectors:
    box_vectors = self._old_system.getDefaultPeriodicBoxVectors()
    self._hybrid_system.setDefaultPeriodicBoxVectors(*box_vectors)
    _logger.info(f"getDefaultPeriodicBoxVectors added to hybrid: {box_vectors}")

    # Create the opposite atom maps for use in nonbonded force processing; let's omit this from logger
    self._hybrid_to_old_map = {value : key for key, value in self._old_to_hybrid_map.items()}
    self._hybrid_to_new_map = {value : key for key, value in self._new_to_hybrid_map.items()}

    # Assign atoms to one of the classes described in the class docstring
    self._atom_classes = self._determine_atom_classes()
    _logger.info("Determined atom classes.")

    # Construct dictionary of exceptions in old and new systems
    _logger.info("Generating old system exceptions dict...")
    self._old_system_exceptions = self._generate_dict_from_exceptions(self._old_system_forces['NonbondedForce'])
    _logger.info("Generating new system exceptions dict...")
    self._new_system_exceptions = self._generate_dict_from_exceptions(self._new_system_forces['NonbondedForce'])

    self._validate_disjoint_sets()

    # Copy constraints, checking to make sure they are not changing
    _logger.info("Handling constraints...")
    self._handle_constraints()

    # Copy over relevant virtual sites
    _logger.info("Handling virtual sites...")
    self._handle_virtual_sites()

    # Call each of the methods to add the corresponding force terms and prepare the forces:
    _logger.info("Adding bond force terms...")
    self._add_bond_force_terms()
    _logger.info("Adding angle force terms...")
    self._add_angle_force_terms()
    _logger.info("Adding torsion force terms...")
    self._add_torsion_force_terms()
    if 'NonbondedForce' in self._old_system_forces or 'NonbondedForce' in self._new_system_forces:
        _logger.info("Adding nonbonded force terms...")
        self._add_nonbonded_force_terms()

    # Call each force preparation method to generate the actual interactions that we need:
    _logger.info("Handling harmonic bonds...")
    self.handle_harmonic_bonds()
    _logger.info("Handling harmonic angles...")
    self.handle_harmonic_angles()
    _logger.info("Handling torsion forces...")
    self.handle_periodic_torsion_force()
    if 'NonbondedForce' in self._old_system_forces or 'NonbondedForce' in self._new_system_forces:
        _logger.info("Handling nonbonded forces...")
        self.handle_nonbonded()
    if 'NonbondedForce' in self._old_system_forces or 'NonbondedForce' in self._new_system_forces:
        _logger.info("Handling unique_new/old interaction exceptions...")
        if len(self._old_system_exceptions.keys()) == 0 and len(self._new_system_exceptions.keys()) == 0:
            _logger.info("There are no old/new system exceptions.")
        else:
            _logger.info("There are old or new system exceptions...proceeding.")
            self.handle_old_new_exceptions()

    # Get positions for the hybrid
    self._hybrid_positions = self._compute_hybrid_positions()

    # Generate the topology representation
    self._hybrid_topology = self._create_topology()

    # Impose RMSD restraint, if requested
    if rmsd_restraint:
        _logger.info("Attempting to impose RMSD restraints.")
        self._impose_rmsd_restraint()

    # Impose virtual bonds to ensure system is imaged together.
    if impose_virtual_bonds:
        _logger.info("Imposing virtual bonds to ensure system is imaged together.")
        self._impose_virtual_bonds()
def _validate_disjoint_sets(self):
"""
Conduct a sanity check to make sure that the hybrid maps of the old and new system exception dict keys do not contain both environment and unique_old/new atoms
"""
for old_indices in self._old_system_exceptions.keys():
hybrid_indices = (self._old_to_hybrid_map[old_indices[0]], self._old_to_hybrid_map[old_indices[1]])
if set(old_indices).intersection(self._atom_classes['environment_atoms']) != set():
assert set(old_indices).intersection(self._atom_classes['unique_old_atoms']) == set(), f"old index exceptions {old_indices} include unique old and environment atoms, which is disallowed"
for new_indices in self._new_system_exceptions.keys():
hybrid_indices = (self._new_to_hybrid_map[new_indices[0]], self._new_to_hybrid_map[new_indices[1]])
if set(hybrid_indices).intersection(self._atom_classes['environment_atoms']) != set():
assert set(hybrid_indices).intersection(self._atom_classes['unique_new_atoms']) == set(), f"new index exceptions {new_indices} include unique new and environment atoms, which is disallowed"
def _handle_virtual_sites(self):
    """
    Ensure that all virtual sites in old and new system are copied over to the hybrid system. Note that we do not
    support virtual sites in the changing region.

    Raises Exception if a virtual site maps to a non-environment hybrid atom.
    """
    for system_name in ('old', 'new'):
        system = getattr(self._topology_proposal, '{}_system'.format(system_name))
        hybrid_atom_map = getattr(self, '_{}_to_hybrid_map'.format(system_name))

        # Loop through virtual sites
        numVirtualSites = 0
        for particle_idx in range(system.getNumParticles()):
            if system.isVirtualSite(particle_idx):
                numVirtualSites += 1
                # If it's a virtual site, make sure it is not in the unique or core atoms, since this is currently unsupported
                hybrid_idx = hybrid_atom_map[particle_idx]
                if hybrid_idx not in self._atom_classes['environment_atoms']:
                    raise Exception("Virtual sites in changing residue are unsupported.")
                else:
                    virtual_site = system.getVirtualSite(particle_idx)
                    self._hybrid_system.setVirtualSite(hybrid_idx, virtual_site)
        # NOTE(review): the counter resets per system, so this log is emitted
        # once per endpoint system — confirm intended placement.
        _logger.info(f"\t_handle_virtual_sites: numVirtualSites: {numVirtualSites}")
def _determine_core_atoms_in_topology(self, topology, unique_atoms, mapped_atoms, hybrid_map, residue_to_switch):
"""
Given a topology and its corresponding unique and mapped atoms, return the set of atom indices in the
hybrid system which would belong to the "core" atom class
Parameters
----------
topology : simtk.openmm.app.Topology
An OpenMM topology representing a system of interest
unique_atoms : set of int
A set of atoms that are unique to this topology
mapped_atoms : set of int
A set of atoms that are mapped to another topology
residue_to_switch : str
string name of a residue that is being mutated
Returns
-------
core_atoms : set of int
set of core atom indices in hybrid topology
"""
core_atoms = set()
# Loop through the residues to look for ones with unique atoms
for residue in topology.residues():
atom_indices_old_system = {atom.index for atom in residue.atoms()}
# If the residue contains an atom index that is unique, then the residue is changing.
# We determine this by checking if the atom indices of the residue have any intersection with the unique atoms
# likewise, if the name of the residue matches the residue_to_match, then we look for mapped atoms
if len(atom_indices_old_system.intersection(unique_atoms)) > 0 or residue_to_switch == residue.name:
# We can add the atoms in this residue which are mapped to the core_atoms set:
for atom_index in atom_indices_old_system:
if atom_index in mapped_atoms:
# We specifically want to add the hybrid atom.
hybrid_index = hybrid_map[atom_index]
core_atoms.add(hybrid_index)
assert len(core_atoms) >= 3, 'Cannot run a simulation with fewer than 3 core atoms. System has {len(core_atoms)}'
return core_atoms
def _determine_atom_classes(self):
"""
This method determines whether each atom belongs to unique old, unique new, core, or environment, as defined above.
All the information required is contained in the TopologyProposal passed to the constructor. All indices are
indices in the hybrid system.
Returns
-------
atom_classes : dict of list
A dictionary of the form {'core' :core_list} etc.
"""
atom_classes = {'unique_old_atoms' : set(), 'unique_new_atoms' : set(), 'core_atoms' : set(), 'environment_atoms' : set()}
# First, find the unique old atoms, as this is the most straightforward:
for atom_idx in self._topology_proposal.unique_old_atoms:
hybrid_idx = self._old_to_hybrid_map[atom_idx]
atom_classes['unique_old_atoms'].add(hybrid_idx)
# Then the unique new atoms (this is substantially the same as above)
for atom_idx in self._topology_proposal.unique_new_atoms:
hybrid_idx = self._new_to_hybrid_map[atom_idx]
atom_classes['unique_new_atoms'].add(hybrid_idx)
# The core atoms:
core_atoms = []
for new_idx, old_idx in self._topology_proposal._core_new_to_old_atom_map.items():
new_to_hybrid_idx, old_to_hybrid_index = self._new_to_hybrid_map[new_idx], self._old_to_hybrid_map[old_idx]
assert new_to_hybrid_idx == old_to_hybrid_index, f"there is a -to_hybrid naming collision in topology proposal core atom map: {self._topology_proposal._core_new_to_old_atom_map}"
core_atoms.append(new_to_hybrid_idx)
new_to_hybrid_environment_atoms = set([self._new_to_hybrid_map[idx] for idx in self._topology_proposal._new_environment_atoms])
old_to_hybrid_environment_atoms = set([self._old_to_hybrid_map[idx] for idx in self._topology_proposal._old_environment_atoms])
assert new_to_hybrid_environment_atoms == old_to_hybrid_environment_atoms, f"there is a -to_hybrid naming collisions in topology proposal environment atom map: new_to_hybrid: {new_to_hybrid_environment_atoms}; old_to_hybrid: {old_to_hybrid_environment_atoms}"
atom_classes['core_atoms'] = set(core_atoms)
atom_classes['environment_atoms'] = new_to_hybrid_environment_atoms # since we asserted this is identical to old_to_hybrid_environment_atoms
return atom_classes
def _translate_nonbonded_method_to_custom(self, standard_nonbonded_method):
    """
    Translate the nonbonded method enum from a standard NonbondedForce to the
    CustomNonbondedForce equivalent: `CutoffPeriodic`, `PME` and `Ewald` all
    become `CutoffPeriodic`; `NoCutoff` and `CutoffNonPeriodic` map to their
    same-named custom values.

    Parameters
    ----------
    standard_nonbonded_method : openmm.NonbondedForce.NonbondedMethod
        the nonbonded method of the standard force

    Returns
    -------
    custom_nonbonded_method : openmm.CustomNonbondedForce.NonbondedMethod
        the nonbonded method for the equivalent customnonbonded force
    """
    periodic_methods = (openmm.NonbondedForce.CutoffPeriodic,
                        openmm.NonbondedForce.PME,
                        openmm.NonbondedForce.Ewald)
    if standard_nonbonded_method in periodic_methods:
        return openmm.CustomNonbondedForce.CutoffPeriodic
    if standard_nonbonded_method == openmm.NonbondedForce.NoCutoff:
        return openmm.CustomNonbondedForce.NoCutoff
    if standard_nonbonded_method == openmm.NonbondedForce.CutoffNonPeriodic:
        return openmm.CustomNonbondedForce.CutoffNonPeriodic
    raise NotImplementedError("This nonbonded method is not supported.")
def _handle_constraints(self):
    """
    Copy constraints from both endpoint systems into the hybrid system.

    Constraints from the old system are added first, then any additional ones
    from the new system. A constraint whose length differs between the two
    systems is an error, since a changing constraint cannot be alchemically
    interpolated.
    """
    constraint_lengths = dict()  # hybrid atom pair -> constraint length already added
    for system_name in ('old', 'new'):
        system = getattr(self._topology_proposal, '{}_system'.format(system_name))
        hybrid_map = getattr(self, '_{}_to_hybrid_map'.format(system_name))
        for constraint_idx in range(system.getNumConstraints()):
            atom1, atom2, length = system.getConstraintParameters(constraint_idx)
            # Canonical (sorted) hybrid pair so old/new duplicates collide.
            hybrid_atoms = tuple(sorted([hybrid_map[atom1], hybrid_map[atom2]]))
            known_length = constraint_lengths.get(hybrid_atoms)
            if known_length is None:
                self._hybrid_system.addConstraint(hybrid_atoms[0], hybrid_atoms[1], length)
                constraint_lengths[hybrid_atoms] = length
            elif known_length != length:
                # TODO: We can skip this if we have already checked for constraints changing lengths
                raise Exception('Constraint length is changing for atoms {} in hybrid system: old {} new {}'.format(hybrid_atoms, constraint_lengths[hybrid_atoms], length))
    _logger.debug(f"\t_handle_constraints: constraint_lengths dict: {constraint_lengths}")
def _determine_interaction_group(self, atoms_in_interaction):
    """
    Classify an interaction into one of four groups:

    - unique_old: touches any unique old atom (fully on at lambda=0, off at lambda=1)
    - unique_new: touches any unique new atom (off at lambda=0, on at lambda=1)
    - environment: involves only environment atoms (never alchemically modified)
    - core: everything else (old character at lambda=0, new character at lambda=1)

    Parameters
    ----------
    atoms_in_interaction : list of int
        List of (hybrid) indices of the atoms in this interaction

    Returns
    -------
    interaction_group : InteractionGroup enum
        The group to which this interaction should be assigned
    """
    interaction_set = set(atoms_in_interaction)
    # Early returns, checked in priority order: unique-old dominates,
    # then unique-new, then all-environment; the remainder is core.
    if interaction_set & self._atom_classes['unique_old_atoms']:
        return InteractionGroup.unique_old
    if interaction_set & self._atom_classes['unique_new_atoms']:
        return InteractionGroup.unique_new
    if interaction_set <= self._atom_classes['environment_atoms']:
        return InteractionGroup.environment
    return InteractionGroup.core
def _add_bond_force_terms(self):
    """
    Install the bond forces on the hybrid system: a CustomBondForce that
    linearly interpolates core-atom bond parameters with lambda_bonds, plus a
    plain HarmonicBondForce for environment/unique bonds (never scaled).
    Particles/parameters are added elsewhere; only the forces are created here.
    """
    expression_parts = [
        '(K/2)*(r-length)^2;',
        'K = (1-lambda_bonds)*K1 + lambda_bonds*K2;',  # linearly interpolate spring constant
        'length = (1-lambda_bonds)*length1 + lambda_bonds*length2;',  # linearly interpolate bond length
    ]
    if self._has_functions:
        try:
            expression_parts.append('lambda_bonds = ' + self._functions['lambda_bonds'])
        except KeyError as e:
            print("Functions were provided, but no term was provided for the bonds")
            raise e
    core_energy_expression = ''.join(expression_parts)

    # Create the force and add the relevant per-bond parameters
    custom_core_force = openmm.CustomBondForce(core_energy_expression)
    for parameter_name in ('length1', 'K1', 'length2', 'K2'):  # old length/K, then new length/K
        custom_core_force.addPerBondParameter(parameter_name)

    if self._has_functions:
        custom_core_force.addGlobalParameter('lambda', 0.0)
        custom_core_force.addEnergyParameterDerivative('lambda')
    else:
        custom_core_force.addGlobalParameter('lambda_bonds', 0.0)

    self._hybrid_system.addForce(custom_core_force)
    self._hybrid_system_forces['core_bond_force'] = custom_core_force

    # Bond force for environment and unique atoms (bonds are never scaled for these):
    standard_bond_force = openmm.HarmonicBondForce()
    self._hybrid_system.addForce(standard_bond_force)
    self._hybrid_system_forces['standard_bond_force'] = standard_bond_force
def _add_angle_force_terms(self):
    """
    This function adds the appropriate angle force terms to the hybrid system. It does not add particles
    or parameters to the force; this is done elsewhere.

    Creates: a CustomAngleForce for core atoms (parameters interpolated with
    lambda_angles), optional CustomAngleForces for geometry-engine-neglected
    new/old angles, and a plain HarmonicAngleForce for environment/unique
    angles (never scaled).
    """
    energy_expression  = '(K/2)*(theta-theta0)^2;'
    energy_expression += 'K = (1.0-lambda_angles)*K_1 + lambda_angles*K_2;' # linearly interpolate spring constant
    energy_expression += 'theta0 = (1.0-lambda_angles)*theta0_1 + lambda_angles*theta0_2;' # linearly interpolate equilibrium angle
    if self._has_functions:
        try:
            energy_expression += 'lambda_angles = ' + self._functions['lambda_angles']
        except KeyError as e:
            print("Functions were provided, but no term was provided for the angles")
            raise e

    # Create the force and add relevant parameters
    custom_core_force = openmm.CustomAngleForce(energy_expression)
    custom_core_force.addPerAngleParameter('theta0_1') # molecule1 equilibrium angle
    custom_core_force.addPerAngleParameter('K_1') # molecule1 spring constant
    custom_core_force.addPerAngleParameter('theta0_2') # molecule2 equilibrium angle
    custom_core_force.addPerAngleParameter('K_2') # molecule2 spring constant

    # Create the force for neglected angles and relevant parameters; the K_1 term will be set to 0
    if len(self.neglected_new_angle_terms) > 0: # if there is at least one neglected angle term from the geometry engine
        _logger.info("\t_add_angle_force_terms: there are > 0 neglected new angles: adding CustomAngleForce")
        custom_neglected_new_force = openmm.CustomAngleForce(energy_expression)
        custom_neglected_new_force.addPerAngleParameter('theta0_1') # molecule1 equilibrium angle
        custom_neglected_new_force.addPerAngleParameter('K_1') # molecule1 spring constant
        custom_neglected_new_force.addPerAngleParameter('theta0_2') # molecule2 equilibrium angle
        custom_neglected_new_force.addPerAngleParameter('K_2') # molecule2 spring constant
    if len(self.neglected_old_angle_terms) > 0: # if there is at least one neglected angle term from the geometry engine
        _logger.info("\t_add_angle_force_terms: there are > 0 neglected old angles: adding CustomAngleForce")
        custom_neglected_old_force = openmm.CustomAngleForce(energy_expression)
        custom_neglected_old_force.addPerAngleParameter('theta0_1') # molecule1 equilibrium angle
        custom_neglected_old_force.addPerAngleParameter('K_1') # molecule1 spring constant
        custom_neglected_old_force.addPerAngleParameter('theta0_2') # molecule2 equilibrium angle
        custom_neglected_old_force.addPerAngleParameter('K_2') # molecule2 spring constant

    # With externally supplied functions, lambda is the single global and
    # parameter derivatives are exposed; otherwise lambda_angles is the global.
    if self._has_functions:
        custom_core_force.addGlobalParameter('lambda', 0.0)
        custom_core_force.addEnergyParameterDerivative('lambda')
        if len(self.neglected_new_angle_terms) > 0:
            custom_neglected_new_force.addGlobalParameter('lambda', 0.0)
            custom_neglected_new_force.addEnergyParameterDerivative('lambda')
        if len(self.neglected_old_angle_terms) > 0:
            custom_neglected_old_force.addGlobalParameter('lambda', 0.0)
            custom_neglected_old_force.addEnergyParameterDerivative('lambda')
    else:
        custom_core_force.addGlobalParameter('lambda_angles', 0.0)
        if len(self.neglected_new_angle_terms) > 0:
            custom_neglected_new_force.addGlobalParameter('lambda_angles', 0.0)
        if len(self.neglected_old_angle_terms) > 0:
            custom_neglected_old_force.addGlobalParameter('lambda_angles', 0.0)

    # Add the force to the system and the force dict.
    self._hybrid_system.addForce(custom_core_force)
    self._hybrid_system_forces['core_angle_force'] = custom_core_force

    if len(self.neglected_new_angle_terms) > 0:
        self._hybrid_system.addForce(custom_neglected_new_force)
        self._hybrid_system_forces['custom_neglected_new_angle_force'] = custom_neglected_new_force
    if len(self.neglected_old_angle_terms) > 0:
        self._hybrid_system.addForce(custom_neglected_old_force)
        self._hybrid_system_forces['custom_neglected_old_angle_force'] = custom_neglected_old_force

    # Add an angle term for environment/unique interactions--these are never scaled
    standard_angle_force = openmm.HarmonicAngleForce()
    self._hybrid_system.addForce(standard_angle_force)
    self._hybrid_system_forces['standard_angle_force'] = standard_angle_force
def _add_torsion_force_terms(self, add_custom_core_force=True, add_unique_atom_torsion_force=True):
    """
    Add the torsion force terms to the hybrid system.

    Core torsions are interpolated between the old (U1) and new (U2) parameters
    via the ``lambda_torsions`` global parameter, while environment and unique
    torsions live in a plain PeriodicTorsionForce that is always fully on.

    Parameters
    ----------
    add_custom_core_force : bool, default True
        If True, add the interpolating CustomTorsionForce to the hybrid system.
    add_unique_atom_torsion_force : bool, default True
        If True, add the standard PeriodicTorsionForce for unique/environment torsions.
    """
    energy_expression = ('(1-lambda_torsions)*U1 + lambda_torsions*U2;'
                         'U1 = K1*(1+cos(periodicity1*theta-phase1));'
                         'U2 = K2*(1+cos(periodicity2*theta-phase2));')

    if self._has_functions:
        try:
            energy_expression += 'lambda_torsions = ' + self._functions['lambda_torsions']
        except KeyError as e:
            print("Functions were provided, but no term was provided for torsions")
            raise e

    # Create the interpolating force. The order in which per-torsion parameters are
    # declared defines their indices, so molecule1 terms must precede molecule2 terms.
    custom_core_force = openmm.CustomTorsionForce(energy_expression)
    for parameter_name in ('periodicity1', 'phase1', 'K1', 'periodicity2', 'phase2', 'K2'):
        custom_core_force.addPerTorsionParameter(parameter_name)

    if self._has_functions:
        custom_core_force.addGlobalParameter('lambda', 0.0)
        custom_core_force.addEnergyParameterDerivative('lambda')
    else:
        custom_core_force.addGlobalParameter('lambda_torsions', 0.0)

    # Register the interpolating force with the hybrid system if requested.
    if add_custom_core_force:
        self._hybrid_system.addForce(custom_core_force)
        self._hybrid_system_forces['custom_torsion_force'] = custom_core_force

    # Non-alchemical torsions (unique/environment atoms) are never scaled.
    if add_unique_atom_torsion_force:
        unique_atom_torsion_force = openmm.PeriodicTorsionForce()
        self._hybrid_system.addForce(unique_atom_torsion_force)
        self._hybrid_system_forces['unique_atom_torsion_force'] = unique_atom_torsion_force
def _add_nonbonded_force_terms(self, add_custom_sterics_force=True):
    """
    Add the nonbonded force terms to the hybrid system. Note that as with the other forces,
    this method does not add any interactions. It only sets up the forces.

    Parameters
    ----------
    add_custom_sterics_force : bool, default True
        If True, the alchemical CustomNonbondedForce handling softcore sterics is added
        to the hybrid system; otherwise it is configured but not added.
    """
    # Add a regular nonbonded force for all interactions that are not changing.
    standard_nonbonded_force = openmm.NonbondedForce()
    self._hybrid_system.addForce(standard_nonbonded_force)
    _logger.info(f"\t_add_nonbonded_force_terms: {standard_nonbonded_force} added to hybrid system")
    self._hybrid_system_forces['standard_nonbonded_force'] = standard_nonbonded_force

    # Create a CustomNonbondedForce to handle alchemically interpolated nonbonded parameters.
    # Select functional form based on nonbonded method.
    # TODO: check _nonbonded_custom_ewald and _nonbonded_custom_cutoff since they take arguments that are never used...
    if self._nonbonded_method in [openmm.NonbondedForce.NoCutoff]:
        _logger.info("\t_add_nonbonded_force_terms: nonbonded_method is NoCutoff")
        sterics_energy_expression = self._nonbonded_custom(self._softcore_LJ_v2)
    elif self._nonbonded_method in [openmm.NonbondedForce.CutoffPeriodic, openmm.NonbondedForce.CutoffNonPeriodic]:
        _logger.info("\t_add_nonbonded_force_terms: nonbonded_method is Cutoff(Periodic or NonPeriodic)")
        # Mirror the reaction-field settings of the old system's NonbondedForce.
        epsilon_solvent = self._old_system_forces['NonbondedForce'].getReactionFieldDielectric()
        r_cutoff = self._old_system_forces['NonbondedForce'].getCutoffDistance()
        sterics_energy_expression = self._nonbonded_custom(self._softcore_LJ_v2)
        standard_nonbonded_force.setReactionFieldDielectric(epsilon_solvent)
        standard_nonbonded_force.setCutoffDistance(r_cutoff)
    elif self._nonbonded_method in [openmm.NonbondedForce.PME, openmm.NonbondedForce.Ewald]:
        _logger.info("\t_add_nonbonded_force_terms: nonbonded_method is PME or Ewald")
        # Mirror the Ewald/PME settings of the old system's NonbondedForce.
        [alpha_ewald, nx, ny, nz] = self._old_system_forces['NonbondedForce'].getPMEParameters()
        delta = self._old_system_forces['NonbondedForce'].getEwaldErrorTolerance()
        r_cutoff = self._old_system_forces['NonbondedForce'].getCutoffDistance()
        sterics_energy_expression = self._nonbonded_custom(self._softcore_LJ_v2)
        standard_nonbonded_force.setPMEParameters(alpha_ewald, nx, ny, nz)
        standard_nonbonded_force.setEwaldErrorTolerance(delta)
        standard_nonbonded_force.setCutoffDistance(r_cutoff)
    else:
        raise Exception("Nonbonded method %s not supported yet." % str(self._nonbonded_method))

    standard_nonbonded_force.setNonbondedMethod(self._nonbonded_method)
    _logger.info(f"\t_add_nonbonded_force_terms: {self._nonbonded_method} added to standard nonbonded force")

    # Assemble the full sterics energy expression: softcore form + common terms + mixing rules.
    sterics_energy_expression += self._nonbonded_custom_sterics_common()
    sterics_mixing_rules = self._nonbonded_custom_mixing_rules()
    custom_nonbonded_method = self._translate_nonbonded_method_to_custom(self._nonbonded_method)
    total_sterics_energy = "U_sterics;" + sterics_energy_expression + sterics_mixing_rules
    if self._has_functions:
        try:
            total_sterics_energy += 'lambda_sterics = ' + self._functions['lambda_sterics']
        except KeyError as e:
            print("Functions were provided, but there is no entry for sterics")
            raise e

    sterics_custom_nonbonded_force = openmm.CustomNonbondedForce(total_sterics_energy)
    if self._softcore_LJ_v2:
        sterics_custom_nonbonded_force.addGlobalParameter("softcore_alpha", self._softcore_LJ_v2_alpha)
    else:
        sterics_custom_nonbonded_force.addGlobalParameter("softcore_alpha", self.softcore_alpha)

    # Per-particle parameter declaration order defines the indices used when particles are added.
    sterics_custom_nonbonded_force.addPerParticleParameter("sigmaA")  # Lennard-Jones sigma initial
    sterics_custom_nonbonded_force.addPerParticleParameter("epsilonA")  # Lennard-Jones epsilon initial
    sterics_custom_nonbonded_force.addPerParticleParameter("sigmaB")  # Lennard-Jones sigma final
    sterics_custom_nonbonded_force.addPerParticleParameter("epsilonB")  # Lennard-Jones epsilon final
    sterics_custom_nonbonded_force.addPerParticleParameter("unique_old")  # 1 = hybrid old atom, 0 otherwise
    sterics_custom_nonbonded_force.addPerParticleParameter("unique_new")  # 1 = hybrid new atom, 0 otherwise

    if self._has_functions:
        sterics_custom_nonbonded_force.addGlobalParameter('lambda', 0.0)
        sterics_custom_nonbonded_force.addEnergyParameterDerivative('lambda')
    else:
        sterics_custom_nonbonded_force.addGlobalParameter("lambda_sterics_core", 0.0)
        sterics_custom_nonbonded_force.addGlobalParameter("lambda_electrostatics_core", 0.0)
        sterics_custom_nonbonded_force.addGlobalParameter("lambda_sterics_insert", 0.0)
        sterics_custom_nonbonded_force.addGlobalParameter("lambda_sterics_delete", 0.0)

    sterics_custom_nonbonded_force.setNonbondedMethod(custom_nonbonded_method)
    _logger.info(f"\t_add_nonbonded_force_terms: {custom_nonbonded_method} added to sterics_custom_nonbonded force")

    if add_custom_sterics_force:
        self._hybrid_system.addForce(sterics_custom_nonbonded_force)
        self._hybrid_system_forces['core_sterics_force'] = sterics_custom_nonbonded_force
        _logger.info(f"\t_add_nonbonded_force_terms: {sterics_custom_nonbonded_force} added to hybrid system")

    # Set the use of dispersion correction to be the same between the new nonbonded force and the old one:
    # These will be ignored from the _logger for the time being
    if self._old_system_forces['NonbondedForce'].getUseDispersionCorrection():
        self._hybrid_system_forces['standard_nonbonded_force'].setUseDispersionCorrection(True)
        if self._use_dispersion_correction:
            sterics_custom_nonbonded_force.setUseLongRangeCorrection(True)
    else:
        self._hybrid_system_forces['standard_nonbonded_force'].setUseDispersionCorrection(False)

    # Likewise propagate the switching-function settings from the old system.
    if self._old_system_forces['NonbondedForce'].getUseSwitchingFunction():
        switching_distance = self._old_system_forces['NonbondedForce'].getSwitchingDistance()
        standard_nonbonded_force.setUseSwitchingFunction(True)
        standard_nonbonded_force.setSwitchingDistance(switching_distance)
        sterics_custom_nonbonded_force.setUseSwitchingFunction(True)
        sterics_custom_nonbonded_force.setSwitchingDistance(switching_distance)
    else:
        standard_nonbonded_force.setUseSwitchingFunction(False)
        sterics_custom_nonbonded_force.setUseSwitchingFunction(False)
def _nonbonded_custom_sterics_common(self):
"""
Get a custom sterics expression using amber softcore expression
Returns
-------
sterics_addition : str
The common softcore sterics energy expression
"""
sterics_addition = "epsilon = (1-lambda_sterics)*epsilonA + lambda_sterics*epsilonB;" # interpolation
sterics_addition += "reff_sterics = sigma*((softcore_alpha*lambda_alpha + (r/sigma)^6))^(1/6);" # effective softcore distance for sterics
sterics_addition += "sigma = (1-lambda_sterics)*sigmaA + lambda_sterics*sigmaB;"
sterics_addition += "lambda_alpha = new_interaction*(1-lambda_sterics_insert) + old_interaction*lambda_sterics_delete;"
sterics_addition += "lambda_sterics = core_interaction*lambda_sterics_core + new_interaction*lambda_sterics_insert + old_interaction*lambda_sterics_delete;"
sterics_addition += "core_interaction = delta(unique_old1+unique_old2+unique_new1+unique_new2);new_interaction = max(unique_new1, unique_new2);old_interaction = max(unique_old1, unique_old2);"
return sterics_addition
def _nonbonded_custom(self, v2):
"""
Get a part of the nonbonded energy expression when there is no cutoff.
Returns
-------
sterics_energy_expression : str
The energy expression for U_sterics
electrostatics_energy_expression : str
The energy expression for electrostatics
"""
# Soft-core Lennard-Jones
if v2:
sterics_energy_expression = "U_sterics = select(step(r - r_LJ), 4*epsilon*x*(x-1.0), U_sterics_quad);"
sterics_energy_expression += f"U_sterics_quad = Force*(((r - r_LJ)^2)/2 - (r - r_LJ)) + U_sterics_cut;"
sterics_energy_expression += f"U_sterics_cut = 4*epsilon*((sigma/r_LJ)^6)*(((sigma/r_LJ)^6) - 1.0);"
sterics_energy_expression += f"Force = -4*epsilon*((-12*sigma^12)/(r_LJ^13) + (6*sigma^6)/(r_LJ^7));"
sterics_energy_expression += f"x = (sigma/r)^6;"
sterics_energy_expression += f"r_LJ = softcore_alpha*((26/7)*(sigma^6)*lambda_sterics_deprecated)^(1/6);"
sterics_energy_expression += f"lambda_sterics_deprecated = new_interaction*(1.0 - lambda_sterics_insert) + old_interaction*lambda_sterics_delete;"
else:
sterics_energy_expression = "U_sterics = 4*epsilon*x*(x-1.0); x = (sigma/reff_sterics)^6;"
return sterics_energy_expression
def _nonbonded_custom_mixing_rules(self):
"""
Mixing rules for the custom nonbonded force.
Returns
-------
sterics_mixing_rules : str
The mixing expression for sterics
electrostatics_mixing_rules : str
The mixiing rules for electrostatics
"""
# Define mixing rules.
sterics_mixing_rules = "epsilonA = sqrt(epsilonA1*epsilonA2);" # mixing rule for epsilon
sterics_mixing_rules += "epsilonB = sqrt(epsilonB1*epsilonB2);" # mixing rule for epsilon
sterics_mixing_rules += "sigmaA = 0.5*(sigmaA1 + sigmaA2);" # mixing rule for sigma
sterics_mixing_rules += "sigmaB = 0.5*(sigmaB1 + sigmaB2);" # mixing rule for sigma
return sterics_mixing_rules
def _find_bond_parameters(self, bond_force, index1, index2):
"""
This is a convenience function to find bond parameters in another system given the two indices.
Parameters
----------
bond_force : openmm.HarmonicBondForce
The bond force where the parameters should be found
index1 : int
Index1 (order does not matter) of the bond atoms
index2 : int
Index2 (order does not matter) of the bond atoms
Returns
-------
bond_parameters : list
List of relevant bond parameters
"""
index_set = {index1, index2}
# Loop through all the bonds:
for bond_index in range(bond_force.getNumBonds()):
parms = bond_force.getBondParameters(bond_index)
if index_set=={parms[0], parms[1]}:
return parms
return []
def handle_harmonic_bonds(self):
    """
    This method adds the appropriate interaction for all bonds in the hybrid system. The scheme used is:

    1) If the two atoms are both in the core, then we add to the CustomBondForce and interpolate between the two
       parameters
    2) If one of the atoms is in core and the other is environment, we have to assert that the bond parameters do not
       change between the old and the new system; then, the parameters are added to the regular bond force
    3) Otherwise, we add the bond to a regular bond force.

    Fixes relative to the previous revision: removed the unused local
    ``old_core_env_indices`` and eliminated a duplicate ``_find_bond_parameters``
    lookup in the core-core branch.
    """
    old_system_bond_force = self._old_system_forces['HarmonicBondForce']
    new_system_bond_force = self._new_system_forces['HarmonicBondForce']

    # First, loop through the old system bond forces and add relevant terms
    _logger.info("\thandle_harmonic_bonds: looping through old_system to add relevant terms...")
    for bond_index in range(old_system_bond_force.getNumBonds()):
        # Get each set of bond parameters
        [index1_old, index2_old, r0_old, k_old] = old_system_bond_force.getBondParameters(bond_index)
        _logger.debug(f"\t\thandle_harmonic_bonds: old bond_index {bond_index} with old indices {index1_old, index2_old}")

        # Map the indices to the hybrid system, for which our atom classes are defined.
        index1_hybrid = self._old_to_hybrid_map[index1_old]
        index2_hybrid = self._old_to_hybrid_map[index2_old]
        index_set = {index1_hybrid, index2_hybrid}

        # Now check if it is a subset of the core atoms (that is, both atoms are in the core)
        # If it is, we need to find the parameters in the new system so that we can interpolate
        if index_set.issubset(self._atom_classes['core_atoms']):
            _logger.debug(f"\t\thandle_harmonic_bonds: bond_index {bond_index} is a core (to custom bond force).")
            index1_new = self._topology_proposal.old_to_new_atom_map[index1_old]
            index2_new = self._topology_proposal.old_to_new_atom_map[index2_old]
            new_bond_parameters = self._find_bond_parameters(new_system_bond_force, index1_new, index2_new)
            if not new_bond_parameters:
                # The bond is absent in the new system: keep the length and turn the
                # force constant off at the lambda=1 endpoint.
                r0_new = r0_old
                k_new = 0.0*unit.kilojoule_per_mole/unit.angstrom**2
            else:
                # Reuse the parameters found above instead of searching the force a second time.
                [_index1, _index2, r0_new, k_new] = new_bond_parameters
            self._hybrid_system_forces['core_bond_force'].addBond(index1_hybrid, index2_hybrid, [r0_old, k_old, r0_new, k_new])

        # Check if the index set is a subset of anything besides environment (in the case of environment, we just add the bond to the regular bond force)
        # that would mean that this bond is core-unique_old or unique_old-unique_old
        elif index_set.issubset(self._atom_classes['unique_old_atoms']) or (len(index_set.intersection(self._atom_classes['unique_old_atoms'])) == 1 and len(index_set.intersection(self._atom_classes['core_atoms'])) == 1):
            _logger.debug(f"\t\thandle_harmonic_bonds: bond_index {bond_index} is a core-unique_old or unique_old-unique old...")
            # If we're not softening bonds, we can just add it to the regular bond force. Likewise if we are only softening new bonds
            if not self._soften_bonds or self._soften_only_new:
                _logger.debug(f"\t\t\thandle_harmonic_bonds: no softening (to standard bond force)")
                self._hybrid_system_forces['standard_bond_force'].addBond(index1_hybrid, index2_hybrid, r0_old,
                                                                          k_old)
            # Otherwise, we will need to soften one of the endpoints. For unique old atoms, the softening endpoint is at lambda =1
            else:
                r0_new = r0_old  # The bond length won't change
                k_new = self._bond_softening_constant * k_old  # We multiply the endpoint by the bond softening constant

                # Now we add to the core bond force, since that is an alchemically-modified force.
                self._hybrid_system_forces['core_bond_force'].addBond(index1_hybrid, index2_hybrid,
                                                                      [r0_old, k_old, r0_new, k_new])
        elif len(index_set.intersection(self._atom_classes['environment_atoms'])) == 1 and len(index_set.intersection(self._atom_classes['core_atoms'])) == 1:
            _logger.debug(f"\t\thandle_harmonic_bonds: bond_index {bond_index} is an environment-core...")
            self._hybrid_system_forces['standard_bond_force'].addBond(index1_hybrid, index2_hybrid, r0_old, k_old)

        # Otherwise, we just add the same parameters as those in the old system (these are environment atoms, and the parameters are the same)
        elif index_set.issubset(self._atom_classes['environment_atoms']):
            _logger.debug(f"\t\thandle_harmonic_bonds: bond_index {bond_index} is an environment (to standard bond force).")
            self._hybrid_system_forces['standard_bond_force'].addBond(index1_hybrid, index2_hybrid, r0_old, k_old)
        else:
            raise Exception(f"\t\thybrid index set {index_set} does not fit into a canonical atom type")

    # Now loop through the new system to get the interactions that are unique to it.
    _logger.info("\thandle_harmonic_bonds: looping through new_system to add relevant terms...")
    for bond_index in range(new_system_bond_force.getNumBonds()):
        # Get each set of bond parameters
        [index1_new, index2_new, r0_new, k_new] = new_system_bond_force.getBondParameters(bond_index)
        _logger.debug(f"\t\thandle_harmonic_bonds: new bond_index {bond_index} with new indices {index1_new, index2_new}")

        # Convert indices to hybrid, since that is how we represent atom classes:
        index1_hybrid = self._new_to_hybrid_map[index1_new]
        index2_hybrid = self._new_to_hybrid_map[index2_new]
        index_set = {index1_hybrid, index2_hybrid}

        # If the intersection of this set and unique new atoms contains anything, the bond is unique to the new system and must be added
        # all other bonds in the new system have been accounted for already.
        if len(index_set.intersection(self._atom_classes['unique_new_atoms'])) == 2 or (len(index_set.intersection(self._atom_classes['unique_new_atoms'])) == 1 and len(index_set.intersection(self._atom_classes['core_atoms'])) == 1):
            _logger.debug(f"\t\thandle_harmonic_bonds: bond_index {bond_index} is a core-unique_new or unique_new-unique_new...")

            # If we are softening bonds, we have to use the core bond force, and scale the force constant at lambda = 0:
            if self._soften_bonds:
                _logger.debug(f"\t\t\thandle_harmonic_bonds: softening (to custom bond force)")
                r0_old = r0_new  # Do not change the length
                k_old = k_new * self._bond_softening_constant  # Scale the force constant by the requested parameter

                # Now we add to the core bond force, since that is an alchemically-modified force.
                self._hybrid_system_forces['core_bond_force'].addBond(index1_hybrid, index2_hybrid,
                                                                      [r0_old, k_old, r0_new, k_new])
            # If we aren't softening bonds, then just add it to the standard bond force
            else:
                _logger.debug(f"\t\t\thandle_harmonic_bonds: no softening (to standard bond force)")
                self._hybrid_system_forces['standard_bond_force'].addBond(index1_hybrid, index2_hybrid, r0_new, k_new)

        # If the bond is in the core, it has probably already been added in the above loop. However, there are some circumstances
        # where it was not (closing a ring). In that case, the bond has not been added and should be added here.
        # This has some peculiarities to be discussed...
        elif index_set.issubset(self._atom_classes['core_atoms']):
            if not self._find_bond_parameters(self._hybrid_system_forces['core_bond_force'], index1_hybrid, index2_hybrid):
                _logger.debug(f"\t\thandle_harmonic_bonds: bond_index {bond_index} is a SPECIAL core-core (to custom bond force).")
                r0_old = r0_new
                k_old = 0.0*unit.kilojoule_per_mole/unit.angstrom**2
                self._hybrid_system_forces['core_bond_force'].addBond(index1_hybrid, index2_hybrid,
                                                                      [r0_old, k_old, r0_new, k_new])
        elif index_set.issubset(self._atom_classes['environment_atoms']):
            # Already been added
            pass
        elif len(index_set.intersection(self._atom_classes['environment_atoms'])) == 1 and len(index_set.intersection(self._atom_classes['core_atoms'])) == 1:
            _logger.debug(f"\t\thandle_harmonic_bonds: bond_index {bond_index} is an environemnt-core; this has been previously added")
        else:
            raise Exception(f"\t\thybrid index set {index_set} does not fit into a canonical atom type")
def _find_angle_parameters(self, angle_force, indices):
"""
Convenience function to find the angle parameters corresponding to a particular set of indices
Parameters
----------
angle_force : openmm.HarmonicAngleForce
The force where the angle of interest may be found.
indices : list of int
The indices (any order) of the angle atoms
Returns
-------
angle_parameters : list
list of angle parameters
"""
#index_set = set(indices)
indices_reversed = indices[::-1]
# Now loop through and try to find the angle:
for angle_index in range(angle_force.getNumAngles()):
angle_parameters = angle_force.getAngleParameters(angle_index)
# Get a set representing the angle indices
angle_parameter_indices = angle_parameters[:3]
if indices == angle_parameter_indices or indices_reversed == angle_parameter_indices:
return angle_parameters
return [] # Return empty if no matching angle found
def _find_torsion_parameters(self, torsion_force, indices):
"""
Convenience function to find the torsion parameters corresponding to a particular set of indices.
Parameters
----------
torsion_force : openmm.PeriodicTorsionForce
torsion force where the torsion of interest may be found
indices : list of int
The indices of the atoms of the torsion
Returns
-------
torsion_parameters : list
torsion parameters
"""
#index_set = set(indices)
indices_reversed = indices[::-1]
torsion_parameters_list = list()
# Now loop through and try to find the torsion:
for torsion_index in range(torsion_force.getNumTorsions()):
torsion_parameters = torsion_force.getTorsionParameters(torsion_index)
# Get a set representing the torsion indices:
torsion_parameter_indices = torsion_parameters[:4]
if indices == torsion_parameter_indices or indices_reversed == torsion_parameter_indices:
torsion_parameters_list.append(torsion_parameters)
return torsion_parameters_list
def handle_harmonic_angles(self):
    """
    This method adds the appropriate interaction for all angles in the hybrid system. The scheme used, as with bonds, is:

    1) If the three atoms are all in the core, then we add to the CustomAngleForce and interpolate between the two
       parameters
    2) If the three atoms contain at least one unique new, check if the angle is in the neglected new list, and if so, interpolate from K_1 = 0;
       else, if the three atoms contain at least one unique old, check if the angle is in the neglected old list, and if so, interpolate from K_2 = 0.
    3) If the angle contains at least one environment and at least one core atom, assert there are no unique new atoms and that the angle terms
       are preserved between the new and the old system. Then add to the standard angle force
    4) Otherwise, we add the angle to a regular angle force since it is environment.

    Fix relative to the previous revision: several log messages wrongly carried the
    prefix "handle_harmonic_bonds"; they now correctly say "handle_harmonic_angles".
    """
    old_system_angle_force = self._old_system_forces['HarmonicAngleForce']
    new_system_angle_force = self._new_system_forces['HarmonicAngleForce']

    # First, loop through all the angles in the old system to determine what to do with them. We will only use the
    # custom angle force if all atoms are part of "core." Otherwise, they are either unique to one system or never
    # change.
    _logger.info("\thandle_harmonic_angles: looping through old_system to add relevant terms...")
    for angle_index in range(old_system_angle_force.getNumAngles()):
        old_angle_parameters = old_system_angle_force.getAngleParameters(angle_index)
        _logger.debug(f"\t\thandle_harmonic_angles: old angle_index {angle_index} with old indices {old_angle_parameters[:3]}")

        # Get the indices in the hybrid system
        hybrid_index_list = [self._old_to_hybrid_map[old_atomid] for old_atomid in old_angle_parameters[:3]]
        hybrid_index_set = set(hybrid_index_list)

        # If all atoms are in the core, we'll need to find the corresponding parameters in the new system and
        # interpolate
        if hybrid_index_set.issubset(self._atom_classes['core_atoms']):
            _logger.debug(f"\t\thandle_harmonic_angles: angle_index {angle_index} is a core (to custom angle force).")
            # Get the new indices so we can get the new angle parameters
            new_indices = [self._topology_proposal.old_to_new_atom_map[old_atomid] for old_atomid in old_angle_parameters[:3]]
            new_angle_parameters = self._find_angle_parameters(new_system_angle_force, new_indices)
            if not new_angle_parameters:
                # The angle is absent in the new system: keep theta0 and zero the endpoint force constant.
                new_angle_parameters = [0, 0, 0, old_angle_parameters[3], 0.0*unit.kilojoule_per_mole/unit.radian**2]

            # Add to the hybrid force:
            # the parameters at indices 3 and 4 represent theta0 and k, respectively.
            hybrid_force_parameters = [old_angle_parameters[3], old_angle_parameters[4], new_angle_parameters[3], new_angle_parameters[4]]
            self._hybrid_system_forces['core_angle_force'].addAngle(hybrid_index_list[0], hybrid_index_list[1], hybrid_index_list[2], hybrid_force_parameters)

        # Check if the atoms are neither all core nor all environment, which would mean they involve unique old interactions
        elif not hybrid_index_set.issubset(self._atom_classes['environment_atoms']):
            if hybrid_index_set.intersection(self._atom_classes['environment_atoms']) != set():  # if there is an environment atom
                _logger.debug(f"\t\thandle_harmonic_angles: angle_index {angle_index} contains an environment atom")
                assert hybrid_index_set.intersection(self._atom_classes['unique_old_atoms']) == set(), f"we disallow unique-environment terms"
                self._hybrid_system_forces['standard_angle_force'].addAngle(hybrid_index_list[0],
                                                                            hybrid_index_list[1],
                                                                            hybrid_index_list[2],
                                                                            old_angle_parameters[3],
                                                                            old_angle_parameters[4])
            else:
                _logger.debug(f"\t\thandle_harmonic_angles: angle_index {angle_index} is a core with unique_old...")
                # There are no env atoms, so we can treat this term appropriately

                # Check if we are softening angles, and not softening only new angles:
                if self._soften_angles and not self._soften_only_new:
                    _logger.debug(f"\t\t\thandle_harmonic_angles: softening (to custom angle force)")
                    # If we are, then we need to generate the softened parameters (at lambda=1 for old atoms)
                    # We do this by using the same equilibrium angle, and scaling the force constant at the non-interacting
                    # endpoint:
                    if angle_index in self.neglected_old_angle_terms:
                        _logger.debug("\t\t\tsoften angles on but angle is in neglected old, so softening constant is set to zero.")
                        hybrid_force_parameters = [old_angle_parameters[3], old_angle_parameters[4], old_angle_parameters[3], 0.0 * old_angle_parameters[4]]
                        self._hybrid_system_forces['custom_neglected_old_angle_force'].addAngle(hybrid_index_list[0], hybrid_index_list[1], hybrid_index_list[2], hybrid_force_parameters)
                    else:
                        _logger.debug(f"\t\t\thandle_harmonic_angles: softening (to custom angle force)")
                        hybrid_force_parameters = [old_angle_parameters[3], old_angle_parameters[4], old_angle_parameters[3], self._angle_softening_constant * old_angle_parameters[4]]
                        self._hybrid_system_forces['core_angle_force'].addAngle(hybrid_index_list[0], hybrid_index_list[1], hybrid_index_list[2], hybrid_force_parameters)
                # If not, we can just add this to the standard angle force
                else:
                    if angle_index in self.neglected_old_angle_terms:
                        _logger.debug(f"\t\t\tangle in neglected_old_angle_terms; K_2 is set to zero")
                        hybrid_force_parameters = [old_angle_parameters[3], old_angle_parameters[4], old_angle_parameters[3], 0.0 * old_angle_parameters[4]]
                        self._hybrid_system_forces['custom_neglected_old_angle_force'].addAngle(hybrid_index_list[0], hybrid_index_list[1], hybrid_index_list[2], hybrid_force_parameters)
                    else:
                        _logger.debug(f"\t\t\thandle_harmonic_angles: no softening (to standard angle force)")
                        self._hybrid_system_forces['standard_angle_force'].addAngle(hybrid_index_list[0],
                                                                                    hybrid_index_list[1],
                                                                                    hybrid_index_list[2],
                                                                                    old_angle_parameters[3],
                                                                                    old_angle_parameters[4])
        # Otherwise, only environment atoms are in this interaction, so add it to the standard angle force
        elif hybrid_index_set.issubset(self._atom_classes['environment_atoms']):
            _logger.debug(f"\t\thandle_harmonic_angles: angle_index {angle_index} is an environment (to standard angle force)")
            self._hybrid_system_forces['standard_angle_force'].addAngle(hybrid_index_list[0], hybrid_index_list[1],
                                                                        hybrid_index_list[2], old_angle_parameters[3],
                                                                        old_angle_parameters[4])
        else:
            raise Exception(f"\t\thandle_harmonic_angles: angle_index {angle_index} does not fit a canonical form.")

    # Finally, loop through the new system force to add any unique new angles
    _logger.info("\thandle_harmonic_angles: looping through new_system to add relevant terms...")
    for angle_index in range(new_system_angle_force.getNumAngles()):
        new_angle_parameters = new_system_angle_force.getAngleParameters(angle_index)
        _logger.debug(f"\t\thandle_harmonic_angles: new angle_index {angle_index} with new terms {new_angle_parameters[:3]}")

        # Get the indices in the hybrid system
        hybrid_index_list = [self._new_to_hybrid_map[new_atomid] for new_atomid in new_angle_parameters[:3]]
        hybrid_index_set = set(hybrid_index_list)

        # If the intersection of this hybrid set with the unique new atoms is nonempty, it must be added:
        if len(hybrid_index_set.intersection(self._atom_classes['unique_new_atoms'])) > 0:
            assert hybrid_index_set.intersection(self._atom_classes['environment_atoms']) == set(), f"we disallow angle terms with unique new and environment atoms"
            _logger.debug(f"\t\thandle_harmonic_angles: angle_index {angle_index} is a core-unique_new or unique_new-unique_new...")

            # Check to see if we are softening angles:
            if self._soften_angles:
                _logger.info(f"\t\t\thandle_harmonic_angles: softening (to custom angle force)")
                if angle_index in self.neglected_new_angle_terms:
                    _logger.debug("\t\t\tsoften angles on but angle is in neglected new, so softening constant is set to zero.")
                    hybrid_force_parameters = [new_angle_parameters[3], new_angle_parameters[4] * 0.0, new_angle_parameters[3], new_angle_parameters[4]]
                    self._hybrid_system_forces['custom_neglected_new_angle_force'].addAngle(hybrid_index_list[0], hybrid_index_list[1], hybrid_index_list[2], hybrid_force_parameters)
                else:
                    _logger.debug(f"\t\t\thandle_harmonic_angles: softening (to custom angle force)")
                    hybrid_force_parameters = [new_angle_parameters[3], new_angle_parameters[4] * self._angle_softening_constant, new_angle_parameters[3], new_angle_parameters[4]]
                    self._hybrid_system_forces['core_angle_force'].addAngle(hybrid_index_list[0], hybrid_index_list[1],
                                                                            hybrid_index_list[2],
                                                                            hybrid_force_parameters)
            # Otherwise, just add to the nonalchemical force
            else:
                if angle_index in self.neglected_new_angle_terms:
                    _logger.debug(f"\t\t\tangle in neglected_new_angle_terms; K_1 is set to zero")
                    hybrid_force_parameters = [new_angle_parameters[3], 0.0 * new_angle_parameters[4], new_angle_parameters[3], new_angle_parameters[4]]
                    self._hybrid_system_forces['custom_neglected_new_angle_force'].addAngle(hybrid_index_list[0], hybrid_index_list[1], hybrid_index_list[2], hybrid_force_parameters)
                else:
                    _logger.debug(f"\t\t\thandle_harmonic_angles: no softening (to standard angle force)")
                    self._hybrid_system_forces['standard_angle_force'].addAngle(hybrid_index_list[0], hybrid_index_list[1],
                                                                                hybrid_index_list[2], new_angle_parameters[3],
                                                                                new_angle_parameters[4])
        elif hybrid_index_set.issubset(self._atom_classes['core_atoms']):
            _logger.debug(f"\t\thandle_harmonic_angles: angle_index {angle_index} is a core (to custom angle force).")
            if not self._find_angle_parameters(self._hybrid_system_forces['core_angle_force'], hybrid_index_list):
                _logger.debug(f"\t\t\thandle_harmonic_angles: angle_index {angle_index} NOT previously added...adding now...THERE IS A CONSIDERATION NOT BEING MADE!")
                hybrid_force_parameters = [new_angle_parameters[3], 0.0*unit.kilojoule_per_mole/unit.radian**2, new_angle_parameters[3], new_angle_parameters[4]]
                self._hybrid_system_forces['core_angle_force'].addAngle(hybrid_index_list[0], hybrid_index_list[1],
                                                                        hybrid_index_list[2],
                                                                        hybrid_force_parameters)
        elif hybrid_index_set.issubset(self._atom_classes['environment_atoms']):
            # We have already added the appropriate environmental atom terms
            pass
        elif hybrid_index_set.intersection(self._atom_classes['environment_atoms']) != set():
            _logger.debug(f"\t\thandle_harmonic_angles: angle_index {angle_index} contains an environment atom; this as already been added")
            assert hybrid_index_set.intersection(self._atom_classes['unique_new_atoms']) == set(), f"we disallow angle terms with unique new and environment atoms"
        else:
            raise Exception(f"\t\thybrid index list {hybrid_index_list} does not fit into a canonical atom set")
def handle_periodic_torsion_force(self):
    """
    Handle the torsions defined in the new and old systems as such:

    1. old system torsions will enter the ``custom_torsion_force`` if they do not contain ``unique_old_atoms`` and will interpolate from ``on`` to ``off`` from ``lambda_torsions`` = 0 to 1, respectively
    2. new system torsions will enter the ``custom_torsion_force`` if they do not contain ``unique_new_atoms`` and will interpolate from ``off`` to ``on`` from ``lambda_torsions`` = 0 to 1, respectively
    3. old *and* new system torsions will enter the ``unique_atom_torsion_force`` (``standard_torsion_force``) and will *not* be interpolated

    Each torsion parameter triple is ``[periodicity, phase, k]``; the custom torsion
    force receives six per-torsion parameters (old triple followed by new triple).
    """
    old_system_torsion_force = self._old_system_forces['PeriodicTorsionForce']
    new_system_torsion_force = self._new_system_forces['PeriodicTorsionForce']

    # Old-system core/environment torsions are deferred here as
    # [p1, p2, p3, p4, [periodicity, phase, k]] entries so that terms which are
    # identical in the new system can be promoted to the non-interpolated standard
    # force instead of being interpolated off and back on.
    auxiliary_custom_torsion_force = []
    old_custom_torsions_to_standard = []

    _logger.info("\thandle_periodic_torsion_forces: looping through old_system to add relevant terms...")
    for torsion_index in range(old_system_torsion_force.getNumTorsions()):
        torsion_parameters = old_system_torsion_force.getTorsionParameters(torsion_index)
        _logger.debug(f"\t\thandle_periodic_torsion_forces: old torsion_index {torsion_index} with old indices {torsion_parameters[:4]}")

        # Get the indices in the hybrid system
        hybrid_index_list = [self._old_to_hybrid_map[old_index] for old_index in torsion_parameters[:4]]
        hybrid_index_set = set(hybrid_index_list)

        if hybrid_index_set.intersection(self._atom_classes['unique_old_atoms']) != set():
            if self._flatten_torsions:
                # Interpolate the unique-old term: full strength at lambda = 0, k -> 0 at lambda = 1.
                force_params = [torsion_parameters[4], torsion_parameters[5], torsion_parameters[6], torsion_parameters[4], torsion_parameters[5], 0.]
                self._hybrid_system_forces['custom_torsion_force'].addTorsion(hybrid_index_list[0], hybrid_index_list[1], hybrid_index_list[2], hybrid_index_list[3], force_params)
            else:
                # Unique-old valence term: always on, so it goes to the standard (non-interpolated) force.
                self._hybrid_system_forces['unique_atom_torsion_force'].addTorsion(hybrid_index_list[0], hybrid_index_list[1],
                                                                                  hybrid_index_list[2], hybrid_index_list[3], torsion_parameters[4],
                                                                                  torsion_parameters[5], torsion_parameters[6])
        else:
            # It is a core-only term, an environment-only term, or a core/env term;
            # defer it so the new-system pass can decide whether it needs interpolation.
            hybrid_force_parameters = [torsion_parameters[4], torsion_parameters[5], torsion_parameters[6], 0.0, 0.0, 0.0]
            auxiliary_custom_torsion_force.append([hybrid_index_list[0], hybrid_index_list[1], hybrid_index_list[2], hybrid_index_list[3], hybrid_force_parameters[:3]])

    _logger.info("\thandle_periodic_torsion_forces: looping through new_system to add relevant terms...")
    for torsion_index in range(new_system_torsion_force.getNumTorsions()):
        torsion_parameters = new_system_torsion_force.getTorsionParameters(torsion_index)
        _logger.debug(f"\t\thandle_periodic_torsion_forces: new torsion_index {torsion_index} with new indices {torsion_parameters[:4]}")

        # Get the indices in the hybrid system:
        hybrid_index_list = [self._new_to_hybrid_map[new_index] for new_index in torsion_parameters[:4]]
        hybrid_index_set = set(hybrid_index_list)

        if hybrid_index_set.intersection(self._atom_classes['unique_new_atoms']) != set():
            if self._flatten_torsions:
                # Interpolate the unique-new term: k -> 0 at lambda = 0, full strength at lambda = 1.
                force_params = [torsion_parameters[4], torsion_parameters[5], 0.0, torsion_parameters[4], torsion_parameters[5], torsion_parameters[6]]
                self._hybrid_system_forces['custom_torsion_force'].addTorsion(hybrid_index_list[0], hybrid_index_list[1], hybrid_index_list[2], hybrid_index_list[3], force_params)
            else:
                # Unique-new valence term: always on, so it goes to the standard (non-interpolated) force.
                self._hybrid_system_forces['unique_atom_torsion_force'].addTorsion(hybrid_index_list[0], hybrid_index_list[1],
                                                                                  hybrid_index_list[2], hybrid_index_list[3], torsion_parameters[4],
                                                                                  torsion_parameters[5], torsion_parameters[6])
        else:
            hybrid_force_parameters = [0.0, 0.0, 0.0, torsion_parameters[4], torsion_parameters[5], torsion_parameters[6]]
            # If an identical term (same hybrid indices, same [periodicity, phase, k])
            # was deferred from the old system, it is unchanged by the transformation:
            # promote it to the standard force and mark the deferred copy for removal.
            if [hybrid_index_list[0], hybrid_index_list[1], hybrid_index_list[2], hybrid_index_list[3], hybrid_force_parameters[3:]] in auxiliary_custom_torsion_force:
                old_index = auxiliary_custom_torsion_force.index([hybrid_index_list[0], hybrid_index_list[1], hybrid_index_list[2], hybrid_index_list[3], hybrid_force_parameters[3:]])
                old_custom_torsions_to_standard.append(old_index)
                self._hybrid_system_forces['unique_atom_torsion_force'].addTorsion(hybrid_index_list[0], hybrid_index_list[1],
                                                                                  hybrid_index_list[2], hybrid_index_list[3], torsion_parameters[4],
                                                                                  torsion_parameters[5], torsion_parameters[6])
            else:
                # The term differs across the transformation: interpolate it on (zero -> new).
                self._hybrid_system_forces['custom_torsion_force'].addTorsion(hybrid_index_list[0], hybrid_index_list[1], hybrid_index_list[2], hybrid_index_list[3], hybrid_force_parameters)

    # Finally, add the deferred old-system terms that were NOT promoted to the
    # standard force; these interpolate from the old parameters down to zero.
    for index in [q for q in range(len(auxiliary_custom_torsion_force)) if q not in old_custom_torsions_to_standard]:
        terms = auxiliary_custom_torsion_force[index]
        hybrid_index_list = terms[:4]
        hybrid_force_parameters = terms[4] + [0., 0., 0.]
        self._hybrid_system_forces['custom_torsion_force'].addTorsion(hybrid_index_list[0], hybrid_index_list[1], hybrid_index_list[2], hybrid_index_list[3], hybrid_force_parameters)
def handle_nonbonded(self) -> None:
    """
    Populate the hybrid system's nonbonded forces with every particle.

    Per-particle treatment by atom class:

    * unique_old  : added to the custom sterics force with epsilon interpolated to zero
      in the forward direction; its charge is added to the standard force and removed
      via a ``lambda_electrostatics_delete`` parameter offset.
    * unique_new  : added to the custom sterics force with epsilon interpolated on;
      its charge starts at zero in the standard force and is added via a
      ``lambda_electrostatics_insert`` parameter offset.
    * core        : sterics interpolated old -> new in the custom force; charge
      interpolated old -> new in the standard force via ``lambda_electrostatics_core``.
    * environment : identical parameters in both end states; added unchanged.

    Afterwards, exceptions/exclusions are added so unique-old and unique-new atoms
    never interact, and the interaction-group and exception handlers are invoked.
    """
    old_system_nonbonded_force = self._old_system_forces['NonbondedForce']
    new_system_nonbonded_force = self._new_system_forces['NonbondedForce']
    hybrid_to_old_map = self._hybrid_to_old_map
    hybrid_to_new_map = self._hybrid_to_new_map

    # Define new global parameters for NonbondedForce (lambda_sterics_core is declared
    # here alongside the electrostatics parameters; sterics interpolation itself is
    # carried by the custom sterics force)
    self._hybrid_system_forces['standard_nonbonded_force'].addGlobalParameter('lambda_electrostatics_core', 0.0)
    self._hybrid_system_forces['standard_nonbonded_force'].addGlobalParameter('lambda_sterics_core', 0.0)
    self._hybrid_system_forces['standard_nonbonded_force'].addGlobalParameter("lambda_electrostatics_delete", 0.0)
    self._hybrid_system_forces['standard_nonbonded_force'].addGlobalParameter("lambda_electrostatics_insert", 0.0)

    # We have to loop through the particles in the system, because nonbonded force does not accept index
    _logger.info("\thandle_nonbonded: looping through all particles in hybrid...")
    for particle_index in range(self._hybrid_system.getNumParticles()):
        if particle_index in self._atom_classes['unique_old_atoms']:
            _logger.debug(f"\t\thandle_nonbonded: particle {particle_index} is a unique_old")
            # Get the parameters in the old system
            old_index = hybrid_to_old_map[particle_index]
            [charge, sigma, epsilon] = old_system_nonbonded_force.getParticleParameters(old_index)

            # Add the particle to the hybrid custom sterics and electrostatics.
            # Custom sterics per-particle params: [sigmaA, epsilonA, sigmaB, epsilonB, unique_old, unique_new]
            check_index = self._hybrid_system_forces['core_sterics_force'].addParticle([sigma, epsilon, sigma, 0.0*epsilon, 1, 0]) # turning off sterics in forward direction
            assert (particle_index == check_index ), "Attempting to add incorrect particle to hybrid system"

            # Add particle to the regular nonbonded force, but Lennard-Jones will be handled by CustomNonbondedForce
            check_index = self._hybrid_system_forces['standard_nonbonded_force'].addParticle(charge, sigma, 0.0*epsilon) # add charge to standard_nonbonded force
            assert (particle_index == check_index ), "Attempting to add incorrect particle to hybrid system"

            # Charge will be turned off at lambda_electrostatics_delete = 0, on at lambda_electrostatics_delete = 1; kill charge with lambda_electrostatics_delete = 0 --> 1
            self._hybrid_system_forces['standard_nonbonded_force'].addParticleParameterOffset('lambda_electrostatics_delete', particle_index, -charge, 0*sigma, 0*epsilon)

        elif particle_index in self._atom_classes['unique_new_atoms']:
            _logger.debug(f"\t\thandle_nonbonded: particle {particle_index} is a unique_new")
            # Get the parameters in the new system
            new_index = hybrid_to_new_map[particle_index]
            [charge, sigma, epsilon] = new_system_nonbonded_force.getParticleParameters(new_index)

            # Add the particle to the hybrid custom sterics and electrostatics
            check_index = self._hybrid_system_forces['core_sterics_force'].addParticle([sigma, 0.0*epsilon, sigma, epsilon, 0, 1]) # turning on sterics in forward direction
            assert (particle_index == check_index ), "Attempting to add incorrect particle to hybrid system"

            # Add particle to the regular nonbonded force, but Lennard-Jones will be handled by CustomNonbondedForce
            check_index = self._hybrid_system_forces['standard_nonbonded_force'].addParticle(0.0, sigma, 0.0) # charge starts at zero
            assert (particle_index == check_index ), "Attempting to add incorrect particle to hybrid system"

            # Charge will be turned off at lambda_electrostatics_insert = 0, on at lambda_electrostatics_insert = 1; add charge with lambda_electrostatics_insert = 0 --> 1
            self._hybrid_system_forces['standard_nonbonded_force'].addParticleParameterOffset('lambda_electrostatics_insert', particle_index, +charge, 0, 0)

        elif particle_index in self._atom_classes['core_atoms']:
            _logger.debug(f"\t\thandle_nonbonded: particle {particle_index} is a core")
            # Get the parameters in the new and old systems:
            old_index = hybrid_to_old_map[particle_index]
            [charge_old, sigma_old, epsilon_old] = old_system_nonbonded_force.getParticleParameters(old_index)
            new_index = hybrid_to_new_map[particle_index]
            [charge_new, sigma_new, epsilon_new] = new_system_nonbonded_force.getParticleParameters(new_index)

            # Add the particle to the custom forces, interpolating between the two parameters; add steric params and zero electrostatics to core_sterics per usual
            check_index = self._hybrid_system_forces['core_sterics_force'].addParticle([sigma_old, epsilon_old, sigma_new, epsilon_new, 0, 0])
            assert (particle_index == check_index ), "Attempting to add incorrect particle to hybrid system"

            # Still add the particle to the regular nonbonded force, but with zeroed out parameters; add old charge to standard_nonbonded and zero sterics
            # (sigma is set to the old/new average since epsilon is zero anyway)
            check_index = self._hybrid_system_forces['standard_nonbonded_force'].addParticle(charge_old, 0.5*(sigma_old+sigma_new), 0.0)
            assert (particle_index == check_index ), "Attempting to add incorrect particle to hybrid system"

            # Charge is charge_old at lambda_electrostatics = 0, charge_new at lambda_electrostatics = 1
            # TODO: We could also interpolate the Lennard-Jones here instead of core_sterics force so that core_sterics_force could just be softcore
            # interpolate between old and new charge with lambda_electrostatics core; make sure to keep sterics off
            self._hybrid_system_forces['standard_nonbonded_force'].addParticleParameterOffset('lambda_electrostatics_core', particle_index, (charge_new - charge_old), 0, 0)

        # Otherwise, the particle is in the environment
        else:
            _logger.debug(f"\t\thandle_nonbonded: particle {particle_index} is an envronment")
            # The parameters will be the same in new and old system, so just take the old parameters
            old_index = hybrid_to_old_map[particle_index]
            [charge, sigma, epsilon] = old_system_nonbonded_force.getParticleParameters(old_index)

            # Add the particle to the hybrid custom sterics, but they dont change; electrostatics are ignored
            self._hybrid_system_forces['core_sterics_force'].addParticle([sigma, epsilon, sigma, epsilon, 0, 0])

            # Add the environment atoms to the regular nonbonded force as well: should we be adding steric terms here, too?
            self._hybrid_system_forces['standard_nonbonded_force'].addParticle(charge, sigma, epsilon)

    # Now loop pairwise through (unique_old, unique_new) and add exceptions so that they never interact electrostatically (place into Nonbonded Force)
    unique_old_atoms = self._atom_classes['unique_old_atoms']
    unique_new_atoms = self._atom_classes['unique_new_atoms']

    for old in unique_old_atoms:
        for new in unique_new_atoms:
            self._hybrid_system_forces['standard_nonbonded_force'].addException(old, new, 0.0*unit.elementary_charge**2, 1.0*unit.nanometers, 0.0*unit.kilojoules_per_mole)
            self._hybrid_system_forces['core_sterics_force'].addExclusion(old, new) # This is only necessary to avoid the 'All forces must have identical exclusions' rule

    _logger.info("\thandle_nonbonded: Handling Interaction Groups...")
    self._handle_interaction_groups()

    _logger.info("\thandle_nonbonded: Handling Hybrid Exceptions...")
    self._handle_hybrid_exceptions()

    _logger.info("\thandle_nonbonded: Handling Original Exceptions...")
    self._handle_original_exceptions()
def _generate_dict_from_exceptions(self, force):
"""
This is a utility function to generate a dictionary of the form
(particle1_idx, particle2_idx) : [exception parameters]. This will facilitate access and search of exceptions
Parameters
----------
force : openmm.NonbondedForce object
a force containing exceptions
Returns
-------
exceptions_dict : dict
Dictionary of exceptions
"""
exceptions_dict = {}
for exception_index in range(force.getNumExceptions()):
[index1, index2, chargeProd, sigma, epsilon] = force.getExceptionParameters(exception_index)
exceptions_dict[(index1, index2)] = [chargeProd, sigma, epsilon]
#_logger.debug(f"\t_generate_dict_from_exceptions: Exceptions Dict: {exceptions_dict}" )
return exceptions_dict
def _handle_interaction_groups(self):
"""
Create the appropriate interaction groups for the custom nonbonded forces. The groups are:
1) Unique-old - core
2) Unique-old - environment
3) Unique-new - core
4) Unique-new - environment
5) Core - environment
6) Core - core
Unique-old and Unique new are prevented from interacting this way, and intra-unique interactions occur in an
unmodified nonbonded force.
Must be called after particles are added to the Nonbonded forces
TODO: we should also be adding the following interaction groups...
7) Unique-new - Unique-new
8) Unique-old - Unique-old
"""
# Get the force objects for convenience:
sterics_custom_force = self._hybrid_system_forces['core_sterics_force']
# Also prepare the atom classes
core_atoms = self._atom_classes['core_atoms']
unique_old_atoms = self._atom_classes['unique_old_atoms']
unique_new_atoms = self._atom_classes['unique_new_atoms']
environment_atoms = self._atom_classes['environment_atoms']
sterics_custom_force.addInteractionGroup(unique_old_atoms, core_atoms)
sterics_custom_force.addInteractionGroup(unique_old_atoms, environment_atoms)
sterics_custom_force.addInteractionGroup(unique_new_atoms, core_atoms)
sterics_custom_force.addInteractionGroup(unique_new_atoms, environment_atoms)
sterics_custom_force.addInteractionGroup(core_atoms, environment_atoms)
sterics_custom_force.addInteractionGroup(core_atoms, core_atoms)
sterics_custom_force.addInteractionGroup(unique_new_atoms, unique_new_atoms)
sterics_custom_force.addInteractionGroup(unique_old_atoms, unique_old_atoms)
def _handle_hybrid_exceptions(self):
"""
Instead of excluding interactions that shouldn't occur, we provide exceptions for interactions that were zeroed
out but should occur.
"""
old_system_nonbonded_force = self._old_system_forces['NonbondedForce']
new_system_nonbonded_force = self._new_system_forces['NonbondedForce']
import itertools
# Prepare the atom classes
unique_old_atoms = self._atom_classes['unique_old_atoms']
unique_new_atoms = self._atom_classes['unique_new_atoms']
# Get the list of interaction pairs for which we need to set exceptions:
unique_old_pairs = list(itertools.combinations(unique_old_atoms, 2))
unique_new_pairs = list(itertools.combinations(unique_new_atoms, 2))
# Add back the interactions of the old unique atoms, unless there are exceptions
for atom_pair in unique_old_pairs:
# Since the pairs are indexed in the dictionary by the old system indices, we need to convert
old_index_atom_pair = (self._hybrid_to_old_map[atom_pair[0]], self._hybrid_to_old_map[atom_pair[1]])
# Now we check if the pair is in the exception dictionary
if old_index_atom_pair in self._old_system_exceptions:
_logger.debug(f"\t\thandle_nonbonded: _handle_hybrid_exceptions: {old_index_atom_pair} is an old system exception")
[chargeProd, sigma, epsilon] = self._old_system_exceptions[old_index_atom_pair]
if self._interpolate_14s: #if we are interpolating 1,4 exceptions then we have to
self._hybrid_system_forces['standard_nonbonded_force'].addException(atom_pair[0], atom_pair[1], chargeProd*0.0, sigma, epsilon*0.0)
else:
self._hybrid_system_forces['standard_nonbonded_force'].addException(atom_pair[0], atom_pair[1], chargeProd, sigma, epsilon)
self._hybrid_system_forces['core_sterics_force'].addExclusion(atom_pair[0], atom_pair[1]) # Add exclusion to ensure exceptions are consistent
# Check if the pair is in the reverse order and use that if so
elif old_index_atom_pair[::-1] in self._old_system_exceptions:
_logger.debug(f"\t\thandle_nonbonded: _handle_hybrid_exceptions: {old_index_atom_pair[::-1]} is an old system exception")
[chargeProd, sigma, epsilon] = self._old_system_exceptions[old_index_atom_pair[::-1]]
if self._interpolate_14s: # If we are interpolating 1,4 exceptions then we have to
self._hybrid_system_forces['standard_nonbonded_force'].addException(atom_pair[0], atom_pair[1], chargeProd*0.0, sigma, epsilon*0.0)
else:
self._hybrid_system_forces['standard_nonbonded_force'].addException(atom_pair[0], atom_pair[1], chargeProd, sigma, epsilon)
self._hybrid_system_forces['core_sterics_force'].addExclusion(atom_pair[0], atom_pair[1]) # Add exclusion to ensure exceptions are consistent
# If it's not handled by an exception in the original system, we just add the regular parameters as an exception
# TODO: this implies that the old-old nonbonded interactions (those which are not exceptions) are always self-interacting throughout lambda protocol...
# else:
# _logger.info(f"\t\thandle_nonbonded: _handle_hybrid_exceptions: {old_index_atom_pair} is NOT an old exception...perhaps this is a problem!")
# [charge0, sigma0, epsilon0] = self._old_system_forces['NonbondedForce'].getParticleParameters(old_index_atom_pair[0])
# [charge1, sigma1, epsilon1] = self._old_system_forces['NonbondedForce'].getParticleParameters(old_index_atom_pair[1])
# chargeProd = charge0*charge1
# epsilon = unit.sqrt(epsilon0*epsilon1)
# sigma = 0.5*(sigma0+sigma1)
# self._hybrid_system_forces['standard_nonbonded_force'].addException(atom_pair[0], atom_pair[1], chargeProd, sigma, epsilon)
# self._hybrid_system_forces['core_sterics_force'].addExclusion(atom_pair[0], atom_pair[1]) # add exclusion to ensure exceptions are consistent
# Add back the interactions of the new unique atoms, unless there are exceptions
for atom_pair in unique_new_pairs:
# Since the pairs are indexed in the dictionary by the new system indices, we need to convert
new_index_atom_pair = (self._hybrid_to_new_map[atom_pair[0]], self._hybrid_to_new_map[atom_pair[1]])
# Now we check if the pair is in the exception dictionary
if new_index_atom_pair in self._new_system_exceptions:
_logger.debug(f"\t\thandle_nonbonded: _handle_hybrid_exceptions: {new_index_atom_pair} is a new system exception")
[chargeProd, sigma, epsilon] = self._new_system_exceptions[new_index_atom_pair]
if self._interpolate_14s:
self._hybrid_system_forces['standard_nonbonded_force'].addException(atom_pair[0], atom_pair[1], chargeProd*0.0, sigma, epsilon*0.0)
else:
self._hybrid_system_forces['standard_nonbonded_force'].addException(atom_pair[0], atom_pair[1], chargeProd, sigma, epsilon)
self._hybrid_system_forces['core_sterics_force'].addExclusion(atom_pair[0], atom_pair[1])
# Check if the pair is present in the reverse order and use that if so
elif new_index_atom_pair[::-1] in self._new_system_exceptions:
_logger.debug(f"\t\thandle_nonbonded: _handle_hybrid_exceptions: {new_index_atom_pair[::-1]} is a new system exception")
[chargeProd, sigma, epsilon] = self._new_system_exceptions[new_index_atom_pair[::-1]]
if self._interpolate_14s:
self._hybrid_system_forces['standard_nonbonded_force'].addException(atom_pair[0], atom_pair[1], chargeProd*0.0, sigma, epsilon*0.0)
else:
self._hybrid_system_forces['standard_nonbonded_force'].addException(atom_pair[0], atom_pair[1], chargeProd, sigma, epsilon)
self._hybrid_system_forces['core_sterics_force'].addExclusion(atom_pair[0], atom_pair[1])
# If it's not handled by an exception in the original system, we just add the regular parameters as an exception
# else:
# _logger.info(f"\t\thandle_nonbonded: _handle_hybrid_exceptions: {new_index_atom_pair} is NOT a new exception...perhaps this is a problem!")
# [charge0, sigma0, epsilon0] = self._new_system_forces['NonbondedForce'].getParticleParameters(new_index_atom_pair[0])
# [charge1, sigma1, epsilon1] = self._new_system_forces['NonbondedForce'].getParticleParameters(new_index_atom_pair[1])
# chargeProd = charge0*charge1
# epsilon = unit.sqrt(epsilon0*epsilon1)
# sigma = 0.5*(sigma0+sigma1)
# self._hybrid_system_forces['standard_nonbonded_force'].addException(atom_pair[0], atom_pair[1], chargeProd, sigma, epsilon)
# self._hybrid_system_forces['core_sterics_force'].addExclusion(atom_pair[0], atom_pair[1]) # add exclusion to ensure exceptions are consistent
def _handle_original_exceptions(self) -> None:
    """
    This method ensures that exceptions present in the original systems are present
    in the hybrid appropriately.

    Old-system exceptions are handled as follows: environment pairs are copied
    verbatim; unique_old-unique_old pairs are skipped (already added in
    ``_handle_hybrid_exceptions``); pairs containing exactly one unique old atom are
    copied (or zeroed when ``self._interpolate_14s`` is set); remaining pairs
    (core-core / core-environment) are added with the old parameters plus
    ``lambda_electrostatics_core`` / ``lambda_sterics_core`` parameter offsets that
    interpolate toward the new parameters. A second pass over new-system exceptions
    then adds the pairs that only exist in the new system. Every exception added to
    the standard force gets a matching exclusion in the custom sterics force.
    """
    # Get what we need to find the exceptions from the new and old systems:
    old_system_nonbonded_force = self._old_system_forces['NonbondedForce']
    new_system_nonbonded_force = self._new_system_forces['NonbondedForce']
    hybrid_to_old_map = {value: key for key, value in self._old_to_hybrid_map.items()}
    hybrid_to_new_map = {value: key for key, value in self._new_to_hybrid_map.items()}

    # First, loop through the old system's exceptions and add them to the hybrid appropriately:
    _logger.debug(f"\tlooping over old system exceptions...")
    for exception_pair, exception_parameters in self._old_system_exceptions.items():
        [index1_old, index2_old] = exception_pair
        [chargeProd_old, sigma_old, epsilon_old] = exception_parameters

        # Get hybrid indices:
        index1_hybrid = self._old_to_hybrid_map[index1_old]
        index2_hybrid = self._old_to_hybrid_map[index2_old]
        index_set = {index1_hybrid, index2_hybrid}

        # In this case, the interaction is only covered by the regular nonbonded force, and as such will be copied to that force
        # In the unique-old case, it is handled elsewhere due to internal peculiarities regarding exceptions
        if index_set.issubset(self._atom_classes['environment_atoms']):
            _logger.debug(f"\t\thandle_nonbonded: _handle_original_exceptions: {exception_pair} is an environment exception pair")
            self._hybrid_system_forces['standard_nonbonded_force'].addException(index1_hybrid, index2_hybrid, chargeProd_old, sigma_old, epsilon_old)
            self._hybrid_system_forces['core_sterics_force'].addExclusion(index1_hybrid, index2_hybrid)

        # We have already handled unique old - unique old exceptions
        elif len(index_set.intersection(self._atom_classes['unique_old_atoms'])) == 2:
            _logger.debug(f"\t\thandle_nonbonded: _handle_original_exceptions: {exception_pair} is a unique_old-unique_old exception pair (already handled).")
            continue

        # Otherwise, check if one of the atoms in the set is in the unique_old_group and the other is not:
        elif len(index_set.intersection(self._atom_classes['unique_old_atoms'])) == 1:
            _logger.debug(f"\t\thandle_nonbonded: _handle_original_exceptions: {exception_pair} is a unique_old-core or unique_old-environment exception pair.")
            # Zeroed when 1,4 terms are interpolated elsewhere; copied verbatim otherwise
            if self._interpolate_14s:
                self._hybrid_system_forces['standard_nonbonded_force'].addException(index1_hybrid, index2_hybrid, chargeProd_old*0.0, sigma_old, epsilon_old*0.0)
            else:
                self._hybrid_system_forces['standard_nonbonded_force'].addException(index1_hybrid, index2_hybrid, chargeProd_old, sigma_old, epsilon_old)

            self._hybrid_system_forces['core_sterics_force'].addExclusion(index1_hybrid, index2_hybrid)

        # If the exception particles are neither solely old unique, solely environment, nor contain any unique old atoms, they are either core/environment or core/core
        # In this case, we need to get the parameters from the exception in the other (new) system, and interpolate between the two
        else:
            _logger.debug(f"\t\thandle_nonbonded: _handle_original_exceptions: {exception_pair} is a core-core or core-environment exception pair.")
            # First get the new indices.
            index1_new = hybrid_to_new_map[index1_hybrid]
            index2_new = hybrid_to_new_map[index2_hybrid]

            # Get the exception parameters:
            new_exception_parms= self._find_exception(new_system_nonbonded_force, index1_new, index2_new)

            # If there's no new exception, then we should just set the exception parameters to be the nonbonded parameters
            # (combined with Lorentz-Berthelot-style rules: product of charges, arithmetic-mean sigma, geometric-mean epsilon)
            if not new_exception_parms:
                [charge1_new, sigma1_new, epsilon1_new] = new_system_nonbonded_force.getParticleParameters(index1_new)
                [charge2_new, sigma2_new, epsilon2_new] = new_system_nonbonded_force.getParticleParameters(index2_new)

                chargeProd_new = charge1_new * charge2_new
                sigma_new = 0.5 * (sigma1_new + sigma2_new)
                epsilon_new = unit.sqrt(epsilon1_new*epsilon2_new)
            else:
                [index1_new, index2_new, chargeProd_new, sigma_new, epsilon_new] = new_exception_parms

            # Interpolate between old and new via parameter offsets on the standard force
            exception_index = self._hybrid_system_forces['standard_nonbonded_force'].addException(index1_hybrid, index2_hybrid, chargeProd_old, sigma_old, epsilon_old)
            self._hybrid_system_forces['standard_nonbonded_force'].addExceptionParameterOffset('lambda_electrostatics_core', exception_index, (chargeProd_new - chargeProd_old), 0, 0)
            self._hybrid_system_forces['standard_nonbonded_force'].addExceptionParameterOffset('lambda_sterics_core', exception_index, 0, (sigma_new - sigma_old), (epsilon_new - epsilon_old))
            self._hybrid_system_forces['core_sterics_force'].addExclusion(index1_hybrid, index2_hybrid)

    # Now, loop through the new system to collect remaining interactions. The only that remain here are
    # uniquenew-uniquenew, uniquenew-core, and uniquenew-environment. There might also be core-core, since not all
    # core-core exceptions exist in both
    _logger.debug(f"\tlooping over new system exceptions...")
    for exception_pair, exception_parameters in self._new_system_exceptions.items():
        [index1_new, index2_new] = exception_pair
        [chargeProd_new, sigma_new, epsilon_new] = exception_parameters

        # Get hybrid indices:
        index1_hybrid = self._new_to_hybrid_map[index1_new]
        index2_hybrid = self._new_to_hybrid_map[index2_new]
        index_set = {index1_hybrid, index2_hybrid}

        # If it's a subset of unique_new_atoms, then this is an intra-unique interaction and should have its exceptions
        # specified in the regular nonbonded force. However, this is handled elsewhere as above due to pecularities with exception handling
        if index_set.issubset(self._atom_classes['unique_new_atoms']):
            _logger.debug(f"\t\thandle_nonbonded: _handle_original_exceptions: {exception_pair} is a unique_new-unique_new exception pair (already handled).")
            continue

        # Look for the final class- interactions between uniquenew-core and uniquenew-environment. They are treated
        # similarly: they are simply on and constant the entire time (as a valence term)
        elif len(index_set.intersection(self._atom_classes['unique_new_atoms'])) > 0:
            _logger.debug(f"\t\thandle_nonbonded: _handle_original_exceptions: {exception_pair} is a unique_new-core or unique_new-environment exception pair.")
            if self._interpolate_14s:
                self._hybrid_system_forces['standard_nonbonded_force'].addException(index1_hybrid, index2_hybrid, chargeProd_new*0.0, sigma_new, epsilon_new*0.0)
            else:
                self._hybrid_system_forces['standard_nonbonded_force'].addException(index1_hybrid, index2_hybrid, chargeProd_new, sigma_new, epsilon_new)
            self._hybrid_system_forces['core_sterics_force'].addExclusion(index1_hybrid, index2_hybrid)

        # However, there may be a core exception that exists in one system but not the other (ring closure)
        elif index_set.issubset(self._atom_classes['core_atoms']):
            _logger.debug(f"\t\thandle_nonbonded: _handle_original_exceptions: {exception_pair} is a core-core exception pair.")

            # Get the old indices
            try:
                index1_old = self._topology_proposal.new_to_old_atom_map[index1_new]
                index2_old = self._topology_proposal.new_to_old_atom_map[index2_new]
            except KeyError:
                # At least one of the new atoms has no old counterpart; nothing to map back to
                continue

            # See if it's also in the old nonbonded force. if it is, then we don't need to add it.
            # But if it's not, we need to interpolate
            if not self._find_exception(old_system_nonbonded_force, index1_old, index2_old):

                [charge1_old, sigma1_old, epsilon1_old] = old_system_nonbonded_force.getParticleParameters(index1_old)
                [charge2_old, sigma2_old, epsilon2_old] = old_system_nonbonded_force.getParticleParameters(index2_old)

                chargeProd_old = charge1_old*charge2_old
                sigma_old = 0.5 * (sigma1_old + sigma2_old)
                epsilon_old = unit.sqrt(epsilon1_old*epsilon2_old)

                exception_index = self._hybrid_system_forces['standard_nonbonded_force'].addException(index1_hybrid,
                                                                                                     index2_hybrid,
                                                                                                     chargeProd_old,
                                                                                                     sigma_old,
                                                                                                     epsilon_old)

                self._hybrid_system_forces['standard_nonbonded_force'].addExceptionParameterOffset(
                    'lambda_electrostatics_core', exception_index, (chargeProd_new - chargeProd_old), 0, 0)

                self._hybrid_system_forces['standard_nonbonded_force'].addExceptionParameterOffset('lambda_sterics_core',
                                                                                                  exception_index,
                                                                                                  0, (sigma_new - sigma_old),
                                                                                                  (epsilon_new - epsilon_old))

                self._hybrid_system_forces['core_sterics_force'].addExclusion(index1_hybrid, index2_hybrid)
def handle_old_new_exceptions(self):
    """
    Find the exceptions associated with old-old and old-core interactions, as well as new-new and new-core
    interactions. These exceptions are placed in a CustomBondForce that interpolates electrostatics and a
    softcore sterics potential so unique-atom 1,4 terms can be annealed on/off with lambda.
    """
    from openmmtools.constants import ONE_4PI_EPS0  # OpenMM constant for Coulomb interactions (implicitly in md_unit_system units)

    # Total per-bond energy: electrostatics + sterics, each gated by the unique_old/unique_new per-bond flags.
    old_new_nonbonded_exceptions = "U_electrostatics + U_sterics;"

    if self._softcore_LJ_v2:
        # Softcore LJ "v2": below the switching distance r_LJ the LJ potential is replaced by a quadratic
        # continuation matched in value (U_sterics_cut) and slope (Force) at r_LJ.
        old_new_nonbonded_exceptions += "U_sterics = select(step(r - r_LJ), 4*epsilon*x*(x-1.0), U_sterics_quad);"
        old_new_nonbonded_exceptions += "U_sterics_quad = Force*(((r - r_LJ)^2)/2 - (r - r_LJ)) + U_sterics_cut;"
        old_new_nonbonded_exceptions += "U_sterics_cut = 4*epsilon*((sigma/r_LJ)^6)*(((sigma/r_LJ)^6) - 1.0);"
        old_new_nonbonded_exceptions += "Force = -4*epsilon*((-12*sigma^12)/(r_LJ^13) + (6*sigma^6)/(r_LJ^7));"
        old_new_nonbonded_exceptions += "x = (sigma/r)^6;"
        old_new_nonbonded_exceptions += "r_LJ = softcore_alpha*((26/7)*(sigma^6)*lambda_sterics_deprecated)^(1/6);"
        old_new_nonbonded_exceptions += "lambda_sterics_deprecated = new_interaction*(1.0 - lambda_sterics_insert) + old_interaction*lambda_sterics_delete;"
    else:
        old_new_nonbonded_exceptions += "U_sterics = 4*epsilon*x*(x-1.0); x = (sigma/reff_sterics)^6;"
        # Effective softcore distance for sterics.
        # BUGFIX: this definition was previously appended twice verbatim; the redundant duplicate
        # definition of reff_sterics in the energy expression has been removed.
        old_new_nonbonded_exceptions += "reff_sterics = sigma*((softcore_alpha*lambda_alpha + (r/sigma)^6))^(1/6);"
        old_new_nonbonded_exceptions += "lambda_alpha = new_interaction*(1-lambda_sterics_insert) + old_interaction*lambda_sterics_delete;"

    # Electrostatics are linearly inserted (unique new) or deleted (unique old); no softcore.
    old_new_nonbonded_exceptions += "U_electrostatics = (lambda_electrostatics_insert * unique_new + unique_old * (1 - lambda_electrostatics_delete)) * ONE_4PI_EPS0*chargeProd/r;"
    old_new_nonbonded_exceptions += "ONE_4PI_EPS0 = %f;" % ONE_4PI_EPS0

    # Interpolate sigma/epsilon between the A (old) and B (new) endpoints.
    old_new_nonbonded_exceptions += "epsilon = (1-lambda_sterics)*epsilonA + lambda_sterics*epsilonB;"  # interpolation
    old_new_nonbonded_exceptions += "sigma = (1-lambda_sterics)*sigmaA + lambda_sterics*sigmaB;"

    old_new_nonbonded_exceptions += "lambda_sterics = new_interaction*lambda_sterics_insert + old_interaction*lambda_sterics_delete;"
    # delta(1-flag) == 1 exactly when the per-bond flag is 1, so exactly one of new/old interaction is active.
    old_new_nonbonded_exceptions += "new_interaction = delta(1-unique_new); old_interaction = delta(1-unique_old);"

    nonbonded_exceptions_force = openmm.CustomBondForce(old_new_nonbonded_exceptions)
    self._hybrid_system.addForce(nonbonded_exceptions_force)
    _logger.debug(f"\thandle_old_new_exceptions: {nonbonded_exceptions_force} added to hybrid system")

    # For reference, set name in force dict
    self._hybrid_system_forces['old_new_exceptions_force'] = nonbonded_exceptions_force

    if self._softcore_LJ_v2:
        nonbonded_exceptions_force.addGlobalParameter("softcore_alpha", self._softcore_LJ_v2_alpha)
    else:
        nonbonded_exceptions_force.addGlobalParameter("softcore_alpha", self.softcore_alpha)
    nonbonded_exceptions_force.addGlobalParameter("lambda_electrostatics_insert", 0.0)  # electrostatics insert
    nonbonded_exceptions_force.addGlobalParameter("lambda_electrostatics_delete", 0.0)  # electrostatics delete
    nonbonded_exceptions_force.addGlobalParameter("lambda_sterics_insert", 0.0)  # sterics insert
    nonbonded_exceptions_force.addGlobalParameter("lambda_sterics_delete", 0.0)  # sterics delete

    for parameter in ['chargeProd', 'sigmaA', 'epsilonA', 'sigmaB', 'epsilonB', 'unique_old', 'unique_new']:
        nonbonded_exceptions_force.addPerBondParameter(parameter)

    # Prepare for exceptions loop by grabbing nonbonded forces, hybrid_to_old/new maps
    old_system_nonbonded_force = self._old_system_forces['NonbondedForce']
    new_system_nonbonded_force = self._new_system_forces['NonbondedForce']
    hybrid_to_old_map = {value: key for key, value in self._old_to_hybrid_map.items()}
    hybrid_to_new_map = {value: key for key, value in self._new_to_hybrid_map.items()}

    # First, loop through the old system's exceptions and add them to the hybrid appropriately:
    for exception_pair, exception_parameters in self._old_system_exceptions.items():
        [index1_old, index2_old] = exception_pair
        [chargeProd_old, sigma_old, epsilon_old] = exception_parameters

        # Get hybrid indices:
        index1_hybrid = self._old_to_hybrid_map[index1_old]
        index2_hybrid = self._old_to_hybrid_map[index2_old]
        index_set = {index1_hybrid, index2_hybrid}

        # Check if one of the atoms in the set is in the unique_old_group and the exception is nonzero:
        if len(index_set.intersection(self._atom_classes['unique_old_atoms'])) > 0 and (chargeProd_old.value_in_unit_system(unit.md_unit_system) != 0.0 or epsilon_old.value_in_unit_system(unit.md_unit_system) != 0.0):
            _logger.debug(f"\t\thandle_old_new_exceptions: {exception_pair} is a unique_old exception pair.")
            if self._interpolate_14s:
                # If we are interpolating 1,4s, then we anneal this term off; otherwise, the exception force is constant and already handled in the standard nonbonded force
                nonbonded_exceptions_force.addBond(index1_hybrid, index2_hybrid, [chargeProd_old, sigma_old, epsilon_old, sigma_old, epsilon_old*0.0, 1, 0])

    # Next, loop through the new system's exceptions and add them to the hybrid appropriately
    for exception_pair, exception_parameters in self._new_system_exceptions.items():
        [index1_new, index2_new] = exception_pair
        [chargeProd_new, sigma_new, epsilon_new] = exception_parameters

        # Get hybrid indices:
        index1_hybrid = self._new_to_hybrid_map[index1_new]
        index2_hybrid = self._new_to_hybrid_map[index2_new]
        index_set = {index1_hybrid, index2_hybrid}

        # Look for the final class- interactions between uniquenew-core and uniquenew-environment. They are treated
        # similarly: they are simply on and constant the entire time (as a valence term)
        if len(index_set.intersection(self._atom_classes['unique_new_atoms'])) > 0 and (chargeProd_new.value_in_unit_system(unit.md_unit_system) != 0.0 or epsilon_new.value_in_unit_system(unit.md_unit_system) != 0.0):
            _logger.debug(f"\t\thandle_old_new_exceptions: {exception_pair} is a unique_new exception pair.")
            if self._interpolate_14s:
                # If we are interpolating 1,4s, then we anneal this term on; otherwise, the exception force is constant and already handled in the standard nonbonded force
                nonbonded_exceptions_force.addBond(index1_hybrid, index2_hybrid, [chargeProd_new, sigma_new, epsilon_new*0.0, sigma_new, epsilon_new, 0, 1])
def _find_exception(self, force, index1, index2):
"""
Find the exception that corresponds to the given indices in the given system
Parameters
----------
force : openmm.NonbondedForce object
System containing the exceptions
index1 : int
The index of the first atom (order is unimportant)
index2 : int
The index of the second atom (order is unimportant)
Returns
-------
exception_parameters : list
List of exception parameters
"""
index_set = {index1, index2}
# Loop through the exceptions and try to find one matching the criteria
for exception_idx in range(force.getNumExceptions()):
exception_parameters = force.getExceptionParameters(exception_idx)
if index_set==set(exception_parameters[:2]):
return exception_parameters
return []
def _compute_hybrid_positions(self):
    """
    Assemble positions for the hybrid system. Dimensionality is
    (n_environment + n_core + n_old_unique + n_new_unique).

    All mapped positions are first copied from the old system, then the mapped positions from the
    new system are copied on top. This relies on the assumption that positions common to old and
    new are identical (which is the case for perses as-is).

    Returns
    -------
    hybrid_positions : np.ndarray [n, 3]
        Positions of the hybrid system, in nm
    """
    # Strip units so we can work with plain arrays.
    old_coords = np.array(self._old_positions.value_in_unit(unit.nanometer))
    new_coords = np.array(self._new_positions.value_in_unit(unit.nanometer))

    # Allocate one row per hybrid particle.
    total_particles = self._hybrid_system.getNumParticles()
    hybrid_coords = np.zeros([total_particles, 3])

    # Copy old-system coordinates into their hybrid slots.
    for source_idx, target_idx in self._old_to_hybrid_map.items():
        hybrid_coords[target_idx, :] = old_coords[source_idx, :]

    # Copy new-system coordinates; mapped (core/environment) rows are overwritten with
    # identical values by assumption, and unique-new rows are filled for the first time.
    for source_idx, target_idx in self._new_to_hybrid_map.items():
        hybrid_coords[target_idx, :] = new_coords[source_idx, :]

    return unit.Quantity(hybrid_coords, unit=unit.nanometers)
def _create_topology(self):
    """
    Create an mdtraj topology corresponding to the hybrid system.

    This is purely for writing out trajectories--it is not expected to be parameterized.

    Returns
    -------
    hybrid_topology : mdtraj.Topology
    """
    # First, make an md.Topology of the old system:
    old_topology = md.Topology.from_openmm(self._topology_proposal.old_topology)

    # The hybrid topology starts as a deep copy of the old topology, since old atoms occupy the
    # leading hybrid indices.
    hybrid_topology = copy.deepcopy(old_topology)

    # Next, make a topology of the new system:
    new_topology = md.Topology.from_openmm(self._topology_proposal.new_topology)

    # Maps hybrid index -> mdtraj Atom object added to hybrid_topology for each unique new atom.
    added_atoms = dict()

    # Get the core atoms in the new index system (as opposed to the hybrid index system). We will need this later
    core_atoms_new_indices = {self._hybrid_to_new_map[core_atom] for core_atom in self._atom_classes['core_atoms']}

    # Now, add each unique new atom to the topology (this is the same order as the system)
    for particle_idx in self._topology_proposal.unique_new_atoms:
        new_particle_hybrid_idx = self._new_to_hybrid_map[particle_idx]
        new_system_atom = new_topology.atom(particle_idx)

        # First, we get the residue in the new system associated with this atom
        new_system_residue = new_system_atom.residue

        # Next, we have to enumerate the other atoms in that residue to find mapped atoms
        new_system_atom_set = {atom.index for atom in new_system_residue.atoms}

        # Now, we find the subset of atoms that are mapped. These must be in the "core" category, since they are
        # mapped and part of a changing residue
        mapped_new_atom_indices = core_atoms_new_indices.intersection(new_system_atom_set)

        # Now get the old indices of the above atoms so that we can find the appropriate residue in the old system;
        # for this we can use the new to old atom map
        mapped_old_atom_indices = [self._topology_proposal.new_to_old_atom_map[atom_idx] for atom_idx in mapped_new_atom_indices]

        # We can just take the first one--they all have the same residue
        first_mapped_old_atom_index = mapped_old_atom_indices[0]

        # Get the atom object corresponding to this index from the hybrid (which is a deepcopy of the old)
        mapped_hybrid_system_atom = hybrid_topology.atom(first_mapped_old_atom_index)

        # Get the residue that is relevant to this atom
        mapped_residue = mapped_hybrid_system_atom.residue

        # Add the atom using the mapped residue
        added_atoms[new_particle_hybrid_idx] = hybrid_topology.add_atom(new_system_atom.name, new_system_atom.element, mapped_residue)

    # Now loop through the bonds in the new system, and if the bond contains a unique new atom, then add it to the
    # hybrid topology
    for (atom1, atom2) in new_topology.bonds:
        atom1_index_in_hybrid = self._new_to_hybrid_map[atom1.index]
        atom2_index_in_hybrid = self._new_to_hybrid_map[atom2.index]

        # If at least one atom is in the unique new class, we need to add the bond to the hybrid topology
        if atom1_index_in_hybrid in self._atom_classes['unique_new_atoms'] or atom2_index_in_hybrid in self._atom_classes['unique_new_atoms']:
            # BUGFIX: membership tests and `added_atoms` lookups must use HYBRID indices.
            # Both `self._atom_classes['unique_new_atoms']` and the keys of `added_atoms` hold hybrid
            # indices, but the previous code used the new-system index (`atom.index`) for both, which
            # is only correct when new and hybrid indices happen to coincide.
            if atom1_index_in_hybrid in self._atom_classes['unique_new_atoms']:
                atom1_to_bond = added_atoms[atom1_index_in_hybrid]
            else:
                # NOTE(review): the mapped endpoint is taken from `new_topology`, not `hybrid_topology`;
                # presumably acceptable because this topology is only used for trajectory writing -- confirm.
                atom1_to_bond = atom1

            if atom2_index_in_hybrid in self._atom_classes['unique_new_atoms']:
                atom2_to_bond = added_atoms[atom2_index_in_hybrid]
            else:
                atom2_to_bond = atom2

            hybrid_topology.add_bond(atom1_to_bond, atom2_to_bond)

    return hybrid_topology
def _impose_virtual_bonds(self):
    """
    Impose virtual bonds between protein subunits and ligand(s) to ensure they are imaged together.

    A zero-energy CustomBondForce bond is added between one core heavy atom and one nearby protein
    CA atom, and between the first atoms of distinct protein chains, so periodic imaging keeps the
    bonded groups in the same image. No restraint is added when the core is empty, no protein CA
    atoms exist, or the core atoms are themselves part of the protein.
    """
    from simtk import unit, openmm

    # Determine core heavy atom indices
    core_atoms = [ int(index) for index in self._atom_classes['core_atoms'] ]
    heavy_atoms = [ int(index) for index in self._hybrid_topology.select('mass > 1.5') ]
    core_heavy_atoms = [ int(index) for index in set(core_atoms).intersection(set(heavy_atoms)) ]

    # Determine protein CA atoms
    protein_atoms = [ int(index) for index in self._hybrid_topology.select('protein and name CA') ]

    if len(core_heavy_atoms)==0 or len(protein_atoms)==0:
        # No restraint to be added
        _logger.info(f"\t\t_impose_virtual_bonds: No restraint added because one set is empty (core_atoms={core_heavy_atoms}, protein_atoms={protein_atoms})")
        return
    if len(set(core_atoms).intersection(set(protein_atoms))) != 0:
        # Core atoms are part of protein
        _logger.info(f"\t\t_impose_virtual_bonds: No restraint added because sets overlap (core_atoms={core_heavy_atoms}, protein_atoms={protein_atoms})")
        return

    # Filter protein CA atoms within cutoff of core heavy atoms
    cutoff = 0.65  # 6.5 A
    trajectory = md.Trajectory([self.hybrid_positions/unit.nanometers], topology=self._hybrid_topology)
    matches = md.compute_neighbors(trajectory, cutoff, core_heavy_atoms, haystack_indices=protein_atoms, periodic=False)
    protein_atoms = set()
    for match in matches:
        for index in match:
            protein_atoms.add(int(index))
    protein_atoms = [ int(index) for index in protein_atoms ]

    # BUGFIX: log messages below were previously mislabeled "_impose_rmsd_restraint".
    _logger.info(f"\t\t_impose_virtual_bonds: Restraint will be added (core_atoms={core_heavy_atoms}, protein_atoms={protein_atoms})")

    # Add virtual bond between a core and protein atom to ensure they are periodically replicated together.
    # The energy expression '0' makes the bond purely topological (no force contribution).
    bondforce = openmm.CustomBondForce('0')
    bondforce.addBond(core_heavy_atoms[0], protein_atoms[0], [])
    self._hybrid_system.addForce(bondforce)
    _logger.info(f"\t\t_impose_virtual_bonds: Added virtual bond between {core_heavy_atoms[0]} and {protein_atoms[0]} so they are imaged together")

    # Extract protein chains and per-chain atom indices so chains can be tied together too.
    # (An unused selection of non-protein, non-water heavy atoms was removed here.)
    mdtop = trajectory.top
    protein_atom_indices = mdtop.select('protein and (mass > 1)')
    protein_chainids = list(set([atom.residue.chain.index for atom in mdtop.atoms if atom.index in protein_atom_indices]))
    n_protein_chains = len(protein_chainids)
    protein_chain_atom_indices = dict()
    for chainid in protein_chainids:
        protein_chain_atom_indices[chainid] = mdtop.select(f'protein and chainid {chainid}')

    # Add a virtual bond between protein chains so they are imaged together
    if (n_protein_chains > 1):
        chainid = protein_chainids[0]
        iatom = protein_chain_atom_indices[chainid][0]
        for chainid in protein_chainids[1:]:
            jatom = protein_chain_atom_indices[chainid][0]
            _logger.info(f"\t\t_impose_virtual_bonds: Added virtual bond between protein chains atoms {iatom} and {jatom} so they are imaged together")
            bondforce.addBond(int(iatom), int(jatom), [])
def _impose_rmsd_restraint(self):
    """
    Impose an RMSD restraint between the core heavy atoms and protein CA atoms within 6A

    A flat-bottom harmonic restraint on the RMSD collective variable is added to the hybrid
    system: zero inside `buffer`, harmonic with force constant K_RMSD = kT/sigma^2 outside.
    Returns early (no force added) when either atom set is empty or the core overlaps the protein.

    TODO: Generalize this to accommodate options.
    TODO: Don't turn this on for sidechain mutations.
    """
    from simtk import unit, openmm

    # Determine core heavy atom indices (core atoms that are also heavier than hydrogen).
    core_atoms = [ int(index) for index in self._atom_classes['core_atoms'] ]
    heavy_atoms = [ int(index) for index in self._hybrid_topology.select('mass > 1.5') ]
    core_heavy_atoms = [ int(index) for index in set(core_atoms).intersection(set(heavy_atoms)) ]

    # Determine protein CA atoms
    protein_atoms = [ int(index) for index in self._hybrid_topology.select('protein and name CA') ]

    if len(core_heavy_atoms)==0 or len(protein_atoms)==0:
        # No restraint to be added: one of the two atom sets is empty.
        _logger.info(f"\t\t_impose_rmsd_restraint: No restraint added because one set is empty (core_atoms={core_heavy_atoms}, protein_atoms={protein_atoms})")
        return
    if len(set(core_atoms).intersection(set(protein_atoms))) != 0:
        # Core atoms are part of protein (e.g. protein mutation); restraining would be self-referential.
        _logger.info(f"\t\t_impose_rmsd_restraint: No restraint added because sets overlap (core_atoms={core_heavy_atoms}, protein_atoms={protein_atoms})")
        return

    # Keep only the protein CA atoms within `cutoff` of any core heavy atom.
    cutoff = 0.65  # 6.5 A, in nm
    trajectory = md.Trajectory([self.hybrid_positions/unit.nanometers], topology=self._hybrid_topology)
    matches = md.compute_neighbors(trajectory, cutoff, core_heavy_atoms, haystack_indices=protein_atoms, periodic=False)
    protein_atoms = set()
    for match in matches:
        for index in match:
            protein_atoms.add(int(index))
    protein_atoms = [ int(index) for index in protein_atoms ]
    _logger.info(f"\t\t_impose_rmsd_restraint: Restraint will be added (core_atoms={core_heavy_atoms}, protein_atoms={protein_atoms})")

    # Restrain the RMSD of the union of core heavy atoms and the nearby protein CA atoms.
    rmsd_atom_indices = core_heavy_atoms + protein_atoms

    # Force constant chosen so the restraint is ~kT at 1 A deviation past the buffer (T = 300 K).
    kB = unit.AVOGADRO_CONSTANT_NA * unit.BOLTZMANN_CONSTANT_kB
    temperature = 300 * unit.kelvin
    kT = kB * temperature
    sigma = 1.0 * unit.angstrom
    buffer = 1.0 * unit.angstrom

    # Flat-bottom restraint: zero up to `buffer`, harmonic in (RMSD - buffer) beyond it.
    custom_cv_force = openmm.CustomCVForce('step(RMSD-buffer)*(K_RMSD/2)*(RMSD-buffer)^2')
    custom_cv_force.addGlobalParameter('K_RMSD', kT / sigma**2)
    custom_cv_force.addGlobalParameter('buffer', buffer)
    # RMSDForce measures RMSD against the current hybrid positions as the reference structure.
    rmsd_force = openmm.RMSDForce(self.hybrid_positions, rmsd_atom_indices)
    custom_cv_force.addCollectiveVariable('RMSD', rmsd_force)
    self._hybrid_system.addForce(custom_cv_force)
    _logger.info(f"\t\t_impose_rmsd_restraint: RMSD restraint added with buffer {buffer/unit.angstrom} A and stddev {sigma/unit.angstrom} A")
def old_positions(self, hybrid_positions):
    """
    Get the positions corresponding to the old system

    Parameters
    ----------
    hybrid_positions : [n, 3] np.ndarray or simtk.unit.Quantity
        The positions of the hybrid system

    Returns
    -------
    old_positions : [m, 3] np.ndarray with unit
        The positions of the old system
    """
    n_atoms_old = self._topology_proposal.n_atoms_old

    # Making sure hybrid positions are simtk.unit.Quantity objects
    if not isinstance(hybrid_positions, unit.Quantity):
        hybrid_positions = unit.Quantity(hybrid_positions, unit=unit.nanometer)

    old_positions = unit.Quantity(np.zeros([n_atoms_old, 3]), unit=unit.nanometer)

    # CONSISTENCY FIX: use the old->hybrid map rather than assuming old index == hybrid index,
    # mirroring new_positions(). In the current construction the map is the identity for old
    # atoms, so results are unchanged, but this no longer depends on that invariant.
    for idx in range(n_atoms_old):
        old_positions[idx, :] = hybrid_positions[self._old_to_hybrid_map[idx], :]

    return old_positions
def new_positions(self, hybrid_positions):
    """
    Get the positions corresponding to the new system.

    Parameters
    ----------
    hybrid_positions : [n, 3] np.ndarray or simtk.unit.Quantity
        The positions of the hybrid system

    Returns
    -------
    new_positions : [m, 3] np.ndarray with unit
        The positions of the new system
    """
    n_atoms_new = self._topology_proposal.n_atoms_new

    # Coerce unadorned arrays into simtk.unit.Quantity objects (nanometers).
    if not isinstance(hybrid_positions, unit.Quantity):
        hybrid_positions = unit.Quantity(hybrid_positions, unit=unit.nanometer)

    new_positions = unit.Quantity(np.zeros([n_atoms_new, 3]), unit=unit.nanometer)

    # Pull each new-system atom's coordinates out of its hybrid slot.
    for new_idx in range(n_atoms_new):
        hybrid_idx = self._new_to_hybrid_map[new_idx]
        new_positions[new_idx, :] = hybrid_positions[hybrid_idx, :]

    return new_positions
@property
def hybrid_system(self):
    """
    The hybrid system.

    Returns
    -------
    hybrid_system : openmm.System
        The system representing a hybrid between old and new topologies
    """
    return self._hybrid_system
@property
def new_to_hybrid_atom_map(self):
    """
    Give a dictionary that maps new system atoms to the hybrid system.

    Returns
    -------
    new_to_hybrid_atom_map : dict of {int, int}
        The mapping of atoms from the new system to the hybrid
    """
    return self._new_to_hybrid_map
@property
def old_to_hybrid_atom_map(self):
    """
    Give a dictionary that maps old system atoms to the hybrid system.

    Returns
    -------
    old_to_hybrid_atom_map : dict of {int, int}
        The mapping of atoms from the old system to the hybrid
    """
    return self._old_to_hybrid_map
@property
def hybrid_positions(self):
    """
    The positions of the hybrid system. Dimensionality is (n_environment + n_core + n_old_unique + n_new_unique).

    The positions are assigned by first copying all the mapped positions from the old system in, then copying the
    mapped positions from the new system (see _compute_hybrid_positions).

    Returns
    -------
    hybrid_positions : [n, 3] Quantity nanometers
    """
    return self._hybrid_positions
@property
def hybrid_topology(self):
    """
    An MDTraj hybrid topology for the purpose of writing out trajectories. Note that we do not expect this to be
    able to be parameterized by the openmm forcefield class.

    Returns
    -------
    hybrid_topology : mdtraj.Topology
    """
    return self._hybrid_topology
@property
def omm_hybrid_topology(self):
    """
    An OpenMM format of the hybrid topology. Also cannot be used to parameterize the system, only to write out
    trajectories. Converted fresh from the stored mdtraj topology on every access.

    Returns
    -------
    hybrid_topology : simtk.openmm.app.Topology
    """
    return md.Topology.to_openmm(self._hybrid_topology)
class RepartitionedHybridTopologyFactory(HybridTopologyFactory):
"""
subclass of the HybridTopologyFactory to allow for more expansive alchemical regions and controllability
"""
def __init__(self,
             topology_proposal,
             current_positions,
             new_positions,
             endstate,
             alchemical_region=None,
             flatten_torsions=False,
             interpolate_old_and_new_14s=False,
             **kwargs):
    """
    Build a hybrid system parameterized entirely at a single lambda endstate (0 or 1).

    Parameters
    ----------
    topology_proposal : TopologyProposal
        topology proposal of the region of interest
    current_positions : simtk.unit.Quantity
        positions of old system
    new_positions : simtk.unit.Quantity
        positions of new system
    endstate : int
        the lambda endstate to parameterize; must be 0 or 1
    alchemical_region : list, default None
        list of atoms comprising the alchemical region; if None, core_atoms + unique_new_atoms + unique_old_atoms are alchemical region
    flatten_torsions : bool, default False
        if True, torsion terms involving `unique_new_atoms` will be scaled such that at lambda=0,1, the torsion term is turned off/on respectively;
        the opposite is true for `unique_old_atoms`.
    interpolate_old_and_new_14s : bool, default False
        if True, 1,4 exception terms involving `unique_new_atoms` will be scaled such that at lambda=0,1, the 1,4 exception term is turned off/on respectively;
        the opposite is true for `unique_old_atoms`.

    Raises
    ------
    Exception
        if `endstate` is not 0 or 1, or an unknown force is found in either input system.
    """
    from itertools import chain

    self._topology_proposal = topology_proposal
    self._old_system = copy.deepcopy(topology_proposal.old_system)
    self._new_system = copy.deepcopy(topology_proposal.new_system)
    self._old_to_hybrid_map = {}
    self._new_to_hybrid_map = {}
    self._hybrid_system_forces = dict()
    self._old_positions = current_positions
    self._new_positions = new_positions
    self._endstate = endstate
    self._flatten_torsions = flatten_torsions
    self._interpolate_14s = interpolate_old_and_new_14s

    if endstate == 0 or endstate == 1:
        _logger.info("*** Generating RepartitionedHybridTopologyFactory ***")
    else:
        # BUGFIX: previously this branch was `elif endstate is None:`, which silently accepted
        # any other invalid value (e.g. 2). Reject everything that is not 0 or 1.
        raise Exception("endstate must be 0 or 1! Aborting!")

    if self._flatten_torsions:
        _logger.info("Flattening torsions of unique new/old at lambda = 0/1")
    if self._interpolate_14s:
        _logger.info("Flattening exceptions of unique new/old at lambda = 0/1")

    # The softcore attributes are defaulted even though they are not used here; they only exist so
    # downstream machinery inherited from the parent class can read them.
    self._softcore_LJ_v2 = True
    self._softcore_electrostatics = True
    self._softcore_LJ_v2_alpha = 0.85
    self._softcore_electrostatics_alpha = 0.3
    self._softcore_sigma_Q = 1.0
    self._has_functions = False
    self._use_dispersion_correction = False

    # Prepare dicts of forces, which will be useful later
    # TODO: Store this as self._system_forces[name], name in ('old', 'new', 'hybrid') for compactness
    self._old_system_forces = {type(force).__name__ : force for force in self._old_system.getForces()}
    self._new_system_forces = {type(force).__name__ : force for force in self._new_system.getForces()}
    _logger.info(f"Old system forces: {self._old_system_forces.keys()}")
    _logger.info(f"New system forces: {self._new_system_forces.keys()}")

    # Check that there are no unknown forces in the new and old systems:
    for system_name in ('old', 'new'):
        force_names = getattr(self, '_{}_system_forces'.format(system_name)).keys()
        unknown_forces = set(force_names) - set(self._known_forces)
        if len(unknown_forces) > 0:
            raise ValueError(f"Unkown forces {unknown_forces} encountered in {system_name} system")
    _logger.info("No unknown forces.")

    # Get and store the nonbonded method from the system:
    self._nonbonded_method = self._old_system_forces['NonbondedForce'].getNonbondedMethod()
    _logger.info(f"Nonbonded method to be used (i.e. from old system): {self._nonbonded_method}")

    # Start by creating an empty system. This will become the hybrid system.
    self._hybrid_system = openmm.System()

    # Begin by copying all particles in the old system to the hybrid system. Note that this does not copy the
    # interactions. It does, however, copy the particle masses. In general, hybrid index and old index should be
    # the same.
    # TODO: Refactor this into self._add_particles()
    _logger.info("Adding and mapping old atoms to hybrid system...")
    for particle_idx in range(self._topology_proposal.n_atoms_old):
        particle_mass = self._old_system.getParticleMass(particle_idx)
        hybrid_idx = self._hybrid_system.addParticle(particle_mass)
        self._old_to_hybrid_map[particle_idx] = hybrid_idx

        # If the particle index in question is mapped, make sure to add it to the new to hybrid map as well.
        if particle_idx in self._topology_proposal.old_to_new_atom_map.keys():
            particle_index_in_new_system = self._topology_proposal.old_to_new_atom_map[particle_idx]
            self._new_to_hybrid_map[particle_index_in_new_system] = hybrid_idx

    # Next, add the remaining unique atoms from the new system to the hybrid system and map accordingly.
    # As before, this does not copy interactions, only particle indices and masses.
    _logger.info("Adding and mapping new atoms to hybrid system...")
    for particle_idx in self._topology_proposal.unique_new_atoms:
        particle_mass = self._new_system.getParticleMass(particle_idx)
        hybrid_idx = self._hybrid_system.addParticle(particle_mass)
        self._new_to_hybrid_map[particle_idx] = hybrid_idx

    # Check that if there is a barostat in the original system, it is added to the hybrid.
    # We copy the barostat from the old system.
    if "MonteCarloBarostat" in self._old_system_forces.keys():
        barostat = copy.deepcopy(self._old_system_forces["MonteCarloBarostat"])
        self._hybrid_system.addForce(barostat)
        _logger.info("Added MonteCarloBarostat.")
    else:
        _logger.info("No MonteCarloBarostat added.")

    # Copy over the box vectors:
    box_vectors = self._old_system.getDefaultPeriodicBoxVectors()
    self._hybrid_system.setDefaultPeriodicBoxVectors(*box_vectors)
    _logger.info(f"getDefaultPeriodicBoxVectors added to hybrid: {box_vectors}")

    # Create the opposite atom maps for use in nonbonded force processing; let's omit this from logger
    self._hybrid_to_old_map = {value : key for key, value in self._old_to_hybrid_map.items()}
    self._hybrid_to_new_map = {value : key for key, value in self._new_to_hybrid_map.items()}

    # Assign atoms to one of the classes described in the class docstring
    self._atom_classes = self._determine_atom_classes()
    _logger.info("Determined atom classes.")

    # Construct dictionary of exceptions in old and new systems
    _logger.info("Generating old system exceptions dict...")
    self._old_system_exceptions = self._generate_dict_from_exceptions(self._old_system_forces['NonbondedForce'])
    _logger.info("Generating new system exceptions dict...")
    self._new_system_exceptions = self._generate_dict_from_exceptions(self._new_system_forces['NonbondedForce'])
    self._validate_disjoint_sets()

    # Copy constraints, checking to make sure they are not changing
    _logger.info("Handling constraints...")
    self._handle_constraints()

    # Copy over relevant virtual sites
    _logger.info("Handling virtual sites...")
    self._handle_virtual_sites()

    # Combine alchemical regions: the user-supplied region must contain the default one.
    default_alchemical_region = set(chain(self._atom_classes['core_atoms'], self._atom_classes['unique_new_atoms'], self._atom_classes['unique_old_atoms']))
    if alchemical_region is None:
        self._alchemical_region = default_alchemical_region
    else:
        assert default_alchemical_region.issubset(set(alchemical_region)), "the given alchemical region must include _all_ atoms in the default alchemical region"
        self._alchemical_region = set(alchemical_region).union(default_alchemical_region)

    # First thing to do is to copy over all of the standard valence force objects into the hybrid system
    self._handle_bonds()
    self._handle_angles()
    self._handle_torsions()

    # Then add the nonbonded force (this is _slightly_ trickier)
    if 'NonbondedForce' in self._old_system_forces or 'NonbondedForce' in self._new_system_forces:
        self._add_nonbonded_force_terms(add_custom_sterics_force=False)
        self._handle_nonbonded()

    # The last thing to do is call the alchemical factory on the _hybrid_system
    # self._alchemify()

    # Generate the topology representation
    self._hybrid_topology = self._create_topology()

    # Get positions for the hybrid
    self._hybrid_positions = self._compute_hybrid_positions()
def _handle_bonds(self):
    """
    Copy over the appropriate bonds from the old or new system to the hybrid system.

    If the endstate is old (0), all old-system bond terms are copied to the hybrid system, then the
    new system is scanned and every term touching a unique new atom is copied as well; the roles are
    reversed at the new endstate (1).
    """
    # Destination force for all hybrid bond terms.
    to_force = openmm.HarmonicBondForce()
    self._hybrid_system_forces['HarmonicBondForce'] = to_force

    # Select which system supplies the bulk of the terms (template) and which only supplies the
    # terms touching its unique atoms (auxiliary), based on the endstate.
    if self._endstate == 0:
        template_force = self._old_system_forces['HarmonicBondForce']
        aux_force = self._new_system_forces['HarmonicBondForce']
        target_index_set = self._atom_classes['unique_new_atoms']
        template_map = self._old_to_hybrid_map
        aux_map = self._new_to_hybrid_map
    elif self._endstate == 1:
        template_force = self._new_system_forces['HarmonicBondForce']
        aux_force = self._old_system_forces['HarmonicBondForce']
        target_index_set = self._atom_classes['unique_old_atoms']
        template_map = self._new_to_hybrid_map
        aux_map = self._old_to_hybrid_map
    else:
        raise Exception(f"endstate must be 0 or 1")

    # Copy every bond from the template force.
    for bond_idx in range(template_force.getNumBonds()):
        a, b, length, k = template_force.getBondParameters(bond_idx)
        to_force.addBond(template_map[a], template_map[b], length, k)

    # From the auxiliary force, copy only the 'special' bonds that involve a target (unique) atom.
    for bond_idx in range(aux_force.getNumBonds()):
        a, b, length, k = aux_force.getBondParameters(bond_idx)
        hybrid_a, hybrid_b = aux_map[a], aux_map[b]
        if target_index_set.intersection({hybrid_a, hybrid_b}):
            to_force.addBond(hybrid_a, hybrid_b, length, k)

    # Register the populated force with the hybrid system.
    self._hybrid_system.addForce(to_force)
def _handle_angles(self):
    """
    Copy over the appropriate angles from the old or new system to the hybrid system.

    If the endstate is old (0), all old-system angle terms are copied to the hybrid system, then the
    new system is scanned and every term touching a unique new atom is copied as well; the roles are
    reversed at the new endstate (1).
    """
    # Destination force for all hybrid angle terms.
    to_force = openmm.HarmonicAngleForce()
    self._hybrid_system_forces['HarmonicAngleForce'] = to_force

    # Select the template (bulk) and auxiliary (unique-atom-only) forces from the endstate.
    if self._endstate == 0:
        template_force = self._old_system_forces['HarmonicAngleForce']
        aux_force = self._new_system_forces['HarmonicAngleForce']
        target_index_set = self._atom_classes['unique_new_atoms']
        template_map = self._old_to_hybrid_map
        aux_map = self._new_to_hybrid_map
    elif self._endstate == 1:
        template_force = self._new_system_forces['HarmonicAngleForce']
        aux_force = self._old_system_forces['HarmonicAngleForce']
        target_index_set = self._atom_classes['unique_old_atoms']
        template_map = self._new_to_hybrid_map
        aux_map = self._old_to_hybrid_map
    else:
        raise Exception(f"endstate must be 0 or 1")

    # Copy every angle from the template force.
    for angle_idx in range(template_force.getNumAngles()):
        a, b, c, theta0, k = template_force.getAngleParameters(angle_idx)
        to_force.addAngle(template_map[a], template_map[b], template_map[c], theta0, k)

    # From the auxiliary force, copy only the 'special' angles that involve a target (unique) atom.
    for angle_idx in range(aux_force.getNumAngles()):
        a, b, c, theta0, k = aux_force.getAngleParameters(angle_idx)
        hybrid_a, hybrid_b, hybrid_c = aux_map[a], aux_map[b], aux_map[c]
        if target_index_set.intersection({hybrid_a, hybrid_b, hybrid_c}):
            to_force.addAngle(hybrid_a, hybrid_b, hybrid_c, theta0, k)

    # Register the populated force with the hybrid system.
    self._hybrid_system.addForce(to_force)
def _handle_torsions(self):
    """
    Copy PeriodicTorsionForce terms from the old/new systems into the hybrid system.

    At endstate 0 the old system is the "template" (all of its torsions are
    copied verbatim) and the new system is the "auxiliary" source; at
    endstate 1 the roles are reversed.  Auxiliary torsions are copied only
    when they involve at least one unique auxiliary atom, and those terms
    have their force constant zeroed when ``self._flatten_torsions`` is set.
    """
    # Define the force we are going to write to
    self._hybrid_system_forces['PeriodicTorsionForce'] = openmm.PeriodicTorsionForce()
    to_force = self._hybrid_system_forces['PeriodicTorsionForce']
    # Choose template/auxiliary forces and index maps based on the endstate
    if self._endstate == 0:
        template_force = self._old_system_forces['PeriodicTorsionForce']
        aux_force = self._new_system_forces['PeriodicTorsionForce']
        target_index_set = self._atom_classes['unique_new_atoms']
        template_map = self._old_to_hybrid_map
        aux_map = self._new_to_hybrid_map
    elif self._endstate == 1:
        template_force = self._new_system_forces['PeriodicTorsionForce']
        aux_force = self._old_system_forces['PeriodicTorsionForce']
        target_index_set = self._atom_classes['unique_old_atoms']
        template_map = self._new_to_hybrid_map
        aux_map = self._old_to_hybrid_map
    else:
        # BUGFIX: was ``raise Exception(f"endstate must be 0 or 1")`` -- an
        # f-string with no placeholders and an overly generic exception type.
        # ValueError is a subclass of Exception, so existing handlers still work.
        raise ValueError(f"endstate must be 0 or 1; got {self._endstate}")
    # Copy over the template force verbatim
    for idx in range(template_force.getNumTorsions()):
        p1, p2, p3, p4, periodicity, phase, k = template_force.getTorsionParameters(idx)
        hybrid_p1, hybrid_p2, hybrid_p3, hybrid_p4 = template_map[p1], template_map[p2], template_map[p3], template_map[p4]
        to_force.addTorsion(hybrid_p1, hybrid_p2, hybrid_p3, hybrid_p4, periodicity, phase, k)
    # Query the auxiliary force to extract and copy over the 'special' terms that don't exist in the template force
    for idx in range(aux_force.getNumTorsions()):
        p1, p2, p3, p4, periodicity, phase, k = aux_force.getTorsionParameters(idx)
        hybrid_p1, hybrid_p2, hybrid_p3, hybrid_p4 = aux_map[p1], aux_map[p2], aux_map[p3], aux_map[p4]
        if set([hybrid_p1, hybrid_p2, hybrid_p3, hybrid_p4]).intersection(target_index_set) != set():
            # If there is a target atom in the auxiliary term, write it to the
            # hybrid force; zero the force constant if torsions are flattened.
            scale = 1. if not self._flatten_torsions else 0.
            to_force.addTorsion(hybrid_p1, hybrid_p2, hybrid_p3, hybrid_p4, periodicity, phase, k*scale)
    # Then add the to_force to the hybrid_system
    self._hybrid_system.addForce(to_force)
def _handle_nonbonded(self):
    """
    Transcribe NonbondedForce particle parameters into the hybrid system.

    Each hybrid particle is added to 'standard_nonbonded_force' with
    parameters chosen by its atom class:
      * unique_old: old-system parameters; charge/epsilon zeroed at endstate 1
      * unique_new: new-system parameters; charge/epsilon zeroed at endstate 0
      * core: old parameters at endstate 0, new parameters at endstate 1
      * environment: parameters from whichever endstate system is active
        (assumed identical in old and new systems -- see comment below)
    Finishes by delegating exception (excluded/scaled-pair) bookkeeping to
    ``_handle_exceptions``.
    """
    old_system_nonbonded_force = self._old_system_forces['NonbondedForce']
    new_system_nonbonded_force = self._new_system_forces['NonbondedForce']
    hybrid_to_old_map = self._hybrid_to_old_map
    hybrid_to_new_map = self._hybrid_to_new_map
    # Particles must be added in hybrid-index order; the check_index asserts
    # below verify that the hybrid and nonbonded-force indices stay in sync.
    for particle_index in range(self._hybrid_system.getNumParticles()):
        if particle_index in self._atom_classes['unique_old_atoms']:
            # Unique-old atoms are fully interacting only at the old endstate
            scale = 0.0 if self._endstate == 1 else 1.0
            _logger.debug(f"\t\thandle_nonbonded: particle {particle_index} is a unique_old")
            # Get the parameters in the old system
            old_index = hybrid_to_old_map[particle_index]
            [charge, sigma, epsilon] = old_system_nonbonded_force.getParticleParameters(old_index)
            # Add particle to the regular nonbonded force, but Lennard-Jones will be handled by CustomNonbondedForce
            check_index = self._hybrid_system_forces['standard_nonbonded_force'].addParticle(scale*charge, sigma, scale*epsilon) # add charge to standard_nonbonded force
            assert (particle_index == check_index ), "Attempting to add incorrect particle to hybrid system"
        elif particle_index in self._atom_classes['unique_new_atoms']:
            # Unique-new atoms are fully interacting only at the new endstate
            scale = 1.0 if self._endstate == 1 else 0.0
            _logger.debug(f"\t\thandle_nonbonded: particle {particle_index} is a unique_new")
            # Get the parameters in the new system
            new_index = hybrid_to_new_map[particle_index]
            [charge, sigma, epsilon] = new_system_nonbonded_force.getParticleParameters(new_index)
            # Add particle to the regular nonbonded force, but Lennard-Jones will be handled by CustomNonbondedForce
            check_index = self._hybrid_system_forces['standard_nonbonded_force'].addParticle(scale*charge, sigma, scale*epsilon) # charge starts at zero
            assert (particle_index == check_index ), "Attempting to add incorrect particle to hybrid system"
        elif particle_index in self._atom_classes['core_atoms']:
            _logger.debug(f"\t\thandle_nonbonded: particle {particle_index} is a core")
            # Get the parameters in the new and old systems:
            old_index = hybrid_to_old_map[particle_index]
            [charge_old, sigma_old, epsilon_old] = old_system_nonbonded_force.getParticleParameters(old_index)
            new_index = hybrid_to_new_map[particle_index]
            [charge_new, sigma_new, epsilon_new] = new_system_nonbonded_force.getParticleParameters(new_index)
            # Core atoms take the full parameters of whichever endstate is active
            if self._endstate == 0:
                to_charge, to_sigma, to_epsilon = charge_old, sigma_old, epsilon_old
            else:
                to_charge, to_sigma, to_epsilon = charge_new, sigma_new, epsilon_new
            check_index = self._hybrid_system_forces['standard_nonbonded_force'].addParticle(to_charge, to_sigma, to_epsilon)
            assert (particle_index == check_index ), "Attempting to add incorrect particle to hybrid system"
        # Otherwise, the particle is in the environment
        else:
            _logger.debug(f"\t\thandle_nonbonded: particle {particle_index} is an envronment")
            assert particle_index in self._atom_classes['environment_atoms']
            # The parameters will be the same in new and old system, so just take the active endstate's
            # NOTE(review): the old/new equality of environment parameters is
            # assumed here, not checked -- confirm upstream validation exists.
            if self._endstate == 0:
                old_index = hybrid_to_old_map[particle_index]
                [charge, sigma, epsilon] = old_system_nonbonded_force.getParticleParameters(old_index)
            elif self._endstate == 1:
                new_index = hybrid_to_new_map[particle_index]
                [charge, sigma, epsilon] = new_system_nonbonded_force.getParticleParameters(new_index)
            else:
                raise Exception()
            # Add the environment atoms to the regular nonbonded force as well: should we be adding steric terms here, too?
            check_index = self._hybrid_system_forces['standard_nonbonded_force'].addParticle(charge, sigma, epsilon)
            assert (particle_index == check_index ), "Attempting to add incorrect particle to hybrid system"
    self._handle_exceptions()
def _handle_exceptions(self):
    """
    Copy NonbondedForce exceptions (excluded and 1-4 scaled pairs) into the
    hybrid system's standard nonbonded force.

    Template-system exceptions are copied verbatim.  Auxiliary-system
    exceptions are copied only when they involve a unique auxiliary atom;
    if ``self._interpolate_14s`` is set those terms are added with zeroed
    chargeprod/epsilon, otherwise with their full parameters.

    NOTE(review): the original comment stated "we do not allow for the
    interpolation of 1,4s", which appears to contradict the
    ``_interpolate_14s`` branch below -- confirm the intended convention.
    """
    # NOTE(review): these two locals are not used below (template_force /
    # aux_force are re-fetched from the same dicts) -- presumably leftovers.
    old_system_nonbonded_force = self._old_system_forces['NonbondedForce']
    new_system_nonbonded_force = self._new_system_forces['NonbondedForce']
    # Prepare the atom classes
    unique_old_atoms = self._atom_classes['unique_old_atoms']
    unique_new_atoms = self._atom_classes['unique_new_atoms']
    # Choose template/auxiliary forces and index maps based on the endstate
    if self._endstate == 0:
        template_force = self._old_system_forces['NonbondedForce']
        aux_force = self._new_system_forces['NonbondedForce']
        index_map = self._old_to_hybrid_map
        unquerieds = unique_new_atoms
        aux_map = self._new_to_hybrid_map
    elif self._endstate == 1:
        template_force = self._new_system_forces['NonbondedForce']
        aux_force = self._old_system_forces['NonbondedForce']
        index_map = self._new_to_hybrid_map
        unquerieds = unique_old_atoms
        aux_map = self._old_to_hybrid_map
    else:
        raise Exception()
    #add the template exceptions verbatim
    for exception_idx in range(template_force.getNumExceptions()):
        p1, p2, chargeprod, sigma, epsilon = template_force.getExceptionParameters(exception_idx)
        hybrid_p1, hybrid_p2 = index_map[p1], index_map[p2]
        self._hybrid_system_forces['standard_nonbonded_force'].addException(hybrid_p1, hybrid_p2, chargeprod, sigma, epsilon)
    #now for the auxiliary force: only exceptions touching a unique auxiliary atom
    for exception_idx in range(aux_force.getNumExceptions()):
        p1, p2, chargeprod, sigma, epsilon = aux_force.getExceptionParameters(exception_idx)
        hybrid_p1, hybrid_p2 = aux_map[p1], aux_map[p2]
        if set([hybrid_p1, hybrid_p2]).intersection(unquerieds) != set():
            if self._interpolate_14s:
                # zeroed at this endstate (to be interpolated elsewhere)
                self._hybrid_system_forces['standard_nonbonded_force'].addException(hybrid_p1, hybrid_p2, chargeprod*0.0, sigma, epsilon*0.0)
            else:
                self._hybrid_system_forces['standard_nonbonded_force'].addException(hybrid_p1, hybrid_p2, chargeprod, sigma, epsilon)
|
choderalab/perses
|
perses/annihilation/relative.py
|
Python
|
mit
| 161,386
|
[
"Amber",
"MDTraj",
"OpenMM"
] |
82113a28c176cf7b235835602f08722ccc2a0ddb33186fc9e7a9c8354d487f39
|
# -*- encoding: utf-8 -*-
import os
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.urls import reverse
from selenium.common.exceptions import NoSuchElementException
from splinter.driver.webdriver.phantomjs import WebDriver as SplinterWebDriver
from scriptorium.helpers.npm import resolve_path
class Tester(SplinterWebDriver):
    """Splinter PhantomJS driver with helpers for Django live-server tests.

    ``current_test`` must be set to the running test case (see
    ``SeleniumTestCase.setUp``) before ``visit``/``screenshot`` are used.
    """

    def __init__(self, *args, **kwargs):
        # The live-server test case currently driving this browser;
        # assigned by SeleniumTestCase.setUp before each test runs.
        self.current_test = None  # type: StaticLiveServerTestCase
        super().__init__(*args, **kwargs)

    def visit(self, view_name, **kwargs):
        """Resolve a Django view name and open it on the live test server."""
        url = reverse(view_name, **kwargs)
        return super().visit(self.current_test.live_server_url + url)

    def see(self, text, selector=None):
        """Assert that *text* is present somewhere on the current page."""
        try:
            elements = self.find_by_present_text(text, selector)
        except NoSuchElementException:
            elements = []
        if len(elements) == 0:
            raise AssertionError(
                'Text "%s" not found on page "%s"' % (text, self.url))

    def click(self, text, selector=None):
        """Click the single element containing *text*; fail on 0 or >1 matches."""
        elements = self.find_by_present_text(text, selector)
        if len(elements) == 0:
            raise Exception(
                'No clickable item found with the text: "%s"' % text)
        elif len(elements) > 1:
            # BUGFIX: corrected "more then one" typo in the error message
            raise Exception(
                'There are more than one clickable item with the '
                'text "%s"' % text)
        elements.pop().click()

    def find_by_present_text(self, text, selector=None):
        """Return all elements matching *selector* (default: any) whose
        visible text contains *text*, case-insensitively."""
        if selector is None:
            selector = '*'
        clickable_items = self.driver.find_elements_by_css_selector(selector)
        search_term = text.lower()
        return [element for element in clickable_items
                if search_term in str(element.text).lower()]

    def screenshot(self, name=None, suffix='.png'):
        """Save a screenshot under runtime/test/screenshots/<TestClass>/.

        BUGFIX: the *name* and *suffix* parameters were previously accepted
        but ignored (the path always used the test method name and '.png').
        They are now honored; defaults preserve the old behavior.

        Returns the path of the written file.  Raises if the driver fails
        to produce a screenshot.
        """
        folder_name = self.current_test.__class__.__name__
        filename = name or self.current_test._testMethodName
        full_path = 'runtime/test/screenshots/%s/%s%s' % (
            folder_name, filename, suffix)
        if os.path.exists(full_path):
            os.unlink(full_path)
        if not os.path.exists(os.path.dirname(full_path)):
            os.makedirs(os.path.dirname(full_path))
        if self.driver.get_screenshot_as_file(full_path) is False:
            raise Exception('failed to create screenshot')
        return full_path
class SeleniumTestCase(StaticLiveServerTestCase):
    """Live-server test case wired to one shared PhantomJS ``Tester`` browser.

    The browser is created once per test class (``setUpClass``) and reset
    between tests (``_post_teardown``); each test gets a screenshot on
    teardown for debugging.
    """

    def setUp(self):
        # Point the shared browser helper at the currently running test so
        # Tester.visit/screenshot can use live_server_url and the test name.
        self.tester.current_test = self
        super().setUp()

    def _post_teardown(self):
        # Capture a screenshot for debugging, then reset the browser state
        # (cookies, page) so tests remain independent of each other.
        self.tester.screenshot()
        self.tester.driver.delete_all_cookies()
        self.tester.driver.get('about:blank')
        self.tester.driver.refresh()
        super()._post_teardown()

    @classmethod
    def setUpClass(cls):
        super(SeleniumTestCase, cls).setUpClass()
        # One shared PhantomJS instance per test class, resolved from the
        # local npm bin directory.
        splinter = Tester(executable_path=resolve_path('.bin/phantomjs'))
        cls.tester = splinter
        cls.selenium = splinter.driver
        cls.selenium.implicitly_wait(10)

    @classmethod
    def tearDownClass(cls):
        cls.selenium.quit()
        super(SeleniumTestCase, cls).tearDownClass()

    def reverse(self, view):
        """Return the absolute URL of *view* on the live test server."""
        return self.live_server_url + reverse(view)
|
alex20465/open-scriptorium
|
scriptorium/base/test/__init__.py
|
Python
|
mit
| 3,305
|
[
"VisIt"
] |
603ffb15757e510824b5ad88057e215deef768c34b782e0eb7ca49e088e309f3
|
#!/usr/bin/env python
'''
GOAL:
- test several models that describe how disks shrink and integrated SFR decreases as a result of outside-in quenching
USAGE
- from within ipython
%run ~/Dropbox/pythonCode/LCSsimulate-infall.py
t = run_sim(tmax=0,drdt_step=0.05,nrandom=1000)
t = run_sim(tmax=1,drdt_step=0.05,nrandom=1000)
t = run_sim(tmax=2,drdt_step=0.05,nrandom=1000)
t = run_sim(tmax=3,drdt_step=0.05,nrandom=1000)
t = run_sim(tmax=4,drdt_step=0.05,nrandom=1000)
t = run_sim(tmax=5,drdt_step=0.05,nrandom=1000)
Written by Rose A. Finn, 2/21/18
Updated 2019-2020 to incorporate total SFRs into the comparison.
'''
from astropy.io import fits,ascii
from astropy.table import Table
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.tri as tri
import argparse
from scipy.stats import ks_2samp
# the incomplete gamma function, for integrating the sersic profile
from scipy.special import gammainc
from scipy.interpolate import griddata
#from astropy.table import Table
import os
import LCScommon as lcommon
import multiprocessing as mp
homedir = os.getenv("HOME")
plotdir = homedir+'/research/LCS/plots/'
# import mass-matching function from lcs_paper2
import sys
sys.path.append(homedir+'/github/LCS/python/')
from lcs_paper2 import mass_match
###########################
##### SET UP ARGPARSE
###########################
# Command-line interface.  BUGFIX: several help strings contradicted the
# actual defaults (--pvalue help said 0.05 while the default is .005;
# --masscut help said 9.5 while the default is 9.7) and --model had a
# "truncatingthe" typo.  Help text now matches the code; defaults unchanged.
parser = argparse.ArgumentParser(description='Program to run simulation for LCS paper 2')
parser.add_argument('--BTcut', dest='BTcut', default=False, action='store_true', help='use sample with BTcut imposed')
parser.add_argument('--use24', dest='use24', default=True, action='store_false', help='use 24um profile parameters when calculating expected SFR of sim galaxies. default is true')
parser.add_argument('--model', dest='model', default=1, help='infall model to use. default is 1. \n\tmodel 1 is shrinking 24um effective radius \n\tmodel 2 is truncating the 24um emission')
parser.add_argument('--sfrint', dest='sfrint', default=1, help='method for integrating the SFR in model 2. \n\tmethod 1 = integrate external sersic profile out to truncation radius.\n\tmethod 2 = integrate fitted sersic profile out to rmax.')
parser.add_argument('--pvalue', dest='pvalue', default=.005, help='pvalue threshold to use when plotting fraction of trials below this pvalue. Default is 0.005. For reference, 2sigma is 0.05 and 3sigma is 0.003.')
parser.add_argument('--tmax', dest='tmax', default=3., help='maximum infall time. default is 3 Gyr. ')
parser.add_argument('--rmax', dest='rmax', default=6., help='maximum size of SF disk in terms of Re. default is 6. ')
parser.add_argument('--masscut', dest='masscut', default=9.7, help='mass cut for sample. default is logMstar > 9.7 ')
parser.add_argument('--sampleks', dest='sampleks', default=False, action='store_true', help='run KS test to compare core/external size, SFR, Re24 and nsersic24. default is False.')
args = parser.parse_args()
# coerce string-valued CLI inputs to their numeric types
args.model = int(args.model)
args.sfrint = int(args.sfrint)
args.tmax = float(args.tmax)
args.pvalue = float(args.pvalue)
###########################
##### DEFINITIONS
###########################
# default matplotlib color cycle, reused by the plotting helpers below
mycolors = plt.rcParams['axes.prop_cycle'].by_key()['color']
# MIPS 24um pixel scale -- presumably arcsec/pixel; confirm against MIPS docs
mipspixelscale=2.45
###########################
##### PLOTTING LABELS
###########################
# Axis labels (mathtext) for the disk-shrinking rate dR/dt used in the plots.
drdt_label1 = r'$\dot{R}_{24} \ (R_{24}/Gyr^{-1}) $'
# BUGFIX: was r'...(R_24}/Gyr^{-1})...' with a mismatched brace, which is
# invalid matplotlib mathtext; corrected to (R_{24}/...).
drdt_label2 = r'$\dot{R}_{trunc} \ (R_{24}/Gyr^{-1}) $'
def get_MS(logMstar, slope=0.4731, intercept=-4.877):
    '''
    Return the star-forming main-sequence log SFR at a given log stellar mass.

    Linear fit that BV calculated from GSWLC (no B/T cut, e < 0.75).
    Pass alternate slope/intercept to use a different linear fit.
    '''
    # log SFR_MS = slope * logMstar + intercept
    # (an older fit, 0.53*logMstar - 5.5, is superseded by these defaults)
    return intercept + slope * logMstar
## infall rates
# uniform distribution between 0 and tmax Gyr
# NOTE(review): this module-level tmax is superseded by the tmax argument of
# run_sim (and args.tmax) -- confirm it is still needed.
tmax = 2. # max infall time in Gyr
###########################
##### READ IN DATA FILE
##### WITH SIZE INFO
###########################
# updated input file to include SFR, n
#infile = homedir+'/research/LCS/tables/LCS-simulation-data.fits'
# choose the B/T-cut or full-sample tables based on the CLI flag
if args.BTcut:
    infile1 = homedir+'/research/LCS/tables/lcs-sfr-sim-BTcut.fits'
    infile2 = homedir+'/research/LCS/tables/gsw-sfr-sim-BTcut.fits'
else:
    infile1 = homedir+'/research/LCS/tables/lcs-sfr-sim.fits'
    infile2 = homedir+'/research/LCS/tables/gsw-sfr-sim.fits'
# lcs = cluster "core" sample; field = GSWLC "external" comparison sample
lcs = Table.read(infile1)
field = Table.read(infile2)
# linear SFRs (tables store log10 values)
core_sfr = 10.**(lcs['logSFR'])
external_sfr = 10.**(field['logSFR'])
# offset of core galaxies from the main sequence (dex)
core_dsfr = lcs['logSFR'] - get_MS(lcs['logMstar'])
core_logmstar = (lcs['logMstar'])
external_logmstar = (field['logMstar'])
###########################
##### compare core/external
###########################
# Optional sanity check (--sampleks): KS + Anderson-Darling comparison of the
# core and external stellar-mass and SFR distributions before simulating.
if args.sampleks:
    print('\ncore vs external: logMstar distribution')
    lcommon.ks(core_logmstar,external_logmstar,run_anderson=True)
    print()
    print('\ncore vs external: SFR distribution')
    lcommon.ks(core_sfr,external_sfr,run_anderson=True)
###########################
##### FUNCTIONS
###########################
def grid_xyz(x,y,z,nbins=20,color=None):
    ''' Bin non-equally-spaced (x, y, z) data onto a regular grid for use
    with a contour plot, via triangulation + linear interpolation.

    * nbins : number of grid points along each axis
    * color : unused; kept for interface compatibility

    Returns (xgrid, ygrid, zgrid) meshgrid arrays.
    '''
    # https://matplotlib.org/3.3.2/gallery/images_contours_and_fields/irregulardatagrid.html
    # griddata
    # NOTE: int() truncates the axis limits to whole numbers
    xspan = np.linspace(int(min(x)),int(max(x)),nbins)
    yspan = np.linspace(int(min(y)),int(max(y)),nbins)
    triang = tri.Triangulation(x,y)
    interpolator = tri.LinearTriInterpolator(triang,z)
    xgrid,ygrid = np.meshgrid(xspan,yspan)
    zgrid = interpolator(xgrid,ygrid)
    #t = griddata((xspan,yspan),z,method='linear')
    # plot contour levels
    #grid = griddata((x,y),z,(xgrid,ygrid),method='linear')
    return xgrid,ygrid,zgrid
def contour_xyz(x,y,z,ngrid=20,color=None):
    ''' Bin non-equally-spaced (x, y, z) data onto a regular grid via
    scipy griddata, for use with a contour plot.

    NOTE(review): ngrid is passed to np.arange as the *step* size, not the
    number of grid points -- linspace(min, max, ngrid) may have been
    intended (as in grid_xyz above); confirm before relying on resolution.
    * color : unused; kept for interface compatibility
    '''
    # griddata
    xspan = np.arange(int(min(x)),int(max(x)),ngrid)
    yspan = np.arange(int(min(y)),int(max(y)),ngrid)
    xgrid,ygrid = np.meshgrid(xspan,yspan)
    grid = griddata((x,y),z,(xgrid,ygrid),method='linear')
    return grid
def model2_get_fitted_param(input, coeff):
    '''Evaluate the exponential fitting function c0 + c1*exp(c2 * input).

    Used by model 2 to map a truncation radius (in units of Re) onto a
    fitted sersic parameter correction factor.
    '''
    c0, c1, c2 = coeff[0], coeff[1], coeff[2]
    return c0 + c1 * np.exp(c2 * input)
def integrate_sersic(n, Re, Ie, rmax=6):
    '''Total flux of a sersic profile integrated from r=0 out to r=rmax.

    Uses L(<R) = Ie * Re^2 * 2*pi*n * e^{bn} / bn^{2n} * gammainc(2n, x)
    with the usual approximation bn = 1.999n - 0.327.
    '''
    bn = 1.999*n - 0.327
    # dimensionless integration limit for the incomplete gamma function
    x = bn*(rmax/Re)**(1./n)
    total = Ie*Re**2*2*np.pi*n*np.exp(bn)/bn**(2*n)*gammainc(2*n, x)
    return total
def get_frac_flux_retained(n, ratio_before, ratio_after):
    '''Fraction of sersic flux retained when the integration radius shrinks.

    PARAMS
    ------
    * n : sersic index of the profile
    * ratio_before : initial R/Re
    * ratio_after : final R/Re

    Uses L(<R) = Ie Re^2 2 pi n e^{bn}/bn^{2n} * gammainc(2n, x); the
    prefactors cancel, leaving a ratio of incomplete gamma functions.
    '''
    bn = 1.999*n - 0.327
    x_after = bn*(ratio_after)**(1./n)
    x_before = bn*(ratio_before)**(1./n)
    return gammainc(2*n, x_after)/gammainc(2*n, x_before)
'''
I think the right way to do the integration (inspiration during my jog today) is to integrate
the sersic profile of the external profile(Re24, n24) from zero to inf.
for sim core galaxy, integrate sersic profile with new Re from zero to inf.
Don't know what to do with sersic index. As a first attempt, leave it
the same as for the external galaxy.
gammainc = 1 when integrating to infinity
'''
def get_frac_flux_retained0(n,ratio_before,ratio_after,Iboost=1):
    '''
    calculate fraction of flux retained after shrinking Re of sersic profile

    PARAMS
    ------
    * n: sersic index of profile (unused by the current calculation; kept
      for interface compatibility)
    * ratio_before: the initial value of R24/Re_r
    * ratio_after: the final value of R24/Re_r
    * Iboost: multiplicative boost to the central intensity

    RETURNS
    -------
    * frac_retained: fraction of flux retained
    '''
    # With Ie and n held fixed, total flux scales as Re^2, so the ratio of
    # fluxes reduces to the square of the size ratio times the boost.
    # BUGFIX: removed the unused bn/x_before/x_after intermediates -- they
    # were computed but never used in the returned expression (dead code);
    # the incomplete-gamma bookkeeping lives in get_frac_flux_retained.
    return Iboost*(ratio_after/ratio_before)**2
def get_frac_flux_retained_model2(n,Re,rtrunc=1,rmax=4,version=1,Iboost=1):
    '''
    Fraction of flux retained by a truncated sersic profile, optionally
    with the central intensity boosted.  The sersic index is unchanged.

    PARAMS
    ------
    * n: sersic index of profile
    * Re: effective radius of sersic profile
    * rtrunc: truncation radius, in terms of Re; default=1
    * rmax: maximum extent of the disk, in terms of Re, used as the upper
      limit of the "before" integral (instead of infinity)
    * version:
      - 1 = integrate the truncated profile directly (preferred)
      - 2 = integrate the sersic profile obtained by fitting the truncated
        profile (uses the module-level sersicN_fit/sersicRe_fit/sersicIe_fit
        coefficient tables)
    * Iboost: factor to boost central intensity by

    RETURNS
    -------
    * fraction of the flux retained
    '''
    if version == 1:
        # Ratio of incomplete gamma integrals: truncated ("after") over the
        # full disk ("before"), scaled by the intensity boost.  The common
        # prefactor Ie*Re^2*2*pi*n*e^{bn}/bn^{2n} cancels.
        bn = 1.999*n-0.327
        x_after = bn*(rtrunc/Re)**(1./n)
        x_before = bn*(rmax)**(1./n)
        frac_retained = Iboost*gammainc(2*n,x_after)/gammainc(2*n,x_before)
    elif version == 2:
        # Integrate the input profile, then the profile described by the
        # fitted (n, Re, Ie) corrections, and take the flux ratio.
        unit_Ie = 1
        sfr_before = integrate_sersic(n,Re,unit_Ie,rmax=rmax)
        fitted_n = n*model2_get_fitted_param(rtrunc/Re,sersicN_fit)
        fitted_Re = Re*model2_get_fitted_param(rtrunc/Re,sersicRe_fit)
        fitted_Ie = unit_Ie*model2_get_fitted_param(rtrunc/Re,sersicIe_fit)
        sfr_after = integrate_sersic(fitted_n,fitted_Re,fitted_Ie,rmax=rmax)
        frac_retained = Iboost*sfr_after/sfr_before
    return frac_retained
def get_whitaker_ms(logmstar,z):
    ''' Placeholder for the Whitaker et al. star-forming main sequence at
    redshift z; not yet implemented (currently returns None). '''
    pass
def get_sfr_mstar_at_infall(sfr0,mstar0,tinfall):
    '''
    get the sfr and stellar mass at the time of infall,
    using a grid of models created by sfr-mstar-forward-model.py and stored
    in the module-level `lookup_table` (defined elsewhere in this module).

    INPUT:
    * sfr0 : array with redshift zero log10 SFRs of field galaxies
    * mstar0 : an array with z=0 log10 stellar mass values of field galaxies
    * tinfall : an array with the infall time for each field galaxy

    RETURNS:
    * sfr_infall : an array with the log10 sfr of each galaxy at the time of infall
    * mstar_infall : an array with the log10 stellar mass of each galaxy at the time of infall
    '''
    sfr_infall = np.zeros(len(sfr0))
    mstar_infall = np.zeros(len(sfr0))
    allindex = np.arange(len(lookup_table))
    for i in range(len(tinfall)):
        # L1 distance in (logSFR, logMstar, lookback time) to every grid row
        dsfr = np.abs(sfr0[i] - lookup_table['logSFR0'])
        dmstar = np.abs(mstar0[i] - lookup_table['logMstar0'])
        dtinfall = np.abs(tinfall[i] - lookup_table['lookbackt'])
        distance = dsfr + dmstar + dtinfall
        # find entry that falls closest to input galaxy
        # NOTE(review): if several rows tie at the minimum distance this
        # yields multiple indices and the scalar assignments below would
        # fail -- confirm ties cannot occur in the lookup table.
        match_index = distance == np.min(distance)
        match_index = allindex[match_index]
        sfr_infall[i] = lookup_table['logSFR'][match_index]
        mstar_infall[i] = lookup_table['logMstar'][match_index]
    # find closest match in using sfr0, mstar0, tinfall-tab['lookbackt']
    return sfr_infall,mstar_infall
def get_fraction_mass_retained(t):
    '''
    Fraction of stellar mass retained after mass loss, following the
    Poggianti+2013 relation.  The relation applies to populations older
    than 1.9e6 yr; younger populations retain all of their mass.

    INPUT:
    * t : array of stellar-population ages in yr

    RETURNS:
    * array of retained mass fractions, one per input age
    '''
    retained = np.ones(len(t))
    # only populations older than 1.9e6 yr have lost mass
    old_enough = t > 1.9e6
    retained[old_enough] = 1.749 - 0.124*np.log10(t[old_enough])
    return retained
def get_delta_mass(infall_sfr,infall_times,tau):
    '''
    compute the amount of mass gained by the galaxy since tinfall.
    this includes mass added from star formation, and mass lost.

    INPUT
    * infall_sfr : array of infall sfrs (linear units, per yr when
      multiplied by the 1.e9 Gyr->yr factor below)
    * infall_times : array containing time since infall for each galaxy in Gyr
    * tau : e-folding time associated with sfr decline in Gyr

    RETURNS
    * dMstar : mass created since t infall, including mass loss
    '''
    # BUGFIX: removed an unreachable `pass` statement that followed the
    # return.  Logic is unchanged.
    dMstar = np.zeros(len(infall_sfr))
    for i in range(len(infall_sfr)):
        # integrate the exponentially declining SFR from ~0 to t_infall,
        # weighting each age bin by the surviving mass fraction
        t = np.linspace(.002,infall_times[i],1000)
        dt = t[1] - t[0]
        dMstar[i] = infall_sfr[i]*dt*1.e9*np.sum(np.exp(-1*(infall_times[i]-t)/tau)*get_fraction_mass_retained(t*1.e9))
    return dMstar
###############################
##### MAIN SIMULATION FUNCTION
###############################
def run_sim(tmax = 3.,taumax=6,nstep_tau=10,nrandom=10,nmassmatch=10,drdtmin=-2,drdt_step=.1,model=1,plotsingle=True,maxboost=5,plotflag=True,rmax=4,boostflag=False,debug=False):
    '''
    Run the infall/quenching simulation over a grid of quenching
    timescales tau, comparing simulated core SFR distributions to the
    observed core sample with KS tests.

    PARAMS
    ------
    * tmax : maximum infall time in Gyr; infall times are drawn uniformly in [0, tmax]
    * taumax : maximum SFR e-folding time in Gyr (minimum is 0.5)
    * nstep_tau : number of tau values between 0.5 and taumax
    * nrandom : number of random infall-time draws
    * nmassmatch : number of mass-matching repetitions per (draw, tau) pair
    * maxboost : upper bound of the uniform boost factor when boostflag is set
    * boostflag : if True, multiply each iteration's SFRs by a single random
      boost factor drawn uniformly from [1, maxboost]
    * debug : shrink the grid to 2 steps per axis and show diagnostic plots
    * drdtmin, drdt_step, model, plotsingle, plotflag, rmax : accepted for
      backward compatibility; not used by the current implementation

    RETURNS
    -------
    * all_tau : tau used for each trial
    * all_boost : boost factor for each trial (1.0 unless boostflag)
    * all_p_sfr : KS p-value comparing core vs simulated-core SFRs
    * all_p_dsfr : KS p-value comparing core vs simulated-core offsets from
      the main sequence
    * fquench_sfr : fraction of matched galaxies whose simulated SFR fell
      below the minimum external SFR (treated as quenched)
    '''
    # pass in rmax
    #rmax = float(args.rmax)
    # NOTE(review): ks_D_min/ks_p_max and the all_*_simcore / fquench_size /
    # fquench accumulators below are initialized but never updated here.
    ks_D_min = 0
    ks_p_max = 0
    if debug:
        nstep_tau = 2
        nrandom = 2
        nmassmatch = 2
    all_mstar_simcore = []
    all_sfr_simcore = []
    # one result slot per (random draw, tau step, mass-match repetition)
    npoints = int(nstep_tau*nrandom*nmassmatch)
    all_p_dsfr = np.zeros(npoints)
    all_p_sfr = np.zeros(npoints)
    all_tau = np.zeros(npoints)
    all_boost = np.zeros(npoints)
    fquench_size = np.zeros(npoints)
    fquench_sfr = np.zeros(npoints)
    fquench = np.zeros(npoints) # for both constraints
    tau_min = 0.5
    dtau = (taumax-tau_min)/nstep_tau
    # boost strapping
    # randomly draw same sample size from external and core for each model
    # try this to see how results are impacted
    # repeat nrandom times for each time we select an infall sample from the field
    for j in range(nrandom):
        # GET SFR AND MSTAR AT t_infall
        infall_times = np.linspace(0,tmax,len(external_sfr))
        actual_infall_times = np.random.choice(infall_times, len(infall_times))
        # external_sfr and external_mstar are linear
        # need to match using log values
        print('getting sfr/mstar at infall')
        infall_logsfr, infall_logmstar = get_sfr_mstar_at_infall(np.log10(external_sfr),\
                                                                 (external_logmstar),\
                                                                 actual_infall_times)
        print('done getting sfr/mstar at infall')
        # select different values of tau
        for i in range(nstep_tau):
            tau = tau_min + i*dtau
            # UPDATING PROCEDURE TO ACCOUNT FOR EVOLUTION OF SFR AND STELLAR MASS
            # OF FIELD GALAXIES BETWEEN t_infall AND PRESENT.
            if boostflag:
                # model 3 involves boosting Ie in addition to truncating the disk,
                # so this requires another loop where Iboost/Ie0 ranges from 1 to 5
                # not sure if I can implement this as a third case
                # or I could just assign a random boost for each iteration
                # and increase nrandom when running model 3
                #
                # going with one boost factor per iteration for now
                # obviously, it's not realistic that ALL galaxies
                # would be boosted by the SAME factor
                # but this is an easy place to start
                boost = np.random.uniform(1,maxboost) # boost factor will range between 1 and maxboost
            else:
                boost = 1.0
            ########################################################
            # get predicted SFR of core galaxies by multiplying the
            # distribution of SFRs from the external samples by the
            # flux ratio you would expect from shrinking the
            # external sizes to the sim_core sizes
            # SFRs are logged, so add log of frac_retained
            sim_core_sfr = boost*(10.**infall_logsfr)*np.exp(-1*actual_infall_times/tau)
            # calculate Mstar at z=0, give sfr decline and mass loss
            sim_core_mstar = 10.**(infall_logmstar) + get_delta_mass(10.**infall_logsfr,\
                                                                     actual_infall_times,tau)
            sim_core_dsfr = np.log10(sim_core_sfr) - get_MS(sim_core_mstar)
            if debug:
                # these figures are just checking that our SFR quenching and
                # mass increments are reasonable
                plt.figure()
                plt.plot(infall_logmstar, np.log10(sim_core_mstar) - infall_logmstar,'b.')
                plt.xlabel('Mstar at Infall')
                plt.ylabel('Mstar at z=0 - Infall')
                plt.axhline(y=0,c='k',ls='--')
                s = 'tau={}'.format(tau)
                plt.title(s)
                plt.figure()
                plt.plot(infall_logsfr,infall_logsfr - np.log10(sim_core_sfr),'b.')
                plt.axhline(y=0,c='k',ls='--')
                plt.xlabel('SFR at Infall')
                plt.ylabel('SFR at Infall - SFR at z=0')
                plt.title(s)
            # CREATE A SIMULATED CORE SAMPLE THAT IS MASS-MATCHED TO THE CORE
            # repeat this 1000 times
            # try parallelizing the mass_match call
            #nproc = my.cpu_count()
            #if nmassmatch < nproc:
            #    nproc = nmassmatch
            #pool = mp.Pool(nproc)
            #results = pool.apply(mass_match,args=(core_logmstar,np.log10(sim_core_mstar),nmatch=1)
            for k in range(nmassmatch):
                #for k in range(1):
                print('\t ',k)
                #aindex = nrandom*i+j
                # flat result index: j is the slowest axis, k the fastest
                aindex = nstep_tau*nmassmatch*j + nmassmatch*i + k
                #print(sim_core_mstar[0:10])
                #print(core_logmstar[0:10])
                #print('getting mass matched sample')
                matched_indices = mass_match(core_logmstar,np.log10(sim_core_mstar),nmatch=1)
                #print('done getting mass matched sample')
                # KEEP THE MASS-MATCHED VALUES
                sim_core_mstar_matched = sim_core_mstar[matched_indices]
                sim_core_sfr_matched = sim_core_sfr[matched_indices]
                sim_core_dsfr_matched = sim_core_dsfr[matched_indices]
                # keep track of # that drop out due to size
                # should be specific SFR rather than SFR limit
                # should apply the ssfr > 11.5
                quench_flag = sim_core_sfr_matched < min(external_sfr)
                fquench_sfr[aindex] = sum(quench_flag)/len(quench_flag)
                # removing flag to make sure things work as expected
                # KS tests exclude the "quenched" galaxies from the simulated sample
                D1,p1 = ks_2samp(core_sfr,sim_core_sfr_matched[~quench_flag])
                D2,p2 = ks_2samp(core_dsfr,sim_core_dsfr_matched[~quench_flag])
                #D2,p2 = ks_2samp(core_sfr,sim_core_sfr)
                all_p_sfr[aindex] = p1
                all_p_dsfr[aindex] = p2
                all_boost[aindex] = boost
                all_tau[aindex] = tau
    return all_tau,all_boost,all_p_sfr,all_p_dsfr,fquench_sfr
###########################
##### PLOT FUNCTIONS
###########################
def plot_hexbin(all_drdt,all_p,best_drdt,tmax,gridsize=10,plotsingle=True):
    '''
    Hexbin density plot of KS p-value vs dr/dt over all simulation trials,
    saved to sim_infall_tmax_<tmax>.png.

    * best_drdt : unused in the current plot (the annotation that used it
      is commented out); kept for interface compatibility
    * plotsingle : create a standalone figure with a colorbar; set False
      when placing this in a multi-panel figure
    '''
    if plotsingle:
        plt.figure()
        plt.subplots_adjust(bottom=.15,left=.12)
    # colormap ceiling: ~4x the mean count per hex cell
    myvmax = 1.*len(all_drdt)/(gridsize**2)*4
    #print 'myvmax = ',myvmax
    plt.hexbin(all_drdt, all_p,gridsize=gridsize,cmap='gray_r',vmin=0,vmax=myvmax)
    if plotsingle:
        plt.colorbar(fraction=0.08)
    plt.xlabel(drdt_label1,fontsize=18)
    plt.ylabel(r'$p-value$',fontsize=18)
    #s = r'$t_{max} = %.1f \ Gyr, \ dr/dt = %.2f \ Gyr^{-1}, \ t_{quench} = %.1f \ Gyr$'%(tmax, best_drdt,1./abs(best_drdt))
    s = r'$t_{max} = %.1f \ Gyr$'%(tmax)
    #plt.text(0.02,.7,s,transform = plt.gca().transAxes)
    plt.title(s,fontsize=18)
    output = 'sim_infall_tmax_%.1f.png'%(tmax)
    plt.savefig(output)
def plot_frac_below_pvalue(all_drdt,all_p,all_p_sfr,tmax,nbins=100,plotsingle=True):
    '''
    Plot, as a function of dr/dt, the fraction of trials whose size and SFR
    KS p-values fall below the configured threshold (args.pvalue).  Saves
    frac_pvalue_infall_tmax_<tmax>.png and .pdf.

    BUGFIX: the `nbins` parameter was previously ignored (100 was
    hard-coded) and the selection threshold was hard-coded to 0.05 even
    though the y-axis label reported args.pvalue; both now use the
    documented values.
    '''
    pvalue = args.pvalue
    if plotsingle:
        plt.figure()
        plt.subplots_adjust(bottom=.15,left=.12)
    mybins = np.linspace(min(all_drdt),max(all_drdt),nbins)
    # per-bin trial counts and bin edges
    ytot,xtot = np.histogram(all_drdt,bins=mybins)
    # fraction of trials per bin with size p-value below threshold
    y1 = np.histogram(all_drdt[all_p < pvalue],bins=mybins)[0]/ytot
    # fraction of trials per bin with SFR p-value below threshold
    y2 = np.histogram(all_drdt[all_p_sfr < pvalue],bins=mybins)[0]/ytot
    # calculate the position of the bin centers
    xplt = 0.5*(xtot[0:-1]+xtot[1:])
    plt.plot(xplt,y1,'bo',color=mycolors[0],markersize=6,label='R24/Re')
    plt.plot(xplt,y2,'rs',color=mycolors[1],markersize=6,label='SFR')
    plt.legend()
    plt.xlabel(drdt_label1,fontsize=18)
    plt.ylabel(r'$Fraction(p<{:.3f})$'.format(pvalue),fontsize=18)
    s = r'$t_{max} = %.1f \ Gyr$'%(tmax)
    plt.title(s,fontsize=18)
    output = 'frac_pvalue_infall_tmax_%.1f.png'%(tmax)
    plt.savefig(output)
    output = 'frac_pvalue_infall_tmax_%.1f.pdf'%(tmax)
    plt.savefig(output)
def plot_sfr_size(all_p,all_p_sfr,all_drdt,tmax,plotsingle=True):
    '''
    Scatter plot of SFR p-value vs size p-value for every trial, colored by
    dr/dt, with 0.05 significance guide lines.  Saves
    pvalue-SFR-size-tmax<tmax>Gyr-shrink0.png.

    NOTE(review): an identical second definition of plot_sfr_size appears
    later in this module; at import time the later one shadows this one.
    '''
    if plotsingle:
        plt.figure(figsize=(8,6))
    plt.scatter(all_p,all_p_sfr,c=all_drdt,s=10,vmin=-1,vmax=0)
    plt.xlabel('$p-value \ size$',fontsize=18)
    plt.ylabel('$p-value \ SFR$',fontsize=18)
    # 0.05 significance thresholds
    plt.axhline(y=.05,ls='--')
    plt.axvline(x=.05,ls='--')
    plt.axis([-.09,1,-.09,1])
    ax = plt.gca()
    #ax.set_yscale('log')
    if plotsingle:
        plt.colorbar(label='$dr/dt$')
    plt.savefig('pvalue-SFR-size-tmax'+str(tmax)+'Gyr-shrink0.png')
def plot_frac_below_pvalue_sfr(all_tau,all_p_sfr,tmax,nbins=100,plotsingle=True,pvalue=0.05,color=None):
    '''Plot, vs the quenching timescale tau, the fraction of simulated
    models whose SFR KS p-value falls below ``pvalue``.

    PARAMS
    ------
    * all_tau : quenching timescale of each simulated model (Gyr)
    * all_p_sfr : KS p-value of the SFR comparison for each model
    * tmax : maximum infall time (Gyr); used in the legend and filenames
    * nbins : number of tau bins
    * plotsingle : if True, create a new figure, legend, and axis labels
    * pvalue : p-value threshold
    * color : optional matplotlib color for the curve

    RETURNS
    -------
    * x, y : bin centers and fraction below ``pvalue`` (NaN bins removed)
    '''
    #pvalue = args.pvalue
    if plotsingle:
        plt.figure()
        plt.subplots_adjust(bottom=.15,left=.12)
    mybins = np.linspace(min(all_tau),max(all_tau),nbins)
    t= np.histogram(all_tau,bins=mybins)
    #print(t)
    ytot = t[0]   # counts per tau bin over all models
    xtot = t[1]   # bin edges
    flag = all_p_sfr < pvalue
    t = np.histogram(all_tau[flag],bins=mybins)
    y2 = t[0]/ytot   # fraction of models below threshold in each bin
    #plt.figure()
    # calculate the position of the bin centers
    xplt = 0.5*(xtot[0:-1]+xtot[1:])
    # drop empty bins where 0/0 produced NaN before plotting
    plotflag = ~np.isnan(y2)
    x = xplt[plotflag]
    y = y2[plotflag]
    # NOTE(review): the {:d} format requires an integer tmax; a float tmax
    # would raise ValueError here -- confirm callers always pass ints
    if color is None:
        plt.plot(x,y,marker='s',markersize=6,label='tmax={:d}Gyr'.format(tmax))
    else:
        plt.plot(x,y,marker='s',markersize=6,label='tmax={:d}Gyr'.format(tmax),color=color)
    print('pvalue = ',pvalue)
    #s = r'$t_{max} = %.1f \ Gyr, \ dr/dt = %.2f \ Gyr^{-1}, \ t_{quench} = %.1f \ Gyr$'%(tmax, best_drdt,1./abs(best_drdt))
    s = r'$t_{max} = %.1f \ Gyr$'%(tmax)
    #plt.text(0.02,.7,s,transform = plt.gca().transAxes)
    #plt.title(s,fontsize=18)
    if plotsingle:
        plt.legend()
        plt.xlabel(r'$\tau (Gyr)$',fontsize=18)
        plt.ylabel(r'$Fraction(p<{:.3f})$'.format(pvalue),fontsize=18)
    output = 'frac_pvalue_sfr_infall_tmax_%.1f.png'%(tmax)
    plt.savefig(output)
    output = 'frac_pvalue_sfr_infall_tmax_%.1f.pdf'%(tmax)
    plt.savefig(output)
    return x,y
def plot_sfr_size(all_p,all_p_sfr,all_drdt,tmax,plotsingle=True):
    '''Scatter the SFR p-value against the size p-value, color-coded by
    the disk-shrinking rate dr/dt.'''
    if plotsingle:
        plt.figure(figsize=(8, 6))
    plt.scatter(all_p, all_p_sfr, c=all_drdt, s=10, vmin=-1, vmax=0)
    plt.xlabel('$p-value \ size$', fontsize=18)
    plt.ylabel('$p-value \ SFR$', fontsize=18)
    # mark the 0.05 significance threshold on each axis
    plt.axhline(y=0.05, ls='--')
    plt.axvline(x=0.05, ls='--')
    plt.axis([-0.09, 1, -0.09, 1])
    current_axes = plt.gca()
    # add the colorbar and save only when this is a stand-alone figure
    if plotsingle:
        plt.colorbar(label='$dr/dt$')
        plt.savefig('pvalue-SFR-size-tmax'+str(tmax)+'Gyr-shrink0.png')
def plot_multiple_tmax(nrandom=100):
    '''Run the infall simulation for tmax = 1-4 Gyr, one subplot each,
    and save sim_infall_multiple_tmax.pdf / fig18.pdf.

    PARAMS
    ------
    * nrandom : number of random realizations passed to run_sim
    '''
    plt.figure(figsize=(10,6))
    plt.subplot(2,2,1)
    best_drdt, best_sim_core,ks_p_max,all_drdt,all_p,all_p_sfr=run_sim(tmax=1,drdt_step=.05,nrandom=nrandom,plotsingle=False)
    # BUG FIX: `tmax` was an undefined name here (NameError); the first
    # panel is run with tmax=1, so pass 1 explicitly.
    plot_sfr_size(all_p,all_p_sfr,all_drdt,1)
    plt.subplot(2,2,2)
    run_sim(tmax=2,drdt_step=.05,nrandom=nrandom,plotsingle=False)
    plt.subplot(2,2,3)
    run_sim(tmax=3,drdt_step=.05,nrandom=nrandom,plotsingle=False)
    plt.subplot(2,2,4)
    run_sim(tmax=4,drdt_step=.05,nrandom=nrandom,plotsingle=False)
    plt.subplots_adjust(hspace=.5,bottom=.1)
    plt.savefig('sim_infall_multiple_tmax.pdf')
    plt.savefig('fig18.pdf')
def plot_multiple_tmax_wsfr(nrandom=100):
    '''Run the infall simulation for tmax = 1-4 Gyr; top row shows the
    run_sim panels, bottom row the fraction of models below the p-value
    threshold.  Saves sim_infall_multiple_tmax_wsfr.pdf/.png.

    PARAMS
    ------
    * nrandom : number of random realizations passed to run_sim
    '''
    plt.figure(figsize=(12,6))
    mytmax = [1,2,3,4]
    allax = []
    for i,tmax in enumerate(mytmax):
        plt.subplot(2,4,i+1)
        best_drdt, best_sim_core,ks_p_max,all_drdt,all_p,all_p_sfr=run_sim(tmax=tmax,drdt_step=.05,nrandom=nrandom,plotsingle=False)
        allax.append(plt.gca())
        plt.subplot(2,4,i+5)
        # BUG FIX: plot_frac_below_pvalue does not accept a pvalue keyword
        # (it reads args.pvalue internally); passing pvalue=0.05 raised
        # TypeError, so the kwarg is dropped.
        plot_frac_below_pvalue(all_drdt,all_p,all_p_sfr,tmax,nbins=100,plotsingle=False)
        allax.append(plt.gca())
    plt.subplots_adjust(hspace=.5,wspace=.7,bottom=.1)
    cb = plt.colorbar(ax=allax,label='$dr/dt$')
    plt.savefig('sim_infall_multiple_tmax_wsfr.pdf')
    plt.savefig('sim_infall_multiple_tmax_wsfr.png')
def plot_multiple_tmax_wsfr2(nrandom=100):
    '''Run the infall simulation for tmax = 1-4 Gyr and show, one panel
    per tmax, the fraction of models below the p-value threshold.
    Saves sim_infall_multiple_tmax_wsfr.pdf/.png.'''
    plt.figure(figsize=(10, 8))
    allax = []
    for panel, tmax in enumerate([1, 2, 3, 4], start=1):
        plt.subplot(2, 2, panel)
        results = run_sim(tmax=tmax, drdt_step=.05, nrandom=nrandom,
                          plotsingle=False, plotflag=False)
        # model 3 returns an extra boost-factor array
        if args.model < 3:
            best_drdt, best_sim_core, ks_p_max, all_drdt, all_p, all_p_sfr = results
        else:
            best_drdt, best_sim_core, ks_p_max, all_drdt, all_p, all_p_sfr, boost = results
        plot_frac_below_pvalue(all_drdt, all_p, all_p_sfr, tmax, nbins=100, plotsingle=False)
        allax.append(plt.gca())
    plt.subplots_adjust(hspace=.5, wspace=.5, bottom=.1)
    plt.savefig('sim_infall_multiple_tmax_wsfr.pdf')
    plt.savefig('sim_infall_multiple_tmax_wsfr.png')
def plot_results(core,external,sim_core,best_drdt,tmax):
    '''Compare the normalized R24/Rd distributions of the core, external,
    and best-fit simulated core samples.

    PARAMS
    ------
    * core, external : observed R24/Rd values of the core/external samples
    * sim_core : simulated core R24/Rd values for the best-fit model
    * best_drdt : best-fit disk-shrinking rate (1/Gyr)
    * tmax : maximum infall time of the simulation (Gyr)
    '''
    plt.figure()
    mybins = np.arange(0,2,.2)
    # CONSISTENCY FIX: lw was the string '3' here but the numeric 3 in the
    # next call; pass a number like everywhere else.
    plt.hist(core,bins=mybins,color='r',histtype='step',label='Core',lw=3,normed=True)
    plt.hist(external,bins=mybins,color='b',ls='-',lw=3,histtype='step',label='External',normed=True)
    plt.hist(sim_core,bins=mybins,color='k',hatch='//',histtype='step',label='Sim Core',normed=True)
    # NOTE(review): `normed` was removed in matplotlib 3.1; switch to
    # density=True if the matplotlib dependency is upgraded.
    plt.subplots_adjust(bottom=.15)
    plt.xlabel('$R_{24}/R_d$', fontsize=22)
    plt.ylabel('$Frequency$',fontsize=22)
    # annotate the best-fit parameters in the upper-left corner
    s = '$dr/dt = %.2f /Gyr$'%(best_drdt)
    plt.text(0.02,.7,s,transform = plt.gca().transAxes)
    s = '$t_{quench} = %.1f Gyr$'%(1./abs(best_drdt))
    plt.text(0.02,.65,s,transform = plt.gca().transAxes)
    s = '$t_{max} = %.1f Gyr$'%(tmax)
    plt.text(0.02,.6,s,transform = plt.gca().transAxes)
    plt.legend(loc='upper left')
def plot_model3(all_drdt,all_p,all_p_sfr,boost,tmax=2):
    '''plot boost factor vs dr/dt, colored by pvalue

    Two side-by-side panels: size-constraint p-values on the left and
    SFR-constraint p-values on the right, each with its own colorbar.
    Saves model3-tmax<tmax>-size-sfr-constraints.png/.pdf in plotdir.
    '''
    plt.figure(figsize=(12,4))
    plt.subplots_adjust(wspace=.5)
    colors = [all_p,all_p_sfr]
    # NOTE(review): `labels` is unused; the panel headings come from `titles`
    labels = ['size p value','sfr p value']
    titles = ['Size Constraints','SFR Constraints']
    # upper limit of the p-value colorbar
    v2 = .005
    allax = []
    for i in range(len(colors)):
        plt.subplot(1,2,i+1)
        plt.scatter(all_drdt,boost,c=colors[i],vmin=0,vmax=v2,s=15)
        plt.title(titles[i])
        plt.xlabel(drdt_label1,fontsize=16)
        plt.ylabel('I boost/I0',fontsize=16)
        allax.append(plt.gca())
        # one colorbar per panel
        cb = plt.colorbar()
        cb.set_label('KS p value')
    plt.savefig(plotdir+'/model3-tmax'+str(tmax)+'-size-sfr-constraints.png')
    plt.savefig(plotdir+'/model3-tmax'+str(tmax)+'-size-sfr-constraints.pdf')
def plot_boost_3panel(all_drdt,all_p,all_p_sfr,boost,tmax=2,v2=.005,model=3):
    '''Three-panel scatter of the SFR boost factor vs dr/dt, color-coded
    by the size, SFR, and combined (minimum) KS p-values.

    PARAMS
    ------
    * all_drdt : disk-shrinking rate for each model
    * all_p : KS p-value of the size comparison
    * all_p_sfr : KS p-value of the SFR comparison
    * boost : central SFR boost factor for each model
    * tmax : maximum infall time (Gyr); used in the output filenames
    * v2 : upper limit of the p-value colorbar
    * model : model number; selects the x label and output filenames
    '''
    plt.figure(figsize=(14,4))
    plt.subplots_adjust(wspace=.01,bottom=.2)
    # the third panel shows the stricter of the two constraints
    colors = [all_p,all_p_sfr,np.minimum(all_p,all_p_sfr)]
    titles = ['Size Constraints','SFR Constraints','minimum(Size, SFR)']
    allax = []
    psize=30
    for i in range(len(colors)):
        plt.subplot(1,3,i+1)
        plt.scatter(all_drdt,boost,c=colors[i],vmin=0,vmax=v2,s=psize)
        plt.title(titles[i],fontsize=20)
        if i == 0:
            plt.ylabel('$I_{boost}/I_e$',fontsize=24)
        else:
            # hide duplicate y tick labels but keep the limits aligned
            y1,y2 = plt.ylim()
            plt.yticks([])
            plt.ylim(y1,y2)
        if model == 1:
            plt.xlabel(drdt_label1,fontsize=24)
        else:
            plt.xlabel(drdt_label2,fontsize=24)
        allax.append(plt.gca())
    cb = plt.colorbar(ax=allax,fraction=.08)
    cb.set_label('KS p value')
    # BUG FIX: the png name previously hard-coded 'model3' while the pdf
    # used the model parameter; keep both consistent.
    plt.savefig(plotdir+'/model'+str(model)+'-tmax'+str(tmax)+'-size-sfr-constraints-3panel.png')
    plt.savefig(plotdir+'/model'+str(model)+'-tmax'+str(tmax)+'-size-sfr-constraints-3panel.pdf')
def plot_drdt_boost_ellipse(all_drdt,all_p,all_p_sfr,boost,tmax=2,levels=None,model=3,figname=None,alpha=.5,nbins=20):
    '''plot error ellipses of drdt and boost

    Filled contours show where the size (color) and SFR (gray) KS p-values
    exceed the lowest level; the thick black contour outlines the region
    where BOTH constraints are satisfied simultaneously.

    PARAMS
    ------
    * all_drdt, boost : model parameters
    * all_p, all_p_sfr : size and SFR KS p-values
    * tmax : maximum infall time (unused except through figname)
    * levels : contour levels; default [.05, 1]
    * model : model number; selects the x label and panel annotation
    * figname : if given, save <plotdir>/<figname>.png and .pdf
    * alpha : transparency of the filled contours
    * nbins : grid resolution passed to grid_xyz
    '''
    plt.figure(figsize=(6,6))
    plt.subplots_adjust(left=.15,bottom=.2)
    allz = [all_p,all_p_sfr]
    allcolors = [mycolors[0],'0.5']
    titles = ['Size Constraints','SFR Constraints']
    # IDIOM FIX: the old `else: levels = levels` branch was a no-op;
    # removed along with the unused labels/psize/allax locals.
    if levels is None:
        levels = [.05,1]
    for i in range(len(allz)):
        xgrid,ygrid,zgrid = grid_xyz(all_drdt,boost,allz[i],nbins=nbins)
        plt.contourf(xgrid,ygrid,zgrid,colors=allcolors[i],levels=levels,label=titles[i],alpha=alpha)
        if i == 0:
            zgrid0=zgrid
    # define region where both are above .05
    zcomb = np.minimum(zgrid0,zgrid)
    plt.contour(xgrid,ygrid,zcomb,linewidths=4,colors='k',levels=[.05,1])
    plt.ylabel('$SFR \ Boost \ Factor \ (I_{boost}/I_o)$',fontsize=20)
    if model == 1:
        plt.xlabel(drdt_label1,fontsize=20)
    else:
        plt.xlabel(drdt_label2,fontsize=20)
    plt.xticks(fontsize=10)
    plt.yticks(fontsize=10)
    plt.text(.05,.9,'Model '+str(model),transform=plt.gca().transAxes,horizontalalignment='left',fontsize=20)
    if figname is not None:
        plt.savefig(plotdir+'/'+figname+'.png')
        plt.savefig(plotdir+'/'+figname+'.pdf')
def plot_model1_3panel(all_drdt,all_p,all_p_sfr,tmax=2,v2=.005,model=1,vmin=-4):
    '''
    make a 1x3 plot showing
    (1) pvalue vs dr/dt for size
    (2) pvalue vs dr/dt for SFR
    (3) pvalue size vs pvalue SFR, color coded by dr/dt
    PARAMS
    ------
    * all_drdt : output from run_sim; disk-shrinking rate for each model
    * all_p : output from run_sim; KS pvalue for size comparison
    * all_p_sfr : output from run_sim; KS pvalue for SFR comparison
    * tmax : tmax of simulation, default is 2 Gyr
    * v2 : max value for colorbar; default is 0.005 for 2sigma
    * model : default is 1; could use this plot for models 1 and 2
    * vmin : lower limit of the dr/dt colorbar in the third panel
    OUTPUT
    ------
    * save png and pdf plot in plotdir
    * title is: model3-tmax'+str(tmax)+'-size-sfr-constraints-3panel.pdf
    '''
    plt.figure(figsize=(14,4))
    plt.subplots_adjust(wspace=.5,bottom=.15)
    # x/y data and labels for the three panels
    xvars = [all_drdt, all_drdt, all_p]
    yvars = [all_p, all_p_sfr, all_p_sfr]
    if model == 1:
        drdt_label = drdt_label1
    else:
        drdt_label = drdt_label2
    xlabels=[drdt_label,drdt_label,'pvalue Size']
    ylabels=['pvalue Size','pvalue SFR','pvalue SFR']
    titles = ['Size Constraints','SFR Constraints','']
    allax = []
    for i in range(len(xvars)):
        plt.subplot(1,3,i+1)
        if i < 2:
            plt.scatter(xvars[i],yvars[i],s=10,alpha=.5)
            plt.title(titles[i])
        else:
            # plot pvalue vs pvalue, color coded by dr/dt
            plt.scatter(all_p,all_p_sfr,c=all_drdt,vmin=vmin,vmax=0,s=5)
            plt.title('Size \& SFR Constraints')
        plt.xlabel(xlabels[i],fontsize=20)
        plt.ylabel(ylabels[i],fontsize=20)
        plt.ylim(-.02,1.02)
        allax.append(plt.gca())
    cb = plt.colorbar(ax=allax,fraction=.08)
    cb.set_label('dr/dt')
    # the threshold lines and shading below apply to the last (third) panel
    plt.axhline(y=.05,ls='--')
    plt.axvline(x=.05,ls='--')
    ax = plt.gca()
    #plt.axis([-.01,.35,-.01,.2])
    # shade the region where the size constraint passes (p > 0.05)
    xl = np.linspace(.05,1,100)
    y1 = np.ones(len(xl))
    y2 = .05*np.ones(len(xl))
    plt.fill_between(xl,y1=y1,y2=y2,alpha=.1)
    plt.savefig(plotdir+'/model'+str(model)+'-tmax'+str(tmax)+'-size-sfr-constraints-3panel.png')
    plt.savefig(plotdir+'/model'+str(model)+'-tmax'+str(tmax)+'-size-sfr-constraints-3panel.pdf')
def plot_quenched_fraction(all_drdt,all_boost, fquench_size,fquench_sfr,fquench,vmax=.5,model=1):
    '''Scatter the quenched fractions in the (dr/dt, boost) plane.

    PARAMS
    ------
    * all_drdt, all_boost : model parameters
    * fquench_size : fraction of galaxies with R24 = 0
    * fquench_sfr : fraction of galaxies with SFR below the detection limit
    * fquench : combined quenched fraction (currently not plotted; see note)
    * vmax : upper limit of the fraction colorbar
    * model : selects the x-axis label
    '''
    plt.figure(figsize=(14,4))
    #plt.subplots_adjust(bottom=.15,left=.1)
    plt.subplots_adjust(wspace=.01,bottom=.2)
    allax=[]
    colors = [fquench_size, fquench_sfr,fquench]
    # total quenching is the same as SFR quenching
    # can't have galaxies with zero size that still has SFR above detection limit
    # therefore, only need first two panels
    for i in range(len(colors)-1):
        plt.subplot(1,3,i+1)
        plt.scatter(all_drdt,all_boost,c=colors[i],vmin=0,vmax=vmax)
        allax.append(plt.gca())
        if model == 1:
            drdt_label = drdt_label1
        else:
            drdt_label = drdt_label2
        plt.xlabel(drdt_label,fontsize=24)
        if i == 0:
            plt.ylabel('$I_{boost}/I_e$',fontsize=24)
            plt.title('Frac with $R_{24} = 0$',fontsize=20)
        if i == 1:
            plt.yticks([])
            plt.title('Frac with $SFR < Limit$',fontsize=20)
        # NOTE: unreachable while the loop stops at len(colors)-1 panels;
        # kept for the day the combined panel is re-enabled
        if i == 2:
            plt.yticks([])
            plt.title('Combined Fractions',fontsize=20)
    plt.colorbar(ax=allax,label='Fraction',fraction=.08)
def compare_single(var,flag1,flag2,xlab):
    '''Overlay cumulative distributions of ``var`` for two subsamples and
    run a KS test comparing them.

    RETURNS
    -------
    * D, p : KS statistic and p-value
    '''
    lower = min(var)
    upper = max(var)
    print('KS test comparing members and exterior')
    (D, p) = lcommon.ks(var[flag1], var[flag2])
    plt.xlabel(xlab, fontsize=18)
    plt.hist(var[flag1], bins=len(var[flag1]), cumulative=True, histtype='step',
             normed=True, label='Core', range=(lower, upper), color='k')
    plt.hist(var[flag2], bins=len(var[flag2]), cumulative=True, histtype='step',
             normed=True, label='Infall', range=(lower, upper), color='0.5')
    plt.legend(loc='upper left')
    plt.ylim(-.05, 1.05)
    axes = plt.gca()
    # annotate the KS statistic and p-value in the lower right
    plt.text(.9, .25, '$D = %4.2f$' % (D), horizontalalignment='right',
             transform=axes.transAxes, fontsize=16)
    plt.text(.9, .1, '$p = %5.4f$' % (p), horizontalalignment='right',
             transform=axes.transAxes, fontsize=16)
    return D, p
def compare_cluster_exterior(sizes,coreflag,infallflag):
    '''2x2 grid of cumulative-distribution comparisons (core vs infall)
    for stellar mass, B/T, redshift, and SFR.

    PARAMS
    ------
    * sizes : table with columns logMstar, B_T_r, ZDIST, logSFR
    * coreflag, infallflag : boolean masks selecting the two subsamples
    '''
    plt.figure(figsize=(8,6))
    plt.subplots_adjust(bottom=.15,hspace=.4,top=.95)
    plt.subplot(2,2,1)
    # FIX: raw strings -- the labels contain LaTeX sequences (\odot, \log,
    # "\ ") that are invalid Python escapes and raised DeprecationWarnings;
    # the rendered text is unchanged.
    compare_single(sizes['logMstar'],flag1=coreflag,flag2=infallflag,xlab=r'$ log_{10}(M_*/M_\odot) $')
    plt.legend(loc='upper left')
    plt.xticks(np.arange(9,12,.5))
    #plt.xlim(8.9,11.15)
    plt.subplot(2,2,2)
    compare_single(sizes['B_T_r'],flag1=coreflag,flag2=infallflag,xlab=r'$GIM2D \ B/T $')
    plt.xticks(np.arange(0,1.1,.2))
    plt.xlim(-.01,.3)
    plt.subplot(2,2,3)
    compare_single(sizes['ZDIST'],flag1=coreflag,flag2=infallflag,xlab=r'$ Redshift $')
    plt.xticks(np.arange(0.02,.055,.01))
    plt.xlim(.0146,.045)
    plt.subplot(2,2,4)
    compare_single(sizes['logSFR'],flag1=coreflag,flag2=infallflag,xlab=r'$ \log_{10}(SFR/M_\odot/yr)$')
    plt.text(-1.5,1,r'$Cumulative \ Distribution$',fontsize=22,transform=plt.gca().transAxes,rotation=90,verticalalignment='center')
if __name__ == '__main__':
    # Script entry point.  Only the SFMS lookup table is loaded eagerly;
    # the simulation/plotting calls are kept commented out and are meant
    # to be run interactively from this module's namespace.
    # run program
    print('Welcome!')
    #if args.model == 3:
    #    best_drdt, best_sim_core,ks_p_max,all_drdt,all_p,all_p_sfr,all_boost = run_sim(tmax=args.tmax,drdt_step=0.05,nrandom=100,rmax=6)
    #else:
    #    best_drdt, best_sim_core,ks_p_max,all_drdt,all_p,all_p_sfr = run_sim(tmax=args.tmax,drdt_step=0.05,nrandom=100,rmax=6)
    # plot
    #plot_frac_below_pvalue(all_drdt,all_p,best_drdt,args.tmax,nbins=100,pvalue=0.05,plotsingle=True)
    # read in data file (should only do this once though, right?)
    # NOTE(review): hard-coded absolute path; only resolves on the author's
    # machine -- consider making it configurable
    lookup_table = fits.getdata('/home/rfinn/research/LCS/sfr_modeling/forward_model_sfms.fits')
    pass
|
rfinn/LCS
|
python/LCSsimulate-infall-sfrs.py
|
Python
|
gpl-3.0
| 39,087
|
[
"Galaxy"
] |
ee9a68dd8e983e490cc034547f58ffb931ff189eba2581d184ad0e96a73fdd21
|
import bpy
from bpy.props import BoolProperty,IntProperty,FloatProperty,EnumProperty,PointerProperty,FloatVectorProperty,StringProperty
from . import config
# def SetResolution(self, context):
# self.width = max(int(context.scene.blcloudrender.res_p*context.scene.blcloudrender.res_x),1);
# self.height = max(int(context.scene.blcloudrender.res_p*context.scene.blcloudrender.res_y),1);
# context.scene.render.resolution_x = self.width;
# context.scene.render.resolution_y = self.height;
# context.scene.render.resolution_percentage = 100;
# class ClRenderProperties(bpy.types.PropertyGroup):
# res_x = IntProperty(name="Res.X",default=1920,min=1,description="Image width in pixels",update=SetResolution);
# res_y = IntProperty(name="Res.Y",default=1080,min=1,description="Image height in pixels",update=SetResolution);
# res_p = FloatProperty(name="Res.%",default=0.5,min=0.01,max=1,description="Resolution percentage",update=SetResolution);
#
# def draw(self, context, layout):
# layout.row().label("Dimensions:");
#
# s = layout.split();
# c = s.column();
# c.row().prop(context.scene.render,"resolution_x");
# c.row().prop(context.scene.render,"resolution_y");
#
# c = s.column();
# c.row().prop(context.scene.render,"resolution_percentage");
class ClRenderPanel(bpy.types.Panel):
    """Render-properties panel exposing the output dimension settings."""
    bl_idname = "ClRenderPanel"
    bl_label = "Render"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "render"

    @classmethod
    def poll(cls, context):
        # visible only while the Droplet render engine is selected
        return context.scene.render.engine == config.dre_engineid

    def draw(self, context):
        layout = self.layout
        render = context.scene.render
        layout.row().label("Dimensions:")

        split = layout.split()
        column = split.column()
        column.row().prop(render, "resolution_x")
        column.row().prop(render, "resolution_y")

        column = split.column()
        column.row().prop(render, "resolution_percentage")
class ClSamplingProperties(bpy.types.PropertyGroup):
    """Sampling settings for the Droplet render engine (scene.blcloudsampling)."""
    # per-pixel sample count
    samples = IntProperty(name="Render",default=1000,min=1,description="Number of samples to be taken for each pixel.");
    # maximum number of volume scattering events per path
    scatterevs = IntProperty(name="Scattering",default=500,min=0,max=1000,description="Maximum volume scattering events. Recursivity is employed to compute the subsequent inscattering contributions. For large numbers stack size should be adequate to prevent overflows.");
    # macroscopic scattering/absorption cross sections at maximum density
    msigmas = FloatProperty(name="Sigma.S",default=80.0,min=0.001,description="Macroscopic scattering cross section for maximum density.");
    msigmaa = FloatProperty(name="Sigma.A",default=0.001,min=0.001,description="Macroscopic absorption cross section for maximum density.");
    # phase function model: fast Henyey-Greenstein or precomputed Mie tables
    phasef = EnumProperty(name="Phase function",default="M",items=(
        ("H","Henyey-Greenstein","Henyey-Greenstein phase function. A fast approximation with plausible results."),
        ("M","Mie","Precomputed RGB Mie phase function for typical cloud droplets. Being the most accurate this is also the most inefficient due to partly unvectorized table lookups. Note that spectral rendering is required to correctly sample for different wavelengths, although in case of Mie the dispersion is small enough to be approximated without separating the RGB channels.")));
    # anisotropy 'g', used only by the Henyey-Greenstein phase function
    phasea = FloatProperty(name="Anisotropy",default=0.75,description="Anisotropy parameter 'g' for the Henyey-Greenstein phase function.");

    def draw(self, context, layout):
        # two-column layout: sampling and light transport on the left,
        # path tracing and phase function on the right
        s = layout.split();
        c = s.column();
        c.row().label("Samples:");
        c.row().prop(self,"samples");
        #seed, default 1000
        c.row().label("Light transport:");
        c.row().prop(self,"msigmas");
        c.row().prop(self,"msigmaa");

        c = s.column();
        c.row().label("Path tracing:");
        c.row().prop(self,"scatterevs");
        c.row().label("Phase function:");
        c.row().prop(self,"phasef");
        # the anisotropy slider only applies to Henyey-Greenstein
        if self.phasef == "H":
            c.row().prop(self,"phasea");
class ClSamplingPanel(bpy.types.Panel):
    """Render-properties panel delegating to ClSamplingProperties.draw."""
    bl_idname = "ClSamplingPanel"
    bl_label = "Sampling"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "render"

    @classmethod
    def poll(cls, context):
        # visible only while the Droplet render engine is selected
        return context.scene.render.engine == config.dre_engineid

    def draw(self, context):
        context.scene.blcloudsampling.draw(context, self.layout)
class ClGridProperties(bpy.types.PropertyGroup):
    """Voxel-grid construction settings (scene.blcloudgrid)."""
    # world-space size of the smallest resolvable detail
    detailsize = FloatProperty(name="Detail size",default=0.01,min=0.0001,precision=4,description="Smallest detail size during scene costruction in blender units.");
    # octree subdivision limit; deeper trees are sparser but slower to build
    maxdepth = IntProperty(name="Max Depth",default=12,min=1,description="Maximum octree depth. Limiting depth to smaller values increases render performance, but at the cost of less sparse data and higher memory requirements.");
    # narrow-band width for the low-resolution SceneInfo distance field
    qfbandw = FloatProperty(name="Band",default=1.0,min=0.01,precision=2,description="Outer narrow-band width of the low-resolution distance query field. This field is only constructed when the 'distance' output of the SceneInfo-node is used. A separate low-resolution field is created to allow approximate distance evaluation in larger global domains, as opposed to tight and local surface-surrounding field of the high-resolution field.");

    def draw(self, context, layout):
        # left column: resolution and octree; right column: SceneInfo query
        s = layout.split();
        c = s.column();
        c.row().label("Resolution:");
        c.row().prop(self,"detailsize");
        c.row().label("Octree:");
        c.row().prop(self,"maxdepth");

        c = s.column();
        c.row().label("SceneInfo Query:");
        c.row().prop(self,"qfbandw");
class ClGridPanel(bpy.types.Panel):
    """Render-properties panel delegating to ClGridProperties.draw."""
    bl_idname = "ClGridPanel"
    bl_label = "Grid"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "render"

    @classmethod
    def poll(cls, context):
        # visible only while the Droplet render engine is selected
        return context.scene.render.engine == config.dre_engineid

    def draw(self, context):
        context.scene.blcloudgrid.draw(context, self.layout)
class ClPerformanceProperties(bpy.types.PropertyGroup):
    """Tile, caching, and internal-sampling settings (scene.blcloudperf)."""
    # tile dimensions; all threads cooperate on a single tile
    tilex = IntProperty(name="X",default=128,min=4,description="Horizontal tile size. By design all threads contribute to one tile simultaneously."); #step=2
    # NOTE(review): tiley has no min= unlike tilex -- possibly an oversight; confirm
    tiley = IntProperty(name="Y",default=128,description="Vertical tile size. By design all threads contribute to one tile simultaneously.");
    #deprecated ######
    cache = BoolProperty(name="Enable",default=False,description="Enable the grid disk caching for individual objects. Until the object cache is reconstructed, the object is unaffected by any changes to it or its nodes.");
    cachelayer = IntProperty(name="Layer",default=10,min=0,max=19,description="Objects in this scene layer are read from the cache, or written to it if the cache doesn't exist. Remove the object from this layer to reconstruct the cache, or manually delete the cache files.");
    cachedir = StringProperty(name="Path",subtype="DIR_PATH",default="/tmp/",description="Location for the VDB cache.");
    # samples computed per engine round-trip before refreshing the result
    samples = IntProperty(name="Int.Samples",default=100,min=1,description="Maximum number of samples taken internally by the render engine before returning to update the render result. Higher number of internal samples results in slightly faster render times, but also increases the interval between visual updates.");

    def draw(self, context, layout):
        # left column: tiles and internal sampling; right column: caching
        s = layout.split();
        c = s.column();
        c.row().label("Tiles:");
        c.row().prop(self,"tilex");
        c.row().prop(self,"tiley");
        c.row().label("Internal sampling:");
        c.row().prop(self,"samples");

        c = s.column();
        c.row().label("Caching:");#,icon="FILE");
        c.row().prop(self,"cache");
        c.row().prop(self,"cachelayer");
        c.row().label("Location:");
        c.row().prop(self,"cachedir");
class ClPerformancePanel(bpy.types.Panel):
    """Render-properties panel delegating to ClPerformanceProperties.draw."""
    bl_idname = "ClPerformancePanel"
    bl_label = "Performance"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "render"

    @classmethod
    def poll(cls, context):
        # visible only while the Droplet render engine is selected
        return context.scene.render.engine == config.dre_engineid

    def draw(self, context):
        context.scene.blcloudperf.draw(context, self.layout)
class ClLayerPanel(bpy.types.Panel):
    """Render-layer panel for choosing the scene and layer visibility masks."""
    bl_idname = "ClLayerPanel"
    bl_label = "Layer"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "render_layer"

    @classmethod
    def poll(cls, context):
        # visible only while the Droplet render engine is selected
        return context.scene.render.engine == config.dre_engineid

    def draw(self, context):
        split = self.layout.split()
        split.column().prop(context.scene, "layers", text="Scene")
        split.column().prop(context.scene.render.layers.active, "layers", text="Layer")
# #proxy to enable render results outputs and rename them
# def EnableDirectional(self, context):
# context.scene.render.layers.active.use_pass_transmission_direct = context.scene.blcloudpasses.dirlight;
# for i in context.scene.node_tree.nodes:
# if i.type != "R_LAYERS":
# continue;
# n = i.outputs.find("TransDir");
# #i.outputs[n].name = "Directional";
# i.outputs[n].enabled = context.scene.blcloudpasses.dirlight;
#
# def EnableEnvironment(self, context):
# context.scene.render.layers.active.use_pass_transmission_indirect = context.scene.blcloudpasses.envlight;
# for i in context.scene.node_tree.nodes:
# if i.type != "R_LAYERS":
# continue;
# n = i.outputs.find("TransInd");
# #i.outputs[n].name = "Environment";
# i.outputs[n].enabled = context.scene.blcloudpasses.envlight;
#
# class ClPassProperties(bpy.types.PropertyGroup):
# dirlight = BoolProperty(name="Directional",default=False,update=EnableDirectional);
# envlight = BoolProperty(name="Environment",default=False,update=EnableEnvironment);
#
# def draw(self, context, layout):
# layout.row().prop(self,"dirlight");
# layout.row().prop(self,"envlight");
class ClPassPanel(bpy.types.Panel):
    """Render-layer panel exposing the render passes Droplet can produce."""
    bl_idname = "ClPassPanel"
    bl_label = "Passes"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "render_layer"

    @classmethod
    def poll(cls, context):
        # visible only while the Droplet render engine is selected
        return context.scene.render.engine == config.dre_engineid

    def draw(self, context):
        layout = self.layout
        active_layer = context.scene.render.layers.active
        layout.row().label("Directional and environment lighting")
        layout.row().prop(active_layer, "use_pass_combined")
        layout.row().prop(active_layer, "use_pass_transmission_direct")
        layout.row().prop(active_layer, "use_pass_transmission_indirect")
        layout.row().label("Shadow mask for external depth buffer")
        layout.row().prop(active_layer, "use_pass_shadow")
def TextureSelectionRGB(self, context):
    """EnumProperty items callback: RGBA images selectable as environment maps.

    The first entry is a sentinel meaning the map is disabled; indices of
    the remaining items follow the image's position in bpy.data.images.
    """
    items = [("(droplet.nan)", "( Not Used )", "Map disabled", "X", 0)]
    for index, image in enumerate(bpy.data.images):
        # only 4-channel images; skip Blender's internal result buffers
        if image.channels == 4 and image.name not in ("Render Result", "Viewer Node"):
            items.append((image.name, image.name, image.name, "IMAGE_COL", index + 1))
    return items
def TextureSelectionR(self, context):
    """EnumProperty items callback: images selectable as (single-channel) depth maps.

    Mirrors TextureSelectionRGB but uses the TEXTURE icon for its entries.
    """
    items = [("(droplet.nan)", "( Not Used )", "Map disabled", "X", 0)]
    for index, image in enumerate(bpy.data.images):
        # only 4-channel images; skip Blender's internal result buffers
        if image.channels == 4 and image.name not in ("Render Result", "Viewer Node"):
            items.append((image.name, image.name, image.name, "TEXTURE", index + 1))
    return items
class ClEnvironmentProperties(bpy.types.PropertyGroup):
    """World-level environment and compositing settings (world.droplet)."""
    # equirectangular environment texture (sun excluded)
    envtex = EnumProperty(name="Environment Map",items=TextureSelectionRGB,description="Equirectangularly mapped environment texture to be used (sky and groud). The image should be low-frequency and should not include the sun. Droplet ignores the zeroth order environment lighting, so that optionally different background may be used for alpha compositing.");
    # optional depth buffer from the primary render engine
    depthtex = EnumProperty(name="Depth Map",items=TextureSelectionR,description="The optionial depth texture from the primary render engine. This can be used to calculate local shadowing for the reconstructed locations (shadow pass). The camera properties should match the primary render (view, projection) and the map should be unnormalized and linear. Cycles depth output is readily usable, assuming that the camera is shared between the two renders. Filtering may also have to be disabled (Gaussian, width = ~0) for correct results.");
    depthcomp = BoolProperty(name="Depth Composition",default=False,description="Enable depth map occlusion testing. Use the depth map to limit the camera ray traversal.");
    # requires an Embree-enabled Droplet build
    occlusion = BoolProperty(name="Occlusion Geometry",default=False,description="Enable holdout geometry occlusion testing. Every object marked as holdout will occlude rays creating shadows and lightshafts. Holdout object itself is not visible. This feature may have a significant performance impact, and requires Droplet to be built with Intel Embree.");

    def draw(self, context, layout):
        layout.row().label("Environment Lighting:");
        layout.row().prop(self,"envtex");
        layout.row().label("Render Compositing:");
        layout.row().prop(self,"depthtex"); #TODO: allow only 1-channel textures
        layout.row().prop(self,"depthcomp");
        layout.row().prop(self,"occlusion");
class ClEnvironmentPanel(bpy.types.Panel):
    """World-context panel delegating to ClEnvironmentProperties.draw."""
    bl_idname = "ClEnvironmentPanel"
    bl_label = "Environment"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "world"

    @classmethod
    def poll(cls, context):
        # visible only while the Droplet render engine is selected
        return context.scene.render.engine == config.dre_engineid

    def draw(self, context):
        context.scene.world.droplet.draw(context, self.layout)
#https://wiki.blender.org/index.php/Linking_Custom_Node_Tree_to_DataBlock
def NodeGroupSelection(self, context):
    """EnumProperty items callback listing all Droplet (ClNodeTree) node groups.

    Item indices follow the group's position in bpy.data.node_groups.
    """
    return [(group.name, group.name, group.name, "NODETREE", index)
            for index, group in enumerate(bpy.data.node_groups)
            if group.bl_idname == "ClNodeTree"]
class ClObjectProperties(bpy.types.PropertyGroup):
    """Per-object settings (object.droplet): holdout flag, node tree, VDB cache."""
    holdout = BoolProperty(name="Holdout Mesh",default=False,description="Tell Droplet that this is a holdout mesh. Holdouts will occlude rays and create shadowing among clouds. This is also required when compositing with results from other render engines. Available only if \"occlusion geometry\" option is enabled and Droplet was built with Intel Embree support.");
    nodetree = EnumProperty(name="Node group",items=NodeGroupSelection,description="Node group to be used for this object");
    #cache
    #vdbdir = StringProperty(name="Directory",subtype="DIR_PATH",default="/tmp/");
    # vdbsdf = BoolProperty(name="Surface",default=False,description="Enable surface cache for this object. Changes to the scene or node setup won't affect the result, until the cache is recomputed.");
    # vdbfog = BoolProperty(name="Fog",default=False,description="Enable fog cache for this object. Changes to the scene or node setup won't affect the result, until the cache is recomputed.");
    #smoke
    # OpenVDB source used by the SmokeCache node, plus grid names inside it
    vdbcache = StringProperty(name="File",subtype="FILE_PATH",description="Path to the OpenVDB .vdb cache. Required if the node tree makes use of the SmokeCache. Can be set to point to the Blender produced .vdb cache of desired frame (smoke simulations), for example. Loaded density and/or velocity grids will be upsampled to match current grid resolution.");
    vdbrho = StringProperty(name="Density",default="density",description="Density grid name. For Blender smoke caches, default value can be used. Leave empty if unavailable.");
    vdbvel = StringProperty(name="Velocity",default="velocity",description="Velocity grid name. For Blender smoke caches, default value can be used. Leave empty if unavailable.");
class ClMaterialPanel(bpy.types.Panel):
    """Material-context panel: holdout toggle plus node-group selection."""
    bl_idname = "ClMaterialPanel"
    bl_label = "Surface"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "material"
    bl_options = {'HIDE_HEADER'}

    @classmethod
    def poll(cls, context):
        # Droplet engine active and a mesh object selected
        return context.scene.render.engine == config.dre_engineid and context.active_object.type == 'MESH'

    def draw(self, context):
        props = context.object.droplet
        self.layout.row().prop(props, "holdout")
        # a holdout occluder renders no volume, so hide the node tree picker
        if not props.holdout:
            self.layout.row().prop(props, "nodetree")
# class ClCachePanel(bpy.types.Panel):
# bl_idname = "ClCachePanel";
# bl_label = "OpenVDB disk cache";
# bl_space_type = "PROPERTIES";
# bl_region_type = "WINDOW";
# bl_context = "material";
#
# @classmethod
# def poll(cls, context):
# return context.scene.render.engine == config.dre_engineid and context.active_object.type == 'MESH';
#
# def draw(self, context):
# #TODO: cache entries should be grid resolution dependent
# #self.layout.row().prop(context.object.droplet,"vdbdir");
# self.layout.row().prop(context.object.droplet,"vdbsdf");
# self.layout.row().prop(context.object.droplet,"vdbfog");
class ClSmokePanel(bpy.types.Panel):
    """Material-context panel for binding an OpenVDB smoke cache to the object."""
    bl_idname = "ClSmokePanel"
    bl_label = "OpenVDB smoke cache"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "material"

    @classmethod
    def poll(cls, context):
        # Droplet engine active and a mesh object selected
        return context.scene.render.engine == config.dre_engineid and context.active_object.type == 'MESH'

    def draw(self, context):
        layout = self.layout
        props = context.object.droplet
        layout.row().label("VDB source for the SmokeCache node")
        layout.row().prop(props, "vdbcache")
        layout.row().prop(props, "vdbrho")
        layout.row().prop(props, "vdbvel")
class ClParticleSystemProperties(bpy.types.PropertyGroup):
    """Per-particle-system settings (particle settings .droplet)."""
    # node group evaluated for each particle of this system
    nodetree = EnumProperty(name="Node group",items=NodeGroupSelection,description="Node group to be used for this particle system");

    def draw(self, context, layout):
        layout.row().prop(self,"nodetree");
class ClParticleSystemPanel(bpy.types.Panel):
    """Particle-context panel delegating to ClParticleSystemProperties.draw."""
    bl_idname = "ClParticleSystemPanel"
    bl_label = "Render"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "particle"

    @classmethod
    def poll(cls, context):
        # Droplet engine active and a particle system present
        return context.scene.render.engine == config.dre_engineid and context.particle_system != None

    def draw(self, context):
        context.particle_system.settings.droplet.draw(context, self.layout)
class ClLampProperties(bpy.types.PropertyGroup):
    """Per-lamp settings (lamp data .droplet)."""
    intensity = FloatProperty(name="Intensity",default=1.0,min=0.0);
    color = FloatVectorProperty(name="Color",default=[1,1,1],subtype='COLOR',size=3);
    # angular size of the light source; larger values soften shadows
    angle = FloatProperty(name="Angle",default=0.010,min=0.0,max=1.0,precision=3);

    def draw(self, context, layout):
        # left column: intensity and angle; right column: color
        s = layout.split();
        c = s.column();
        c.row().prop(self,"intensity");
        c.row().prop(self,"angle");

        c = s.column();
        c.row().prop(self,"color");
class ClLampPanel(bpy.types.Panel):
    """Lamp data panel delegating to ClLampProperties.draw."""
    bl_idname = "ClLampPanel"
    bl_label = "Lamp"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "data"

    @classmethod
    def poll(cls, context):
        # Droplet engine active and a lamp object selected
        return context.scene.render.engine == config.dre_engineid and context.active_object.type == 'LAMP'

    def draw(self, context):
        context.object.data.droplet.draw(context, self.layout)
|
jaelpark/droplet-render
|
addon/panel.py
|
Python
|
bsd-3-clause
| 18,741
|
[
"Gaussian"
] |
6829416b853185246031e05b649edd9b912b0017732a09d359a4807814e66df3
|
#!/usr/local/bin/env python
"""
Various Python utilities for OpenMM.
"""
from openmmtools import testsystems, integrators, alchemy, mcmc, states, cache, utils, constants, forces, forcefactories, storage, multistate
# Handle versioneer
# Expose the versioneer-generated version metadata as module attributes,
# then delete the temporaries so they do not leak into the public namespace.
from ._version import get_versions
versions = get_versions()
__version__ = versions['version']
__git_revision__ = versions['full-revisionid']
del get_versions, versions
|
choderalab/openmmtools
|
openmmtools/__init__.py
|
Python
|
mit
| 408
|
[
"OpenMM"
] |
e281723b60cc5c60362c08b24707c4abf448eb73bb367e9249e64c50a48e7706
|
"""Implementation of Apache VFS schemes and URLs."""
import os
from rasterio.compat import urlparse
# NB: As not to propagate fallacies of distributed computing, Rasterio
# does not support HTTP or FTP URLs via GDAL's vsicurl handler. Only
# the following local filesystem schemes are supported.
# NB: As not to propagate fallacies of distributed computing, Rasterio
# does not support HTTP or FTP URLs via GDAL's vsicurl handler. Only
# the following local filesystem schemes are supported.
SCHEMES = {'gzip': 'gzip', 'zip': 'zip', 'tar': 'tar', 'https': 'curl',
           'http': 'curl', 's3': 's3'}


def parse_path(uri, vfs=None):
    """Parse a URI or Apache VFS URL into its parts

    Returns: tuple
        (path, archive, scheme)
    """
    archive = scheme = None
    path = uri
    if vfs:
        # An explicit VFS URL supplies the scheme and the archive file;
        # `uri` stays the member path.
        vfs_parts = urlparse(vfs)
        scheme = vfs_parts.scheme
        archive = vfs_parts.path
        if vfs_parts.netloc and vfs_parts.netloc != 'localhost':  # pragma: no cover
            archive = vfs_parts.netloc + archive
    else:
        uri_parts = urlparse(path)
        scheme = uri_parts.scheme
        path = uri_parts.path
        if uri_parts.netloc and uri_parts.netloc != 'localhost':
            path = uri_parts.netloc + path
    # There are certain URI schemes we favor over GDAL's names.
    if scheme in SCHEMES:
        # "scheme://archive!member" syntax: split on the bang.
        pieces = path.split('!')
        path = pieces.pop() if pieces else None
        archive = pieces.pop() if pieces else None
    elif scheme in (None, '', 'file'):
        # Plain filesystem paths pass straight through.
        pass
    else:
        # GDAL's idiosyncratic URI-like dataset paths such as
        # 'NETCDF:...' fall right through with no parsed archive/scheme.
        archive = scheme = None
        path = uri
    return path, archive, scheme
def vsi_path(path, archive=None, scheme=None):
    """Convert a parsed path to a GDAL VSI path."""
    # No scheme (or plain files): nothing to wrap.
    if not scheme or scheme == 'file':
        return path
    # Remote handlers first: curl for http(s), the native s3 handler.
    if scheme.startswith('http'):
        return "/vsicurl/{0}://{1}".format(scheme, path)
    if scheme == 's3':
        return "/vsis3/{0}".format(path)
    # Local archive schemes (zip/tar/gzip): /vsi<scheme>/<archive>/<member>.
    return '/'.join(['/vsi{0}'.format(scheme), archive, path.strip('/')])
|
ryfeus/lambda-packs
|
Rasterio_osgeo_shapely_PIL_pyproj_numpy/source/rasterio/vfs.py
|
Python
|
mit
| 2,206
|
[
"NetCDF"
] |
95ab3e25283d2aaf728705bea3a74ac0501f833e812761c03d6fd20555d00a89
|
'''<h1>Library for surface x-ray diffraction simulations</h1>
<p> The problem of modelling the sample is divided to four
different classes: Sample, Slab, UnitCell and Instrument.
A Slab is the basic unit that builds up a sample and can
be seen as a quasi-unitcell for the sxrd problem.
Stricitly it is a 2D unitcell with a finite extension
out-of-plane. The Sample is then built from these Slabs one slab for
the bulk and a list of slabs for the surface structure.
<p> The unitcell consists of parameters for the unitcell and the
instrument contains instrument variables. See below for a full list.
<h2>Classes</h2>
<h3>Slab</h3>
<code> Slab(c = 1.0, slab_oc = 1.0)</code><br>
<dl>
<dt><code><b>c</b></code></dt>
<dd> A scale factor for ou-of-plane extension of the
Slab. All z-positions will be scaled with this factor.</dd>
<dt><code><b>slab_oc</b></code></dt>
<dd> A global scaling of the occupancy of all atoms in the
slab.</dd>
</dl>
<code> [Slab].add_atom(id, el, x, y, z, u = 0, oc = 1.0, m = 1.0)</code><br>
<dl>
<dt><code><b>id</b></code></dt>
<dd>A unique string identifier </dd>
<dt><code><b>el</b></code></dt>
<dd>The element described in a string. Note that
ions is denoted as "Sr2p" and "O2m" where 2 is the oxidation number and
p and m denoted plus and minus charge.</dd>
<dt><code><b>x</b></code></dt>
<dd> The x-position in Slab unit cell coords (same as given by the
UnitCell)</dd>
<dt><code><b>y</b></code></dt>
<dd> The y-position in Slab unit cell coords (same as given by the
UnitCell)</dd>
<dt><code><b>z</b></code></dt>
<dd> The z-position in Slab unit cell coords (The Unitcell c scaled by
a factor of the c-value for the slab)</dd>
<dt><code><b>u</b></code></dt>
<dd> The mean-square displacement for the atom</dd>
<dt><code><b>oc</b></code></dt>
<dd> The occupancy of the atom</dd>
<dt><code><b>m</b></code></dt>
<dd> The multiplicity of the site, defined as in the international tables
of crystallogrphy. Note that it is plane goups and NOT space groups that
will produce valid results.</dd>
</dl>
<code> [Slab].copy()</code><br>
Creates a copy of object [Slab]. This decouples the new object
returned by copy from the original [Slab].
<code> [Slab].find_atoms(expression)</code><br>
Function to locate atoms in a slab in order to connect parameters
between them. Returns an AtomGroup.
<dl>
<dt><code><b>expression</b></code></dt>
<dd> Either a list of the same length as the number of atoms or
a string that will evaluate to true or false for each atom.
Allowed variables are: <code>x, y, z, id, el, u, oc, m</code></dd>
</dl>
<code> [Slab].all_atoms()</code><br>
Yields all atoms inside a slab as an AtomGroup.
Returns an AtomGroup.
<code> [Slab][id]</code><br>
Locates atom that has id <code>id</code>. Returns an AtomGroup
<dl>
<dt><code><b>id</b></code></dt>
<dd>Uniqe string identifer for one atom </dd>
</dl>
<h3>Sample</h3>
<code> Sample(inst, bulk_slab, slabs, unit_cell, surface_sym = [],
bulk_sym = []) </code><br>
<dl>
<dt><code><b>inst</b></code></dt>
<dd> Instrument object for the sample
</dd>
<dt><code><b>bulk_slab</b></code></dt>
<dd>The Slab that describes the bulk strucutre
</dd>
<dt><code><b>slabs</b></code></dt>
<dd>A list ([]) of slabs for the surface structure
</dd>
<dt><code><b>unit_cell</b></code></dt>
<dd>A UnitCell object
</dd>
<dt><code><b>surface_sym</b></code></dt>
<dd>A list ([]) of SymTrans objects describing the surface symmetry.
Default value - an empty list will implement a p1 symmetry, that is no
symmetry operations at all.
</dd>
<dt><code><b>bulk_sym</b></code></dt>
<dd>A list ([]) of SymTrans objects describing the bulk symmetry.
Default value - an empty list will implement a p1 symmetry, that is
no symetry operations at all.
</dd>
</dl>
<code>[Sample].calc_f(h, k, l)</code><br>
Calculates the total structure factor (complex number) from the
the surface and bulk strucutre. Returns an array of the same size
as h, k, l. (h, k, l should be of the same legth and is given in
coordinates of the reciprocal lattice as defnined by the uit_cell coords)
<code>[Sample].turbo_calc_f(h, k, l)</code><br>
A faster version of <code>calc_f</code> which uses inline c code to increase
the speed. Can be more unstable than <code>calc_f</code> use on your own risk.
<code>[Sample].calc_rhos(x, y, z, sb)</code><br>
Calculate the the surface electron density of a model. The parameter sb is a Gaussian convolution factor given the width of the Gaussian in reciprocal space.
Used mainly for comparison with direct methods, i.e. DCAF.
NOTE that the transformation from the width of the window function given
in <code>dimes.py</code> is <code>sqrt(2)*pi*[]</code>
'''
import numpy as np
import genx.models.utils as utils
sxrd_ext_built = False
debug = False
try:
import genx.models.lib.sxrd_ext
sxrd_ext_built = True
_turbo_sim = True
except ImportError:
sxrd_ext_built = False
_turbo_sim = False
# Try to compile the extensions - if necessary
# if not sxrd_ext_built or debug:
# try:
# import subprocess
# subprocess.run(['python3', '../build_ext.py', 'build_ext', '--inplace'])
# import genx.models.lib.sxrd_ext
# _turbo_sim = True
# except:
# print('Could not build sxrd c extension')
# _turbo_sim = False
__pars__ = ['Sample', 'UnitCell', 'Slab', 'AtomGroup', 'Instrument']
class Sample:
    '''A surface x-ray diffraction sample.

    Combines one bulk Slab and a stack of surface Slabs, expressed in a
    common UnitCell and measured with an Instrument. Structure factors
    are computed as the sum of the bulk CTR contribution and the
    symmetry-expanded surface contribution.
    '''
    def __init__(self, inst, bulk_slab, slabs, unit_cell,
                 surface_sym=[], bulk_sym=[]):
        # NOTE: the mutable defaults are safe here - the setters rebind
        # an empty list to [SymTrans()] without mutating the default.
        self.set_bulk_slab(bulk_slab)
        self.set_slabs(slabs)
        self.set_surface_sym(surface_sym)
        self.set_bulk_sym(bulk_sym)
        self.inst = inst
        self.set_unit_cell(unit_cell)

    def set_bulk_slab(self, bulk_slab):
        '''Set the bulk unit cell to bulk_slab
        '''
        if not isinstance(bulk_slab, Slab):
            raise TypeError("The bulk slab has to be a member of class Slab")
        self.bulk_slab = bulk_slab

    def set_slabs(self, slabs):
        '''Set the slabs of the sample.
        slabs should be a list of objects from the class Slab
        '''
        if not isinstance(slabs, list):
            raise TypeError("The surface slabs has to contained in a list")
        # BUGFIX: the old min([...]) == 0 idiom raised ValueError on an
        # empty list; all() handles that case correctly.
        if not all(isinstance(slab, Slab) for slab in slabs):
            raise TypeError("All members in the slabs list has to be a memeber of class Slab")
        self.slabs = slabs

    def set_surface_sym(self, sym_list):
        '''Sets the list of symmetry operations for the surface.
        sym_list has to be a list ([]) of symmetry elements from the
        class SymTrans; an empty list means p1 (identity only).
        '''
        if not isinstance(sym_list, list):
            raise TypeError("The surface symmetries has to contained in a list")
        if sym_list == []:
            sym_list = [SymTrans()]
        if not all(isinstance(sym, SymTrans) for sym in sym_list):
            raise TypeError("All members in the symmetry list has to be a memeber of class SymTrans")
        self.surface_sym = sym_list

    def set_bulk_sym(self, sym_list):
        '''Sets the list of allowed symmetry operations for the bulk.
        sym_list has to be a list ([]) of symmetry elements from the
        class SymTrans; an empty list means p1 (identity only).
        '''
        if not isinstance(sym_list, list):
            raise TypeError("The surface symmetries has to contained in a list")
        if sym_list == []:
            sym_list = [SymTrans()]
        if not all(isinstance(sym, SymTrans) for sym in sym_list):
            raise TypeError("All members in the symmetry list has to be a memeber of class SymTrans")
        self.bulk_sym = sym_list

    def set_unit_cell(self, unit_cell):
        '''Sets the unitcell of the sample
        '''
        if unit_cell is None:
            # BUGFIX: this default branch was unreachable (the type check
            # raised first) and contained a typo'd call
            # UnitCell(1.0, 1,.0, 1.0) that passed four arguments.
            unit_cell = UnitCell(1.0, 1.0, 1.0)
        if not isinstance(unit_cell, UnitCell):
            raise TypeError("The bulk slab has to be a member of class UnitCell")
        self.unit_cell = unit_cell

    def calc_f(self, h, k, l):
        '''Calculate the total structure factors (surface + bulk),
        scaled by the instrument intensity.
        '''
        fs = self.calc_fs(h, k, l)
        fb = self.calc_fb(h, k, l)
        ftot = fs + fb
        return ftot*self.inst.inten

    def turbo_calc_f(self, h, k, l):
        '''Calculate the structure factors for the sample with
        inline c code for the surface.
        '''
        fs = self.turbo_calc_fs(h, k, l)
        fb = self.calc_fb(h, k, l)
        ftot = fs + fb
        return ftot*self.inst.inten

    def calc_fs(self, h, k, l):
        '''Calculate the structure factors from the surface
        '''
        dinv = self.unit_cell.abs_hkl(h, k, l)
        x, y, z, u, oc, el = self._surf_pars()
        # Atomic scattering factors: one column per atom, one row per hkl.
        f = self._get_f(el, dinv)
        # Debye-Waller damped sum over symmetry operations and atoms.
        fs = np.sum(oc*f*np.exp(-2*np.pi**2*u*dinv[:, np.newaxis]**2)
                    *np.sum([np.exp(2.0*np.pi*1.0J*(
                        h[:, np.newaxis]*sym_op.trans_x(x, y) +
                        k[:, np.newaxis]*sym_op.trans_y(x, y) +
                        l[:, np.newaxis]*z[np.newaxis, :]))
                        for sym_op in self.surface_sym], 0), 1)
        return fs

    def turbo_calc_fs(self, h, k, l):
        '''Calculate the structure factors with the compiled extension.
        Produces faster simulations of large structures.
        '''
        h = h.astype(np.float64)
        k = k.astype(np.float64)
        l = l.astype(np.float64)
        dinv = self.unit_cell.abs_hkl(h, k, l)
        x, y, z, u, oc, el = self._surf_pars()
        f = self._get_f(el, dinv)
        # Pack each symmetry op as the augmented 2x3 matrix [P | t].
        Pt = np.array([np.c_[so.P, so.t] for so in self.surface_sym])
        fs = genx.models.lib.sxrd_ext.surface_lattice_sum(x, y, z, h, k, l, u, oc, f, Pt, dinv)
        return fs

    def calc_fb(self, h, k, l):
        '''Calculate the structure factors from the bulk
        '''
        dinv = self.unit_cell.abs_hkl(h, k, l)
        x, y, z, el, u, oc, c = self.bulk_slab._extract_values()
        oc = oc/float(len(self.bulk_sym))
        f = self._get_f(el, dinv)
        # Calculate the "shape factor" for the CTRs (absorption over the
        # effective beam path through the crystal).
        eff_thick = self.unit_cell.c/np.sin(self.inst.alpha*np.pi/180.0)
        alpha = (2.82e-5*self.inst.wavel*eff_thick/self.unit_cell.vol()*
                 np.sum(f.imag, 1))
        denom = np.exp(2.0*np.pi*1.0J*l)*np.exp(-alpha) - 1.0
        # Delta functions to remove finite size effect in hk plane
        delta_funcs = (abs(h - np.round(h)) < 1e-12)*(
            abs(k - np.round(k)) < 1e-12)
        # Sum up the uc struct factors
        f_u = np.sum(oc*f*np.exp(-2*np.pi**2*u*dinv[:, np.newaxis]**2)*
                     np.sum([np.exp(2.0*np.pi*1.0J*(
                         h[:, np.newaxis]*sym_op.trans_x(x, y) +
                         k[:, np.newaxis]*sym_op.trans_y(x, y) +
                         l[:, np.newaxis]*z[np.newaxis, :]))
                         for sym_op in self.bulk_sym], 0), 1)
        # Putting it all together
        fb = f_u/denom*delta_funcs
        return fb

    def calc_rhos(self, x, y, z, sb=0.8):
        '''Calculate the surface electron density of the model.
        sb is a Gaussian convolution factor (width in reciprocal space).
        '''
        px, py, pz, u, oc, el = self._surf_pars()
        rhos = self._get_rho(el)
        rho = np.sum([np.sum([rho(self.unit_cell.dist(x, y, z,
                                                      sym_op.trans_x(xat, yat)%1.0,
                                                      sym_op.trans_y(xat, yat)%1.0,
                                                      zat),
                                  0.5*uat + 0.5/sb**2, ocat)
                              for rho, xat, yat, zat, uat, ocat in
                              zip(rhos, px, py, pz, u, oc)], 0)
                      for sym_op in self.surface_sym], 0)
        return rho

    def _surf_pars(self):
        '''Extracts the necessary parameters for simulating the surface part
        '''
        # the star in zip(*... transposes the per-slab value tuples
        xt, yt, zt, elt, ut, oct, ct = list(zip(*[slab._extract_values()
                                                  for slab in self.slabs]))
        x = np.r_[xt]
        y = np.r_[yt]
        # scale and shift the slabs with respect to each other
        cn = np.cumsum(np.r_[0, ct])[:-1]
        z = np.concatenate([zs*c_s + c_cum
                            for zs, c_cum, c_s in zip(zt, cn, ct)])
        el = np.r_[elt]
        u = np.r_[ut]
        # Account for overlapping atoms
        oc = np.r_[oct]/float(len(self.surface_sym))
        return x, y, z, u, oc, el

    def create_uc_output(self):
        '''Create atomic positions and such for output.
        Returns symmetry-expanded x, y, z, u, oc, el arrays plus ids.
        '''
        x, y, z, u, oc, el = self._surf_pars()
        ids = []
        for slab in self.slabs:
            ids.extend(slab._extract_ids())
        xout = np.array([])
        yout = np.array([])
        zout = np.array([])
        uout = np.array([])
        ocout = np.array([])
        elout = el[0:0].copy()
        idsout = []
        for sym_op in self.surface_sym:
            xout = np.r_[xout, sym_op.trans_x(x, y)]
            yout = np.r_[yout, sym_op.trans_y(x, y)]
            zout = np.r_[zout, z]
            uout = np.r_[uout, u]
            ocout = np.r_[ocout, oc]
            elout = np.r_[elout, el]
            idsout.extend(ids)
        return xout, yout, zout, uout, ocout, elout, idsout

    def _get_f(self, el, dinv):
        '''from the elements extract an array with atomic structure factors
        '''
        return _get_f(self.inst, el, dinv)

    def _get_rho(self, el):
        '''Returns the rho functions for all atoms in el
        '''
        return _get_rho(self.inst, el)

    def _fatom_eval(self, f, element, s):
        '''Smart (fast) evaluation of f_atom. Only evaluates f if not
        evaluated before.

        element - element string
        f - dictonary for lookup
        s - sintheta_over_lambda array
        '''
        # BUGFIX: previously forwarded the undefined bare name `inst`
        # (NameError); the instrument lives on self.
        return _fatom_eval(self.inst, f, element, s)
class UnitCell:
    '''Class containing the unitcell.
    This also allows for simple crystallographic computing of different
    properties.

    Lengths a, b, c are in AA; angles are passed in degrees and stored
    internally in radians.
    '''
    def __init__(self, a, b, c, alpha=90,
                 beta=90, gamma=90):
        self.set_a(a)
        self.set_b(b)
        self.set_c(c)
        self.set_alpha(alpha)
        self.set_beta(beta)
        self.set_gamma(gamma)

    def set_a(self, a):
        self.a = a

    def set_b(self, b):
        self.b = b

    def set_c(self, c):
        self.c = c

    def set_alpha(self, alpha):
        # degrees -> radians
        self.alpha = alpha*np.pi/180.

    def set_beta(self, beta):
        self.beta = beta*np.pi/180.

    def set_gamma(self, gamma):
        self.gamma = gamma*np.pi/180.

    def vol(self):
        '''Calculate the volume of the unit cell in AA**3
        '''
        vol = self.a*self.b*self.c*np.sqrt(1 - np.cos(self.alpha)**2 -
                np.cos(self.beta)**2 - np.cos(self.gamma)**2 +
                2*np.cos(self.alpha)*np.cos(self.beta)*np.cos(self.gamma))
        return vol

    def cart_coords(self, uc_x, uc_y, uc_z):
        '''Transform the uc coords uc_x, uc_y, uc_z to cartesian
        coordinates expressed in AA.

        BUGFIX: previously called the cart_coord_* helpers as bare names
        (without self), which raised NameError on every call.
        '''
        return (self.cart_coord_x(uc_x, uc_y, uc_z),
                self.cart_coord_y(uc_x, uc_y, uc_z),
                self.cart_coord_z(uc_x, uc_y, uc_z))

    def cart_coord_x(self, uc_x, uc_y, uc_z):
        '''Get the x-coord in the cart system
        '''
        # NOTE(review): valid for orthogonal cells only - confirm before
        # using with triclinic cells (same caveat as dist()).
        return uc_x*self.a

    def cart_coord_y(self, uc_x, uc_y, uc_z):
        '''Get the y-coord in the cart system
        '''
        return uc_y*self.b

    def cart_coord_z(self, uc_x, uc_y, uc_z):
        '''Get the z-coord in the cart system
        '''
        return uc_z*self.c

    def dist(self, x1, y1, z1, x2, y2, z2):
        '''Calculate the distance in AA between the points
        (x1, y1, z1) and (x2, y2, z2). The coords has to be unit cell
        coordinates.
        '''
        # Warning: works only with orthogonal crystal systems.
        return np.sqrt(((x1 - x2)*self.a)**2 + ((y1 - y2)*self.b)**2 +
                       ((z1 - z2)*self.c)**2)

    def abs_hkl(self, h, k, l):
        '''Returns the absolute value of (h,k,l) vector in units of
        AA.
        This is equal to the inverse lattice spacing 1/d_hkl.
        '''
        dinv = np.sqrt(((h/self.a*np.sin(self.alpha))**2 +
                        (k/self.b*np.sin(self.beta))**2 +
                        (l/self.c*np.sin(self.gamma))**2 +
                        2*k*l/self.b/self.c*(np.cos(self.beta)*
                                             np.cos(self.gamma) -
                                             np.cos(self.alpha)) +
                        2*l*h/self.c/self.a*(np.cos(self.gamma)*
                                             np.cos(self.alpha) -
                                             np.cos(self.beta)) +
                        2*h*k/self.a/self.b*(np.cos(self.alpha)*
                                             np.cos(self.beta) -
                                             np.cos(self.gamma)))
                       /(1 - np.cos(self.alpha)**2 - np.cos(self.beta)**2
                         - np.cos(self.gamma)**2 + 2*np.cos(self.alpha)
                         *np.cos(self.beta)*np.cos(self.gamma)))
        return dinv
class Slab:
    '''One quasi-2D building block of a sample.

    A Slab is a 2D unit cell with a finite out-of-plane extension set by
    the scale factor c. Atoms are stored column-wise in parallel numpy
    arrays; each added atom also gets dynamically created
    set<id><par> / get<id><par> accessor methods.
    '''
    # Per-atom parameter arrays that receive dynamic set/get accessors.
    par_names = ['dx', 'dy', 'dz',
                 'u', 'oc', 'm']

    def __init__(self, name='', c=1.0, slab_oc=1.0):
        '''Create an empty slab.

        name    - identifier used when exporting atom ids
        c       - out-of-plane scale factor applied to all z positions
        slab_oc - global occupancy multiplier for all atoms in the slab
        '''
        try:
            self.c = float(c)
        except (TypeError, ValueError):
            # BUGFIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit.
            raise ValueError("Parameter c has to be a valid floating point number")
        try:
            self.slab_oc = float(slab_oc)
        except (TypeError, ValueError):
            raise ValueError("Parameter slab_oc has to be a valid floating point number")
        # Set the arrays to their default values
        self.x = np.array([], dtype=np.float64)
        self.y = np.array([], dtype=np.float64)
        self.z = np.array([], dtype=np.float64)
        self.dx = np.array([], dtype=np.float64)
        self.dy = np.array([], dtype=np.float64)
        self.dz = np.array([], dtype=np.float64)
        self.u = np.array([], dtype=np.float64)
        self.oc = np.array([], dtype=np.float64)
        self.m = np.array([], dtype=np.float64)
        # BUGFIX: np.str was a deprecated alias removed in NumPy 1.20;
        # the builtin str is the supported spelling.
        self.id = np.array([], dtype=str)
        self.el = np.array([], dtype=str)
        # TODO: Type checking and defaults!
        self.name = str(name)

    def copy(self):
        '''Returns a copy of the object.
        '''
        cpy = Slab(c=self.c, slab_oc=self.slab_oc)
        for i in range(len(self.id)):
            cpy.add_atom(str(self.id[i]), str(self.el[i]),
                         self.x[i], self.y[i],
                         self.z[i], self.u[i], self.oc[i], self.m[i])
            # BUGFIX: the displacement copies must run once per atom;
            # outside the loop only the last atom's values were copied
            # (and an empty slab raised NameError on the leaked index).
            cpy.dx[-1] = self.dx[i]
            cpy.dy[-1] = self.dy[i]
            cpy.dz[-1] = self.dz[i]
        return cpy

    def add_atom(self, id, element, x, y, z, u=0.0, oc=1.0, m=1.0):
        '''Add an atom to the slab.

        id - a unique id for this atom (string)
        element - the element of this atom has to be found
        within the scatteringlength table.
        x, y, z - position in the asymmetric unit cell (floats)
        u - debye-waller parameter for the atom
        oc - occupancy of the atomic site
        m - site multiplicity (plane-group convention)
        '''
        if id in self.id:
            raise ValueError('The id %s is already defined in the'
                             'slab'%(id))
        # TODO: Check the element as well...
        self.x = np.append(self.x, x)
        self.dx = np.append(self.dx, 0.)
        self.y = np.append(self.y, y)
        self.dy = np.append(self.dy, 0.)
        self.z = np.append(self.z, z)
        self.dz = np.append(self.dz, 0.)
        self.u = np.append(self.u, u)
        self.oc = np.append(self.oc, oc)
        self.m = np.append(self.m, m)
        self.id = np.append(self.id, id)
        self.el = np.append(self.el, str(element))
        item = len(self.id) - 1
        # Create the set and get functions dynamically
        for par in self.par_names:
            setattr(self, 'set' + id + par, self._make_set_func(par, item))
            setattr(self, 'get' + id + par, self._make_get_func(par, item))
        return AtomGroup(self, id)

    def del_atom(self, id):
        '''Remove atom identified with id
        '''
        if not id in self.id:
            raise ValueError('Can not remove atom with id %s -'
                             'namedoes not exist')
        item = np.argwhere(self.id == id)[0][0]
        # NOTE(review): accessors created for atoms after `item` keep
        # stale indices once the arrays shrink - confirm before using
        # the dynamic set/get functions after a deletion.
        # BUGFIX: the concatenations below used the bare name r_ which is
        # not in scope (numpy is imported as np) and raised NameError.
        if item < len(self.x) - 1:
            for attr in ('id', 'el', 'x', 'y', 'z'):
                ar = getattr(self, attr)
                setattr(self, attr, np.r_[ar[:item], ar[item+1:]])
            for par in self.par_names:
                ar = getattr(self, par)
                setattr(self, par, np.r_[ar[:item], ar[item+1:]])
                delattr(self, 'set' + id + par)
                delattr(self, 'get' + id + par)
        else:
            for attr in ('id', 'el', 'x', 'y', 'z'):
                ar = getattr(self, attr)
                setattr(self, attr, ar[:-1])
            for par in self.par_names:
                ar = getattr(self, par)
                setattr(self, par, ar[:-1])
                delattr(self, 'set' + id + par)
                delattr(self, 'get' + id + par)

    def find_atoms(self, expression):
        '''Find the atoms that satisfy the logical expression given in the
        string expression. Expression can also be a list or array of the
        same length as the number of atoms in the slab.
        Allowed variables in expression are:
        x, y, z, u, oc, id, el
        returns an AtomGroup
        '''
        if (type(expression) == type(np.array([])) or
                type(expression) == type(list([]))):
            if len(expression) != len(self.id):
                raise ValueError('The length of experssion is wrong'
                                 ', it should match the number of atoms')
            ag = AtomGroup()
            for id, add in zip(self.id, expression):
                if add:
                    ag.add_atom(self, str(id))
            return ag
        elif type(expression) == type(''):
            # SECURITY NOTE: the expression string is eval'd - only pass
            # trusted input (kept for compatibility with existing models).
            choose_list = [eval(expression) for x, y, z, u, oc, el, id in
                           zip(self.x, self.y, self.z, self.u,
                               self.oc, self.el, self.id)]
            ag = AtomGroup()
            for name, add in zip(self.id, choose_list):
                if add:
                    ag.add_atom(self, str(name))
            return ag
        else:
            raise ValueError('Expression has to be a string, array or list')

    def all_atoms(self):
        '''Puts all atoms in the slab to an AtomGroup.
        returns: AtomGroup
        '''
        return self.find_atoms([True]*len(self.id))

    def set_c(self, c):
        '''Set the out-of-plane extension of the slab.
        Note that this is in the defined UC coords given in
        the corresponding sample
        '''
        self.c = float(c)

    def get_c(self):
        '''Get the out-of-plane extension of the slab in UC coord.
        '''
        return self.c

    def set_oc(self, oc):
        '''Set a global occupation parameter for the entire slab.
        should be between 0 and 1. To create the real occupancy this
        value is multiplied with the occupancy for that atom.
        '''
        self.slab_oc = oc

    def get_oc(self):
        '''Get the global occupancy of the slab
        '''
        return self.slab_oc

    def __getitem__(self, id):
        '''Locate id in slab with a dictonary style.
        Returns a AtomGroup instance
        '''
        return AtomGroup(self, id)

    def __contains__(self, id):
        '''Makes it possible to check if id exist in this Slab by using
        the in operator. Also accepts an AtomGroup, in which case all of
        its atoms must belong to this slab.
        returns True or False
        '''
        if isinstance(id, str):
            return id in self.id
        elif isinstance(id, AtomGroup):
            # BUGFIX: was `type(id) == type(AtomGroup)` which compares
            # against the metaclass `type` and never matched an instance,
            # so AtomGroup membership tests always raised ValueError.
            return np.all([atid in self.id for atid in id.ids])
        else:
            raise ValueError('Can only check for mebership for Atom groups'
                             'or string ids.')

    def _set_in(self, arr, pos, value):
        '''Sets a value in an array or list
        '''
        arr[pos] = value

    def _make_set_func(self, par, pos):
        '''Creates a set function for parameter par at index pos.
        Returns a function
        '''
        def set_par(val):
            getattr(self, par)[pos] = val
        return set_par

    def _make_get_func(self, par, pos):
        '''Creates a get function for member par at index pos.
        Returns a function.
        '''
        def get_par():
            return getattr(self, par)[pos]
        return get_par

    def _extract_values(self):
        # Effective positions include the displacements; effective
        # occupancy folds in multiplicity and the global slab occupancy.
        return self.x + self.dx, self.y + self.dy, self.z + self.dz,\
               self.el, self.u, self.oc*self.m*self.slab_oc, self.c

    def _extract_ids(self):
        'Extract the ids of the atoms'
        return [self.name + '.' + str(id) for id in self.id]
class AtomGroup:
    '''A group of atoms (possibly spanning several Slabs) whose
    parameters can be set and read collectively, and which can be
    coupled into compositions with other groups.
    '''
    # Parameters that get collective set/get accessors.
    par_names = ['dx', 'dy', 'dz', 'u', 'oc']

    def __init__(self, slab=None, id=None):
        self.ids = []
        self.slabs = []
        # Variable for composition ...
        self.comp = 1.0
        self.oc = 1.0
        if slab is not None and id is not None:
            self.add_atom(slab, id)

    def _set_func(self, par):
        '''create a function that sets all atom paramater par'''
        funcs = [getattr(slab, 'set' + id + par) for id, slab
                 in zip(self.ids, self.slabs)]
        def set_pars(val):
            for func in funcs:
                func(val)
        return set_pars

    def _get_func(self, par):
        '''create a function that gets all atom paramater par'''
        funcs = [getattr(slab, 'get' + id + par) for id, slab
                 in zip(self.ids, self.slabs)]
        def get_pars():
            return np.mean([func() for func in funcs])
        return get_pars

    def update_setget_funcs(self):
        '''Update all the atomic set and get functions
        '''
        for par in self.par_names:
            setattr(self, 'set' + par, self._set_func(par))
            setattr(self, 'get' + par, self._get_func(par))

    def add_atom(self, slab, id):
        '''Add an atom to the group.
        '''
        if not id in slab:
            raise ValueError('The id %s is not a member of the slab'%id)
        self.ids.append(id)
        self.slabs.append(slab)
        self.update_setget_funcs()

    def _copy(self):
        '''Creates a copy of self and loses all connections to
        previously created composition couplings.
        '''
        cpy = AtomGroup()
        cpy.ids = self.ids[:]
        cpy.slabs = self.slabs[:]
        cpy.update_setget_funcs()
        return cpy

    def comp_coupl(self, other, self_copy=False, exclusive=True):
        '''Method to create set-get methods to use compositions
        in the atomic groups. Note that this does not affect
        the slabs global occupancy. If self_copy is True the
        returned value will be a copy of self.
        If exclusive is True, removes the coupled methods from the
        other AtomGroup.
        '''
        if not type(self) == type(other):
            raise TypeError('To create a composition function both objects'
                            ' has to be of the type AtomGroup')
        if hasattr(other, '_setoc_'):
            raise AttributeError('The right hand side AtomicGroup has already'
                                 'been coupled to another one before.'
                                 ' Only one connection'
                                 'is allowed')
        if hasattr(self, '_setoc'):
            raise AttributeError('The left hand side AtomicGroup has already'
                                 'been coupled to another one before.'
                                 ' Only one connection'
                                 'is allowed')
        if self_copy:
            s = self._copy()
        else:
            s = self

        # comp splits the common occupancy between the two groups:
        # self gets comp*oc, other gets (1 - comp)*oc.
        def set_comp(comp):
            s.comp = float(comp)
            s._setoc(comp*s.oc)
            other._setoc_((1.0 - comp)*s.oc)

        def set_oc(oc):
            s.oc = float(oc)
            s._setoc(s.comp*s.oc)
            other._setoc_((1 - s.comp)*s.oc)

        def get_comp():
            return s.comp

        def get_oc():
            return s.oc

        # Functions to couple the other parameters, set
        def create_set_func(par):
            sf_set = getattr(s, 'set' + par)
            of_set = getattr(other, 'set' + par)
            def _set_func(val):
                sf_set(val)
                of_set(val)
            return _set_func

        # Functions to couple the other parameters, get
        def create_get_func(par):
            sf_get = getattr(s, 'get' + par)
            of_get = getattr(other, 'get' + par)
            def _get_func():
                return (sf_get() + of_get())/2
            return _get_func

        # Do it (couple) for all parameters except the occupations
        if exclusive:
            for par in s.par_names:
                if not str(par) == 'oc':
                    setattr(s, 'set' + par, create_set_func(par))
                    setattr(s, 'get' + par, create_get_func(par))
        # Create new set and get methods for the composition
        setattr(s, 'setcomp', set_comp)
        setattr(s, 'getcomp', get_comp)
        # Store the original setoc for future use safely
        setattr(s, '_setoc', s.setoc)
        setattr(other, '_setoc_', getattr(other, 'setoc'))
        setattr(s, 'setoc', set_oc)
        setattr(s, 'getoc', get_oc)
        # Now remove all the coupled attributes from other.
        if exclusive:
            for par in s.par_names:
                delattr(other, 'set' + par)
        s.setcomp(1.0)
        return s

    def __xor__(self, other):
        '''Couple two groups as an exclusive composition. The left hand
        side of ^ is copied and loses all its previous connections;
        un-coupled move methods are removed from `other`.
        '''
        return self.comp_coupl(other, self_copy=True, exclusive=True)

    def __ixor__(self, other):
        '''In-place exclusive composition coupling (a ^= b).
        '''
        self.comp_coupl(other, exclusive=True)
        # BUGFIX: in-place operators must return the bound object;
        # returning None made `a ^= b` rebind a to None.
        return self

    def __or__(self, other):
        '''Couple two groups as a non-exclusive composition. The left
        hand side of | is copied and loses its previous connections.
        '''
        return self.comp_coupl(other, self_copy=True, exclusive=False)

    def __ior__(self, other):
        '''In-place non-exclusive composition coupling (a |= b).
        '''
        self.comp_coupl(other, exclusive=False)
        # BUGFIX: see __ixor__ - must return self.
        return self

    def __add__(self, other):
        '''Adds two Atomic groups together
        '''
        if not type(other) == type(self):
            # BUGFIX: was `raise TyepError` (misspelled name), which
            # raised NameError instead of the intended TypeError.
            raise TypeError('Adding wrong type to an AtomGroup has to be an'
                            'AtomGroup')
        ids = self.ids + other.ids
        slabs = self.slabs + other.slabs
        out = AtomGroup()
        for slab, id in zip(slabs, ids):
            out.add_atom(slab, id)
        s = self

        def set_oc(oc):
            s.oc = float(oc)
            s.setoc(s.oc)
            other.setoc(s.oc)

        def get_oc():
            return s.oc

        setattr(out, 'setoc', set_oc)
        setattr(out, 'getoc', get_oc)
        return out
class Instrument:
    '''Class that keeps tracks of instrument settings.
    '''
    # Allowed measurement geometries; self.geom stores the index into this.
    geometries = ['alpha_in fixed', 'alpha_in eq alpha_out',
                  'alpha_out fixed']

    def __init__(self, wavel, alpha, geom='alpha_in fixed',
                 flib=None, rholib=None):
        '''Inits the instrument with default parameters.

        wavel  - wavelength in AA
        alpha  - fixed incidence/exit angle (see set_alpha)
        geom   - one of Instrument.geometries
        flib   - form-factor library; default built from utils lookup
        rholib - electron-density library; default built from utils lookup
        '''
        if flib is None:
            self.flib = utils.sl.FormFactor(wavel, utils.__lookup_f__)
        else:
            self.flib = flib
        if rholib is None:
            self.rholib = utils.sl.FormFactor(wavel, utils.__lookup_rho__)
        else:
            self.rholib = rholib
        self.set_wavel(wavel)
        self.set_geometry(geom)
        self.alpha = alpha
        self.inten = 1.0

    def set_inten(self, inten):
        '''Set the incomming intensity
        '''
        self.inten = inten

    def get_inten(self):
        '''retrieves the intensity
        '''
        return self.inten

    def set_wavel(self, wavel):
        '''Set the wavelength in AA (propagated to both libraries).
        '''
        try:
            self.wavel = float(wavel)
            self.flib.set_wavelength(wavel)
            self.rholib.set_wavelength(wavel)
        except ValueError:
            raise ValueError('%s is not a valid float number needed for the'
                             'wavelength'%(wavel))

    def get_wavel(self, wavel=None):
        '''Returns the wavelength in AA.

        The unused `wavel` argument is kept (now optional) for backward
        compatibility with existing callers.
        '''
        return self.wavel

    def set_energy(self, energy):
        '''Set the energy in keV
        '''
        try:
            self.set_wavel(12.39842/float(energy))
        except ValueError:
            # BUGFIX: was `raise ValueErrror ... % (wavel)` - both the
            # exception name and the format argument were undefined, so
            # the error path itself raised NameError.
            raise ValueError('%s is not a valid float number needed for the'
                             'energy'%(energy))

    def get_energy(self, energy=None):
        '''Returns the photon energy in keV.

        The unused `energy` argument is kept (now optional) for backward
        compatibility with existing callers.
        '''
        return 12.39842/self.wavel

    def set_alpha(self, alpha):
        '''Sets the fixed angle. The meaning of this angle varies depending
        on the geometry parameter:
        geo = "alpha_in fixed", alpha = alpha_in
        geo = "alpha_in eq alpha_out", alpha = alpha_in = alpha_out
        geo = "alpha_out fixed", alpha = alpha_out
        '''
        self.alpha = alpha

    def get_alpha(self):
        '''Gets the fixed angle. See set_alpha.
        '''
        return self.alpha

    def set_geometry(self, geom):
        '''Set the measurement geometry
        Should be one of the items in Instrument.geometries
        '''
        try:
            self.geom = self.geometries.index(geom)
        except ValueError:
            # BUGFIX: the message referenced self.geomeries (typo) and
            # raised AttributeError instead of the intended ValueError.
            raise ValueError('The geometry %s does not exist please choose'
                             'one of the following:\n%s'%(geom,
                                                          self.geometries))

    def set_flib(self, flib):
        '''Set the structure factor library
        '''
        self.flib = flib

    def set_rholib(self, rholib):
        '''Set the rho library (electron density shape of the atoms)
        '''
        self.rholib = rholib
class SymTrans:
    '''A 2D symmetry operation: in-plane matrix P plus translation t,
    applied to fractional (x, y) coordinates.
    '''
    def __init__(self, P=[[1, 0], [0, 1]], t=[0, 0]):
        # The defaults are immediately copied into fresh arrays, so the
        # shared mutable default lists are never mutated.
        # TODO: Check size of arrays!
        self.P = np.array(P, dtype=np.float64)
        self.t = np.array(t, dtype=np.float64)

    def trans_x(self, x, y):
        '''transformed x coord
        '''
        return self.P[0][0]*x + self.P[0][1]*y + self.t[0]

    def trans_y(self, x, y):
        '''transformed y coord
        '''
        return self.P[1][0]*x + self.P[1][1]*y + self.t[1]

    def apply_symmetry(self, x, y):
        '''Apply the full operation, returning the transformed (x, y)
        as an array.

        BUGFIX: previously referenced the bare names P, c_ and t, which
        raised NameError on every call; uses self.P/self.t and a proper
        column vector now.
        '''
        return np.dot(self.P, np.array([x, y])) + self.t
#==============================================================================
# Utillity functions
def scale_sim(data, sim_list, scale_func = None):
    '''Scale the simulations to the data by least squares.

    Finds the scale factor minimizing sum((data - scale*sim)**2) over
    the data sets flagged as in use, optionally reports it through
    scale_func, and returns the rescaled simulation list.
    '''
    used = [i for i in range(len(data)) if data[i].use]
    numerator = sum((data[i].y*sim_list[i]).sum() for i in used)
    denominator = sum((sim_list[i]**2).sum() for i in used)
    scale = numerator/denominator
    scaled = [sim*scale for sim in sim_list]
    if scale_func is not None:
        scale_func(scale)
    return scaled
def scale_sqrt_sim(data, sim_list, scale_func = None):
    '''Scale the simulations to the data by least squares in amplitude.

    Minimizes sum((sqrt(data) - sqrt(scale)*sqrt(sim))**2) over the data
    sets flagged as in use; the returned simulations are multiplied by
    scale**2 and the amplitude scale is reported through scale_func.
    '''
    used = [i for i in range(len(data)) if data[i].use]
    numerator = sum(np.sqrt(data[i].y*sim_list[i]).sum() for i in used)
    denominator = sum(sim_list[i].sum() for i in used)
    scale = numerator/denominator
    scaled = [sim*scale**2 for sim in sim_list]
    if scale_func is not None:
        scale_func(scale)
    return scaled
## def scale_log_sim(data, sim_list):
## '''Scale the data according to a miminimazation of
## sum (log(data)-log(I_list))**2
## '''
## numerator = sum([(np.log10(data[i].y)*np.log10(sim_list[i])).sum()
## for i in range(len(data)) if data[i].use])
## denominator = sum([(np.log10(sim_list[i])**2).sum()
## for i in range(len(data)) if data[i].use])
## scale = numerator/denominator
## print scale
## scaled_sim_list = [sim*(10**-scale) for sim in sim_list]
## return scaled_sim_list
def _get_f(inst, el, dinv):
    '''Build the atomic scattering factor matrix for the atoms in el.

    Each column corresponds to one atom in el, evaluated at
    sin(theta)/lambda = dinv/2; rows run over the scattering vectors.
    Results per element are cached so each element is evaluated once.
    '''
    cache = {}
    s = dinv/2.0
    columns = [_fatom_eval(inst, cache, elem, s) for elem in el]
    return np.transpose(np.array(columns, dtype=np.complex128))
def _get_rho(inst, el):
'''Returns the rho functions for all atoms in el
'''
rhos = [getattr(inst.rholib, elem) for elem in el]
return rhos
def _fatom_eval(inst, f, element, s):
'''Smart (fast) evaluation of f_atom. Only evaluates f if not
evaluated before.
element - element string
f - dictonary for lookup
s - sintheta_over_lambda array
'''
try:
fret = f[element]
except KeyError:
fret = getattr(inst.flib, element)(s)
f[element] = fret
#print element, fret[0]
return fret
#=============================================================================
if __name__ == '__main__':
    # Demo / benchmark script: build a LaAlO3 film on an SrTiO3 substrate
    # and compare the structure-factor implementations.
    # NOTE: Instrument, Slab, Sample, UnitCell and SymTrans are presumably
    # defined earlier in this module (not visible here).
    inst = Instrument(wavel = 0.77, alpha = 0.2)
    # Surface slab: LaAlO3 perovskite unit cell (last arg: symmetry id).
    ss1 = Slab(c = 1.00)
    ss1.add_atom('La', 'la', 0.0, 0.0, 0.0, 0.001, 1.0, 1)
    ss1.add_atom('Al', 'al', 0.5, 0.5, 0.5, 0.001, 1.0, 1)
    ss1.add_atom('O1', 'o', 0.5, 0.5, 0.0, 0.001, 1.0, 1)
    ss1.add_atom('O2', 'o', 0.0, 0.5, 0.5, 0.001, 1.0, 1)
    ss1.add_atom('O3', 'o', 0.5, 0.0, 0.5, 0.001, 1.0, 1)
    # Bulk substrate: SrTiO3 perovskite unit cell.
    bulk = Slab()
    bulk.add_atom('Sr', 'sr', 0.0, 0.0, 0.0, 0.001, 1.0)
    bulk.add_atom('Ti', 'ti', 0.5, 0.5, 0.5, 0.001, 1.0)
    bulk.add_atom('O1', 'o', 0.5, 0.0, 0.5, 0.001, 1.0)
    bulk.add_atom('O2', 'o', 0.0, 0.5, 0.5, 0.001, 1.0)
    bulk.add_atom('O3', 'o', 0.5, 0.5, 0.0, 0.001, 1.0)
    sample = Sample(inst, bulk, [ss1]*1,
                    UnitCell(3.945, 3.945, 3.945, 90, 90, 90))
    # Scan along l at fixed (h, k) = (0, 1).
    l = np.arange(0.0, 5, 0.01)
    h = 0.0*np.ones(l.shape)
    k = 1.0*np.ones(l.shape)
    f = sample.calc_f(h, k, l)
    # Second sample using an explicitly symmetry-reduced surface slab.
    s_sym = Slab(c = 1.00)
    s_sym.add_atom('La', 'la', 0.0, 0.0, 0.0, 0.001, 1.0, 1)
    s_sym.add_atom('Al', 'al', 0.5, 0.5, 0.5, 0.001, 1.0, 1)
    s_sym.add_atom('O1', 'o', 0.5, 0.5, 0.0, 0.001, 1.0, 1)
    s_sym.add_atom('O2', 'o', 0.5, 0.0, 0.5, 0.001, 1.0, 2)
    # p4 point-group symmetry: identity, 2-fold and the two 4-fold rotations.
    p4 = [SymTrans([[1, 0],[0, 1]]), SymTrans([[-1, 0],[0, -1]]),
          SymTrans([[0, -1],[1, 0]]), SymTrans([[0, 1],[-1, 0]])]
    sample2 = Sample(inst, bulk, [s_sym]*1,
                     UnitCell(3.945, 3.945, 3.945, 90, 90, 90))
    sample2.set_surface_sym(p4)
    #z = np.arange(-0.1, 3.5, 0.01)
    #x = 0*z + 0.5
    #y = 0*z + 0.5
    #rho = sample2.calc_rhos(x, y, z)
    f2 = sample2.calc_f(h, k, l)
    # Timing comparison between the pure-Python and inline-C paths.
    import time
    t1 = time.time()
    sf = sample2.calc_fs(h, k, l)
    t2 = time.time()
    print('Python: %f seconds'%(t2-t1))
    t3 = time.time()
    sft = sample2.turbo_calc_fs(h, k, l)
    t4 = time.time()
    print('Inline C: %f seconds'%(t4-t3))
|
haozhangphd/genx-py3
|
genx/models/sxrd.py
|
Python
|
gpl-3.0
| 42,067
|
[
"Gaussian"
] |
f231b32831298e634121953ec7c611b51ebcc7b8b2ec74188ff1eb6bbba9ecbd
|
#!/usr/bin/env python
#
# $File: cppParentChooser.py $
#
# This file is part of simuPOP, a forward-time population genetics
# simulation environment. Please visit http://simupop.sourceforge.net
# for details.
#
# Copyright (C) 2004 - 2010 Bo Peng (bpeng@mdanderson.org)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script is an example in the simuPOP user's guide. Please refer to
# the user's guide (http://simupop.sourceforge.net/manual) for a detailed
# description of this example.
#
import simuPOP as sim
# The class myParentsChooser is defined in module myParentsChooser
try:
    from myParentsChooser import myParentsChooser
except ImportError:
    # The compiled C++ chooser is unavailable; fall back to a
    # behaviorally equivalent pure-Python implementation.
    import random

    class myParentsChooser:
        """Pair a uniformly random male with a uniformly random female."""

        def __init__(self, maleIndexes, femaleIndexes):
            self.maleIndexes = maleIndexes
            self.femaleIndexes = femaleIndexes

        def chooseParents(self):
            father = random.choice(self.maleIndexes)
            mother = random.choice(self.femaleIndexes)
            return father, mother
def parentsChooser(pop, sp):
    'How to call a C++ level parents chooser.'
    # Partition individuals by sex once, then hand the index lists to the
    # (possibly C++-level) chooser object.
    males = [i for i in range(pop.popSize())
             if pop.individual(i).sex() == sim.MALE]
    females = [i for i in range(pop.popSize())
               if pop.individual(i).sex() == sim.FEMALE]
    chooser = myParentsChooser(males, females)
    # Generator: yield a fresh (father, mother) index pair on every request.
    while True:
        yield chooser.chooseParents()
pop = sim.Population(100, loci=1)
# Bug fix: the original called simu.evolve(), but no 'simu' object is ever
# created in this script -- the Population instance 'pop' is what evolves.
pop.evolve(
    initOps=[
        sim.InitSex(),
        sim.InitGenotype(freq=[0.5, 0.5])
    ],
    matingScheme=sim.HomoMating(sim.PyParentsChooser(parentsChooser),
        sim.OffspringGenerator(ops=sim.MendelianGenoTransmitter())),
    gen = 100
)
|
BoPeng/simuPOP
|
docs/cppParentChooser.py
|
Python
|
gpl-2.0
| 2,430
|
[
"VisIt"
] |
447a025d6060822939b12aed689cdda0bb65f00f4c4dce6b7f497e4399773535
|
#! /usr/bin/python
#coding=utf-8
import pycurl, re, cStringIO #pycurl is better than urllib
from Tkinter import * #import functions only when necessary
# Module-level crawler state. NOTE: this is Python 2 code (print statements,
# cStringIO, Tkinter) and depends on pycurl.
# NOTE(review): 'global' at module level is a no-op; these statements have
# no effect and can be removed.
url0 = "http://site.baidu.com" #begin from a navigate page
global MAXNUM #the max num of web pages to visit
MAXNUM = 20
global urlnum #urls already visited successfully
urlnum = 0;
urlslist = [] #list to store url, here is like queue
dict_jquery = {} #just like map in C++; maps jquery filename -> page count
#BETTER: when libs are too many, use map and struct to simplify code
#compile regex will save time
reg_link = re.compile(r'href="(https?://.+?)"')
reg_jquery = re.compile(r'<script .*?src=".+?jquery', re.IGNORECASE)
reg_prototype = re.compile(r'<script .*?src=".+?prototype', re.IGNORECASE)
reg_moontools = re.compile(r'<script .*?src=".+?moontools', re.IGNORECASE)
reg_dojo = re.compile(r'<script .*?src=".+?dojo', re.IGNORECASE)
reg_yui = re.compile(r'<script .*?src=".+?yui', re.IGNORECASE)
# NOTE(review): name has a typo ('jqeury'); kept because it is referenced below.
reg_jqeury_version = re.compile(r'<script .*?src=".+?(jquery-.*?.js).*?"', re.IGNORECASE)
# Per-library page counters (later converted in place to percentages).
global Jquery #num of pages using Jquery
Jquery = 0
global Prototype #num of pages using Prototype
Prototype = 0
global MoonTools #num of pages using MoonTools
MoonTools = 0
global Dojo #num of pages using Dojo
Dojo = 0
global YUI #num of pages using YUI
YUI = 0
# Shared curl handle and response buffer reused across all requests.
buf = cStringIO.StringIO()
c = pycurl.Curl()
c.setopt(c.WRITEFUNCTION, buf.write) #web content will be put into buf now
c.setopt(c.CONNECTTIMEOUT, 5) #out of time to connect
c.setopt(c.TIMEOUT, 5) #out of time to download
c.setopt(pycurl.USERAGENT, "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)") #simulate the browser
c.setopt(pycurl.MAXREDIRS, 5) #max num to redirect
#maybe need to set PROXY and COOKIE
#c.setopt(c.PROXY, 'http://inthemiddle.com:8080')
#c.setopt(c.POST, 1)
#c.setopt(c.POSTFIELDS, 'pizza=Quattro+Stagioni&extra=cheese')
#c.setopt(c.VERBOSE, True)
def visit(cnturl): #visit current url and analyse
    # Download cnturl, count which JS libraries its HTML references, and
    # enqueue any outgoing links. Mutates the module-level counters and
    # urlslist; pops the visited URL off the front of the queue.
    global urlnum
    global Jquery
    global Prototype
    global MoonTools
    global Dojo
    global YUI
    c.setopt(c.URL, cnturl)
    try:
        c.perform() #connect and download
    except: #exception or error: can't download, etc.
        # NOTE(review): bare except silently swallows all errors, and urlnum
        # below is incremented even when the download failed.
        pass
    urlnum = urlnum + 1;
    # NOTE(review): buf is never truncated between visits, so getvalue()
    # returns the accumulated HTML of ALL pages so far -- counts and link
    # extraction therefore re-match earlier pages. Probable bug.
    html = buf.getvalue()
    urls = reg_link.findall(html)
    if reg_jquery.search(html) != None: #search successfully!
        Jquery = Jquery + 1
    if reg_prototype.search(html) != None:
        Prototype = Prototype + 1
    if reg_moontools.search(html) != None:
        MoonTools = MoonTools + 1
    if reg_dojo.search(html) != None:
        Dojo = Dojo + 1
    if reg_yui.search(html) != None:
        YUI = YUI + 1
    libs = reg_jqeury_version.findall(html) #find all jquery versions
    libslist = []
    for each in libs: #remove identical lib in a page
        if each not in libslist:
            libslist.append(each)
    for each in libslist:
        if each in dict_jquery: #use diction to sum
            dict_jquery[each] = dict_jquery[each] + 1
        else:
            dict_jquery[each] = 1
    #set is also a good choice. However, both may have some identical urls due to link-cycle
    #BETTER: use another list, pop and add, sum in the final(or store pages)
    for each in urls:
        if each not in urlslist: #ensure the url are unique
            urlslist.append(each)
    # Dequeue the URL just visited (queue discipline on a plain list;
    # NOTE(review): list.pop(0) is O(n) -- collections.deque would be O(1)).
    urlslist.pop(0)
def Application_button_result():
    """Replace the reply pane contents with the overall usage percentages."""
    reply.delete('0.0', END)
    lines = ["Total num of pages: %d\n" % MAXNUM,
             "jquery: %d%%\n" % Jquery,
             "prototype: %d%%\n" % Prototype,
             "moontools: %d%%\n" % MoonTools,
             "dojo: %d%%\n" % Dojo,
             "yui: %d%%\n" % YUI]
    reply.insert(END, "".join(lines), 'green') #insert new content into text area
def Application_button_clear():
    """Wipe everything from the reply text area."""
    reply.delete('0.0', END)
def Application_button_more():
    """Show the per-version jQuery breakdown in the reply pane."""
    reply.delete('0.0', END)
    # Header (the 'verision' typo is preserved: it is user-visible output
    # produced by the original implementation).
    parts = ["More about Jquery\nTotal num of pages: %d"
             "\nAll Jquery verision percentage: %d%%\n\n" % (MAXNUM, Jquery)]
    for key in dict_jquery:
        parts.append("%s : %d%%\n" % (key, dict_jquery[key]))
    reply.insert(END, "".join(parts), 'green')
# Crawl phase: breadth-first walk starting from url0 until MAXNUM pages
# have been visited, then convert counters to percentages and show a GUI.
start = True
urlslist.append(url0) #insert first element, just like queue
while urlslist != [] and urlnum < MAXNUM: #notice the loop border
    visit(urlslist[0])
    # If the very first visit produced no outgoing links, assume we are offline.
    if start == True and urlslist == []:
        print "Please ensure you have connected to the Internet!"
        exit()
    elif start == True:
        start = False
#compute the percentage for each JS lib
# NOTE(review): integer division (Python 2) -- percentages are truncated.
Jquery = Jquery * 100 / MAXNUM
Prototype = Prototype * 100 / MAXNUM
MoonTools = MoonTools * 100 / MAXNUM
Dojo = Dojo * 100 / MAXNUM
YUI = YUI * 100 / MAXNUM
for each in dict_jquery:
    dict_jquery[each] = dict_jquery[each] * 100 / MAXNUM
#use Tkinter to show result
root = Tk()
root.title(unicode('Javascript Statistics', 'utf-8'))
#create several frames as container
frame_left_top = Frame(width=400, height=300, bg='white')
frame_left_center_left = Frame(width=130, height=100)
frame_left_center_mid = Frame(width=130, height=100)
frame_left_center_right = Frame(width=130, height=100)
#frame_left_bottom = Frame(width=400, height=300, bg='white')
frame_right = Frame(width=200, height=700, bg='white')
#create elements needed, frame_left_bottom not used
info = Text(frame_left_top)
button_result = Button(frame_left_center_left, text=unicode('result', 'utf-8'), command=Application_button_result)
button_clear = Button(frame_left_center_mid, text=unicode('clear', 'utf-8'), command=Application_button_clear)
button_more = Button(frame_left_center_right, text=unicode('more', 'utf-8'), command=Application_button_more)
reply = Text(frame_right)
#use grid to set the position of containers
frame_left_top.grid(row=0, column=0, columnspan=3, padx=2, pady=5)
frame_left_center_left.grid(row=1, column=0)
frame_left_center_mid.grid(row=1, column=1)
frame_left_center_right.grid(row=1, column=2)
frame_right.grid(row=0, column=3, padx=4, pady=5)
#frame_left_bottom.grid(row=2, column=0, columnspan=3, padx=2, pady=5)
#father element's position/size is not relevant to child element
frame_left_top.propagate(False) #True by default
frame_left_center_left.propagate(False)
frame_left_center_mid.propagate(False)
frame_left_center_right.propagate(False)
frame_right.propagate(False)
#put elements into frame
info.grid() #sticky=E+W+S+N how to extend
button_result.grid()
button_clear.grid()
button_more.grid()
reply.grid(sticky=S)
# Static help text shown in the left info pane.
content = \
"This application will show the distribution of JavaScript Libs. \
\n\nFor each of [Jquery, Prototype, MoonTools, Dojo, YUI], we will \
compute the rate of using it in web pages. \
\n\nWhat's more, we will analyse the distribution of different version \
of Jquery. \
\n\nIf time permitted, we can do more statistics!\n"
info.insert(END, content, 'red')
root.mainloop() #main loop for Tk GUI
#print buf.getvalue()
#fo = open("data/sites.html", "w+")
#fo.write(buf.getvalue())
#fo.close()
buf.close() #close the cStringIO buf
print "---END---"
|
bookug/study
|
python/js-counter.py
|
Python
|
gpl-3.0
| 7,112
|
[
"VisIt"
] |
709bee4f115d4d256e46d6cbca70e192a2ca852fdd65820580effc7b2c8d7a73
|
"""
Test vv.diff module
"""
__author__ = "Dan Gunter <dkgunter@lbl.gov>"
import logging
import random
import unittest
from pymatgen.db.tests.common import MockQueryEngine
from pymatgen.db.vv.diff import Differ, Delta
#
# Connection settings for the throwaway local Mongo test database;
# unpacked as keyword arguments into MockQueryEngine in the test setUp.
db_config = {
    "host": "localhost",
    "port": 27017,
    "database": "test",
    "aliases_config": {"aliases": {}, "defaults": {}},
}
def recname(num):
    """Return the canonical record name for index *num*."""
    return "item-%d" % num
def create_record(num):
    """Build one synthetic test record named via recname(num).

    'color' and 'energy' are randomized; the remaining fields are
    deterministic functions of *num*.
    """
    palette = ("red", "orange", "green", "indigo", "taupe", "mauve")
    rec = {"name": recname(num)}
    rec["color"] = random.choice(palette)
    rec["same"] = "yawn"
    rec["idlist"] = list(range(num))
    rec["zero"] = 0
    rec["energy"] = random.random() * 5 - 2.5
    return rec
#
class MyTestCase(unittest.TestCase):
    """Exercise Differ and Delta against two mock collections filled
    with randomized records (see create_record above)."""
    # Number of records inserted into each mock collection.
    NUM_RECORDS = 10
    @classmethod
    def setUpClass(cls):
        # Silence the 'mg' logger so diff runs don't spam test output.
        mg = logging.getLogger("mg")
        mg.setLevel(logging.ERROR)
        mg.addHandler(logging.StreamHandler())
    def setUp(self):
        # Build two collections with independently randomized records, and
        # remember each record's color/energy per collection so tests can
        # compute the expected diff counts themselves.
        self.collections, self.engines = ["diff1", "diff2"], []
        self.colors = [[None, None] for i in range(self.NUM_RECORDS)]
        self.energies = [[None, None] for i in range(self.NUM_RECORDS)]
        for c in self.collections:
            # Create mock query engine.
            self.engines.append(MockQueryEngine(collection=c, **db_config))
        for ei, engine in enumerate(self.engines):
            engine.collection.delete_many({})
            for i in range(self.NUM_RECORDS):
                rec = create_record(i)
                engine.collection.insert_one(rec)
                # save some vars for easy double-checking
                self.colors[i][ei] = rec["color"]
                self.energies[i][ei] = rec["energy"]
    def test_key_same(self):
        """Keys only and all keys are the same."""
        # Perform diff.
        df = Differ(key="name")
        d = df.diff(*self.engines)
        # Check results.
        self.assertEqual(len(d[Differ.NEW]), 0)
        self.assertEqual(len(d[Differ.MISSING]), 0)
    def test_key_different(self):
        """Keys only and keys are different."""
        # Add one different record to each collection.
        self.engines[0].collection.insert_one(create_record(self.NUM_RECORDS + 1))
        self.engines[1].collection.insert_one(create_record(self.NUM_RECORDS + 2))
        # Perform diff.
        df = Differ(key="name")
        d = df.diff(*self.engines)
        # Check results.
        self.assertEqual(len(d[Differ.MISSING]), 1)
        self.assertEqual(d[Differ.MISSING][0]["name"], recname(self.NUM_RECORDS + 1))
        self.assertEqual(len(d[Differ.NEW]), 1)
        self.assertEqual(d[Differ.NEW][0]["name"], recname(self.NUM_RECORDS + 2))
    def test_eqprops_same(self):
        """Keys and props, all are the same."""
        # Perform diff.
        df = Differ(key="name", props=["same"])
        d = df.diff(*self.engines)
        # Check results.
        self.assertEqual(len(d[Differ.CHANGED]), 0)
    def test_eqprops_different(self):
        """Keys and props, some props out of range."""
        # Perform diff.
        df = Differ(key="name", props=["color"])
        d = df.diff(*self.engines)
        # Calculate expected results: one change per record whose two
        # randomized colors differ between the collections.
        changed = sum(int(c[0] != c[1]) for c in self.colors)
        # Check results.
        self.assertEqual(len(d[Differ.CHANGED]), changed)
    def test_numprops_same(self):
        """Keys and props, all are the same."""
        # Perform diff.
        df = Differ(key="name", deltas={"zero": Delta("+-0.001")})
        d = df.diff(*self.engines)
        # Check results.
        self.assertEqual(len(d[Differ.CHANGED]), 0)
    def test_numprops_different(self):
        """Keys and props, some props different."""
        # Perform diff.
        delta = 0.5
        df = Differ(key="name", deltas={"energy": Delta(f"+-{delta:f}")})
        d = df.diff(*self.engines)
        # Calculate expected results.
        is_different = lambda a, b: abs(a - b) > delta
        changed = sum(int(is_different(e[0], e[1])) for e in self.energies)
        # Check results.
        self.assertEqual(len(d[Differ.CHANGED]), changed)
    def test_numprops_different_sign(self):
        """Keys and props, some props different."""
        # Perform diff: a bare '+-' delta flags only sign changes.
        df = Differ(key="name", deltas={"energy": Delta("+-")})
        d = df.diff(*self.engines)
        # Calculate expected results.
        is_different = lambda a, b: a < 0 < b or b < 0 < a
        changed = sum(int(is_different(e[0], e[1])) for e in self.energies)
        # Check results.
        self.assertEqual(len(d[Differ.CHANGED]), changed)
    def test_numprops_different_pct(self):
        """Keys and props, some props different, check pct change."""
        # Perform diff with asymmetric percentage thresholds.
        minus, plus = 10, 20
        df = Differ(key="name", deltas={"energy": Delta(f"+{plus}-{minus}=%")})
        d = df.diff(*self.engines)
        # Calculate expected results.
        def is_different(a, b):
            pct = 100.0 * (b - a) / a
            return pct <= -minus or pct >= plus
        changed = sum(int(is_different(e[0], e[1])) for e in self.energies)
        # Check results; on mismatch build a detailed diagnostic message.
        if len(d[Differ.CHANGED]) != changed:
            result = d[Differ.CHANGED]
            msg = "Values:\n"
            for i, e in enumerate(self.energies):
                if not is_different(*e):
                    continue
                msg += f"{i:d}) {e[0]:f} {e[1]:f}\n"
            msg += "Result:\n"
            for i, r in enumerate(result):
                msg += "{:d}) {} {}\n".format(i, r["old"], r["new"])
        self.assertEqual(len(d[Differ.CHANGED]), changed, msg=msg)
    # repeat this test a few more times
    # (records are randomized per-run, so aliasing the method reruns the
    # percentage logic against fresh data)
    test_numprops_different_pct1 = test_numprops_different_pct
    test_numprops_different_pct2 = test_numprops_different_pct
    test_numprops_different_pct3 = test_numprops_different_pct
    def test_delta(self):
        """Delta class parsing."""
        self.assertRaises(ValueError, Delta, "foo")
    def test_delta_sign(self):
        """Delta class sign."""
        d = Delta("+-")
        self.assertEqual(d.cmp(0, 1), False)
        self.assertEqual(d.cmp(-1, 0), False)
        self.assertEqual(d.cmp(-1, 1), True)
    def test_delta_val(self):
        """Delta class value, same absolute."""
        d = Delta("+-3")
        self.assertEqual(d.cmp(0, 1), False)
        self.assertEqual(d.cmp(1, 4), False)
        self.assertEqual(d.cmp(1, 5), True)
    def test_delta_val2(self):
        """Delta class value, different absolute."""
        d = Delta("+2.5-1.5")
        self.assertEqual(d.cmp(0, 1), False)
        self.assertEqual(d.cmp(1, 3), False)
        self.assertEqual(d.cmp(3, 1), True)
    def test_delta_val3(self):
        """Delta class value, same absolute equality."""
        d = Delta("+-3.0=")
        self.assertEqual(d.cmp(0, 1), False)
        self.assertEqual(d.cmp(1, 4), True)
        self.assertEqual(d.cmp(4, 1), True)
    def test_delta_val4(self):
        """Delta class value, same percentage."""
        d = Delta("+-25%")
        self.assertEqual(d.cmp(0, 1), False)
        self.assertEqual(d.cmp(8, 4), True)
        self.assertEqual(d.cmp(8, 6), False)
    def test_delta_val5(self):
        """Delta class value, same percentage equality."""
        d = Delta("+-25=%")
        self.assertEqual(d.cmp(0, 1), False)
        self.assertEqual(d.cmp(8, 4), True)
        self.assertEqual(d.cmp(8, 6), True)
    def test_delta_val6(self):
        """Delta class value, different percentage equality."""
        d = Delta("+50-25=%")
        self.assertEqual(d.cmp(0, 1), False)
        self.assertEqual(d.cmp(8, 4), True)
        self.assertEqual(d.cmp(8, 6), True)
        self.assertEqual(d.cmp(6, 8), False)
        self.assertEqual(d.cmp(6, 9), True)
    def test_delta_plus(self):
        """Delta class value 'plus only'."""
        d = Delta("+50")
        self.assertEqual(d.cmp(0, 50), False)
        self.assertEqual(d.cmp(0, 51), True)
        self.assertEqual(d.cmp(10, 5), False)
        d = Delta("+50=")
        self.assertEqual(d.cmp(0, 50), True)
        d = Delta("+50%")
        self.assertEqual(d.cmp(10, 25), True)
        self.assertEqual(d.cmp(25, 10), False)
    def test_delta_minus(self):
        """Delta class value 'minus only'."""
        d = Delta("-50")
        self.assertEqual(d.cmp(0, 50), False)
        self.assertEqual(d.cmp(51, 0), True)
        self.assertEqual(d.cmp(5, 10), False)
        d = Delta("-50=")
        self.assertEqual(d.cmp(50, 0), True)
        d = Delta("-50%")
        self.assertEqual(d.cmp(25, 10), True)
        self.assertEqual(d.cmp(10, 25), False)
# Run the suite when the module is executed directly.
if __name__ == "__main__":
    unittest.main()
|
materialsproject/pymatgen-db
|
pymatgen/db/vv/tests/test_diff.py
|
Python
|
mit
| 8,690
|
[
"pymatgen"
] |
1840553a94cdab20ecc479edc09dac96b8467336e9660764eaab1681f1ce5ecf
|
"""
Routines for interpolating forcing fields for the 3D solver.
"""
from firedrake import *
import numpy as np
import scipy.spatial.qhull as qhull
import thetis.timezone as timezone
import thetis.interpolation as interpolation
import thetis.coordsys as coordsys
from .log import *
import netCDF4
import thetis.physical_constants as physical_constants
import uptide
import uptide.tidal_netcdf
from abc import ABCMeta, abstractmethod, abstractproperty
import os
def compute_wind_stress(wind_u, wind_v, method='LargePond1981'):
    r"""
    Compute wind stress from atmospheric 10 m wind.
    wind stress is defined as
    .. math:
        tau_w = C_D \rho_{air} \|U_{10}\| U_{10}
    where :math:`C_D` is the drag coefficient, :math:`\rho_{air}` is the density of
    air, and :math:`U_{10}` is wind speed 10 m above the sea surface.
    In practice `C_D` depends on the wind speed.
    Two formulation are currently implemented:
    - "LargePond1981":
        Wind stress formulation by [1]
    - "SmithBanke1975":
        Wind stress formulation by [2]
    [1] Large and Pond (1981). Open Ocean Momentum Flux Measurements in
        Moderate to Strong Winds. Journal of Physical Oceanography,
        11(3):324-336.
        https://doi.org/10.1175/1520-0485(1981)011%3C0324:OOMFMI%3E2.0.CO;2
    [2] Smith and Banke (1975). Variation of the sea surface drag coefficient with
        wind speed. Q J R Meteorol Soc., 101(429):665-673.
        https://doi.org/10.1002/qj.49710142920
    :arg wind_u, wind_v: Wind u and v components as numpy arrays
    :kwarg method: Choose the stress formulation. Currently supports:
        'LargePond1981' (default) or 'SmithBanke1975'.
    :returns: (tau_x, tau_y) wind stress x and y components as numpy arrays
    :raises ValueError: if *method* is not one of the supported formulations
    """
    rho_air = float(physical_constants['rho_air'])
    wind_mag = np.hypot(wind_u, wind_v)
    if method == 'LargePond1981':
        CD_LOW = 1.2e-3
        C_D = np.ones_like(wind_u)*CD_LOW
        # Drag increases linearly with wind speed above 11 m/s.
        high_wind = wind_mag > 11.0
        C_D[high_wind] = 1.0e-3*(0.49 + 0.065*wind_mag[high_wind])
    elif method == 'SmithBanke1975':
        C_D = (0.63 + 0.066 * wind_mag)/1000.
    else:
        # Bug fix: an unknown method previously fell through and raised a
        # confusing NameError on C_D below; fail fast with a clear message.
        raise ValueError('Unknown wind stress formulation: {:}'.format(method))
    tau = C_D*rho_air*wind_mag
    tau_x = tau*wind_u
    tau_y = tau*wind_v
    return tau_x, tau_y
class ATMNetCDFTime(interpolation.NetCDFTimeParser):
    """
    A TimeParser class for reading WRF/NAM atmospheric forecast files.

    Forecast files typically span more than one day; this parser truncates
    each file's time axis to at most ``max_duration`` from its start.
    """
    def __init__(self, filename, max_duration=24.*3600., verbose=False):
        """
        :arg filename: NetCDF forecast file to parse (must contain a
            'time' variable understood by the parent parser).
        :kwarg max_duration: Time span to read from each file (in seconds,
            default one day). Forecast files are usually daily files that
            contain forecast for > 1 days.
        :kwarg bool verbose: Set True to print debug information.
        """
        # Parent parser populates self.time_array (epoch seconds).
        super(ATMNetCDFTime, self).__init__(filename, time_variable_name='time')
        # NOTE these are daily forecast files, limit time steps to one day
        self.start_time = timezone.epoch_to_datetime(float(self.time_array[0]))
        self.end_time_raw = timezone.epoch_to_datetime(float(self.time_array[-1]))
        # Assumes a (nearly) uniform time step; mean smooths tiny jitter.
        self.time_step = np.mean(np.diff(self.time_array))
        self.max_steps = int(max_duration / self.time_step)
        self.time_array = self.time_array[:self.max_steps]
        self.end_time = timezone.epoch_to_datetime(float(self.time_array[-1]))
        if verbose:
            print_output('Parsed file {:}'.format(filename))
            print_output('  Raw time span: {:} -> {:}'.format(self.start_time, self.end_time_raw))
            print_output('  Time step: {:} h'.format(self.time_step/3600.))
            print_output('  Restricting duration to {:} h -> keeping {:} steps'.format(max_duration/3600., self.max_steps))
            print_output('  New time span: {:} -> {:}'.format(self.start_time, self.end_time))
class ATMInterpolator(object):
    """
    Interpolates WRF/NAM atmospheric model data on 2D fields.

    Combines spatial (lat/lon grid -> mesh) and linear temporal
    interpolation, then rotates wind vectors into the model coordinate
    system and converts wind to stress.
    """
    def __init__(self, function_space, wind_stress_field,
                 atm_pressure_field, to_latlon,
                 ncfile_pattern, init_date, target_coordsys, verbose=False):
        """
        :arg function_space: Target (scalar) :class:`FunctionSpace` object onto
            which data will be interpolated.
        :arg wind_stress_field: A 2D vector :class:`Function` where the output
            wind stress will be stored.
        :arg atm_pressure_field: A 2D scalar :class:`Function` where the output
            atmospheric pressure will be stored.
        :arg to_latlon: Python function that converts local mesh coordinates to
            latitude and longitude: 'lat, lon = to_latlon(x, y)'
        :arg ncfile_pattern: A file name pattern for reading the atmospheric
            model output files. E.g. 'forcings/nam_air.local.2006_*.nc'
        :arg init_date: A :class:`datetime` object that indicates the start
            date/time of the Thetis simulation. Must contain time zone. E.g.
            'datetime(2006, 5, 1, tzinfo=pytz.utc)'
        :arg target_coordsys: coordinate system in which the model grid is
            defined. This is used to rotate vectors to local coordinates.
        :kwarg bool verbose: Set True to print debug information.
        """
        self.function_space = function_space
        self.wind_stress_field = wind_stress_field
        self.atm_pressure_field = atm_pressure_field
        # construct interpolators: spatial (grid -> mesh nodes), then the
        # file/time machinery that feeds it, then linear time interpolation.
        self.grid_interpolator = interpolation.NetCDFLatLonInterpolator2d(self.function_space, to_latlon)
        self.reader = interpolation.NetCDFSpatialInterpolator(self.grid_interpolator, ['uwind', 'vwind', 'prmsl'])
        self.timesearch_obj = interpolation.NetCDFTimeSearch(ncfile_pattern, init_date, ATMNetCDFTime, verbose=verbose)
        self.time_interpolator = interpolation.LinearTimeInterpolator(self.timesearch_obj, self.reader)
        # Rotator for turning lat/lon wind components into mesh coordinates.
        lon = self.grid_interpolator.mesh_lonlat[:, 0]
        lat = self.grid_interpolator.mesh_lonlat[:, 1]
        self.vect_rotator = coordsys.VectorCoordSysRotation(
            coordsys.LL_WGS84, target_coordsys, lon, lat)
    def set_fields(self, time):
        """
        Evaluates forcing fields at the given time.
        Performs interpolation and updates the output wind stress and
        atmospheric pressure fields in place.
        :arg float time: Thetis simulation time in seconds.
        """
        lon_wind, lat_wind, prmsl = self.time_interpolator(time)
        # Rotate winds to the mesh coordinate system before forming stress.
        u_wind, v_wind = self.vect_rotator(lon_wind, lat_wind)
        u_stress, v_stress = compute_wind_stress(u_wind, v_wind)
        self.wind_stress_field.dat.data_with_halos[:, 0] = u_stress
        self.wind_stress_field.dat.data_with_halos[:, 1] = v_stress
        self.atm_pressure_field.dat.data_with_halos[:] = prmsl
class SpatialInterpolatorNCOMBase(interpolation.SpatialInterpolator):
    """
    Base class for 2D and 3D NCOM spatial interpolators.

    Provides the horizontal source-point selection: from the full NCOM
    grid it picks only the wet points that either fall inside a bounding
    box around the local mesh (plus a buffer) or are the nearest wet
    neighbor of some mesh node.
    """
    def __init__(self, function_space, to_latlon, grid_path):
        """
        :arg function_space: Target (scalar) :class:`FunctionSpace` object onto
            which data will be interpolated.
        :arg to_latlon: Python function that converts local mesh coordinates to
            latitude and longitude: 'lat, lon = to_latlon(x, y)'
        :arg grid_path: File path where the NCOM model grid files
            ('model_lat.nc', 'model_lon.nc', 'model_zm.nc') are located.
        """
        self.function_space = function_space
        self.grid_path = grid_path
        # Interpolator is built lazily on the first interpolate() call.
        self._initialized = False
    def _create_2d_mapping(self, ncfile):
        """
        Create map for 2D nodes.

        Returns the (lon, lat) subsets of selected source points, the
        x/y index arrays of the file's subgrid, and the sample variable
        used to detect the land mask.
        """
        # read source lat lon grid
        lat_full = self._get_forcing_grid('model_lat.nc', 'Lat')
        lon_full = self._get_forcing_grid('model_lon.nc', 'Long')
        # Data files cover a subgrid of the full model grid.
        x_ind = ncfile['X_Index'][:].astype(int)
        y_ind = ncfile['Y_Index'][:].astype(int)
        lon = lon_full[y_ind, :][:, x_ind]
        lat = lat_full[y_ind, :][:, x_ind]
        # find where data values are not defined: pick the first real data
        # variable in the file and use its mask as the land mask.
        varkey = None
        for k in ncfile.variables.keys():
            if k not in ['X_Index', 'Y_Index', 'level']:
                varkey = k
                break
        assert varkey is not None, 'Could not find variable in file'
        vals = ncfile[varkey][:]  # shape (nz, lat, lon) or (lat, lon)
        is3d = len(vals.shape) == 3
        # A point is land only if masked at every level (3D case).
        land_mask = np.all(vals.mask, axis=0) if is3d else vals.mask
        # build 2d mask
        mask_good_values = ~land_mask
        # neighborhood mask with bounding box around the local mesh extent
        mask_cover = np.zeros_like(mask_good_values)
        buffer = 0.2
        lat_min = self.latlonz_array[:, 0].min() - buffer
        lat_max = self.latlonz_array[:, 0].max() + buffer
        lon_min = self.latlonz_array[:, 1].min() - buffer
        lon_max = self.latlonz_array[:, 1].max() + buffer
        # Boolean '*' acts as logical AND here.
        mask_cover[(lat >= lat_min)
                   * (lat <= lat_max)
                   * (lon >= lon_min)
                   * (lon <= lon_max)] = True
        mask_cover *= mask_good_values
        # include nearest valid neighbors
        # needed for nearest neighbor filling
        from scipy.spatial import cKDTree
        good_lat = lat[mask_good_values]
        good_lon = lon[mask_good_values]
        ll = np.vstack([good_lat.ravel(), good_lon.ravel()]).T
        dist, ix = cKDTree(ll).query(self.latlonz_array[:, :2])
        ix = np.unique(ix)
        # Map indices within the wet subset back to flat grid indices.
        ix = np.nonzero(mask_good_values.ravel())[0][ix]
        a, b = np.unravel_index(ix, lat.shape)
        mask_nn = np.zeros_like(mask_good_values)
        mask_nn[a, b] = True
        # final mask: union of bounding-box coverage and nearest neighbors
        mask = mask_cover + mask_nn
        self.nodes = np.nonzero(mask.ravel())[0]
        self.ind_lat, self.ind_lon = np.unravel_index(self.nodes, lat.shape)
        lat_subset = lat[self.ind_lat, self.ind_lon]
        lon_subset = lon[self.ind_lat, self.ind_lon]
        # NOTE(review): these assert messages contain '{:}' placeholders but
        # are never .format()ted, so the rank is not shown on failure.
        assert len(lat_subset) > 0, 'rank {:} has no source lat points'
        assert len(lon_subset) > 0, 'rank {:} has no source lon points'
        return lon_subset, lat_subset, x_ind, y_ind, vals
    def _get_forcing_grid(self, filename, varname):
        """
        Helper function to load NCOM grid files.

        :returns: the full array of *varname* read from *filename* under
            ``self.grid_path``.
        """
        v = None
        with netCDF4.Dataset(os.path.join(self.grid_path, filename), 'r') as ncfile:
            v = ncfile[varname][:]
        return v
class SpatialInterpolatorNCOM3d(SpatialInterpolatorNCOMBase):
    """
    Spatial interpolator class for interpolating NCOM ocean model 3D fields.
    """
    def __init__(self, function_space, to_latlon, grid_path):
        """
        :arg function_space: Target (scalar) :class:`FunctionSpace` object onto
            which data will be interpolated.
        :arg to_latlon: Python function that converts local mesh coordinates to
            latitude and longitude: 'lat, lon = to_latlon(x, y)'
        :arg grid_path: File path where the NCOM model grid files
            ('model_lat.nc', 'model_lon.nc', 'model_zm.nc') are located.
        """
        super().__init__(function_space, to_latlon, grid_path)
        # construct local coordinates: evaluate mesh (x, y, z) at the dofs of
        # the target space via a borrowed work function.
        xyz = SpatialCoordinate(self.function_space.mesh())
        tmp_func = self.function_space.get_work_function()
        xyz_array = np.zeros((tmp_func.dat.data_with_halos.shape[0], 3))
        for i in range(3):
            tmp_func.interpolate(xyz[i])
            xyz_array[:, i] = tmp_func.dat.data_with_halos[:]
        self.function_space.restore_work_function(tmp_func)
        # Columns: (lat, lon, z) per dof -- the coordinate system the NCOM
        # source grid is expressed in.
        self.latlonz_array = np.zeros_like(xyz_array)
        lat, lon = to_latlon(xyz_array[:, 0], xyz_array[:, 1], positive_lon=True)
        self.latlonz_array[:, 0] = lat
        self.latlonz_array[:, 1] = lon
        self.latlonz_array[:, 2] = xyz_array[:, 2]
    def _create_interpolator(self, ncfile):
        """
        Create a compact interpolator by finding the minimal necessary support
        """
        lon_subset, lat_subset, x_ind, y_ind, vals = self._create_2d_mapping(ncfile)
        # find 3d mask where data is not defined
        vals = vals[:, self.ind_lat, self.ind_lon]
        self.good_mask_3d = ~vals.mask
        # construct vertical grid from the model's cell-depth file
        zm = self._get_forcing_grid('model_zm.nc', 'zm')
        zm = zm[:, y_ind, :][:, :, x_ind]
        grid_z = zm[:, self.ind_lat, self.ind_lon]  # shape (nz, nlatlon)
        # Fill masked depths with a deep dummy value so they never win.
        grid_z = grid_z.filled(-5000.)
        # nudge water surface higher for interpolation
        grid_z[0, :] = 1.5
        nz = grid_z.shape[0]
        # data shape is [nz, neta*nxi]: tile horizontal coords over levels
        # and keep only points where source data exists.
        grid_lat = np.tile(lat_subset, (nz, 1))[self.good_mask_3d]
        grid_lon = np.tile(lon_subset, (nz, 1))[self.good_mask_3d]
        grid_z = grid_z[self.good_mask_3d]
        if np.ma.isMaskedArray(grid_lat):
            grid_lat = grid_lat.filled(0.0)
        if np.ma.isMaskedArray(grid_lon):
            grid_lon = grid_lon.filled(0.0)
        if np.ma.isMaskedArray(grid_z):
            grid_z = grid_z.filled(0.0)
        grid_latlonz = np.vstack((grid_lat, grid_lon, grid_z)).T
        # building 3D interpolator, this can take a long time (minutes)
        print_output('Constructing 3D GridInterpolator...')
        self.interpolator = interpolation.GridInterpolator(
            grid_latlonz, self.latlonz_array,
            normalize=True, fill_mode='nearest', dont_raise=True
        )
        print_output('done.')
        self._initialized = True
    def interpolate(self, nc_filename, variable_list, itime):
        """
        Calls the interpolator object

        :arg nc_filename: NetCDF file containing the source fields.
        :arg variable_list: names of the variables to interpolate.
        :arg itime: time index (currently unused by this implementation).
        :returns: list of interpolated arrays, one per variable.
        """
        with netCDF4.Dataset(nc_filename, 'r') as ncfile:
            # Lazy construction on first use (expensive).
            if not self._initialized:
                self._create_interpolator(ncfile)
            output = []
            for var in variable_list:
                assert var in ncfile.variables
                # Restrict to the precomputed support before interpolating.
                grid_data = ncfile[var][:][:, self.ind_lat, self.ind_lon][self.good_mask_3d]
                data = self.interpolator(grid_data)
                output.append(data)
        return output
class SpatialInterpolatorNCOM2d(SpatialInterpolatorNCOMBase):
    """
    Spatial interpolator class for interpolating NCOM ocean model 2D fields.
    """
    def __init__(self, function_space, to_latlon, grid_path):
        """
        :arg function_space: Target (scalar) :class:`FunctionSpace` object onto
            which data will be interpolated.
        :arg to_latlon: Python function that converts local mesh coordinates to
            latitude and longitude: 'lat, lon = to_latlon(x, y)'
        :arg grid_path: File path where the NCOM model grid files
            ('model_lat.nc', 'model_lon.nc', 'model_zm.nc') are located.
        """
        super().__init__(function_space, to_latlon, grid_path)
        # construct local coordinates: evaluate mesh (x, y) at the dofs of
        # the target space via a borrowed work function.
        xyz = SpatialCoordinate(self.function_space.mesh())
        tmp_func = self.function_space.get_work_function()
        xy_array = np.zeros((tmp_func.dat.data_with_halos.shape[0], 2))
        for i in range(2):
            tmp_func.interpolate(xyz[i])
            xy_array[:, i] = tmp_func.dat.data_with_halos[:]
        self.function_space.restore_work_function(tmp_func)
        # Columns: (lat, lon) per dof -- note the attribute name is shared
        # with the 3D variant so the base-class mapping code works for both.
        self.latlonz_array = np.zeros_like(xy_array)
        lat, lon = to_latlon(xy_array[:, 0], xy_array[:, 1], positive_lon=True)
        self.latlonz_array[:, 0] = lat
        self.latlonz_array[:, 1] = lon
    def _create_interpolator(self, ncfile):
        """
        Create a compact interpolator by finding the minimal necessary support
        """
        lon_subset, lat_subset, x_ind, y_ind, vals = self._create_2d_mapping(ncfile)
        grid_lat = lat_subset
        grid_lon = lon_subset
        if np.ma.isMaskedArray(grid_lat):
            grid_lat = grid_lat.filled(0.0)
        if np.ma.isMaskedArray(grid_lon):
            grid_lon = grid_lon.filled(0.0)
        grid_latlon = np.vstack((grid_lat, grid_lon)).T
        # building the 2D interpolator (much cheaper than the 3D case)
        self.interpolator = interpolation.GridInterpolator(
            grid_latlon, self.latlonz_array,
            normalize=False, fill_mode='nearest', dont_raise=True
        )
        self._initialized = True
    def interpolate(self, nc_filename, variable_list, itime):
        """
        Calls the interpolator object

        :arg nc_filename: NetCDF file containing the source fields.
        :arg variable_list: names of the variables to interpolate.
        :arg itime: time index (currently unused by this implementation).
        :returns: list of interpolated arrays, one per variable.
        """
        with netCDF4.Dataset(nc_filename, 'r') as ncfile:
            # Lazy construction on first use.
            if not self._initialized:
                self._create_interpolator(ncfile)
            output = []
            for var in variable_list:
                assert var in ncfile.variables
                # Restrict to the precomputed support before interpolating.
                grid_data = ncfile[var][:][self.ind_lat, self.ind_lon]
                data = self.interpolator(grid_data)
                output.append(data)
        return output
class NCOMInterpolator(object):
    """
    Interpolates NCOM model data on 3D fields.

    .. note::
        The following NCOM output files must be present:
        ./forcings/ncom/model_h.nc
        ./forcings/ncom/model_lat.nc
        ./forcings/ncom/model_ang.nc
        ./forcings/ncom/model_lon.nc
        ./forcings/ncom/model_zm.nc
        ./forcings/ncom/2006/s3d/s3d.glb8_2f_2006041900.nc
        ./forcings/ncom/2006/s3d/s3d.glb8_2f_2006042000.nc
        ./forcings/ncom/2006/t3d/t3d.glb8_2f_2006041900.nc
        ./forcings/ncom/2006/t3d/t3d.glb8_2f_2006042000.nc
        ./forcings/ncom/2006/u3d/u3d.glb8_2f_2006041900.nc
        ./forcings/ncom/2006/u3d/u3d.glb8_2f_2006042000.nc
        ./forcings/ncom/2006/v3d/v3d.glb8_2f_2006041900.nc
        ./forcings/ncom/2006/v3d/v3d.glb8_2f_2006042000.nc
        ./forcings/ncom/2006/ssh/ssh.glb8_2f_2006041900.nc
        ./forcings/ncom/2006/ssh/ssh.glb8_2f_2006042000.nc
    """
    def __init__(self, function_space_2d, function_space_3d, fields, field_names, field_fnstr,
                 to_latlon, basedir,
                 file_pattern, init_date, target_coordsys, verbose=False):
        """
        :arg function_space_2d: Target (scalar) :class:`FunctionSpace` object onto
            which 2D data will be interpolated.
        :arg function_space_3d: Target (scalar) :class:`FunctionSpace` object onto
            which 3D data will be interpolated.
        :arg fields: list of :class:`Function` objects where data will be
            stored.
        :arg field_names: List of netCDF variable names for the fields. E.g.
            ['Salinity', 'Temperature'].
        :arg field_fnstr: List of variables in netCDF file names. E.g.
            ['s3d', 't3d'].
        :arg to_latlon: Python function that converts local mesh coordinates to
            latitude and longitude: 'lat, lon = to_latlon(x, y)'
        :arg basedir: Root dir where NCOM files are stored.
            E.g. '/forcings/ncom'.
        :arg file_pattern: A file name pattern for reading the NCOM output
            files (excluding the basedir). E.g.
            {year:04d}/{fieldstr:}/{fieldstr:}.glb8_2f_{year:04d}{month:02d}{day:02d}00.nc'.
        :arg init_date: A :class:`datetime` object that indicates the start
            date/time of the Thetis simulation. Must contain time zone. E.g.
            'datetime(2006, 5, 1, tzinfo=pytz.utc)'
        :arg target_coordsys: coordinate system in which the model grid is
            defined. This is used to rotate vectors to local coordinates.
        :kwarg bool verbose: Se True to print debug information.
        """
        self.function_space_2d = function_space_2d
        self.function_space_3d = function_space_3d
        # every target field must live on one of the two supported spaces
        for f in fields:
            assert f.function_space() in [self.function_space_2d, self.function_space_3d], 'field \'{:}\' does not belong to given function space.'.format(f.name())
        assert len(fields) == len(field_names)
        assert len(fields) == len(field_fnstr)
        self.field_names = field_names
        self.fields = dict(zip(self.field_names, fields))

        # construct interpolators
        self.grid_interpolator_2d = SpatialInterpolatorNCOM2d(self.function_space_2d, to_latlon, basedir)
        self.grid_interpolator_3d = SpatialInterpolatorNCOM3d(self.function_space_3d, to_latlon, basedir)
        # each field is in different file
        # construct time search and interp objects separately for each
        self.time_interpolator = {}
        for ncvarname, fnstr in zip(field_names, field_fnstr):
            # 'ssh' is the only 2D field; all others are interpolated in 3D
            gi = self.grid_interpolator_2d if fnstr == 'ssh' else self.grid_interpolator_3d
            r = interpolation.NetCDFSpatialInterpolator(gi, [ncvarname])
            pat = file_pattern.replace('{fieldstr:}', fnstr)
            pat = os.path.join(basedir, pat)
            ts = interpolation.DailyFileTimeSearch(pat, init_date, verbose=verbose)
            ti = interpolation.LinearTimeInterpolator(ts, r)
            self.time_interpolator[ncvarname] = ti
        # construct velocity rotation object; only needed when both velocity
        # components are requested, since rotation mixes the two
        self.rotate_velocity = ('U_Velocity' in field_names
                                and 'V_Velocity' in field_names)
        self.scalar_field_names = list(self.field_names)
        if self.rotate_velocity:
            self.scalar_field_names.remove('U_Velocity')
            self.scalar_field_names.remove('V_Velocity')
            lat = self.grid_interpolator_3d.latlonz_array[:, 0]
            lon = self.grid_interpolator_3d.latlonz_array[:, 1]
            self.vect_rotator = coordsys.VectorCoordSysRotation(
                coordsys.LL_WGS84, target_coordsys, lon, lat)

    def set_fields(self, time):
        """
        Evaluates forcing fields at the given time
        """
        if self.rotate_velocity:
            # water_u (meter/sec) = Eastward Water Velocity
            # water_v (meter/sec) = Northward Water Velocity
            # rotate the east/north components into the target coordinate system
            lon_vel = self.time_interpolator['U_Velocity'](time)[0]
            lat_vel = self.time_interpolator['V_Velocity'](time)[0]
            u, v = self.vect_rotator(lon_vel, lat_vel)
            self.fields['U_Velocity'].dat.data_with_halos[:] = u
            self.fields['V_Velocity'].dat.data_with_halos[:] = v

        # scalar fields are copied straight into the target functions
        for fname in self.scalar_field_names:
            vals = self.time_interpolator[fname](time)[0]
            self.fields[fname].dat.data_with_halos[:] = vals
class SpatialInterpolatorROMS3d(interpolation.SpatialInterpolator):
    """
    Abstract spatial interpolator class that can interpolate onto a Function
    """
    def __init__(self, function_space, to_latlon):
        """
        :arg function_space: target Firedrake FunctionSpace
        :arg to_latlon: Python function that converts local mesh coordinates to
            latitude and longitude: 'lat, lon = to_latlon(x, y)'
        """
        self.function_space = function_space

        # construct local coordinates: evaluate mesh x, y, z at every dof
        xyz = SpatialCoordinate(self.function_space.mesh())
        tmp_func = self.function_space.get_work_function()
        xyz_array = np.zeros((tmp_func.dat.data_with_halos.shape[0], 3))
        for i in range(3):
            tmp_func.interpolate(xyz[i])
            xyz_array[:, i] = tmp_func.dat.data_with_halos[:]
        self.function_space.restore_work_function(tmp_func)

        # target points as (lat, lon, z) rows; z is kept in mesh coordinates
        self.latlonz_array = np.zeros_like(xyz_array)
        lat, lon = to_latlon(xyz_array[:, 0], xyz_array[:, 1])
        self.latlonz_array[:, 0] = lat
        self.latlonz_array[:, 1] = lon
        self.latlonz_array[:, 2] = xyz_array[:, 2]

        self._initialized = False

    def _get_subset_nodes(self, grid_x, grid_y, target_x, target_y):
        """
        Returns grid nodes that are necessary for interpolating onto target_x,y
        """
        orig_shape = grid_x.shape
        grid_xy = np.array((grid_x.ravel(), grid_y.ravel())).T
        target_xy = np.array((target_x.ravel(), target_y.ravel())).T
        # find the Delaunay simplices that contain the target points; only
        # their vertices are needed to interpolate onto the targets
        tri = qhull.Delaunay(grid_xy)
        simplex = tri.find_simplex(target_xy)
        vertices = np.take(tri.simplices, simplex, axis=0)
        nodes = np.unique(vertices.ravel())
        nodes_x, nodes_y = np.unravel_index(nodes, orig_shape)
        return nodes, nodes_x, nodes_y

    def _compute_roms_z_coord(self, ncfile, constant_zeta=None):
        """
        Compute z coordinates of the ROMS terrain-following vertical grid.

        :arg ncfile: open netCDF file with 'zeta', 'h', 'Cs_w', 's_w', 'hc'
        :kwarg constant_zeta: if given, use this constant elevation instead of
            the 'zeta' field from the file
        """
        zeta = ncfile['zeta'][0, :, :]
        bath = ncfile['h'][:]
        # NOTE compute z coordinates for full levels (w)
        cs = ncfile['Cs_w'][:]
        s = ncfile['s_w'][:]
        hc = ncfile['hc'][:]
        # ROMS transformation ver. 2:
        # z(x, y, sigma, t) = zeta(x, y, t) + (zeta(x, y, t) + h(x, y))*S(x, y, sigma)
        # restrict to the subset support and remove land-masked points
        zeta = zeta[self.ind_lat, self.ind_lon][self.mask].filled(0.0)
        bath = bath[self.ind_lat, self.ind_lon][self.mask]
        if constant_zeta:
            zeta = np.ones_like(bath)*constant_zeta
        ss = (hc*s[:, np.newaxis] + bath[np.newaxis, :]*cs[:, np.newaxis])/(hc + bath[np.newaxis, :])
        grid_z_w = zeta[np.newaxis, :]*(1 + ss) + bath[np.newaxis, :]*ss
        # cell-center z values as midpoints of the w levels; end levels are
        # extended to the surface/bottom w values
        grid_z = 0.5*(grid_z_w[1:, :] + grid_z_w[:-1, :])
        grid_z[0, :] = grid_z_w[0, :]
        grid_z[-1, :] = grid_z_w[-1, :]
        return grid_z

    def _create_interpolator(self, ncfile):
        """
        Create compact interpolator by finding the minimal necessary support
        """
        lat = ncfile['lat_rho'][:]
        lon = ncfile['lon_rho'][:]
        self.mask = ncfile['mask_rho'][:].astype(bool)
        self.nodes, self.ind_lat, self.ind_lon = self._get_subset_nodes(lat, lon, self.latlonz_array[:, 0], self.latlonz_array[:, 1])
        lat_subset = lat[self.ind_lat, self.ind_lon]
        lon_subset = lon[self.ind_lat, self.ind_lon]
        self.mask = self.mask[self.ind_lat, self.ind_lon]

        # COMPUTE z coords for constant elevation=0.1
        grid_z = self._compute_roms_z_coord(ncfile, constant_zeta=0.1)

        # omit land mask
        lat_subset = lat_subset[self.mask]
        lon_subset = lon_subset[self.mask]

        nz = grid_z.shape[0]

        # data shape is [nz, neta, nxi]; replicate lat/lon over all z levels
        grid_lat = np.tile(lat_subset, (nz, 1, 1)).ravel()
        grid_lon = np.tile(lon_subset, (nz, 1, 1)).ravel()
        grid_z = grid_z.ravel()
        # fill remaining masked values so the interpolator sees finite data
        if np.ma.isMaskedArray(grid_lat):
            grid_lat = grid_lat.filled(0.0)
        if np.ma.isMaskedArray(grid_lon):
            grid_lon = grid_lon.filled(0.0)
        if np.ma.isMaskedArray(grid_z):
            grid_z = grid_z.filled(0.0)
        grid_latlonz = np.vstack((grid_lat, grid_lon, grid_z)).T

        # building 3D interpolator, this can take a long time (minutes)
        print_output('Constructing 3D GridInterpolator...')
        self.interpolator = interpolation.GridInterpolator(
            grid_latlonz, self.latlonz_array, normalize=True,
            fill_mode='nearest'
        )
        print_output('done.')

        self._initialized = True

    def interpolate(self, nc_filename, variable_list, itime):
        """
        Calls the interpolator object
        """
        with netCDF4.Dataset(nc_filename, 'r') as ncfile:
            if not self._initialized:
                self._create_interpolator(ncfile)
            output = []
            for var in variable_list:
                assert var in ncfile.variables
                # restrict to subset support, drop masked points, flatten
                grid_data = ncfile[var][itime, :, :, :][:, self.ind_lat, self.ind_lon][:, self.mask].filled(np.nan).ravel()
                data = self.interpolator(grid_data)
                output.append(data)
            return output
class LiveOceanInterpolator(object):
    """
    Interpolates LiveOcean (ROMS) model data on 3D fields
    """
    def __init__(self, function_space, fields, field_names, ncfile_pattern, init_date, to_latlon):
        """
        :arg function_space: Target (scalar) :class:`FunctionSpace` object onto
            which data will be interpolated.
        :arg fields: list of :class:`Function` objects where data will be
            stored, one per entry in *field_names*.
        :arg field_names: List of netCDF variable names for the fields.
        :arg ncfile_pattern: File name pattern of the LiveOcean netCDF output
            files, passed to :class:`NetCDFTimeSearch`.
        :arg init_date: A :class:`datetime` object that indicates the start
            date/time of the simulation. Must contain time zone.
        :arg to_latlon: Python function that converts local mesh coordinates to
            latitude and longitude: 'lat, lon = to_latlon(x, y)'
        """
        self.function_space = function_space
        for f in fields:
            assert f.function_space() == self.function_space, 'field \'{:}\' does not belong to given function space {:}.'.format(f.name(), self.function_space.name)
        assert len(fields) == len(field_names)
        self.fields = fields
        self.field_names = field_names

        # construct interpolators
        self.grid_interpolator = SpatialInterpolatorROMS3d(self.function_space, to_latlon)
        self.reader = interpolation.NetCDFSpatialInterpolator(self.grid_interpolator, field_names)
        self.timesearch_obj = interpolation.NetCDFTimeSearch(ncfile_pattern, init_date, interpolation.NetCDFTimeParser, time_variable_name='ocean_time', verbose=False)
        self.time_interpolator = interpolation.LinearTimeInterpolator(self.timesearch_obj, self.reader)

    def set_fields(self, time):
        """
        Evaluates forcing fields at the given time
        """
        vals = self.time_interpolator(time)
        # one interpolated data array per field, in the same order as
        # self.fields / self.field_names
        for field, data in zip(self.fields, vals):
            field.dat.data_with_halos[:] = data
class TidalBoundaryForcing(metaclass=ABCMeta):
    """Base class for tidal boundary interpolators.

    Subclasses must define the class attributes/properties below and
    implement :meth:`_create_readers`.

    NOTE(review): the original declared ``__metaclass__ = ABCMeta``, which is
    the Python 2 spelling and is a silent no-op in Python 3 (this module uses
    Python-3-only ``super()`` elsewhere), so abstractness was never enforced.
    Declaring the metaclass with the Python 3 class-keyword syntax restores
    the intended behavior; both concrete subclasses already override all
    abstract names.
    """
    @abstractproperty
    def coord_layout():
        """
        Data layout in the netcdf files.

        Either 'lon,lat' or 'lat,lon'.
        """
        return 'lon,lat'

    @abstractproperty
    def compute_velocity():
        """If True, compute tidal currents as well."""
        return False

    @abstractproperty
    def elev_nc_file():
        """Tidal elevation NetCDF file name."""
        return None

    @abstractproperty
    def uv_nc_file():
        """Tidal velocity NetCDF file name."""
        return None

    @abstractproperty
    def grid_nc_file():
        """Grid NetCDF file name."""
        return None

    def __init__(self, elev_field, init_date, to_latlon, target_coordsys,
                 uv_field=None, constituents=None, boundary_ids=None,
                 data_dir=None):
        """
        :arg elev_field: Function where tidal elevation will be interpolated.
        :arg init_date: Datetime object defining the simulation init time.
        :arg to_latlon: Python function that converts local mesh coordinates to
            latitude and longitude: 'lat, lon = to_latlon(x, y)'
        :arg target_coordsys: coordinate system in which the model grid is
            defined. This is used to rotate vectors to local coordinates.
        :kwarg uv_field: Function where tidal transport will be interpolated.
        :kwarg constituents: list of tidal constituents, e.g. ['M2', 'K1']
        :kwarg boundary_ids: list of boundary_ids where tidal data will be
            evaluated. If not defined, tides will be in evaluated in the entire
            domain.
        :kwarg data_dir: path to directory where tidal model netCDF files are
            located.
        """
        assert init_date.tzinfo is not None, 'init_date must have time zone information'
        if constituents is None:
            constituents = ['Q1', 'O1', 'P1', 'K1', 'N2', 'M2', 'S2', 'K2']

        self.data_dir = data_dir if data_dir is not None else ''

        # velocity interpolation is only done when the model supports it AND
        # the caller provided a target function
        if not self.compute_velocity and uv_field is not None:
            warning('{:}: uv_field is defined but velocity computation is not supported. uv_field will be ignored.'.format(__class__.__name__))
        self.compute_velocity = self.compute_velocity and uv_field is not None

        # determine nodes at the boundary
        self.elev_field = elev_field
        self.uv_field = uv_field
        fs = elev_field.function_space()
        if boundary_ids is None:
            # interpolate in the whole domain
            self.nodes = np.arange(self.elev_field.dat.data_with_halos.shape[0])
        else:
            bc = DirichletBC(fs, 0., boundary_ids, method='geometric')
            self.nodes = bc.nodes
        # this MPI rank may own no boundary nodes at all
        self._empty_set = self.nodes.size == 0

        xy = SpatialCoordinate(fs.mesh())
        fsx = Function(fs).interpolate(xy[0]).dat.data_ro_with_halos
        fsy = Function(fs).interpolate(xy[1]).dat.data_ro_with_halos
        if not self._empty_set:

            latlon = []
            for node in self.nodes:
                x, y = fsx[node], fsy[node]
                lat, lon = to_latlon(x, y, positive_lon=True)
                latlon.append((lat, lon))
            self.latlon = np.array(latlon)

            # compute bounding box for the netCDF reader
            bounds_lat = [self.latlon[:, 0].min(), self.latlon[:, 0].max()]
            bounds_lon = [self.latlon[:, 1].min(), self.latlon[:, 1].max()]
            if self.coord_layout == 'lon,lat':
                self.ranges = (bounds_lon, bounds_lat)
            else:
                self.ranges = (bounds_lat, bounds_lon)

            self.tide = uptide.Tides(constituents)
            self.tide.set_initial_time(init_date)
            self._create_readers()

            if self.compute_velocity:
                lat = self.latlon[:, 0]
                lon = self.latlon[:, 1]
                self.vect_rotator = coordsys.VectorCoordSysRotation(
                    coordsys.LL_WGS84, target_coordsys, lon, lat)

    @abstractmethod
    def _create_readers(self, ):
        """Create uptide netcdf reader objects."""
        pass

    def set_tidal_field(self, t):
        """Evaluate tidal elevation (and currents) at time *t* and write the
        values into the target function(s) at the boundary nodes."""
        if not self._empty_set:
            self.tnci.set_time(t)
            if self.compute_velocity:
                self.tnciu.set_time(t)
                self.tnciv.set_time(t)
        elev_data = self.elev_field.dat.data_with_halos
        if self.compute_velocity:
            uv_data = self.uv_field.dat.data_with_halos
        for i, node in enumerate(self.nodes):
            lat, lon = self.latlon[i, :]
            point = (lon, lat) if self.coord_layout == 'lon,lat' else (lat, lon)
            try:
                elev = self.tnci.get_val(point, allow_extrapolation=True)
                elev_data[node] = elev
            except uptide.netcdf_reader.CoordinateError:
                # point outside the tidal model domain: fall back to zero
                elev_data[node] = 0.
            if self.compute_velocity:
                try:
                    lon_vel = self.tnciu.get_val(point, allow_extrapolation=True)
                    lat_vel = self.tnciv.get_val(point, allow_extrapolation=True)
                    # rotate east/north components to local coordinate system
                    u, v = self.vect_rotator(lon_vel, lat_vel, i_node=i)
                    uv_data[node, :] = (u, v)
                except uptide.netcdf_reader.CoordinateError:
                    uv_data[node, :] = (0, 0)
class TPXOTidalBoundaryForcing(TidalBoundaryForcing):
    """Tidal boundary interpolator for TPXO tidal model."""
    elev_nc_file = 'h_tpxo9.v1.nc'
    uv_nc_file = 'u_tpxo9.v1.nc'
    grid_nc_file = 'grid_tpxo9.nc'
    coord_layout = 'lon,lat'
    compute_velocity = True

    def _create_readers(self, ):
        """Create uptide netcdf reader objects."""
        msg = 'File {:} not found, download it from \nftp://ftp.oce.orst.edu/dist/tides/Global/tpxo9_netcdf.tar.gz'

        def locate(fname):
            # resolve a data file under data_dir, failing early with a
            # download hint if it is missing
            path = os.path.join(self.data_dir, fname)
            assert os.path.exists(path), msg.format(path)
            return path

        grid_file = locate(self.grid_nc_file)
        elev_file = locate(self.elev_nc_file)
        self.tnci = uptide.tidal_netcdf.OTPSncTidalInterpolator(
            self.tide, grid_file, elev_file, ranges=self.ranges)
        if self.uv_field is not None:
            uv_file = locate(self.uv_nc_file)
            self.tnciu = uptide.tidal_netcdf.OTPSncTidalComponentInterpolator(
                self.tide, grid_file, uv_file, 'u', 'u', ranges=self.ranges)
            self.tnciv = uptide.tidal_netcdf.OTPSncTidalComponentInterpolator(
                self.tide, grid_file, uv_file, 'v', 'v', ranges=self.ranges)
class FES2004TidalBoundaryForcing(TidalBoundaryForcing):
    """Tidal boundary interpolator for FES2004 tidal model."""
    elev_nc_file = 'tide.fes2004.nc'
    uv_nc_file = None
    grid_nc_file = None
    coord_layout = 'lat,lon'
    compute_velocity = False

    def _create_readers(self, ):
        """Create uptide netcdf reader objects."""
        elev_file = os.path.join(self.data_dir, self.elev_nc_file)
        # fail early with a download hint if the data file is absent
        assert os.path.exists(elev_file), (
            'File {:} not found, download it from '
            '\nftp://ftp.legos.obs-mip.fr/pub/soa/maree/tide_model/global_solution/fes2004/'.format(elev_file)
        )
        self.tnci = uptide.tidal_netcdf.FESTidalInterpolator(
            self.tide, elev_file, ranges=self.ranges)
|
tkarna/cofs
|
thetis/forcing.py
|
Python
|
mit
| 35,873
|
[
"NetCDF"
] |
d36e1db5f7cd51cc14478b1dc2dd54d02f2621f7a0892ad5588ec9194533c16a
|
""" StorageOccupancy records the Storage Elements occupancy over time
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
from DIRAC.AccountingSystem.Client.Types.BaseAccountingType import BaseAccountingType
class StorageOccupancy(BaseAccountingType):
    """StorageOccupancy as extension of BaseAccountingType.

    It is filled by the RSS Command FreeDiskSpace every time the
    command is executed (from Agent CacheFeederAgent)
    """

    def __init__(self):
        """constructor."""
        super(StorageOccupancy, self).__init__()

        # readable time units for the bucket-length table below
        hour = 3600
        day = 86400 * 1

        self.definitionKeyFields = [
            ("Site", "VARCHAR(64)"),
            ("Endpoint", "VARCHAR(255)"),
            ("StorageElement", "VARCHAR(64)"),
            ("SpaceType", "VARCHAR(64)"),
        ]  # (Total, Free, Used)

        self.definitionAccountingFields = [("Space", "BIGINT UNSIGNED")]

        # (record age threshold, bucket granularity) pairs
        self.bucketsLength = [
            (2 * day, hour),  # <2d = 1h
            (10 * day, 6 * hour),  # <10d = 6h
            (40 * day, 12 * hour),  # <40d = 12h
            (6 * 30 * day, 2 * day),  # <6m = 2d
            (600 * day, 7 * day),  # >6m = 1w
        ]

        self.checkType()
|
ic-hep/DIRAC
|
src/DIRAC/AccountingSystem/Client/Types/StorageOccupancy.py
|
Python
|
gpl-3.0
| 1,252
|
[
"DIRAC"
] |
675a560c40e6adca86960c96e69e16e6c620edde8ce75a26b0aabffac5bfb537
|
##
# Copyright 2015-2018 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for Molpro, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import os
import shutil
import re
from distutils.version import LooseVersion
from easybuild.easyblocks.generic.binary import Binary
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import build_option
from easybuild.tools.filetools import apply_regex_substitutions, mkdir, read_file
from easybuild.tools.run import run_cmd, run_cmd_qa
class EB_Molpro(ConfigureMake, Binary):
    """Support for building and installing Molpro."""

    @staticmethod
    def extra_options():
        """Define custom easyconfig parameters for Molpro."""
        # Combine extra variables from Binary and ConfigureMake easyblocks as
        # well as those needed for Molpro specifically
        extra_vars = Binary.extra_options()
        extra_vars = ConfigureMake.extra_options(extra_vars)
        extra_vars.update({
            'precompiled_binaries': [False, "Are we installing precompiled binaries?", CUSTOM],
        })
        return EasyBlock.extra_options(extra_vars)

    def __init__(self, *args, **kwargs):
        """Easyblock constructor, initialize class variables specific to Molpro and check on license token."""
        super(EB_Molpro, self).__init__(*args, **kwargs)

        self.full_prefix = ''  # no None, to make easyblock compatible with --module-only
        self.orig_launcher = None
        self.cleanup_token_symlink = False
        # default location where Molpro expects the license token
        self.license_token = os.path.join(os.path.expanduser('~'), '.molpro', 'token')

    def extract_step(self):
        """Extract Molpro source files, or just copy in case of binary install."""
        if self.cfg['precompiled_binaries']:
            Binary.extract_step(self)
        else:
            ConfigureMake.extract_step(self)

    def configure_step(self):
        """Custom configuration procedure for Molpro: use 'configure -batch'."""
        if not os.path.isfile(self.license_token):
            if self.cfg['license_file'] is not None and os.path.isfile(self.cfg['license_file']):
                # put symlink in place to specified license file in $HOME/.molpro/token
                # other approaches (like defining $MOLPRO_KEY) don't seem to work
                self.cleanup_token_symlink = True
                mkdir(os.path.dirname(self.license_token))
                try:
                    os.symlink(self.cfg['license_file'], self.license_token)
                    self.log.debug("Symlinked %s to %s", self.cfg['license_file'], self.license_token)
                except OSError, err:
                    raise EasyBuildError("Failed to create symlink for license token at %s", self.license_token)
            else:
                self.log.warning("No licence token found at either {0} or via 'license_file'".format(self.license_token))

        # Only do the rest of the configuration if we're building from source
        if not self.cfg['precompiled_binaries']:
            # installation prefix
            self.cfg.update('configopts', "-prefix %s" % self.installdir)

            # compilers & MPI
            if self.toolchain.options.get('usempi', None):
                self.cfg.update('configopts', "-%s -%s" % (os.environ['CC_SEQ'], os.environ['F90_SEQ']))
                if 'MPI_INC_DIR' in os.environ:
                    self.cfg.update('configopts', "-mpp -mppbase %s" % os.environ['MPI_INC_DIR'])
                else:
                    raise EasyBuildError("$MPI_INC_DIR not defined")
            else:
                self.cfg.update('configopts', "-%s -%s" % (os.environ['CC'], os.environ['F90']))

            # BLAS/LAPACK
            if 'BLAS_LIB_DIR' in os.environ:
                self.cfg.update('configopts', "-blas -blaspath %s" % os.environ['BLAS_LIB_DIR'])
            else:
                raise EasyBuildError("$BLAS_LIB_DIR not defined")

            if 'LAPACK_LIB_DIR' in os.environ:
                self.cfg.update('configopts', "-lapack -lapackpath %s" % os.environ['LAPACK_LIB_DIR'])
            else:
                raise EasyBuildError("$LAPACK_LIB_DIR not defined")

            # 32 vs 64 bit
            if self.toolchain.options.get('32bit', None):
                self.cfg.update('configopts', '-i4')
            else:
                self.cfg.update('configopts', '-i8')

            run_cmd("./configure -batch %s" % self.cfg['configopts'])

            cfgfile = os.path.join(self.cfg['start_dir'], 'CONFIG')
            cfgtxt = read_file(cfgfile)

            # determine original LAUNCHER value
            launcher_regex = re.compile('^LAUNCHER=(.*)$', re.M)
            res = launcher_regex.search(cfgtxt)
            if res:
                self.orig_launcher = res.group(1)
                self.log.debug("Found original value for LAUNCHER: %s", self.orig_launcher)
            else:
                raise EasyBuildError("Failed to determine LAUNCHER value")

            # determine full installation prefix
            prefix_regex = re.compile('^PREFIX=(.*)$', re.M)
            res = prefix_regex.search(cfgtxt)
            if res:
                self.full_prefix = res.group(1)
                self.log.debug("Found full installation prefix: %s", self.full_prefix)
            else:
                raise EasyBuildError("Failed to determine full installation prefix")

            # determine MPI launcher command that can be used during build/test
            # obtain command with specific number of cores (required by mpi_cmd_for), then replace that number with '%n'
            launcher = self.toolchain.mpi_cmd_for('%x', self.cfg['parallel'])
            launcher = launcher.replace(' %s' % self.cfg['parallel'], ' %n')

            # patch CONFIG file to change LAUNCHER definition, in order to avoid having to start mpd
            apply_regex_substitutions(cfgfile, [(r"^(LAUNCHER\s*=\s*).*$", r"\1 %s" % launcher)])

            # reread CONFIG and log contents
            cfgtxt = read_file(cfgfile)
            self.log.info("Contents of CONFIG file:\n%s", cfgtxt)

    def build_step(self):
        """Custom build procedure for Molpro, unless it is a binary install."""
        if not self.cfg['precompiled_binaries']:
            super(EB_Molpro, self).build_step()

    def test_step(self):
        """
        Custom test procedure for Molpro.

        Run 'make quicktest, make test', but only for source install and if license is available.
        """
        # Only bother to check if the licence token is available
        if os.path.isfile(self.license_token) and not self.cfg['precompiled_binaries']:
            # check 'main routes' only
            run_cmd("make quicktest")

            if build_option('mpi_tests'):
                # extensive test
                run_cmd("make MOLPRO_OPTIONS='-n%s' test" % self.cfg['parallel'])
            else:
                self.log.info("Skipping extensive testing of Molpro since MPI testing is disabled")

    def install_step(self):
        """
        Custom install procedure for Molpro.

        For source install:
        * put license token in place in $installdir/.token
        * run 'make tuning'
        * install with 'make install'
        For binary install:
        * run interactive installer
        """
        if self.cfg['precompiled_binaries']:
            """Build by running the command with the inputfiles"""
            try:
                os.chdir(self.cfg['start_dir'])
            except OSError, err:
                raise EasyBuildError("Failed to move (back) to %s: %s", self.cfg['start_dir'], err)

            for src in self.src:
                if LooseVersion(self.version) >= LooseVersion('2015'):
                    # install dir must be non-existent
                    shutil.rmtree(self.installdir)
                    cmd = "./{0} -batch -prefix {1}".format(src['name'], self.installdir)
                else:
                    cmd = "./{0} -batch -instbin {1}/bin -instlib {1}/lib".format(src['name'], self.installdir)

                # questions whose text must match exactly as asked
                qa = {
                    "Please give your username for accessing molpro\n": '',
                    "Please give your password for accessing molpro\n": '',
                }
                # questions whose text may be matched as a regular expression
                stdqa = {
                    r"Enter installation directory for executable files \[.*\]\n": os.path.join(self.installdir, 'bin'),
                    r"Enter installation directory for library files \[.*\]\n": os.path.join(self.installdir, 'lib'),
                    r"directory .* does not exist, try to create [Y]/n\n": '',
                }
                run_cmd_qa(cmd, qa=qa, std_qa=stdqa, log_all=True, simple=True)
        else:
            if os.path.isfile(self.license_token):
                run_cmd("make tuning")

            super(EB_Molpro, self).install_step()

            # put original LAUNCHER definition back in place in bin/molpro that got installed,
            # since the value used during installation point to temporary files
            molpro_path = os.path.join(self.full_prefix, 'bin', 'molpro')
            apply_regex_substitutions(molpro_path, [(r"^(LAUNCHER\s*=\s*).*$", r"\1 %s" % self.orig_launcher)])

            if self.cleanup_token_symlink:
                try:
                    os.remove(self.license_token)
                    self.log.debug("Symlink to license token %s removed", self.license_token)
                except OSError, err:
                    raise EasyBuildError("Failed to remove %s: %s", self.license_token, err)

    def make_module_req_guess(self):
        """Customize $PATH guesses for Molpro module."""
        guesses = super(EB_Molpro, self).make_module_req_guess()
        # binaries live under the versioned prefix subdirectory of installdir
        guesses.update({
            'PATH': [os.path.join(os.path.basename(self.full_prefix), x) for x in ['bin', 'utilities']],
        })
        return guesses

    def sanity_check_step(self):
        """Custom sanity check for Molpro."""
        prefix_subdir = os.path.basename(self.full_prefix)
        files_to_check = ['bin/molpro']
        dirs_to_check = []
        # source installs (and recent binary installs) ship extra content
        if LooseVersion(self.version) >= LooseVersion('2015') or not self.cfg['precompiled_binaries']:
            files_to_check.extend(['bin/molpro.exe'])
            dirs_to_check.extend(['doc', 'examples', 'utilities'])
        custom_paths = {
            'files': [os.path.join(prefix_subdir, x) for x in files_to_check],
            'dirs': [os.path.join(prefix_subdir, x) for x in dirs_to_check],
        }
        super(EB_Molpro, self).sanity_check_step(custom_paths=custom_paths)
|
bartoldeman/easybuild-easyblocks
|
easybuild/easyblocks/m/molpro.py
|
Python
|
gpl-2.0
| 11,928
|
[
"Molpro"
] |
e2155c555f2a01fec33d8e74838b63ac3d23ac0900b08a110c82df960ef23c88
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.