text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
from __future__ import print_function, absolute_import
import os
import sys
import imp
import json
import string
import shutil
import subprocess
import tempfile
from distutils.dep_util import newer_group
from distutils.core import Extension
from distutils.errors import DistutilsExecError
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler, get_config_vars
from distutils.command.build_ext import build_ext as _build_ext
def find_packages():
    """Find all of mdtraj's python packages.
    Adapted from IPython's setupbase.py. Copyright IPython
    contributors, licensed under the BSD license.
    """
    packages = ['mdtraj.scripts']
    for dirpath, _, filenames in os.walk('MDTraj'):
        # Only directories that contain an __init__.py are packages.
        if '__init__.py' not in filenames:
            continue
        dotted = dirpath.replace(os.path.sep, '.')
        packages.append(dotted.replace('MDTraj', 'mdtraj'))
    return packages
def check_dependencies(dependencies):
    """Print a warning to stderr for each missing dependency.

    Parameters
    ----------
    dependencies : iterable of tuple
        Each entry is either ``(import_name,)`` or
        ``(import_name, pkg_name)``, where ``pkg_name`` is the name to
        show in the conda/pip install hint.

    Raises
    ------
    ValueError
        If an entry has a length other than 1 or 2.
    """
    # ``imp`` is deprecated since Python 3.4 and removed in 3.12;
    # importlib.util.find_spec is the supported replacement and, like
    # imp.find_module, locates a module without importing it.
    import importlib.util

    def module_exists(dep):
        try:
            return importlib.util.find_spec(dep) is not None
        except (ImportError, AttributeError, ValueError):
            return False

    for dep in dependencies:
        if len(dep) == 1:
            import_name, pkg_name = dep[0], dep[0]
        elif len(dep) == 2:
            import_name, pkg_name = dep
        else:
            raise ValueError(dep)

        if not module_exists(import_name):
            lines = [
                '-' * 50,
                'Warning: This package requires %r. Try' % import_name,
                '',
                '  $ conda install %s' % pkg_name,
                '',
                'or:',
                '',
                '  $ pip install %s' % pkg_name,
                '-' * 50,
            ]
            print(os.linesep.join(lines), file=sys.stderr)
################################################################################
# Detection of compiler capabilities
################################################################################
class CompilerDetection(object):
    """Probe the active C compiler for OpenMP / SSE3 / SSE4.1 support.

    Each probe compiles a tiny test program in a scratch directory via a
    subprocess, so failed compiles never pollute this process's output.
    """
    # Necessary for OSX. See https://github.com/mdtraj/mdtraj/issues/576
    # The problem is that distutils.sysconfig.customize_compiler()
    # is necessary to properly invoke the correct compiler for this class
    # (otherwise the CC env variable isn't respected). Unfortunately,
    # distutils.sysconfig.customize_compiler() DIES on OSX unless some
    # appropriate initialization routines have been called. This line
    # has a side effect of calling those initialization routines, and is
    # therefore necessary for OSX, even though we don't use the result.
    _DONT_REMOVE_ME = get_config_vars()

    def __init__(self, disable_openmp):
        # Build a throwaway compiler object just to learn its type/flags.
        cc = new_compiler()
        customize_compiler(cc)

        self.msvc = cc.compiler_type == 'msvc'
        self._print_compiler_version(cc)

        if disable_openmp:
            self.openmp_enabled = False
        else:
            self.openmp_enabled, openmp_needs_gomp = self._detect_openmp()
        # MSVC is assumed to support SSE3/SSE4.1 without probing.
        self.sse3_enabled = self._detect_sse3() if not self.msvc else True
        self.sse41_enabled = self._detect_sse41() if not self.msvc else True

        self.compiler_args_sse2 = ['-msse2'] if not self.msvc else ['/arch:SSE2']
        self.compiler_args_sse3 = ['-mssse3'] if (self.sse3_enabled and not self.msvc) else []
        self.compiler_args_sse41, self.define_macros_sse41 = [], []
        if self.sse41_enabled:
            self.define_macros_sse41 = [('__SSE4__', 1), ('__SSE4_1__', 1)]
            if not self.msvc:
                self.compiler_args_sse41 = ['-msse4']

        if self.openmp_enabled:
            self.compiler_libraries_openmp = []
            if self.msvc:
                self.compiler_args_openmp = ['/openmp']
            else:
                self.compiler_args_openmp = ['-fopenmp']
                if openmp_needs_gomp:
                    self.compiler_libraries_openmp = ['gomp']
        else:
            self.compiler_libraries_openmp = []
            self.compiler_args_openmp = []

        if self.msvc:
            self.compiler_args_opt = ['/O2']
        else:
            self.compiler_args_opt = ['-O3', '-funroll-loops']
        print()

    def _print_compiler_version(self, cc):
        """Print the compiler's version banner (best effort; never raises)."""
        print("C compiler:")
        try:
            if self.msvc:
                if not cc.initialized:
                    cc.initialize()
                cc.spawn([cc.cc])
            else:
                cc.spawn([cc.compiler[0]] + ['-v'])
        except DistutilsExecError:
            pass

    def hasfunction(self, funcname, include=None, libraries=None, extra_postargs=None):
        """Return True if a one-line C program calling ``funcname`` compiles
        and links with the given include/libraries/extra compiler flags.
        """
        # running in a separate subshell lets us prevent unwanted stdout/stderr
        # part1 embeds the probe parameters as JSON so arbitrary strings
        # survive the round-trip into the generated script.
        part1 = '''
from __future__ import print_function
import os
import json
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler, get_config_vars
FUNCNAME = json.loads('%(funcname)s')
INCLUDE = json.loads('%(include)s')
LIBRARIES = json.loads('%(libraries)s')
EXTRA_POSTARGS = json.loads('%(extra_postargs)s')
''' % {
            'funcname': json.dumps(funcname),
            'include': json.dumps(include),
            'libraries': json.dumps(libraries or []),
            'extra_postargs': json.dumps(extra_postargs)}
        part2 = '''
get_config_vars()  # DON'T REMOVE ME
cc = new_compiler()
customize_compiler(cc)
for library in LIBRARIES:
    cc.add_library(library)
status = 0
try:
    with open('func.c', 'w') as f:
        if INCLUDE is not None:
            f.write('#include %s\\n' % INCLUDE)
        f.write('int main(void) {\\n')
        f.write('    %s;\\n' % FUNCNAME)
        f.write('}\\n')
    objects = cc.compile(['func.c'], output_dir='.',
                         extra_postargs=EXTRA_POSTARGS)
    cc.link_executable(objects, 'a.out')
except Exception as e:
    status = 1
exit(status)
'''
        # Run the generated script inside a temp dir; the exit status tells
        # us whether the probe program compiled and linked.
        tmpdir = tempfile.mkdtemp(prefix='hasfunction-')
        try:
            curdir = os.path.abspath(os.curdir)
            os.chdir(tmpdir)
            with open('script.py', 'w') as f:
                f.write(part1 + part2)
            proc = subprocess.Popen(
                [sys.executable, 'script.py'],
                stderr=subprocess.PIPE, stdout=subprocess.PIPE)
            proc.communicate()
            status = proc.wait()
        finally:
            os.chdir(curdir)
            shutil.rmtree(tmpdir)
        return status == 0

    def _print_support_start(self, feature):
        print('Attempting to autodetect {0:6} support...'.format(feature), end=' ')

    def _print_support_end(self, feature, status):
        if status is True:
            print('Compiler supports {0}'.format(feature))
        else:
            print('Did not detect {0} support'.format(feature))

    def _detect_openmp(self):
        """Probe for OpenMP; returns (has_openmp, needs_libgomp)."""
        self._print_support_start('OpenMP')
        # Pass both GCC-style and MSVC-style flags; the compiler ignores
        # (or rejects) the one it doesn't understand.
        hasopenmp = self.hasfunction('omp_get_num_threads()', extra_postargs=['-fopenmp', '/openmp'])
        needs_gomp = hasopenmp
        if not hasopenmp:
            # Some toolchains need explicit linking against libgomp.
            hasopenmp = self.hasfunction('omp_get_num_threads()', libraries=['gomp'])
            needs_gomp = hasopenmp
        self._print_support_end('OpenMP', hasopenmp)
        return hasopenmp, needs_gomp

    def _detect_sse3(self):
        "Does this compiler support SSE3 intrinsics?"
        self._print_support_start('SSE3')
        result = self.hasfunction('__m128 v; _mm_hadd_ps(v,v)',
                                  include='<pmmintrin.h>',
                                  extra_postargs=['-msse3'])
        self._print_support_end('SSE3', result)
        return result

    def _detect_sse41(self):
        "Does this compiler support SSE4.1 intrinsics?"
        self._print_support_start('SSE4.1')
        result = self.hasfunction( '__m128 v; _mm_round_ps(v,0x00)',
                                  include='<smmintrin.h>',
                                  extra_postargs=['-msse4'])
        self._print_support_end('SSE4.1', result)
        return result
################################################################################
# Writing version control information to the module
################################################################################
def git_version():
    """Return the current git revision (HEAD) as a string.

    Copied from numpy's setup.py. Returns ``'Unknown'`` when the ``git``
    executable cannot be run at all.
    """
    def _minimal_ext_cmd(cmd):
        # Run the command with a stripped-down environment so git's
        # output is not affected by the user's locale settings.
        env = {key: os.environ[key]
               for key in ('SYSTEMROOT', 'PATH')
               if os.environ.get(key) is not None}
        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)
        return proc.communicate()[0]

    try:
        raw = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
        revision = raw.strip().decode('ascii')
    except OSError:
        revision = 'Unknown'
    return revision
def write_version_py(VERSION, ISRELEASED, filename='MDTraj/version.py'):
    """Generate a version module containing version/git metadata.

    Parameters
    ----------
    VERSION : str
        The short version string, e.g. ``'1.2.3'``.
    ISRELEASED : bool
        If False, a ``.dev-<sha>`` suffix is appended to the full version.
    filename : str
        Path of the module to write.
    """
    cnt = """
# THIS FILE IS GENERATED FROM MDTRAJ SETUP.PY
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
if not release:
    version = full_version
"""
    # Adding the git rev number needs to be done inside write_version_py(),
    # otherwise the import of numpy.version messes up the build under Python 3.
    FULLVERSION = VERSION
    if os.path.exists('.git'):
        GIT_REVISION = git_version()
    else:
        GIT_REVISION = 'Unknown'

    if not ISRELEASED:
        FULLVERSION += '.dev-' + GIT_REVISION[:7]

    # Use a context manager instead of the original open()/try/finally
    # so the file handle is closed even if formatting raises.
    with open(filename, 'w') as a:
        a.write(cnt % {'version': VERSION,
                       'full_version': FULLVERSION,
                       'git_revision': GIT_REVISION,
                       'isrelease': str(ISRELEASED)})
class StaticLibrary(Extension):
    """Extension variant that marks a target to be archived as a static
    library (handled by the custom build_ext command) rather than linked
    as a shared extension module.
    """
    def __init__(self, *args, **kwargs):
        # Extract our custom keyword before delegating to distutils,
        # which would reject unknown Extension arguments.
        export = kwargs.pop('export_include', [])
        Extension.__init__(self, *args, **kwargs)
        self.export_include = export
class build_ext(_build_ext):
    """build_ext command that also knows how to build StaticLibrary targets.

    Regular Extension instances are delegated to distutils; StaticLibrary
    instances are compiled and archived with create_static_lib instead of
    being linked into a shared object.
    """

    def build_extension(self, ext):
        # Dispatch on the extension type.
        if isinstance(ext, StaticLibrary):
            self.build_static_extension(ext)
        else:
            _build_ext.build_extension(self, ext)

    def build_static_extension(self, ext):
        from distutils import log
        # Bug fix: DistutilsSetupError was raised below but never imported
        # anywhere in this module, so the error path itself raised NameError.
        from distutils.errors import DistutilsSetupError

        sources = ext.sources
        if sources is None or not isinstance(sources, (list, tuple)):
            raise DistutilsSetupError(
                ("in 'ext_modules' option (extension '%s'), " +
                 "'sources' must be present and must be " +
                 "a list of source filenames") % ext.name)
        sources = list(sources)

        ext_path = self.get_ext_fullpath(ext.name)
        depends = sources + ext.depends
        # Skip the build entirely when no source is newer than the target.
        if not (self.force or newer_group(depends, ext_path, 'newer')):
            log.debug("skipping '%s' extension (up-to-date)", ext.name)
            return
        else:
            log.info("building '%s' extension", ext.name)

        extra_args = ext.extra_compile_args or []
        macros = ext.define_macros[:]
        for undef in ext.undef_macros:
            macros.append((undef,))

        objects = self.compiler.compile(sources,
                                        output_dir=self.build_temp,
                                        macros=macros,
                                        include_dirs=ext.include_dirs,
                                        debug=self.debug,
                                        extra_postargs=extra_args,
                                        depends=ext.depends)
        self._built_objects = objects[:]
        if ext.extra_objects:
            objects.extend(ext.extra_objects)
        extra_args = ext.extra_link_args or []
        language = ext.language or self.compiler.detect_language(sources)

        # Derive the archive name from the target path; strip a 'lib'
        # prefix when the compiler's naming scheme would add its own.
        libname = os.path.splitext(os.path.basename(ext_path))[0]
        output_dir = os.path.dirname(ext_path)
        if (self.compiler.static_lib_format.startswith('lib') and
                libname.startswith('lib')):
            libname = libname[3:]

        if not os.path.exists(output_dir):
            # necessary for windows
            os.makedirs(output_dir)
        self.compiler.create_static_lib(objects,
                                        output_libname=libname,
                                        output_dir=output_dir,
                                        target_lang=language)
        # Copy any exported headers next to the archive.
        for item in ext.export_include:
            shutil.copy(item, output_dir)
|
rmcgibbo/msmbuilder
|
basesetup.py
|
Python
|
lgpl-2.1
| 12,526
|
[
"MDTraj"
] |
7a3af12143de53acd15cbdbd9791ad8fb73386d6423e9fb94753c2c4214bb940
|
# coding: utf-8
from __future__ import unicode_literals
"""
Created on Nov 14, 2012
"""
__author__ = "Anubhav Jain"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Anubhav Jain"
__email__ = "ajain@lbl.gov"
__date__ = "Nov 14, 2012"
import unittest
import os
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class FuncTest(PymatgenTest):
    """Placeholder test case for io_utils; no tests are implemented yet."""
    pass

if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
yanikou19/pymatgen
|
pymatgen/util/tests/test_io_utils.py
|
Python
|
mit
| 555
|
[
"pymatgen"
] |
1465133a51676606c2ee983b0b3e4196ee1ba63a198b6d4181e7fb9d26ec877e
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import MDAnalysis as mda
import pytest
def test_get_auxreader_for_none():
    """Calling get_auxreader_for() with no arguments raises ValueError."""
    with pytest.raises(ValueError, match="Must provide either auxdata or format"):
        mda.auxiliary.core.get_auxreader_for()
def test_get_auxreader_for_wrong_auxdata():
    """An auxdata file with an unrecognised extension raises ValueError."""
    with pytest.raises(ValueError, match="Unknown auxiliary data format for auxdata:"):
        mda.auxiliary.core.get_auxreader_for(auxdata="test.none")
def test_get_auxreader_for_wrong_format():
    """An unrecognised explicit format string raises ValueError."""
    with pytest.raises(ValueError, match="Unknown auxiliary data format"):
        mda.auxiliary.core.get_auxreader_for(format="none")
|
MDAnalysis/mdanalysis
|
testsuite/MDAnalysisTests/auxiliary/test_core.py
|
Python
|
gpl-2.0
| 1,662
|
[
"MDAnalysis"
] |
c4b1535cd479cf10a3499b3c1b7dd6ae15fade88ae98e73136b7099e85417299
|
import itertools
import random
from ..core import Basic, Expr, Integer, Symbol, count_ops
from ..core.compatibility import as_int, is_sequence
from ..core.decorators import call_highest_priority
from ..core.sympify import sympify
from ..functions import cos, sin, sqrt
from ..logic import true
from ..simplify import simplify as _simplify
from ..utilities import filldedent, numbered_symbols
from ..utilities.decorator import doctest_depends_on
from .matrices import MatrixBase, ShapeError, a2idx, classof
def _iszero(x):
"""Returns True if x is zero."""
return x.is_zero
class DenseMatrix(MatrixBase):
    """A dense matrix base class.

    Elements are stored in row-major order in the flat list ``self._mat``;
    element (i, j) lives at index ``i*self.cols + j``.
    """

    is_MatrixExpr = False

    # Priorities used by the Expr dispatch machinery when mixing matrices
    # with other expression types.
    _op_priority = 10.01
    _class_priority = 4

    def __getitem__(self, key):
        """Return portion of self defined by key. If the key involves a slice
        then a list will be returned (if key is a single slice) or a matrix
        (if key was a tuple involving a slice).

        Examples
        ========

        >>> m = Matrix([[1, 2 + I], [3, 4]])

        If the key is a tuple that doesn't involve a slice then that element
        is returned:

        >>> m[1, 0]
        3

        When a tuple key involves a slice, a matrix is returned. Here, the
        first column is selected (all rows, column 0):

        >>> m[:, 0]
        Matrix([
        [1],
        [3]])

        If the slice is not a tuple then it selects from the underlying
        list of elements that are arranged in row order and a list is
        returned if a slice is involved:

        >>> m[0]
        1
        >>> m[::2]
        [1, 3]
        """
        if isinstance(key, tuple):
            i, j = key
            try:
                # Fast path: both indices resolve to plain ints.
                i, j = self.key2ij(key)
                return self._mat[i*self.cols + j]
            except (TypeError, IndexError) as exc:
                # Symbolic (non-number) indices yield a deferred
                # MatrixElement, unless provably out of bounds.
                if any(isinstance(_, Expr) and not _.is_number for _ in (i, j)):
                    if true in (j < 0, j >= self.shape[1], i < 0,
                                i >= self.shape[0]):
                        raise ValueError('index out of boundary') from exc
                    from .expressions.matexpr import MatrixElement
                    return MatrixElement(self, i, j)

                # Normalize each index to a sequence of row/col numbers,
                # then delegate to extract().
                if isinstance(i, slice):
                    i = range(self.rows)[i]
                elif is_sequence(i):
                    pass
                else:
                    i = [i]
                if isinstance(j, slice):
                    j = range(self.cols)[j]
                elif is_sequence(j):
                    pass
                else:
                    j = [j]
                return self.extract(i, j)
        else:
            # row-wise decomposition of matrix
            if isinstance(key, slice):
                return self._mat[key]
            return self._mat[a2idx(key)]

    def __setitem__(self, key, value):
        # Immutable in this base class; MutableDenseMatrix overrides this.
        raise NotImplementedError

    @property
    def is_Identity(self):
        """True if self is square with ones on the diagonal, zeros elsewhere."""
        if not self.is_square:
            return False
        if not all(self[i, i] == 1 for i in range(self.rows)):
            return False
        # Check both triangles for any nonzero off-diagonal entry.
        for i in range(self.rows):
            for j in range(i + 1, self.cols):
                if self[i, j] or self[j, i]:
                    return False
        return True

    def tolist(self):
        """Return the Matrix as a nested Python list.

        Examples
        ========

        >>> m = Matrix(3, 3, range(9))
        >>> m
        Matrix([
        [0, 1, 2],
        [3, 4, 5],
        [6, 7, 8]])
        >>> m.tolist()
        [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
        >>> ones(3, 0).tolist()
        [[], [], []]

        When there are no rows then it will not be possible to tell how
        many columns were in the original matrix:

        >>> ones(0, 3).tolist()
        []
        """
        if not self.rows:
            return []
        if not self.cols:
            return [[] for i in range(self.rows)]
        # Slice the flat storage one row at a time.
        return [self._mat[i: i + self.cols]
                for i in range(0, len(self), self.cols)]

    def _eval_trace(self):
        """Calculate the trace of a square matrix.

        Examples
        ========

        >>> eye(3).trace()
        3
        """
        trace = 0
        for i in range(self.cols):
            trace += self._mat[i*self.cols + i]
        return trace

    def _eval_determinant(self):
        """Delegate to the public det() implementation."""
        return self.det()

    def _eval_transpose(self):
        """Matrix transposition.

        Examples
        ========

        >>> m = Matrix(((1, 2 + I), (3, 4)))
        >>> m
        Matrix([
        [1, 2 + I],
        [3, 4]])
        >>> m.transpose()
        Matrix([
        [    1, 3],
        [2 + I, 4]])
        >>> m.T == m.transpose()
        True

        See Also
        ========

        conjugate: By-element conjugation
        """
        a = []
        # Stepping the flat list with stride self.cols walks a column.
        for i in range(self.cols):
            a.extend(self._mat[i::self.cols])
        return self._new(self.cols, self.rows, a)

    def _eval_conjugate(self):
        """By-element conjugation.

        See Also
        ========

        transpose: Matrix transposition
        H: Hermite conjugation
        D: Dirac conjugation
        """
        out = self._new(self.rows, self.cols,
                        lambda i, j: self[i, j].conjugate())
        return out

    def _eval_adjoint(self):
        # Adjoint = transpose then conjugate.
        return self.T.C

    def _eval_inverse(self, **kwargs):
        """Return the matrix inverse using the method indicated (default
        is Gauss elimination).

        kwargs
        ======

        method : ('GE', 'LU', or 'ADJ')
        iszerofunc
        try_block_diag

        Notes
        =====

        According to the ``method`` keyword, it calls the appropriate method:

          GE .... inverse_GE(); default
          LU .... inverse_LU()
          ADJ ... inverse_ADJ()

        According to the ``try_block_diag`` keyword, it will try to form block
        diagonal matrices using the method get_diag_blocks(), invert these
        individually, and then reconstruct the full inverse matrix.

        Note, the GE and LU methods may require the matrix to be simplified
        before it is inverted in order to properly detect zeros during
        pivoting. In difficult cases a custom zero detection function can
        be provided by setting the ``iszerosfunc`` argument to a function that
        should return True if its argument is zero. The ADJ routine computes
        the determinant and uses that to detect singular matrices in addition
        to testing for zeros on the diagonal.

        See Also
        ========

        inverse_LU
        inverse_GE
        inverse_ADJ
        """
        from . import diag

        method = kwargs.get('method', 'GE')
        iszerofunc = kwargs.get('iszerofunc', _iszero)
        if kwargs.get('try_block_diag', False):
            # Invert each diagonal block independently, then reassemble.
            blocks = self.get_diag_blocks()
            r = []
            for block in blocks:
                r.append(block.inv(method=method, iszerofunc=iszerofunc))
            return diag(*r)

        M = self.as_mutable()
        if method == 'GE':
            rv = M.inverse_GE(iszerofunc=iszerofunc)
        elif method == 'LU':
            rv = M.inverse_LU(iszerofunc=iszerofunc)
        elif method == 'ADJ':
            rv = M.inverse_ADJ(iszerofunc=iszerofunc)
        else:
            # make sure to add an invertibility check (as in inverse_LU)
            # if a new method is added.
            raise ValueError('Inversion method unrecognized')
        return self._new(rv)

    def equals(self, other, failing_expression=False):
        """Applies ``equals`` to corresponding elements of the matrices,
        trying to prove that the elements are equivalent, returning True
        if they are, False if any pair is not, and None (or the first
        failing expression if failing_expression is True) if it cannot
        be decided if the expressions are equivalent or not. This is, in
        general, an expensive operation.

        Examples
        ========

        >>> A = Matrix([x*(x - 1), 0])
        >>> B = Matrix([x**2 - x, 0])
        >>> A == B
        False
        >>> A.simplify() == B.simplify()
        True
        >>> A.equals(B)
        True
        >>> A.equals(2)
        False

        See Also
        ========

        diofant.core.expr.Expr.equals
        """
        try:
            if self.shape != other.shape:
                return False
            rv = True
            for i in range(self.rows):
                for j in range(self.cols):
                    ans = self[i, j].equals(other[i, j], failing_expression)
                    if ans is False:
                        return False
            return rv
        except AttributeError:
            # ``other`` is not matrix-like.
            return False

    def __eq__(self, other):
        """Structural equality on shape and flat element storage.

        NOTE(review): falls through (returning None, i.e. falsy) when
        ``other`` has a matching shape but is neither Matrix nor
        MatrixBase — presumably unreachable given the pragma below.
        """
        from . import Matrix

        try:
            if self.shape != other.shape:
                return False
            if isinstance(other, Matrix):
                return self._mat == other._mat
            elif isinstance(other, MatrixBase):  # pragma: no branch
                return self._mat == Matrix(other)._mat
        except AttributeError:
            return False

    def _cholesky(self):
        """Helper function of cholesky.
        Without the error checks.
        To be used privately.
        """
        L = zeros(self.rows, self.rows)
        for i in range(self.rows):
            for j in range(i):
                L[i, j] = (1 / L[j, j])*(self[i, j] -
                                         sum(L[i, k]*L[j, k] for k in range(j)))
            L[i, i] = sqrt(self[i, i] -
                           sum(L[i, k]**2 for k in range(i)))
        return self._new(L)

    def _LDLdecomposition(self):
        """Helper function of LDLdecomposition.
        Without the error checks.
        To be used privately.
        """
        D = zeros(self.rows, self.rows)
        L = eye(self.rows)
        for i in range(self.rows):
            for j in range(i):
                L[i, j] = (1 / D[j, j])*(self[i, j] - sum(
                    L[i, k]*L[j, k]*D[k, k] for k in range(j)))
            D[i, i] = self[i, i] - sum(L[i, k]**2*D[k, k]
                                       for k in range(i))
        return self._new(L), self._new(D)

    def _lower_triangular_solve(self, rhs):
        """Helper function of function lower_triangular_solve.
        Without the error checks.
        To be used privately.
        """
        # Forward substitution, one rhs column at a time.
        X = zeros(self.rows, rhs.cols)
        for j in range(rhs.cols):
            for i in range(self.rows):
                if self[i, i] == 0:
                    raise ValueError('Matrix must be non-singular.')
                X[i, j] = (rhs[i, j] - sum(self[i, k]*X[k, j]
                                           for k in range(i))) / self[i, i]
        return self._new(X)

    def _upper_triangular_solve(self, rhs):
        """Helper function of function upper_triangular_solve.
        Without the error checks, to be used privately.
        """
        # Backward substitution, one rhs column at a time.
        X = zeros(self.rows, rhs.cols)
        for j in range(rhs.cols):
            for i in reversed(range(self.rows)):
                if self[i, i] == 0:
                    raise ValueError('Matrix must be non-singular.')
                X[i, j] = (rhs[i, j] - sum(self[i, k]*X[k, j]
                                           for k in range(i + 1, self.rows))) / self[i, i]
        return self._new(X)

    def _diagonal_solve(self, rhs):
        """Helper function of function diagonal_solve,
        without the error checks, to be used privately.
        """
        return self._new(rhs.rows, rhs.cols, lambda i, j: rhs[i, j] / self[i, i])

    def applyfunc(self, f):
        """Apply a function to each element of the matrix.

        Examples
        ========

        >>> m = Matrix(2, 2, lambda i, j: i*2+j)
        >>> m
        Matrix([
        [0, 1],
        [2, 3]])
        >>> m.applyfunc(lambda i: 2*i)
        Matrix([
        [0, 2],
        [4, 6]])
        """
        if not callable(f):
            raise TypeError('`f` must be callable.')

        out = self._new(self.rows, self.cols, list(map(f, self._mat)))
        return out

    def reshape(self, rows, cols):
        """Reshape the matrix. Total number of elements must remain the same.

        Examples
        ========

        >>> m = Matrix(2, 3, lambda i, j: 1)
        >>> m
        Matrix([
        [1, 1, 1],
        [1, 1, 1]])
        >>> m.reshape(1, 6)
        Matrix([[1, 1, 1, 1, 1, 1]])
        >>> m.reshape(3, 2)
        Matrix([
        [1, 1],
        [1, 1],
        [1, 1]])
        """
        if len(self) != rows*cols:
            raise ValueError(f'Invalid reshape parameters {rows:d} {cols:d}')
        return self._new(rows, cols, lambda i, j: self._mat[i*cols + j])

    def as_mutable(self):
        """Returns a mutable version of this matrix

        Examples
        ========

        >>> X = ImmutableMatrix([[1, 2], [3, 4]])
        >>> Y = X.as_mutable()
        >>> Y[1, 1] = 5  # Can set values in Y
        >>> Y
        Matrix([
        [1, 2],
        [3, 5]])
        """
        return MutableMatrix(self)

    def as_immutable(self):
        """Returns an Immutable version of this Matrix."""
        from .immutable import ImmutableMatrix

        if self.rows and self.cols:
            return ImmutableMatrix._new(self.tolist())
        # Degenerate (zero-sized) matrices must pass shape explicitly.
        return ImmutableMatrix._new(self.rows, self.cols, [])

    @classmethod
    def zeros(cls, r, c=None):
        """Return an r x c matrix of zeros, square if c is omitted."""
        c = r if c is None else c
        r = as_int(r)
        c = as_int(c)
        return cls._new(r, c, [cls._sympify(0)]*r*c)

    @classmethod
    def eye(cls, n):
        """Return an n x n identity matrix."""
        n = as_int(n)
        mat = [cls._sympify(0)]*n*n
        # Stride n + 1 hits exactly the diagonal positions of the flat list.
        mat[::n + 1] = [cls._sympify(1)]*n
        return cls._new(n, n, mat)

    ############################
    # Mutable matrix operators #
    ############################

    @call_highest_priority('__radd__')
    def __add__(self, other):
        return super().__add__(_force_mutable(other))

    @call_highest_priority('__rsub__')
    def __sub__(self, other):
        return super().__sub__(_force_mutable(other))

    @call_highest_priority('__rmul__')
    def __mul__(self, other):
        """Return self*other."""
        return super().__mul__(_force_mutable(other))

    @call_highest_priority('__mul__')
    def __rmul__(self, other):
        return super().__rmul__(_force_mutable(other))

    @call_highest_priority('__truediv__')
    def __truediv__(self, other):
        return super().__truediv__(_force_mutable(other))

    @call_highest_priority('__rpow__')
    def __pow__(self, other):
        return super().__pow__(other)
def _force_mutable(x):
    """Return a matrix as a Matrix, otherwise return x."""
    # Matrix-like objects are converted to their mutable form.
    if getattr(x, 'is_Matrix', False):
        return x.as_mutable()
    # Diofant expressions pass through unchanged.
    if isinstance(x, Basic):
        return x
    # numpy-style objects: 0-d arrays become scalars, others matrices.
    if hasattr(x, '__array__'):
        arr = x.__array__()
        if len(arr.shape) == 0:
            return sympify(arr)
        return MutableMatrix(x)
    return x
class MutableDenseMatrix(DenseMatrix, MatrixBase):
    """A mutable version of the dense matrix."""

    @classmethod
    def _new(cls, *args, **kwargs):
        """Construct an instance directly from creation inputs,
        bypassing __init__.
        """
        rows, cols, flat_list = cls._handle_creation_inputs(*args, **kwargs)
        self = object.__new__(cls)
        self.rows = rows
        self.cols = cols
        self._mat = list(flat_list)  # create a shallow copy
        return self

    def __new__(cls, *args, **kwargs):
        return cls._new(*args, **kwargs)

    def as_mutable(self):
        # Already mutable; return an independent copy.
        return self.copy()

    def __setitem__(self, key, value):
        """Set matrix item.

        Examples
        ========

        >>> m = Matrix(((1, 2+I), (3, 4)))
        >>> m
        Matrix([
        [1, 2 + I],
        [3, 4]])
        >>> m[1, 0] = 9
        >>> m
        Matrix([
        [1, 2 + I],
        [9, 4]])
        >>> m[1, 0] = [[0, 1]]

        To replace row r you assign to position r*m where m
        is the number of columns:

        >>> M = zeros(4)
        >>> m = M.cols
        >>> M[3*m] = ones(1, m)*2
        >>> M
        Matrix([
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [2, 2, 2, 2]])

        And to replace column c you can assign to position c:

        >>> M[2] = ones(m, 1)*4
        >>> M
        Matrix([
        [0, 0, 4, 0],
        [0, 0, 4, 0],
        [0, 0, 4, 0],
        [2, 2, 4, 2]])
        """
        # _setitem handles slice/sequence values itself and returns None;
        # a non-None result is a single (i, j, value) scalar assignment.
        rv = self._setitem(key, value)
        if rv is not None:
            i, j, value = rv
            self._mat[i*self.cols + j] = value

    def copyin_matrix(self, key, value):
        """Copy in values from a matrix into the given bounds.

        Parameters
        ==========

        key : slice
            The section of this matrix to replace.
        value : Matrix
            The matrix to copy values from.

        Examples
        ========

        >>> M = Matrix([[0, 1], [2, 3], [4, 5]])
        >>> I = eye(3)
        >>> I[:3, :2] = M
        >>> I
        Matrix([
        [0, 1, 0],
        [2, 3, 0],
        [4, 5, 1]])
        >>> I[0, 1] = M
        >>> I
        Matrix([
        [0, 0, 1],
        [2, 2, 3],
        [4, 4, 5]])

        See Also
        ========

        diofant.matrices.dense.MutableDenseMatrix.copyin_list
        """
        rlo, rhi, clo, chi = self.key2bounds(key)
        shape = value.shape
        dr, dc = rhi - rlo, chi - clo
        if shape != (dr, dc):
            raise ShapeError(filldedent("The Matrix `value` doesn't have the "
                                        'same dimensions '
                                        'as the in sub-Matrix given by `key`.'))
        for i in range(value.rows):
            for j in range(value.cols):
                self[i + rlo, j + clo] = value[i, j]

    def copyin_list(self, key, value):
        """Copy in elements from a list.

        Parameters
        ==========

        key : slice
            The section of this matrix to replace.
        value : iterable
            The iterable to copy values from.

        Examples
        ========

        >>> I = eye(3)
        >>> I[:2, 0] = [1, 2]  # col
        >>> I
        Matrix([
        [1, 0, 0],
        [2, 1, 0],
        [0, 0, 1]])
        >>> I[1, :2] = [[3, 4]]
        >>> I
        Matrix([
        [1, 0, 0],
        [3, 4, 0],
        [0, 0, 1]])

        See Also
        ========

        diofant.matrices.dense.MutableDenseMatrix.copyin_matrix
        """
        if not is_sequence(value):
            raise TypeError(f'`value` must be an ordered iterable, not {type(value)}.')
        # Reuse the matrix path by first wrapping the list in a Matrix.
        return self.copyin_matrix(key, MutableMatrix(value))

    def zip_row_op(self, i, k, f):
        """In-place operation on row ``i`` using two-arg functor whose args are
        interpreted as ``(self[i, j], self[k, j])``.

        Examples
        ========

        >>> M = eye(3)
        >>> M.zip_row_op(1, 0, lambda v, u: v + 2*u)
        >>> M
        Matrix([
        [1, 0, 0],
        [2, 1, 0],
        [0, 0, 1]])

        See Also
        ========

        diofant.matrices.dense.MutableDenseMatrix.row_op
        diofant.matrices.dense.MutableDenseMatrix.col_op
        """
        # Rows i and k occupy contiguous slices of the flat storage.
        i0 = i*self.cols
        k0 = k*self.cols
        ri = self._mat[i0: i0 + self.cols]
        rk = self._mat[k0: k0 + self.cols]
        self._mat[i0: i0 + self.cols] = [f(x, y) for x, y in zip(ri, rk)]

    def row_op(self, i, f):
        """In-place operation on row ``i`` using two-arg functor whose args are
        interpreted as ``(self[i, j], j)``.

        Examples
        ========

        >>> M = eye(3)
        >>> M.row_op(1, lambda v, j: v + 2*M[0, j])
        >>> M
        Matrix([
        [1, 0, 0],
        [2, 1, 0],
        [0, 0, 1]])

        See Also
        ========

        diofant.matrices.dense.MutableDenseMatrix.zip_row_op
        diofant.matrices.dense.MutableDenseMatrix.col_op
        """
        i0 = i*self.cols
        ri = self._mat[i0: i0 + self.cols]
        self._mat[i0: i0 + self.cols] = [f(x, j) for x, j in zip(ri, range(self.cols))]

    def col_op(self, j, f):
        """In-place operation on col j using two-arg functor whose args are
        interpreted as (self[i, j], i).

        Examples
        ========

        >>> M = eye(3)
        >>> M.col_op(1, lambda v, i: v + 2*M[i, 0])
        >>> M
        Matrix([
        [1, 2, 0],
        [0, 1, 0],
        [0, 0, 1]])

        See Also
        ========

        diofant.matrices.dense.MutableDenseMatrix.row_op
        """
        # Column j is the flat-storage slice with stride self.cols.
        self._mat[j::self.cols] = [f(*t) for t in list(zip(self._mat[j::self.cols], range(self.rows)))]

    def row_swap(self, i, j):
        """Swap the two given rows of the matrix in-place.

        Examples
        ========

        >>> M = Matrix([[0, 1], [1, 0]])
        >>> M
        Matrix([
        [0, 1],
        [1, 0]])
        >>> M.row_swap(0, 1)
        >>> M
        Matrix([
        [1, 0],
        [0, 1]])

        See Also
        ========

        diofant.matrices.dense.MutableDenseMatrix.col_swap
        """
        for k in range(self.cols):
            self[i, k], self[j, k] = self[j, k], self[i, k]

    def col_swap(self, i, j):
        """Swap the two given columns of the matrix in-place.

        Examples
        ========

        >>> M = Matrix([[1, 0], [1, 0]])
        >>> M
        Matrix([
        [1, 0],
        [1, 0]])
        >>> M.col_swap(0, 1)
        >>> M
        Matrix([
        [0, 1],
        [0, 1]])

        See Also
        ========

        diofant.matrices.dense.MutableDenseMatrix.row_swap
        """
        for k in range(self.rows):
            self[k, i], self[k, j] = self[k, j], self[k, i]

    def __delitem__(self, key):
        """Delete portion of self defined by key.

        Examples
        ========

        >>> M = eye(3)
        >>> del M[1, :]
        >>> M
        Matrix([
        [1, 0, 0],
        [0, 0, 1]])
        >>> del M[:, 0]
        >>> M
        Matrix([
        [0, 0],
        [0, 1]])
        """
        i, j = self.key2ij(key)
        if isinstance(i, int) and j == slice(None):
            # Delete a whole row: one contiguous slice of the flat list.
            del self._mat[i*self.cols:(i + 1)*self.cols]
            self.rows -= 1
        elif i == slice(None) and isinstance(j, int):
            # Delete a whole column: remove one element per row, from the
            # bottom up so earlier deletions don't shift later indices.
            for i in range(self.rows - 1, -1, -1):
                del self._mat[j + i*self.cols]
            self.cols -= 1
        else:
            raise NotImplementedError

    # Utility functions

    def simplify(self, ratio=1.7, measure=count_ops):
        """Applies simplify to the elements of a matrix in place.

        This is a shortcut for M.applyfunc(lambda x: simplify(x, ratio, measure))

        See Also
        ========

        diofant.simplify.simplify.simplify
        """
        for i, mi in enumerate(self._mat):
            self._mat[i] = _simplify(mi, ratio=ratio, measure=measure)

    def fill(self, value):
        """Fill the matrix with the scalar value.

        See Also
        ========

        diofant.matrices.dense.zeros
        diofant.matrices.dense.ones
        """
        self._mat = [value]*len(self)  # pylint: disable=attribute-defined-outside-init
MutableMatrix = MutableDenseMatrix
###########
# Numpy Utility Functions:
# list2numpy, matrix2numpy, symmarray, rot_axis[123]
###########
def list2numpy(l, dtype=object):  # pragma: no cover
    """Converts python list of Diofant expressions to a NumPy array.

    See Also
    ========

    diofant.matrices.dense.matrix2numpy
    """
    from numpy import empty
    result = empty(len(l), dtype)
    for idx, item in enumerate(l):
        result[idx] = item
    return result
def matrix2numpy(m, dtype=object):  # pragma: no cover
    """Converts Diofant's matrix to a NumPy array.

    See Also
    ========

    diofant.matrices.dense.list2numpy
    """
    from numpy import empty
    result = empty(m.shape, dtype)
    # Copy element-wise over all (row, column) positions.
    for i, j in itertools.product(range(m.rows), range(m.cols)):
        result[i, j] = m[i, j]
    return result
@doctest_depends_on(modules=('numpy',))
def symarray(prefix, shape, **kwargs):  # pragma: no cover
    r"""Create a numpy ndarray of symbols (as an object array).

    The created symbols are named ``prefix_i1_i2_``... You should thus provide a
    non-empty prefix if you want your symbols to be unique for different output
    arrays, as Diofant symbols with identical names are the same object.

    Parameters
    ==========

    prefix : string
        A prefix prepended to the name of every symbol.

    shape : int or tuple
        Shape of the created array. If an int, the array is one-dimensional; for
        more than one dimension the shape must be a tuple.

    \*\*kwargs : dict
        keyword arguments passed on to Symbol

    Examples
    ========

    These doctests require numpy.

    >>> symarray('', 3)
    [_0 _1 _2]

    If you want multiple symarrays to contain distinct symbols, you *must*
    provide unique prefixes:

    >>> a = symarray('', 3)
    >>> b = symarray('', 3)
    >>> a[0] == b[0]
    True
    >>> a = symarray('a', 3)
    >>> b = symarray('b', 3)
    >>> a[0] == b[0]
    False

    Creating symarrays with a prefix:

    >>> symarray('a', 3)
    [a_0 a_1 a_2]

    For more than one dimension, the shape must be given as a tuple:

    >>> symarray('a', (2, 3))
    [[a_0_0 a_0_1 a_0_2]
     [a_1_0 a_1_1 a_1_2]]
    >>> symarray('a', (2, 3, 2))
    [[[a_0_0_0 a_0_0_1]
      [a_0_1_0 a_0_1_1]
      [a_0_2_0 a_0_2_1]]
    <BLANKLINE>
     [[a_1_0_0 a_1_0_1]
      [a_1_1_0 a_1_1_1]
      [a_1_2_0 a_1_2_1]]]

    For setting assumptions of the underlying Symbols:

    >>> [s.is_real for s in symarray('a', 2, real=True)]
    [True, True]
    """
    from numpy import empty, ndindex
    arr = empty(shape, dtype=object)
    # One Symbol per multi-index, named prefix_i1_i2_...
    for index in ndindex(shape):
        label = '_'.join(str(i) for i in index)
        arr[index] = Symbol(f'{prefix}_{label}', **kwargs)
    return arr
def rot_axis3(theta):
    """Return a rotation matrix for a rotation of theta (in radians) about
    the 3-axis.

    Examples
    ========

    A rotation of pi/3 (60 degrees):

    >>> theta = pi/3
    >>> rot_axis3(theta)
    Matrix([
    [       1/2, sqrt(3)/2, 0],
    [-sqrt(3)/2,       1/2, 0],
    [         0,         0, 1]])

    If we rotate by pi/2 (90 degrees):

    >>> rot_axis3(pi/2)
    Matrix([
    [ 0, 1, 0],
    [-1, 0, 0],
    [ 0, 0, 1]])

    See Also
    ========

    diofant.matrices.dense.rot_axis1: Returns a rotation matrix for a rotation of theta (in radians)
        about the 1-axis
    diofant.matrices.dense.rot_axis2: Returns a rotation matrix for a rotation of theta (in radians)
        about the 2-axis
    """
    from . import Matrix
    c, s = cos(theta), sin(theta)
    return Matrix(((c, s, 0),
                   (-s, c, 0),
                   (0, 0, 1)))
def rot_axis2(theta):
    """Return a rotation matrix for a rotation of theta (in radians) about
    the 2-axis.

    Examples
    ========

    A rotation of pi/3 (60 degrees):

    >>> theta = pi/3
    >>> rot_axis2(theta)
    Matrix([
    [      1/2, 0, -sqrt(3)/2],
    [        0, 1,          0],
    [sqrt(3)/2, 0,        1/2]])

    If we rotate by pi/2 (90 degrees):

    >>> rot_axis2(pi/2)
    Matrix([
    [0, 0, -1],
    [0, 1,  0],
    [1, 0,  0]])

    See Also
    ========

    diofant.matrices.dense.rot_axis1: Returns a rotation matrix for a rotation of theta (in radians)
        about the 1-axis
    diofant.matrices.dense.rot_axis3: Returns a rotation matrix for a rotation of theta (in radians)
        about the 3-axis
    """
    from . import Matrix
    c, s = cos(theta), sin(theta)
    return Matrix(((c, 0, -s),
                   (0, 1, 0),
                   (s, 0, c)))
def rot_axis1(theta):
    """Return a rotation matrix for a rotation of theta (in radians) about
    the 1-axis.

    Examples
    ========

    A rotation of pi/3 (60 degrees):

    >>> theta = pi/3
    >>> rot_axis1(theta)
    Matrix([
    [1,          0,         0],
    [0,        1/2, sqrt(3)/2],
    [0, -sqrt(3)/2,       1/2]])

    If we rotate by pi/2 (90 degrees):

    >>> rot_axis1(pi/2)
    Matrix([
    [1,  0, 0],
    [0,  0, 1],
    [0, -1, 0]])

    See Also
    ========

    diofant.matrices.dense.rot_axis2: Returns a rotation matrix for a rotation of theta (in radians)
        about the 2-axis
    diofant.matrices.dense.rot_axis3: Returns a rotation matrix for a rotation of theta (in radians)
        about the 3-axis
    """
    from . import Matrix
    c, s = cos(theta), sin(theta)
    return Matrix(((1, 0, 0),
                   (0, c, s),
                   (0, -s, c)))
###############
# Functions
###############
def matrix_multiply_elementwise(A, B):
    """Return the Hadamard product (elementwise product) of A and B.

    >>> A = Matrix([[0, 1, 2], [3, 4, 5]])
    >>> B = Matrix([[1, 10, 100], [100, 10, 1]])
    >>> matrix_multiply_elementwise(A, B)
    Matrix([
    [  0, 10, 200],
    [300, 40,   5]])

    See Also
    ========

    diofant.matrices.dense.DenseMatrix.__mul__
    """
    if A.shape != B.shape:
        raise ShapeError()
    nrows, ncols = A.shape
    return classof(A, B)._new(nrows, ncols,
                              lambda i, j: A[i, j]*B[i, j])
def ones(r, c=None):
    """Return an ``r`` x ``c`` matrix of ones; square if ``c`` is omitted.

    See Also
    ========

    diofant.matrices.dense.zeros
    diofant.matrices.dense.eye
    diofant.matrices.dense.diag
    """
    from . import Matrix
    if c is None:
        c = r
    nrows, ncols = as_int(r), as_int(c)
    return Matrix(nrows, ncols, [Integer(1)]*(nrows*ncols))
def zeros(r, c=None, cls=None):
    """Return an ``r`` x ``c`` matrix of zeros; square if ``c`` is omitted.

    See Also
    ========

    diofant.matrices.dense.ones
    diofant.matrices.dense.eye
    diofant.matrices.dense.diag
    """
    if cls is not None:
        return cls.zeros(r, c)
    from . import Matrix
    return Matrix.zeros(r, c)
def eye(n, cls=None):
    """Create the square identity matrix n x n.

    See Also
    ========

    diofant.matrices.dense.diag
    diofant.matrices.dense.zeros
    diofant.matrices.dense.ones
    """
    if cls is not None:
        return cls.eye(n)
    from . import Matrix
    return Matrix.eye(n)
def diag(*values, **kwargs):
    """Create a sparse, diagonal matrix from a list of diagonal values.
    Notes
    =====
    When arguments are matrices they are fitted in resultant matrix.
    The returned matrix is a mutable, dense matrix. To make it a different
    type, send the desired class for keyword ``cls``.
    Examples
    ========
    >>> diag(1, 2, 3)
    Matrix([
    [1, 0, 0],
    [0, 2, 0],
    [0, 0, 3]])
    >>> diag(*[1, 2, 3])
    Matrix([
    [1, 0, 0],
    [0, 2, 0],
    [0, 0, 3]])
    The diagonal elements can be matrices; diagonal filling will
    continue on the diagonal from the last element of the matrix:
    >>> a = Matrix([x, y, z])
    >>> b = Matrix([[1, 2], [3, 4]])
    >>> c = Matrix([[5, 6]])
    >>> diag(a, 7, b, c)
    Matrix([
    [x, 0, 0, 0, 0, 0],
    [y, 0, 0, 0, 0, 0],
    [z, 0, 0, 0, 0, 0],
    [0, 7, 0, 0, 0, 0],
    [0, 0, 1, 2, 0, 0],
    [0, 0, 3, 4, 0, 0],
    [0, 0, 0, 0, 5, 6]])
    When diagonal elements are lists, they will be treated as arguments
    to Matrix:
    >>> diag([1, 2, 3], 4)
    Matrix([
    [1, 0],
    [2, 0],
    [3, 0],
    [0, 4]])
    >>> diag([[1, 2, 3]], 4)
    Matrix([
    [1, 2, 3, 0],
    [0, 0, 0, 4]])
    A given band off the diagonal can be made by padding with a
    vertical or horizontal "kerning" vector:
    >>> hpad = ones(0, 2)
    >>> vpad = ones(2, 0)
    >>> diag(vpad, 1, 2, 3, hpad) + diag(hpad, 4, 5, 6, vpad)
    Matrix([
    [0, 0, 4, 0, 0],
    [0, 0, 0, 5, 0],
    [1, 0, 0, 0, 6],
    [0, 2, 0, 0, 0],
    [0, 0, 3, 0, 0]])
    The type is mutable by default but can be made immutable by setting
    the ``mutable`` flag to False:
    >>> type(diag(1))
    <class 'diofant.matrices.dense.MutableDenseMatrix'>
    >>> type(diag(1, cls=ImmutableMatrix))
    <class 'diofant.matrices.immutable.ImmutableMatrix'>
    See Also
    ========
    diofant.matrices.dense.eye
    """
    from . import Matrix
    from .sparse import MutableSparseMatrix
    # ``cls`` must be popped before the unknown-keyword check below,
    # otherwise it would be reported as unrecognized.
    cls = kwargs.pop('cls', None)
    if cls is None:
        from . import Matrix as cls  # noqa: N813
    if kwargs:
        raise ValueError('unrecognized keyword%s: %s' % (  # noqa: SFS101
            's' if len(kwargs) > 1 else '',
            ', '.join(kwargs)))
    # First pass: measure the total size of the result.  Sequences are
    # converted to matrices *in place* in ``values`` so the second pass
    # sees uniform objects (scalars or MatrixBase instances).
    rows = 0
    cols = 0
    values = list(values)
    for i, m in enumerate(values):
        if isinstance(m, MatrixBase):
            rows += m.rows
            cols += m.cols
        elif is_sequence(m):
            m = values[i] = Matrix(m)
            rows += m.rows
            cols += m.cols
        else:
            # scalar entry: occupies a single diagonal cell
            rows += 1
            cols += 1
    # Second pass: place each value along the running diagonal of a
    # sparse scratch matrix, then convert to the requested class.
    res = MutableSparseMatrix.zeros(rows, cols)
    i_row = 0
    i_col = 0
    for m in values:
        if isinstance(m, MatrixBase):
            res[i_row:i_row + m.rows, i_col:i_col + m.cols] = m
            i_row += m.rows
            i_col += m.cols
        else:
            res[i_row, i_col] = m
            i_row += 1
            i_col += 1
    return cls._new(res)
def vandermonde(order, gen=None):
    """Compute a Vandermonde matrix of the given order and dimension."""
    if not gen:
        gen = numbered_symbols('C')
    syms = list(itertools.islice(gen, int(order)))
    result = zeros(order)
    for row, sym in enumerate(syms):
        for col in range(order):
            result[row, col] = sym**col
    return result
def jordan_cell(eigenval, n):
    """
    Create a Jordan block: ``eigenval`` on the diagonal, ones on the
    superdiagonal.

    Examples
    ========

    >>> jordan_cell(x, 4)
    Matrix([
    [x, 1, 0, 0],
    [0, x, 1, 0],
    [0, 0, x, 1],
    [0, 0, 0, x]])
    """
    size = as_int(n)
    result = zeros(size)
    for i in range(size):
        result[i, i] = eigenval
        if i + 1 < size:
            result[i, i + 1] = Integer(1)
    return result
def hessian(f, varlist, constraints=()):
    """Compute Hessian matrix for a function f wrt parameters in varlist
    which may be given as a sequence or a row/column vector. A list of
    constraints may optionally be given.

    Examples
    ========

    >>> f = Function('f')(x, y)
    >>> g1 = Function('g')(x, y)
    >>> g2 = x**2 + 3*y
    >>> pprint(hessian(f, (x, y), [g1, g2]), use_unicode=False)
    [                   d               d            ]
    [     0        0    --(g(x, y))     --(g(x, y))  ]
    [                   dx              dy           ]
    [                                                ]
    [     0        0        2*x             3        ]
    [                                                ]
    [                     2               2          ]
    [d                   d               d           ]
    [--(g(x, y))  2*x   ---(f(x, y))   -----(f(x, y))]
    [dx                   2            dy dx         ]
    [                   dx                           ]
    [                                                ]
    [                     2               2          ]
    [d                   d               d           ]
    [--(g(x, y))   3    -----(f(x, y))  ---(f(x, y)) ]
    [dy                 dy dx             2          ]
    [                                   dy           ]

    References
    ==========

    https://en.wikipedia.org/wiki/Hessian_matrix

    See Also
    ========

    diofant.matrices.matrices.MatrixBase.jacobian
    diofant.matrices.dense.wronskian
    """
    # f is the expression representing a function f, return regular matrix.
    # NOTE: the default for ``constraints`` was a mutable list literal; an
    # empty tuple is behaviorally identical (only iterated/len'd) and safe.
    if isinstance(varlist, MatrixBase):
        if 1 not in varlist.shape:
            raise ShapeError('`varlist` must be a column or row vector.')
        if varlist.cols == 1:
            varlist = varlist.T
        varlist = varlist.tolist()[0]
    if is_sequence(varlist):
        n = len(varlist)
        if not n:
            raise ShapeError('`len(varlist)` must not be zero.')
    else:
        raise ValueError('Improper variable list in hessian function')
    if not getattr(f, 'diff', None):
        # check differentiability
        raise ValueError(f'Function `f` ({f}) is not differentiable')
    m = len(constraints)
    N = m + n
    out = zeros(N)
    # Border the Hessian with the constraint gradients (bordered Hessian).
    for k, g in enumerate(constraints):
        if not getattr(g, 'diff', None):
            # check differentiability; report the offending *constraint*
            # (the original message blamed and printed `f` here)
            raise ValueError(f'Constraint `g` ({g}) is not differentiable')
        for i in range(n):
            out[k, i + m] = g.diff(varlist[i])
    # Fill the upper triangle of the second-derivative block ...
    for i in range(n):
        for j in range(i, n):
            out[i + m, j + m] = f.diff(varlist[i]).diff(varlist[j])
    # ... then mirror it so the result is symmetric.
    for i in range(N):
        for j in range(i + 1, N):
            out[j, i] = out[i, j]
    return out
def GramSchmidt(vlist, orthonormal=False):
    """
    Apply the Gram-Schmidt process to a set of vectors.

    see: https://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process
    """
    basis = []
    for vec in vlist:
        # subtract the projections of the *original* vector onto every
        # previously accepted basis vector (classical Gram-Schmidt)
        residual = vec
        for prev in basis:
            residual -= vec.project(prev)
        if not residual.values():
            raise ValueError(
                'GramSchmidt: vector set not linearly independent')
        basis.append(residual)
    if orthonormal:
        basis = [b.normalized() for b in basis]
    return basis
def wronskian(functions, var, method='bareiss'):
    """
    Compute Wronskian for [] of functions

    ::

                           | f1     f2      ...  fn    |
                           | f1'    f2'     ...  fn'   |
                           |  .      .      .     .    |
        W(f1, ..., fn) =   |  .      .      .     .    |
                           |  .      .      .     .    |
                           |  (n)    (n)         (n)   |
                           | D (f1) D (f2)  ... D (fn) |

    see: https://en.wikipedia.org/wiki/Wronskian

    See Also
    ========

    diofant.matrices.matrices.MatrixBase.jacobian
    diofant.matrices.dense.hessian
    """
    from . import Matrix
    # Sympify into a fresh list instead of mutating the caller's sequence
    # in place (the original overwrote ``functions[index]``); this also
    # accepts tuples and other iterables, not just mutable lists.
    functions = [sympify(f) for f in functions]
    n = len(functions)
    if n == 0:
        return 1
    # Row i holds the i-th derivatives; entry (i, j) is d^j f_i / d var^j.
    W = Matrix(n, n, lambda i, j: functions[i].diff((var, j)))
    return W.det(method)
def casoratian(seqs, n, zero=True):
    """Given linear difference operator L of order 'k' and homogeneous
    equation Ly = 0 we want to compute kernel of L, which is a set
    of 'k' sequences: a(n), b(n), ... z(n).

    Solutions of L are linearly independent iff their Casoratian,
    denoted as C(a, b, ..., z), do not vanish for n = 0.

    Casoratian is defined by k x k determinant::

        +  a(n)     b(n)     . . . z(n)     +
        |  a(n+1)   b(n+1)   . . . z(n+1)   |
        |    .        .      .        .     |
        |    .        .        .      .     |
        |    .        .          .    .     |
        +  a(n+k-1) b(n+k-1) . . . z(n+k-1) +

    It proves very useful in rsolve_hyper() where it is applied
    to a generating set of a recurrence to factor out linearly
    dependent solutions and return a basis:

    >>> n = Symbol('n', integer=True)

    Exponential and factorial are linearly independent:

    >>> casoratian([2**n, factorial(n)], n) != 0
    True
    """
    from . import Matrix
    seqs = [sympify(s) for s in seqs]
    if zero:
        def entry(i, j):
            # evaluate sequence j at the concrete point n = i
            return seqs[j].subs({n: i})
    else:
        def entry(i, j):
            # shift sequence j symbolically by i
            return seqs[j].subs({n: n + i})
    k = len(seqs)
    return Matrix(k, k, entry).det()
def randMatrix(r, c=None, min=0, max=99, seed=None, symmetric=False, percent=100):
    """Create random matrix with dimensions ``r`` x ``c``. If ``c`` is omitted
    the matrix will be square. If ``symmetric`` is True the matrix must be
    square. If ``percent`` is less than 100 then only approximately the given
    percentage of elements will be non-zero.

    Examples
    ========

    >>> randMatrix(3)  # doctest:+SKIP
    [25, 45, 27]
    [44, 54,  9]
    [23, 96, 46]
    >>> randMatrix(3, 2)  # doctest:+SKIP
    [87, 29]
    [23, 37]
    [90, 26]
    >>> randMatrix(3, 3, 0, 2)  # doctest:+SKIP
    [0, 2, 0]
    [2, 0, 1]
    [0, 0, 1]
    >>> randMatrix(3, symmetric=True)  # doctest:+SKIP
    [85, 26, 29]
    [26, 71, 43]
    [29, 43, 57]
    >>> A = randMatrix(3, seed=1)
    >>> B = randMatrix(3, seed=2)
    >>> A == B  # doctest:+SKIP
    False
    >>> A == randMatrix(3, seed=1)
    True
    >>> randMatrix(3, symmetric=True, percent=50)  # doctest:+SKIP
    [0, 68, 43]
    [0, 68,  0]
    [0, 91, 34]
    """
    from . import Matrix
    # NOTE: ``min``/``max`` shadow the builtins, but the names are part of
    # the public keyword interface and must be kept.
    if c is None:
        c = r
    if seed is None:
        prng = random.Random()  # use system time
    else:
        prng = random.Random(seed)
    if symmetric and r != c:
        raise ValueError(
            f'For symmetric matrices, r must equal c, but {r:d} != {c:d}')
    if not symmetric:
        m = Matrix._new(r, c, lambda i, j: prng.randint(min, max))
    else:
        m = zeros(r)
        for i in range(r):
            for j in range(i, r):
                m[i, j] = prng.randint(min, max)
        for i in range(r):
            for j in range(i):
                m[i, j] = m[j, i]
    if percent == 100:
        return m
    # Zero out the *complement*, so that approximately ``percent`` % of the
    # entries stay non-zero as documented.  (The original computed
    # ``r*c*percent // 100`` and therefore zeroed ``percent`` % instead.)
    z = int(r*c*(100 - percent) // 100)
    m._mat[:z] = [Integer(0)]*z
    prng.shuffle(m._mat)
    # NOTE(review): shuffling the flat storage does not preserve symmetry
    # when ``symmetric=True`` and ``percent < 100`` — pre-existing behavior,
    # kept as-is; confirm whether callers rely on it.
    return m
|
diofant/diofant
|
diofant/matrices/dense.py
|
Python
|
bsd-3-clause
| 41,640
|
[
"DIRAC"
] |
167cb31ef409e72b330354b3e69cd80d8bde7deb14862ee7cbbe68aebedb87bb
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: rename column ``feature_id`` to ``record_id``
    on the ``profiles_indicatordata`` table."""

    def forwards(self, orm):
        # Apply: feature_id -> record_id
        db.rename_column('profiles_indicatordata', 'feature_id', 'record_id')

    def backwards(self, orm):
        # Revert: record_id -> feature_id
        db.rename_column('profiles_indicatordata', 'record_id', 'feature_id')

    # Frozen ORM snapshot used by South at migration time.  This is
    # generated code; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'profiles.datadomain': {
            'Meta': {'object_name': 'DataDomain'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.Indicator']", 'through': "orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '20', 'db_index': 'True'})
        },
        'profiles.datasource': {
            'Meta': {'object_name': 'DataSource'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'implementation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
        },
        'profiles.geolevel': {
            'Meta': {'object_name': 'GeoLevel'},
            'data_sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.DataSource']", 'symmetrical': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'})
        },
        'profiles.geomapping': {
            'Meta': {'object_name': 'GeoMapping'},
            'from_record': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'mappings_as_from'", 'to': "orm['profiles.GeoRecord']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'to_record': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mappings_as_to'", 'symmetrical': 'False', 'to': "orm['profiles.GeoRecord']"})
        },
        'profiles.georecord': {
            'Meta': {'unique_together': "(('level', 'geo_id', 'custom_name', 'owner'),)", 'object_name': 'GeoRecord'},
            'components': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'components_rel_+'", 'blank': 'True', 'to': "orm['profiles.GeoRecord']"}),
            'custom_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'geo_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoLevel']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoRecord']", 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
        },
        'profiles.indicator': {
            'Meta': {'object_name': 'Indicator'},
            'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataSource']"}),
            'formula': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['profiles.GeoLevel']", 'symmetrical': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
        },
        'profiles.indicatordata': {
            'Meta': {'unique_together': "(('indicator', 'record', 'time'),)", 'object_name': 'IndicatorData'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}),
            'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'record': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.GeoRecord']"}),
            'time': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Time']", 'null': 'True'}),
            'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'})
        },
        'profiles.indicatordomain': {
            'Meta': {'object_name': 'IndicatorDomain'},
            'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataDomain']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"})
        },
        'profiles.indicatorpart': {
            'Meta': {'object_name': 'IndicatorPart'},
            'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.DataSource']"}),
            'formula': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Indicator']"}),
            'time': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Time']"})
        },
        'profiles.time': {
            'Meta': {'object_name': 'Time'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'sort': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'})
        }
    }
    complete_apps = ['profiles']
|
ProvidencePlan/Profiles
|
communityprofiles/profiles/oldmigrations/0006_change_feature_field_to_record.py
|
Python
|
mit
| 10,006
|
[
"MOE"
] |
b43a0a6cd50d026802c82118fab976726f09c52f608f59eb82cb51cd2468083c
|
"""
Die Main-Methode, die das 'Sonnensystem' startet.
Inspiriert von den Panda3D Samples.
Setzt alles zusammen und startet die Applikation.
"""
__author__ = 'Thomas Taschner, Michael Weinberger'
__date__ = 20151209
__version__ = 1.0
import direct.directbase.DirectStart
from direct.showbase import DirectObject
from panda3d.core import *
from direct.gui.DirectGui import *
from direct.showbase.DirectObject import DirectObject
from Ambiance import *
from Lighting import *
from Galaxy import *
from BodyFactory import *
import sys
class World(DirectObject):
    """Assembles the whole scene: ambience, lighting, skybox and all
    celestial bodies, and wires up the exit key.

    NOTE: ``base``, ``loader``, ``render`` and ``run`` are made available
    globally by the ``direct.directbase.DirectStart`` import at the top of
    this module (standard Panda3D behavior — confirm for the Panda3D
    version in use).
    """

    def __init__(self):
        # Ambience (background/sound) and lighting helpers.
        self.amb = Ambiance(base, loader, 1.0, 0.6, -0.5, 0.5)
        self.lig = Lighting(render)
        self.amb.initbg()
        # Caption in the lower-right corner of the window.
        self.title = OnscreenText(text="Venus and Mars - Taschner | Weinberger 5BHIT", style=1, fg=(1, 1, 1, 1),
                                  pos=(0.9, -0.95), scale=.03)
        # Skybox surrounding the whole scene.
        self.galaxy = Galaxy("models/solar_sky_sphere.egg", "models/galaxie.jpg", 1000, loader, render)
        self.galaxy.loadenvironment()
        # Central body; the planets below orbit it.
        deathstar = BodyFactory.create_object('deathstar')
        deathstar.loadobject()
        deathstar.rotatesun()
        # Planets: rotateobject(orbital period, rotation period) — units
        # presumably Earth-relative; confirm against BodyFactory.
        mercury = BodyFactory.create_object('mercury')
        mercury.loadobject()
        mercury.rotateobject(0.241, 59)
        venus = BodyFactory.create_object('venus')
        venus.loadobject()
        venus.rotateobject(0.615, 243)
        mars = BodyFactory.create_object('mars')
        mars.loadobject()
        mars.rotateobject(1.5, 1.7)
        earth = BodyFactory.create_object('earth')
        earth.loadobject()
        earth.rotateobject(1, 1.5)
        # The moon is attached to (orbits) the earth node.
        moon = BodyFactory.create_object('moon')
        moon.loadmoon(earth)
        moon.rotateobject(.1, 1)
        tatooine = BodyFactory.create_object('tatooine')
        tatooine.loadobject()
        tatooine.rotateobject(10, 10)
        mordor = BodyFactory.create_object('mordor')
        mordor.loadobject()
        mordor.rotateobject(0.7, 4)
        xwing = BodyFactory.create_object('xwing')
        xwing.loadxwing()
        xwing.rotateobject(0.1, 25000)
        self.lig.activateshadows()
        self.amb.startsound()  # start the sound only once everything has finished loading
        self.accept('escape', sys.exit)  # close the program when Escape is pressed
# Build the scene and enter Panda3D's main loop (``run`` is provided by
# the DirectStart import above).
w = World()
run()
|
mweinberger-tgm/VenusAndMars
|
src/VenusAndMars.py
|
Python
|
apache-2.0
| 2,334
|
[
"Galaxy"
] |
bb24069bb0dff1009c49fa317104fe49ce598bfce5d40189f6b002131c67c4b3
|
# Version: 0.15+dev
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, and pypy
* [![Latest Version]
(https://pypip.in/version/versioneer/badge.svg?style=flat)
](https://pypi.python.org/pypi/versioneer/)
* [![Build Status]
(https://travis-ci.org/warner/python-versioneer.png?branch=master)
](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere to your $PATH
* add a `[versioneer]` section to your setup.cfg (see below)
* run `versioneer install` in your source tree, commit the results
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
First, decide on values for the following configuration variables:
* `VCS`: the version control system you use. Currently accepts "git".
* `style`: the style of version string to be produced. See "Styles" below for
details. Defaults to "pep440", which looks like
`TAG[+DISTANCE.gSHORTHASH[.dirty]]`.
* `versionfile_source`:
A project-relative pathname into which the generated version strings should
be written. This is usually a `_version.py` next to your project's main
`__init__.py` file, so it can be imported at runtime. If your project uses
`src/myproject/__init__.py`, this should be `src/myproject/_version.py`.
This file should be checked in to your VCS as usual: the copy created below
by `setup.py setup_versioneer` will include code that parses expanded VCS
keywords in generated tarballs. The 'build' and 'sdist' commands will
replace it with a copy that has just the calculated version string.
This must be set even if your project does not have any modules (and will
therefore never import `_version.py`), since "setup.py sdist" -based trees
still need somewhere to record the pre-calculated version strings. Anywhere
in the source tree should do. If there is a `__init__.py` next to your
`_version.py`, the `setup.py setup_versioneer` command (described below)
will append some `__version__`-setting assignments, if they aren't already
present.
* `versionfile_build`:
Like `versionfile_source`, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`,
then you will probably have `versionfile_build='myproject/_version.py'` and
`versionfile_source='src/myproject/_version.py'`.
If this is set to None, then `setup.py build` will not attempt to rewrite
any `_version.py` in the built tree. If your project does not have any
libraries (e.g. if it only builds a script), then you should use
`versionfile_build = None`. To actually use the computed version string,
your `setup.py` will need to override `distutils.command.build_scripts`
with a subclass that explicitly inserts a copy of
`versioneer.get_version()` into your script file. See
`test/demoapp-script-only/setup.py` for an example.
* `tag_prefix`:
a string, like 'PROJECTNAME-', which appears at the start of all VCS tags.
If your tags look like 'myproject-1.2.0', then you should use
tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this
should be an empty string, using either `tag_prefix=` or `tag_prefix=''`.
* `parentdir_prefix`:
an optional string, frequently the same as tag_prefix, which appears at the
start of all unpacked tarball filenames. If your tarball unpacks into
'myproject-1.2.0', this should be 'myproject-'. To disable this feature,
just omit the field from your `setup.cfg`.
This tool provides one script, named `versioneer`. That script has one mode,
"install", which writes a copy of `versioneer.py` into the current directory
and runs `versioneer.py setup` to finish the installation.
To versioneer-enable your project:
* 1: Modify your `setup.cfg`, adding a section named `[versioneer]` and
populating it with the configuration values you decided earlier (note that
the option names are not case-sensitive):
````
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
````
* 2: Run `versioneer install`. This will do the following:
* copy `versioneer.py` into the top of your source tree
* create `_version.py` in the right place (`versionfile_source`)
* modify your `__init__.py` (if one exists next to `_version.py`) to define
`__version__` (by calling a function from `_version.py`)
* modify your `MANIFEST.in` to include both `versioneer.py` and the
generated `_version.py` in sdist tarballs
`versioneer install` will complain about any problems it finds with your
`setup.py` or `setup.cfg`. Run it multiple times until you have fixed all
the problems.
* 3: add a `import versioneer` to your setup.py, and add the following
arguments to the setup() call:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
* 4: commit these changes to your VCS. To make sure you won't forget,
`versioneer install` will mark everything it touched for addition using
`git add`. Don't forget to add `setup.py` and `setup.cfg` too.
## Post-Installation Usage
Once established, all uses of your tree from a VCS checkout should get the
current version string. All generated tarballs should include an embedded
version string (so users who unpack them will not need a VCS tool installed).
If you distribute your project through PyPI, then the release process should
boil down to two steps:
* 1: git tag 1.0
* 2: python setup.py register sdist upload
If you distribute it through github (i.e. users use github to generate
tarballs with `git archive`), the process is:
* 1: git tag 1.0
* 2: git push; git push --tags
Versioneer will report "0+untagged.NUMCOMMITS.gHASH" until your tree has at
least one tag in its history.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See details.md in the Versioneer source tree for
descriptions.
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
### Upgrading to 0.15
Starting with this version, Versioneer is configured with a `[versioneer]`
section in your `setup.cfg` file. Earlier versions required the `setup.py` to
set attributes on the `versioneer` module immediately after import. The new
version will refuse to run (raising an exception during import) until you
have provided the necessary `setup.cfg` section.
In addition, the Versioneer package provides an executable named
`versioneer`, and the installation process is driven by running `versioneer
install`. In 0.14 and earlier, the executable was named
`versioneer-installer` and was run without an argument.
### Upgrading to 0.14
0.14 changes the format of the version string. 0.13 and earlier used
hyphen-separated strings like "0.11-2-g1076c97-dirty". 0.14 and beyond use a
plus-separated "local version" section strings, with dot-separated
components, like "0.11+2.g1076c97". PEP440-strict tools did not like the old
format, but should be ok with the new one.
### Upgrading from 0.11 to 0.12
Nothing special.
### Upgrading from 0.10 to 0.11
You must add a `versioneer.VCS = "git"` to your `setup.py` before re-running
`setup.py setup_versioneer`. This will enable the use of additional
version-control systems (SVN, etc) in the future.
## Future Directions
This tool is designed to be easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
"""
from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    Instances are plain attribute bags; get_config_from_root() attaches
    VCS, style, versionfile_source, versionfile_build, tag_prefix,
    parentdir_prefix, and verbose.
    """
def get_root():
    """Locate and return the project root directory.

    All Versioneer commands are expected to run from the project root --
    the directory holding setup.py, setup.cfg, and versioneer.py .
    Raises VersioneerBadRootError when no plausible root can be found.
    """
    def _looks_like_root(candidate):
        # The root is recognized by the presence of setup.py or versioneer.py.
        return (os.path.exists(os.path.join(candidate, "setup.py"))
                or os.path.exists(os.path.join(candidate, "versioneer.py")))

    root = os.path.realpath(os.path.abspath(os.getcwd()))
    if not _looks_like_root(root):
        # allow 'python path/to/setup.py COMMAND': fall back to the
        # directory containing the script being executed
        root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
    if not _looks_like_root(root):
        err = ("Versioneer was unable to run the project root directory. "
               "Versioneer requires setup.py to be executed from "
               "its immediate directory (like 'python setup.py COMMAND'), "
               "or in a way that lets it use sys.argv[0] to find the root "
               "(like 'python path/to/setup.py COMMAND').")
        raise VersioneerBadRootError(err)
    try:
        # Certain runtime workflows (setup.py install/develop in a setuptools
        # tree) execute all dependencies in a single python process, so
        # "versioneer" may be imported multiple times, and python's shared
        # module-import table will cache the first one. So we can't use
        # os.path.dirname(__file__), as that will find whichever
        # versioneer.py was first imported, even in later projects.
        me = os.path.realpath(os.path.abspath(__file__))
        versioneer_py = os.path.join(root, "versioneer.py")
        if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]:
            print("Warning: build in %s is using versioneer.py from %s"
                  % (os.path.dirname(me), versioneer_py))
    except NameError:
        # frozen/embedded interpreters may not define __file__
        pass
    return root
def get_config_from_root(root):
    """Read the project setup.cfg file to determine Versioneer config."""
    # This might raise EnvironmentError (if setup.cfg is missing), or
    # configparser.NoSectionError (if it lacks a [versioneer] section), or
    # configparser.NoOptionError (if it lacks "VCS="). See the docstring at
    # the top of versioneer.py for instructions on writing your setup.cfg .
    setup_cfg = os.path.join(root, "setup.cfg")
    parser = configparser.SafeConfigParser()
    with open(setup_cfg, "r") as cfg_file:
        parser.readfp(cfg_file)
    VCS = parser.get("versioneer", "VCS")  # mandatory

    def opt(name):
        # Return the [versioneer] option *name*, or None when absent.
        if not parser.has_option("versioneer", name):
            return None
        return parser.get("versioneer", name)

    cfg = VersioneerConfig()
    cfg.VCS = VCS
    cfg.style = opt("style") or ""
    cfg.versionfile_source = opt("versionfile_source")
    cfg.versionfile_build = opt("versionfile_build")
    cfg.tag_prefix = opt("tag_prefix")
    if cfg.tag_prefix in ("''", '""'):
        # an explicitly-quoted empty prefix in setup.cfg means "no prefix"
        cfg.tag_prefix = ""
    cfg.parentdir_prefix = opt("parentdir_prefix")
    cfg.verbose = opt("verbose")
    return cfg
# Version-extraction strategies (keyword expansion, VCS queries, parentdir
# parsing) raise this to signal "try the next strategy"; it is not fatal.
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools, keyed by VCS name ("git"):
# LONG_VERSION_PY maps VCS name -> the _version.py template text;
# HANDLERS maps VCS name -> {method name -> handler function}, populated by
# the register_vcs_handler decorator.
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        # Record f under HANDLERS[vcs][method], creating the per-VCS
        # sub-dict on first use, and hand the function back unchanged.
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
    """Try each executable name in *commands* until one launches.

    Returns the process's stripped stdout as a string, or None when no
    candidate could be started or the process exited non-zero.
    """
    assert isinstance(commands, list)
    process = None
    for candidate in commands:
        display = str([candidate] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen(
                [candidate] + args, cwd=cwd, stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None))
        except EnvironmentError:
            exc = sys.exc_info()[1]
            if exc.errno == errno.ENOENT:
                # this candidate doesn't exist; try the next spelling
                continue
            if verbose:
                print("unable to run %s" % display)
                print(exc)
            return None
        else:
            break
    else:
        # loop exhausted without a successful Popen
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None
    stdout = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % display)
        return None
    return stdout
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15+dev (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes
both the project name and a version string.
"""
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with "
"prefix '%%s'" %% (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs-tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Eexceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Scans *versionfile_abs* for the ``git_refnames`` and ``git_full``
    assignments and returns whichever were found in a dict keyed
    "refnames" / "full"; the dict is empty or partial when the file is
    unreadable or a keyword is missing.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        # "with" guarantees the handle is closed even when reading raises;
        # the previous code leaked the file object on mid-read exceptions.
        with open(versionfile_abs, "r") as f:
            for line in f:
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
    except EnvironmentError:
        # missing/unreadable file: fall through with whatever we collected
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords."""
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        # keyword substitution never ran: this is not a git-archive tarball
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set(r.strip() for r in refnames.strip("()").split(","))
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set(r[len(TAG):] for r in refs if r.startswith(TAG))
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set(r for r in refs if re.search(r'\d', r))
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if not ref.startswith(tag_prefix):
            continue
        r = ref[len(tag_prefix):]
        if verbose:
            print("picking %s" % r)
        return {"version": r,
                "full-revisionid": keywords["full"].strip(),
                "dirty": False, "error": None}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, error, dirty,
    closest-tag (None when no tag matched), and distance.
    """
    # this strategy only applies inside a git checkout
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        raise NotThisMethod("no .git directory")
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
                                      "--always", "--long",
                                      "--match", "%s*" % tag_prefix],
                               cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        # strip the suffix before parsing; the flag is already recorded
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
    """Git-specific installation logic for Versioneer.

    For Git, this means creating/changing .gitattributes to mark _version.py
    for export-time keyword substitution, then 'git add'-ing the touched
    files: manifest_in, versionfile_source, ipy (when given), this
    versioneer.py, and .gitattributes (when it was modified here).
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    files = [manifest_in, versionfile_source]
    if ipy:
        files.append(ipy)
    try:
        me = __file__
        if me.endswith(".pyc") or me.endswith(".pyo"):
            # point at the .py source, not the compiled cache file
            me = os.path.splitext(me)[0] + ".py"
        versioneer_file = os.path.relpath(me)
    except NameError:
        # frozen/embedded interpreters may not define __file__
        versioneer_file = "versioneer.py"
    files.append(versioneer_file)
    present = False
    try:
        # "with" closes the handle even if reading raises mid-loop; the
        # previous code leaked the file object on such exceptions.
        with open(".gitattributes", "r") as f:
            for line in f:
                if line.strip().startswith(versionfile_source):
                    if "export-subst" in line.strip().split()[1:]:
                        present = True
    except EnvironmentError:
        # no .gitattributes yet; we'll create it below
        pass
    if not present:
        with open(".gitattributes", "a+") as f:
            f.write("%s export-subst\n" % versionfile_source)
        files.append(".gitattributes")
    run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string.
    """
    dirname = os.path.basename(root)
    if dirname.startswith(parentdir_prefix):
        # whatever follows the prefix is taken verbatim as the version
        return {"version": dirname[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False, "error": None}
    if verbose:
        print("guessing rootdir is '%s', but '%s' doesn't start with "
              "prefix '%s'" % (root, dirname, parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.15+dev) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
import sys
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
    """Recover a previously-written version dict from a _version.py file."""
    marker = r"version_json = '''\n(.*)''' # END VERSION_JSON"
    try:
        with open(filename) as fh:
            text = fh.read()
    except EnvironmentError:
        raise NotThisMethod("unable to read _version.py")
    match = re.search(marker, text, re.M | re.S)
    if match is None:
        raise NotThisMethod("no version_json in _version.py")
    return json.loads(match.group(1))
def write_to_version_file(filename, versions):
    """Replace *filename* with a static _version.py recording *versions*."""
    # remove the old file first so we always write a fresh copy
    os.unlink(filename)
    serialized = json.dumps(versions, sort_keys=True,
                            indent=1, separators=(",", ": "))
    with open(filename, "w") as handle:
        handle.write(SHORT_VERSION_PY % serialized)
    print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a .

    Robust against pieces["closest-tag"] being present but None (as set by
    git_pieces_from_vcs when no tag matched): the previous
    pieces.get("closest-tag", "") returned None in that case, and
    '"+" in None' raised TypeError.
    """
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Render TAG[+DISTANCE.gHEX[.dirty]] (PEP 440 local version).

    A tagged build that is then dirtied renders as TAG+0.gHEX.dirty.
    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"] or pieces["dirty"]:
            version += plus_or_dot(pieces)
            version += "%d.g%s" % (pieces["distance"], pieces["short"])
            if pieces["dirty"]:
                version += ".dirty"
    else:
        # exception #1: no tag anywhere in history
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """Render TAG[.post.devDISTANCE] -- never marks dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return tag + ".post.dev%d" % pieces["distance"]
    return tag
def render_pep440_post(pieces):
    """Render TAG[.postDISTANCE[.dev0]+gHEX].

    The ".dev0" suffix flags a dirty tree.  Note that .dev0 sorts
    backwards (a dirty tree appears "older" than the corresponding clean
    one), but you shouldn't be releasing software with -dirty anyways.
    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"] or pieces["dirty"]:
            version += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                version += ".dev0"
            version += plus_or_dot(pieces)
            version += "g%s" % pieces["short"]
        return version
    # exception #1: untagged history always carries the hash
    version = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        version += ".dev0"
    return version + "+g%s" % pieces["short"]
def render_pep440_old(pieces):
    """Render TAG[.postDISTANCE[.dev0]] (no commit hash).

    The ".dev0" suffix means the tree was dirty.
    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"] or pieces["dirty"]:
            version += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                version += ".dev0"
    else:
        # exception #1
        version = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
    return version
def render_git_describe(pieces):
    """Render TAG[-DISTANCE-gHEX][-dirty].

    Mimics 'git describe --tags --dirty --always': the distance/hash
    part appears only when there is distance from the tag.
    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"]:
            version += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1: bare commit hash
        version = pieces["short"]
    if pieces["dirty"]:
        version += "-dirty"
    return version
def render_git_describe_long(pieces):
    """Render TAG-DISTANCE-gHEX[-dirty].

    Mimics 'git describe --tags --dirty --always --long': the
    distance/hash part is emitted unconditionally, even at distance 0.
    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1: bare commit hash
        version = pieces["short"]
    if pieces["dirty"]:
        version += "-dirty"
    return version
def render(pieces, style):
    """Render *pieces* into the style named by *style*.

    Returns a dict with "version", "full-revisionid", "dirty" and
    "error" keys.  An upstream extraction error short-circuits to an
    "unknown" version; an unrecognized style raises ValueError.
    """
    if pieces["error"]:
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"]}
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None}
# NOTE(review): appears to be raised during project-root discovery
# (presumably by get_root(), defined earlier in this file) — confirm.
class VersioneerBadRootError(Exception):
    """The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
    """Get the project version from whatever source is available.
    Returns dict with two keys: 'version' and 'full'.

    Tries, in order: expanded VCS keywords, the generated _version.py,
    a live VCS query (e.g. 'git describe'), and finally the parent
    directory name.  Each strategy signals "not applicable" by raising
    NotThisMethod, which falls through to the next one.
    """
    if "versioneer" in sys.modules:
        # see the discussion in cmdclass.py:get_cmdclass()
        del sys.modules["versioneer"]
    root = get_root()
    cfg = get_config_from_root(root)
    # Hard configuration errors: these asserts fire for a broken setup.cfg,
    # not for a missing version, so they intentionally abort.
    assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
    handlers = HANDLERS.get(cfg.VCS)
    assert handlers, "unrecognized VCS '%s'" % cfg.VCS
    verbose = verbose or cfg.verbose
    assert cfg.versionfile_source is not None, \
        "please set versioneer.versionfile_source"
    assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
    versionfile_abs = os.path.join(root, cfg.versionfile_source)
    # extract version from first of: _version.py, VCS command (e.g. 'git
    # describe'), parentdir. This is meant to work for developers using a
    # source checkout, for users of a tarball created by 'setup.py sdist',
    # and for users of a tarball/zipball created by 'git archive' or github's
    # download-from-tag feature or the equivalent in other VCSes.
    get_keywords_f = handlers.get("get_keywords")
    from_keywords_f = handlers.get("keywords")
    if get_keywords_f and from_keywords_f:
        # Strategy 1: $Format$ keywords expanded by 'git archive'.
        try:
            keywords = get_keywords_f(versionfile_abs)
            ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
            if verbose:
                print("got version from expanded keyword %s" % ver)
            return ver
        except NotThisMethod:
            pass
    # Strategy 2: a previously generated _version.py (sdist tarballs).
    try:
        ver = versions_from_file(versionfile_abs)
        if verbose:
            print("got version from file %s %s" % (versionfile_abs, ver))
        return ver
    except NotThisMethod:
        pass
    # Strategy 3: ask the VCS directly (developer checkouts).
    from_vcs_f = handlers.get("pieces_from_vcs")
    if from_vcs_f:
        try:
            pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
            ver = render(pieces, cfg.style)
            if verbose:
                print("got version from VCS %s" % ver)
            return ver
        except NotThisMethod:
            pass
    # Strategy 4: parse the version out of the unpack directory name.
    try:
        if cfg.parentdir_prefix:
            ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
            if verbose:
                print("got version from parentdir %s" % ver)
            return ver
    except NotThisMethod:
        pass
    if verbose:
        print("unable to compute version")
    # Everything failed: report a sentinel version rather than raising.
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None, "error": "unable to compute version"}
def get_version():
    """Get the short version string for this project.

    Convenience wrapper returning only the "version" entry of the
    dict produced by get_versions().
    """
    return get_versions()["version"]
def get_cmdclass():
    """Get the custom setuptools/distutils subclasses used by Versioneer.

    Returns a dict suitable for setup(cmdclass=...) containing "version",
    "build_py" (or "build_exe" under cx_Freeze) and "sdist" commands that
    bake the computed version into built/distributed artifacts.
    """
    if "versioneer" in sys.modules:
        del sys.modules["versioneer"]
        # this fixes the "python setup.py develop" case (also 'install' and
        # 'easy_install .'), in which subdependencies of the main project are
        # built (using setup.py bdist_egg) in the same python process. Assume
        # a main project A and a dependency B, which use different versions
        # of Versioneer. A's setup.py imports A's Versioneer, leaving it in
        # sys.modules by the time B's setup.py is executed, causing B to run
        # with the wrong versioneer. Setuptools wraps the sub-dep builds in a
        # sandbox that restores sys.modules to it's pre-build state, so the
        # parent is protected against the child's "import versioneer". By
        # removing ourselves from sys.modules here, before the child build
        # happens, we protect the child from the parent's versioneer too.
        # Also see https://github.com/warner/python-versioneer/issues/52
    cmds = {}
    # we add "version" to both distutils and setuptools
    from distutils.core import Command
    class cmd_version(Command):
        # 'python setup.py version': print the computed version and exit.
        description = "report generated version string"
        user_options = []
        boolean_options = []
        def initialize_options(self):
            pass
        def finalize_options(self):
            pass
        def run(self):
            vers = get_versions(verbose=True)
            print("Version: %s" % vers["version"])
            print(" full-revisionid: %s" % vers.get("full-revisionid"))
            print(" dirty: %s" % vers.get("dirty"))
            if vers["error"]:
                print(" error: %s" % vers["error"])
    cmds["version"] = cmd_version
    # we override "build_py" in both distutils and setuptools
    #
    # most invocation pathways end up running build_py:
    # distutils/build -> build_py
    # distutils/install -> distutils/build ->..
    # setuptools/bdist_wheel -> distutils/install ->..
    # setuptools/bdist_egg -> distutils/install_lib -> build_py
    # setuptools/install -> bdist_egg ->..
    # setuptools/develop -> ?
    # we override different "build_py" commands for both environments
    if "setuptools" in sys.modules:
        from setuptools.command.build_py import build_py as _build_py
    else:
        from distutils.command.build_py import build_py as _build_py
    class cmd_build_py(_build_py):
        # Standard build, then rewrite the copied _version.py with the
        # version computed at build time.
        def run(self):
            root = get_root()
            cfg = get_config_from_root(root)
            versions = get_versions()
            _build_py.run(self)
            # now locate _version.py in the new build/ directory and replace
            # it with an updated value
            if cfg.versionfile_build:
                target_versionfile = os.path.join(self.build_lib,
                                                  cfg.versionfile_build)
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
    cmds["build_py"] = cmd_build_py
    if "cx_Freeze" in sys.modules:  # cx_freeze enabled?
        from cx_Freeze.dist import build_exe as _build_exe
        class cmd_build_exe(_build_exe):
            # cx_Freeze reads the *source* tree, so temporarily swap the
            # source _version.py for a static one, then restore the
            # VCS-querying template afterwards.
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
                _build_exe.run(self)
                os.unlink(target_versionfile)
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(LONG %
                            {"DOLLAR": "$",
                             "STYLE": cfg.style,
                             "TAG_PREFIX": cfg.tag_prefix,
                             "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                             "VERSIONFILE_SOURCE": cfg.versionfile_source,
                             })
        cmds["build_exe"] = cmd_build_exe
        # cx_Freeze handles the version file itself; drop the build_py hook.
        del cmds["build_py"]
    # we override different "sdist" commands for both environments
    if "setuptools" in sys.modules:
        from setuptools.command.sdist import sdist as _sdist
    else:
        from distutils.command.sdist import sdist as _sdist
    class cmd_sdist(_sdist):
        def run(self):
            versions = get_versions()
            self._versioneer_generated_versions = versions
            # unless we update this, the command will keep using the old
            # version
            self.distribution.metadata.version = versions["version"]
            return _sdist.run(self)
        def make_release_tree(self, base_dir, files):
            root = get_root()
            cfg = get_config_from_root(root)
            _sdist.make_release_tree(self, base_dir, files)
            # now locate _version.py in the new base_dir directory
            # (remembering that it may be a hardlink) and replace it with an
            # updated value
            target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
            print("UPDATING %s" % target_versionfile)
            write_to_version_file(target_versionfile,
                                  self._versioneer_generated_versions)
    cmds["sdist"] = cmd_sdist
    return cmds
# Error message shown when setup.cfg lacks a usable [versioneer] section.
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
# Commented-out [versioneer] stanza appended to setup.cfg by do_setup()
# when no configuration exists yet.
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
# Snippet appended to the package __init__.py so that
# ``package.__version__`` works at runtime.
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
    """Main VCS-independent setup function for installing Versioneer.

    Generates _version.py from the long template, wires __init__.py and
    MANIFEST.in, and delegates VCS-specific work to do_vcs_install().
    Returns 0 on success, 1 when setup.cfg configuration is missing.
    """
    root = get_root()
    try:
        cfg = get_config_from_root(root)
    except (EnvironmentError, configparser.NoSectionError,
            configparser.NoOptionError) as e:
        # Missing file or missing [versioneer] section: seed setup.cfg with
        # a commented sample so the user only has to uncomment and edit.
        if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
            print("Adding sample versioneer config to setup.cfg",
                  file=sys.stderr)
            with open(os.path.join(root, "setup.cfg"), "a") as f:
                f.write(SAMPLE_CONFIG)
        print(CONFIG_ERROR, file=sys.stderr)
        return 1
    # Write the runtime _version.py from the VCS-specific long template.
    print(" creating %s" % cfg.versionfile_source)
    with open(cfg.versionfile_source, "w") as f:
        LONG = LONG_VERSION_PY[cfg.VCS]
        f.write(LONG % {"DOLLAR": "$",
                        "STYLE": cfg.style,
                        "TAG_PREFIX": cfg.tag_prefix,
                        "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                        "VERSIONFILE_SOURCE": cfg.versionfile_source,
                        })
    # Ensure the package __init__.py exposes __version__ via the snippet.
    ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
                       "__init__.py")
    if os.path.exists(ipy):
        try:
            with open(ipy, "r") as f:
                old = f.read()
        except EnvironmentError:
            old = ""
        if INIT_PY_SNIPPET not in old:
            print(" appending to %s" % ipy)
            with open(ipy, "a") as f:
                f.write(INIT_PY_SNIPPET)
        else:
            print(" %s unmodified" % ipy)
    else:
        print(" %s doesn't exist, ok" % ipy)
        ipy = None
    # Make sure both the top-level "versioneer.py" and versionfile_source
    # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
    # they'll be copied into source distributions. Pip won't be able to
    # install the package without this.
    manifest_in = os.path.join(root, "MANIFEST.in")
    simple_includes = set()
    try:
        with open(manifest_in, "r") as f:
            for line in f:
                if line.startswith("include "):
                    for include in line.split()[1:]:
                        simple_includes.add(include)
    except EnvironmentError:
        pass
    # That doesn't cover everything MANIFEST.in can do
    # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
    # it might give some false negatives. Appending redundant 'include'
    # lines is safe, though.
    if "versioneer.py" not in simple_includes:
        print(" appending 'versioneer.py' to MANIFEST.in")
        with open(manifest_in, "a") as f:
            f.write("include versioneer.py\n")
    else:
        print(" 'versioneer.py' already in MANIFEST.in")
    if cfg.versionfile_source not in simple_includes:
        print(" appending versionfile_source ('%s') to MANIFEST.in" %
              cfg.versionfile_source)
        with open(manifest_in, "a") as f:
            f.write("include %s\n" % cfg.versionfile_source)
    else:
        print(" versionfile_source already in MANIFEST.in")
    # Make VCS-specific changes. For git, this means creating/changing
    # .gitattributes to mark _version.py for export-time keyword
    # substitution.
    do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
    return 0
def scan_setup_py():
    """Validate the contents of setup.py against Versioneer's expectations.

    Scans ./setup.py for the required import/get_version/get_cmdclass
    usage and for obsolete module-attribute configuration.  Returns the
    number of problems found (0 when everything looks right).
    """
    found = set()
    setters = False
    errors = 0
    with open("setup.py", "r") as setup_file:
        for line in setup_file:
            if "import versioneer" in line:
                found.add("import")
            if "versioneer.get_cmdclass()" in line:
                found.add("cmdclass")
            if "versioneer.get_version()" in line:
                found.add("get_version")
            if "versioneer.VCS" in line or \
                    "versioneer.versionfile_source" in line:
                setters = True
    if len(found) != 3:
        for msg in ("",
                    "Your setup.py appears to be missing some important items",
                    "(but I might be wrong). Please make sure it has something",
                    "roughly like the following:",
                    "",
                    " import versioneer",
                    " setup( version=versioneer.get_version(),",
                    " cmdclass=versioneer.get_cmdclass(), ...)",
                    ""):
            print(msg)
        errors += 1
    if setters:
        for msg in ("You should remove lines like 'versioneer.VCS = ' and",
                    "'versioneer.versionfile_source = ' . This configuration",
                    "now lives in setup.cfg, and should be removed from setup.py",
                    ""):
            print(msg)
        errors += 1
    return errors
if __name__ == "__main__":
    # Command-line entry point.  Only 'setup' is supported: it installs or
    # refreshes the Versioneer scaffolding and sanity-checks setup.py.
    # Previously a missing argument raised IndexError and an unknown
    # command was a silent no-op; both now fail with a usage message.
    cmd = sys.argv[1] if len(sys.argv) > 1 else None
    if cmd == "setup":
        errors = do_setup()
        errors += scan_setup_py()
        if errors:
            sys.exit(1)
    else:
        print("usage: versioneer.py setup", file=sys.stderr)
        sys.exit(1)
|
fusionapp/methanal
|
versioneer.py
|
Python
|
mit
| 65,723
|
[
"Brian"
] |
56a4733c478d008e1261d1f8f8b9650dd7f4ebfb79e2b0e7d0b8c1aed55b9b17
|
#!/usr/bin/env python3
import os
import sys
import argparse
import traceback
# Add the pulsar path
# Locate this script's directory, then prepend the sibling "modules"
# directory to sys.path so the in-tree pulsar package is imported in
# preference to any installed copy.
thispath = os.path.dirname(os.path.realpath(__file__))
psrpath = os.path.join(os.path.dirname(thispath), "modules")
sys.path.insert(0, psrpath)
import pulsar as psr
def Run():
    """Exercise the BasisSet machinery.

    Builds a small system (two hydrogens plus repeated oxygen entries),
    attaches an sto-3g basis, and prints the resulting basis set through
    the global output handle.  Any failure is reported through pulsar's
    global error channels instead of propagating.
    """
    try:
        out = psr.output.GetGlobalOut()
        tester = psr.testing.Tester("Testing BasisSet class")
        tester.PrintHeader()
        # Two distinct hydrogens, then nine identical oxygen entries
        # (apparently probing duplicate handling in the universe —
        # unverified).
        atoms = [
            psr.system.CreateAtom(0, [0.000000000000, 0.000000000000, 0.000000000000], 1),
            psr.system.CreateAtom(1, [1.000000000000, 0.000000000000, 0.000000000000], 1),
        ]
        atoms += [
            psr.system.CreateAtom(2, [0.000000000000, 1.000000000000, 0.000000000000], 8)
            for _ in range(9)
        ]
        molu = psr.system.AtomSetUniverse()
        for atom in atoms:
            molu.append(atom)
        mol = psr.system.System(molu, True)
        mol = psr.system.ApplySingleBasis(psr.system.ShellType.Gaussian, "primary", "sto-3g", mol)
        bs = mol.GetBasisSet("primary")
        bs.Print(out)
        tester.PrintResults()
    except Exception as e:
        psr.output.GlobalOutput("Caught exception in main handler. Contact the developers\n")
        traceback.print_exc()
        psr.output.GlobalError("\n")
        psr.output.GlobalError(str(e))
        psr.output.GlobalError("\n")
# Bring up the pulsar runtime, run the test, then shut down cleanly.
psr.Init(sys.argv, out = "stdout", color = True, debug = True)
Run()
psr.Finalize()
|
pulsar-chem/Pulsar-Core
|
test/old/Old2/old/BasisSet.py
|
Python
|
bsd-3-clause
| 2,286
|
[
"Gaussian"
] |
c671652d79ebb0a7aa600180f9606c1037a67b1037ff54a9d839be6d6cfb93a4
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy
from numpy.core.multiarray import normalize_axis_index
from . import _ni_support
from . import _nd_image
__all__ = ['fourier_gaussian', 'fourier_uniform', 'fourier_ellipsoid',
'fourier_shift']
def _get_output_fourier(output, input):
if output is None:
if input.dtype.type in [numpy.complex64, numpy.complex128,
numpy.float32]:
output = numpy.zeros(input.shape, dtype=input.dtype)
else:
output = numpy.zeros(input.shape, dtype=numpy.float64)
elif type(output) is type:
if output not in [numpy.complex64, numpy.complex128,
numpy.float32, numpy.float64]:
raise RuntimeError("output type not supported")
output = numpy.zeros(input.shape, dtype=output)
elif output.shape != input.shape:
raise RuntimeError("output shape not correct")
return output
def _get_output_fourier_complex(output, input):
if output is None:
if input.dtype.type in [numpy.complex64, numpy.complex128]:
output = numpy.zeros(input.shape, dtype=input.dtype)
else:
output = numpy.zeros(input.shape, dtype=numpy.complex128)
elif type(output) is type:
if output not in [numpy.complex64, numpy.complex128]:
raise RuntimeError("output type not supported")
output = numpy.zeros(input.shape, dtype=output)
elif output.shape != input.shape:
raise RuntimeError("output shape not correct")
return output
def fourier_gaussian(input, sigma, n=-1, axis=-1, output=None):
    """
    Multidimensional Gaussian fourier filter.

    The array is multiplied with the fourier transform of a Gaussian
    kernel.

    Parameters
    ----------
    input : array_like
        The input array.
    sigma : float or sequence
        The sigma of the Gaussian kernel; a scalar applies to all axes,
        a sequence gives one value per axis.
    n : int, optional
        Negative (default): `input` is the result of a complex fft.
        Non-negative: `input` is the result of a real fft and `n` is the
        pre-transform length along the real transform direction.
    axis : int, optional
        The axis of the real transform.
    output : ndarray, optional
        If given, the filtered result is placed here and None is
        returned by the caller's convention.

    Returns
    -------
    fourier_gaussian : ndarray
        The filtered input.
    """
    input = numpy.asarray(input)
    output = _get_output_fourier(output, input)
    axis = normalize_axis_index(axis, input.ndim)
    # One sigma per axis, as a contiguous float64 array for the C kernel.
    sigmas = numpy.ascontiguousarray(
        _ni_support._normalize_sequence(sigma, input.ndim),
        dtype=numpy.float64)
    # Mode 0 selects the Gaussian filter in the C implementation.
    _nd_image.fourier_filter(input, sigmas, n, axis, output, 0)
    return output
def fourier_uniform(input, size, n=-1, axis=-1, output=None):
    """
    Multidimensional uniform fourier filter.

    The array is multiplied with the Fourier transform of a box of given
    size.

    Parameters
    ----------
    input : array_like
        The input array.
    size : float or sequence
        The size of the box used for filtering; a scalar applies to all
        axes, a sequence gives one value per axis.
    n : int, optional
        Negative (default): `input` is the result of a complex fft.
        Non-negative: `input` is the result of a real fft and `n` is the
        pre-transform length along the real transform direction.
    axis : int, optional
        The axis of the real transform.
    output : ndarray, optional
        If given, the filtered result is placed here and None is
        returned by the caller's convention.

    Returns
    -------
    fourier_uniform : ndarray
        The filtered input.
    """
    input = numpy.asarray(input)
    output = _get_output_fourier(output, input)
    axis = normalize_axis_index(axis, input.ndim)
    # One box size per axis, contiguous float64 for the C kernel.
    sizes = numpy.ascontiguousarray(
        _ni_support._normalize_sequence(size, input.ndim),
        dtype=numpy.float64)
    # Mode 1 selects the uniform (box) filter in the C implementation.
    _nd_image.fourier_filter(input, sizes, n, axis, output, 1)
    return output
def fourier_ellipsoid(input, size, n=-1, axis=-1, output=None):
    """
    Multidimensional ellipsoid Fourier filter.

    The array is multiplied with the fourier transform of a ellipsoid of
    given sizes.

    Parameters
    ----------
    input : array_like
        The input array.
    size : float or sequence
        The size of the box used for filtering; a scalar applies to all
        axes, a sequence gives one value per axis.
    n : int, optional
        Negative (default): `input` is the result of a complex fft.
        Non-negative: `input` is the result of a real fft and `n` is the
        pre-transform length along the real transform direction.
    axis : int, optional
        The axis of the real transform.
    output : ndarray, optional
        If given, the filtered result is placed here and None is
        returned by the caller's convention.

    Returns
    -------
    fourier_ellipsoid : ndarray
        The filtered input.

    Notes
    -----
    This function is implemented for arrays of rank 1, 2, or 3.
    """
    input = numpy.asarray(input)
    output = _get_output_fourier(output, input)
    axis = normalize_axis_index(axis, input.ndim)
    # One semi-axis size per axis, contiguous float64 for the C kernel.
    sizes = numpy.ascontiguousarray(
        _ni_support._normalize_sequence(size, input.ndim),
        dtype=numpy.float64)
    # Mode 2 selects the ellipsoid filter in the C implementation.
    _nd_image.fourier_filter(input, sizes, n, axis, output, 2)
    return output
def fourier_shift(input, shift, n=-1, axis=-1, output=None):
    """
    Multidimensional Fourier shift filter.

    The array is multiplied with the Fourier transform of a shift
    operation.

    Parameters
    ----------
    input : array_like
        The input array.
    shift : float or sequence
        The shift to apply; a scalar applies to all axes, a sequence
        gives one value per axis.
    n : int, optional
        Negative (default): `input` is the result of a complex fft.
        Non-negative: `input` is the result of a real fft and `n` is the
        pre-transform length along the real transform direction.
    axis : int, optional
        The axis of the real transform.
    output : ndarray, optional
        If given, the shifted result is placed here and None is returned
        by the caller's convention.

    Returns
    -------
    fourier_shift : ndarray
        The shifted input.
    """
    input = numpy.asarray(input)
    # Shifting multiplies by a complex phase, so the output is always complex.
    output = _get_output_fourier_complex(output, input)
    axis = normalize_axis_index(axis, input.ndim)
    shifts = numpy.ascontiguousarray(
        _ni_support._normalize_sequence(shift, input.ndim),
        dtype=numpy.float64)
    _nd_image.fourier_shift(input, shifts, n, axis, output)
    return output
|
pizzathief/scipy
|
scipy/ndimage/fourier.py
|
Python
|
bsd-3-clause
| 11,239
|
[
"Gaussian"
] |
37d44664cce2c56929d4ec7ceb0659dca85de6e07b90bd5443e5a7e930457331
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Sebastian Wouters <sebastianwouters@gmail.com>
#
# Date: May 18, 2015
#
# Augmented Hessian Newton-Raphson optimization of the RHF energy
#
# The gradient and hessian were determined from the equations in
# http://sebwouters.github.io/CheMPS2/doxygen/classCheMPS2_1_1CASSCF.html
# by throwing out all active space components.
#
# In the following:
# * (o,p) denote occupied spatial RHF orbitals
# * (v,w) denote virtual spatial RHF orbitals
# * f is the Fock operator
#
# \frac{\partial E}{\partial x_{ov}} = 4 * f_{vo}
#
# \frac{\partial^2 E}{\partial x_{ov} \partial x_{pw}} = 4 * delta_{op} * f_{vw}
# - 4 * delta_{vw} * f_{op}
# + 4 * [ 4 * (vo|wp) - (vw|op) - (vp|wo) ]
#
import sys
# This module was removed in PySCF 1.5: emit the deprecation notice and
# terminate immediately.
sys.stderr.write('''
Warning
tools/rhf_newtonraphson.py has been removed since PySCF-1.5. It is replaced by
the SOSCF module in pyscf/soscf module. For SCF calculations, SOSCF method
can be created by calling the .newton() method of RHF/UHF/GHF class.
''')
exit()
# NOTE: everything below is unreachable (exit() above) and is kept only
# for historical reference.
from pyscf import gto, scf
from pyscf.lib import logger
from pyscf.lib import linalg_helper
from pyscf.scf import _vhf
import numpy as np
import scipy
import scipy.sparse.linalg
class __JKengine:
    """Wrapper around mf.get_veff that remembers the previous density
    matrix and potential, so the SCF driver can request incremental
    Coulomb/exchange builds."""
    def __init__(self, myMF, orbitals=None):
        self.mf = myMF
        # State for incremental builds: last density matrix and potential.
        self.dm_prev = 0
        self.vhf_prev = 0
        # MO coefficient matrix for the MO-basis entry point (may be None).
        self.orbs = orbitals
    def getJK_mo(self, dm_mo):
        # Transform the MO-basis density to AO, build the potential,
        # and transform the result back to the MO basis.
        coeffs = self.orbs
        dm_ao = np.dot(np.dot(coeffs, dm_mo), coeffs.T)
        JK_ao = self.getJK_ao(dm_ao)
        return np.dot(np.dot(coeffs.T, JK_ao), coeffs)
    def getJK_ao(self, dm_ao):
        JK_ao = self.mf.get_veff(self.mf.mol, dm_ao,
                                 dm_last=self.dm_prev, vhf_last=self.vhf_prev)
        self.dm_prev = dm_ao
        self.vhf_prev = JK_ao
        return JK_ao
def __wrapAugmentedHessian( FOCK_mo, numPairs, numVirt, myJK_mo ):
    # Build the matrix-vector product for the augmented-Hessian
    # eigenproblem used by the Newton-Raphson solver.  The vector packs
    # the occupied-virtual rotation block x (numPairs x numVirt, stored
    # column-major via order='F') followed by a single scalar for the
    # "augmented" row; the final entry of the result couples the gradient
    # block to that scalar.
    def matvec( vector ):
        # Unpack the rotation block and the augmentation scalar.
        xblock = np.reshape( vector[:-1], ( numPairs, numVirt ), order='F' )
        xscalar = vector[ len(vector)-1 ]
        # Fock-driven part of H*x (gradient term scaled by the scalar,
        # plus the one-electron occupied/virtual commutator terms).
        outblock = 4 * FOCK_mo[:numPairs,numPairs:] * xscalar \
                 + 4 * np.dot( xblock, FOCK_mo[numPairs:,numPairs:] ) \
                 - 4 * np.dot( FOCK_mo[:numPairs,:numPairs], xblock )
        # Two-electron part: build a symmetrized fake density from x and
        # contract it through the JK engine.
        fakedens = np.zeros( [ FOCK_mo.shape[0], FOCK_mo.shape[0] ], dtype=float )
        fakedens[:numPairs,numPairs:] = xblock
        fakedens[numPairs:,:numPairs] = xblock.T
        outblock += 8 * ( myJK_mo.getJK_mo( fakedens )[:numPairs,numPairs:] )
        # Repack: rotation block first (column-major), gradient·x last.
        result = np.zeros( [ len(vector) ], dtype=float )
        result[ len(vector)-1 ] = 4 * np.einsum( 'ij,ij->', xblock, FOCK_mo[:numPairs,numPairs:] )
        result[ :-1 ] = np.reshape( outblock, ( numPairs * numVirt ), order='F' )
        return result
    return matvec
def solve( myMF, dm_guess=None, safe_guess=True ):
    """Converge a closed-shell RHF wavefunction with an augmented Hessian
    Newton-Raphson algorithm.

    Args:
        myMF: pyscf mean-field object; must provide mol, mo_occ and mo_coeff,
            and the molecule must have an even electron count (RHF).
        dm_guess: optional starting density matrix in the AO basis. When None,
            a guess is constructed (see safe_guess).
        safe_guess: if True (default), or if myMF carries no occupations yet,
            the guess comes from myMF.get_init_guess; otherwise it is built
            from the current MO coefficients and occupations.

    Returns:
        The same myMF object, updated in place: converged mo_coeff, mo_occ,
        mo_energy, e_tot and converged=True.
    """
    assert(hasattr(myMF, 'mol'))
    assert(hasattr(myMF, 'mo_occ'))
    assert(hasattr(myMF, 'mo_coeff'))
    assert(myMF.mol.nelectron % 2 == 0) # RHF possible
    S_ao = myMF.get_ovlp( myMF.mol )
    OEI_ao = myMF.get_hcore( myMF.mol )
    numPairs = myMF.mol.nelectron // 2
    numVirt = OEI_ao.shape[0] - numPairs
    numVars = numPairs * numVirt
    if ( dm_guess is None ):
        if (( len( myMF.mo_occ ) == 0 ) or ( safe_guess == True )):
            dm_ao = myMF.get_init_guess( key=myMF.init_guess, mol=myMF.mol )
        else:
            dm_ao = np.dot( np.dot( myMF.mo_coeff, np.diag( myMF.mo_occ ) ), myMF.mo_coeff.T )
    else:
        dm_ao = np.array( dm_guess, copy=True )
    myJK_ao = __JKengine( myMF )
    # One Roothaan step on the guess density to obtain starting orbitals.
    FOCK_ao = OEI_ao + myJK_ao.getJK_ao( dm_ao )
    energies, orbitals = scipy.linalg.eigh( a=FOCK_ao, b=S_ao )
    dm_ao = 2 * np.dot( orbitals[:,:numPairs], orbitals[:,:numPairs].T )
    FOCK_ao = OEI_ao + myJK_ao.getJK_ao( dm_ao )
    FOCK_mo = np.dot( orbitals.T, np.dot( FOCK_ao, orbitals ))
    # Gradient of the RHF energy w.r.t. orbital rotations: occupied-virtual
    # block of the MO Fock matrix (factor 4 from spin and antisymmetry).
    grdnorm = 4 * np.linalg.norm( FOCK_mo[:numPairs,numPairs:] )
    energy = myMF.mol.energy_nuc() + 0.5 * np.einsum( 'ij,ij->', OEI_ao + FOCK_ao, dm_ao )
    logger.note(myMF, "RHF:NewtonRaphson :: Starting augmented Hessian Newton-Raphson RHF.")
    iteration = 0
    while ( grdnorm > 1e-7 ):
        iteration += 1
        tempJK_mo = __JKengine( myMF, orbitals )
        # Koopmans-like initial guess for the rotation amplitudes; the last
        # (augmented) component is left at 1.
        ini_guess = np.ones( [ numVars+1 ], dtype=float )
        for occ in range( numPairs ):
            for virt in range( numVirt ):
                ini_guess[ occ + numPairs * virt ] = - FOCK_mo[ occ, numPairs + virt ] / max( FOCK_mo[ numPairs + virt, numPairs + virt ] - FOCK_mo[ occ, occ ], 1e-6 )
        def myprecon( resid, eigval, eigvec ):
            # Jacobi-Davidson style preconditioner built from the diagonal
            # Fock-energy differences.
            myprecon_cutoff = 1e-10
            local_myprecon = np.zeros( [ numVars+1 ], dtype=float )
            for occ in range( numPairs ):
                for virt in range( numVirt ):
                    denominator = FOCK_mo[ numPairs + virt, numPairs + virt ] - FOCK_mo[ occ, occ ] - eigval
                    if ( abs( denominator ) < myprecon_cutoff ):
                        local_myprecon[ occ + numPairs * virt ] = eigvec[ occ + numPairs * virt ] / myprecon_cutoff
                    else:
                        # local_myprecon = eigvec / ( diag(H) - eigval ) = K^{-1} u
                        local_myprecon[ occ + numPairs * virt ] = eigvec[ occ + numPairs * virt ] / denominator
            if ( abs( eigval ) < myprecon_cutoff ):
                local_myprecon[ numVars ] = eigvec[ numVars ] / myprecon_cutoff
            else:
                local_myprecon[ numVars ] = - eigvec[ numVars ] / eigval
            # alpha_myprecon = - ( r, K^{-1} u ) / ( u, K^{-1} u )
            alpha_myprecon = - np.einsum( 'i,i->', local_myprecon, resid ) / np.einsum( 'i,i->', local_myprecon, eigvec )
            # local_myprecon = r - ( r, K^{-1} u ) / ( u, K^{-1} u ) * u
            local_myprecon = resid + alpha_myprecon * eigvec
            for occ in range( numPairs ):
                for virt in range( numVirt ):
                    denominator = FOCK_mo[ numPairs + virt, numPairs + virt ] - FOCK_mo[ occ, occ ] - eigval
                    if ( abs( denominator ) < myprecon_cutoff ):
                        local_myprecon[ occ + numPairs * virt ] = - local_myprecon[ occ + numPairs * virt ] / myprecon_cutoff
                    else:
                        local_myprecon[ occ + numPairs * virt ] = - local_myprecon[ occ + numPairs * virt ] / denominator
            # BUG FIX: the augmented (last) component previously read/wrote
            # ``local_myprecon[ occ + numPairs * virt ]`` with the stale loop
            # variables left over from the loop above, clobbering an orbital
            # component and never updating slot numVars itself.
            if ( abs( eigval ) < myprecon_cutoff ):
                local_myprecon[ numVars ] = - local_myprecon[ numVars ] / myprecon_cutoff
            else:
                local_myprecon[ numVars ] = local_myprecon[ numVars ] / eigval
            return local_myprecon
        eigenval, eigenvec = linalg_helper.davidson(
            aop=__wrapAugmentedHessian( FOCK_mo, numPairs, numVirt, tempJK_mo ),
            x0=ini_guess,
            precond=myprecon,
            max_space=20,
            nroots=1 )
        # Normalize so the augmented component is 1; the rest is the orbital
        # rotation step in (occupied, virtual) column-major ordering.
        eigenvec = eigenvec / eigenvec[ numVars ]
        update = np.reshape( eigenvec[:-1], ( numPairs, numVirt ), order='F' )
        # Antisymmetric generator -> unitary rotation of the orbitals.
        xmat = np.zeros( [ OEI_ao.shape[0], OEI_ao.shape[0] ], dtype=float )
        xmat[:numPairs,numPairs:] = - update
        xmat[numPairs:,:numPairs] = update.T
        unitary = scipy.linalg.expm( xmat )
        orbitals = np.dot( orbitals, unitary )
        dm_ao = 2 * np.dot( orbitals[:,:numPairs], orbitals[:,:numPairs].T )
        FOCK_ao = OEI_ao + myJK_ao.getJK_ao( dm_ao )
        FOCK_mo = np.dot( orbitals.T, np.dot( FOCK_ao, orbitals ))
        grdnorm = 4 * np.linalg.norm( FOCK_mo[:numPairs,numPairs:] )
        energy = myMF.mol.energy_nuc() + 0.5 * np.einsum( 'ij,ij->', OEI_ao + FOCK_ao, dm_ao )
        logger.note(myMF, "  RHF:NewtonRaphson :: gradient norm (iteration %d) = %1.3g" , iteration, grdnorm)
        logger.note(myMF, "  RHF:NewtonRaphson :: RHF energy (iteration %d) = %1.15g", iteration, energy)
    logger.note(myMF, "RHF:NewtonRaphson :: Convergence reached.")
    logger.note(myMF, "RHF:NewtonRaphson :: Converged RHF energy = %1.15g", energy)
    # Final diagonalization yields canonical orbitals and orbital energies.
    energies, orbitals = scipy.linalg.eigh( a=FOCK_ao, b=S_ao )
    myMF.mo_coeff = orbitals
    myMF.mo_occ = np.zeros( [ OEI_ao.shape[0] ], dtype=int )
    myMF.mo_occ[:numPairs] = 2
    myMF.mo_energy = energies
    myMF.e_tot = energy
    myMF.converged = True
    return myMF
|
gkc1000/pyscf
|
pyscf/tools/rhf_newtonraphson.py
|
Python
|
apache-2.0
| 9,935
|
[
"PySCF"
] |
8d2a7b5503b1ff083938248d3c59cb0ac40e4b5d7b731271e754325148ae6a6b
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to make it easier to submit
# large numbers of jobs on supercomputers. It provides a python interface to physical input, such as
# crystal structures, as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs. It
# is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
# the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along with PyLaDa. If not, see
# <http://www.gnu.org/licenses/>.
###############################
from pytest import fixture, mark
from pylada import vasp_program
@fixture
def path():
    """Return the directory containing this test module."""
    import os.path
    return os.path.dirname(__file__)
@mark.skipif(vasp_program is None, reason="vasp not configured")
def test(tmpdir, path):
    """End-to-end smoke test: epitaxially relax bulk Si with VASP and assert success.

    Skipped when no ``vasp_program`` is configured.
    """
    from pylada.crystal import Structure
    from pylada.vasp import Vasp
    from epirelax import epitaxial
    from pylada import default_comm
    # fcc lattice vectors with a two-atom Si basis (diamond structure).
    structure = Structure([[0, 0.5, 0.5], [0.5, 0, 0.5], [0.5, 0.5, 0]], scale=5.55, name='has a name')\
        .add_atom(0, 0, 0, "Si")\
        .add_atom(0.25, 0.25, 0.25, "Si")
    vasp = Vasp()
    # Deliberately coarse settings -- this checks the workflow, not accuracy.
    vasp.kpoints = "Automatic generation\n0\nMonkhorst\n2 2 2\n0 0 0"
    vasp.prec = "accurate"
    vasp.ediff = 1e-5
    vasp.encut = 1.4
    vasp.ismear = "fermi"
    vasp.sigma = 0.01
    vasp.relaxation = "volume"
    # Pseudopotential resolved relative to this test's directory (``path`` fixture).
    vasp.add_specie = "Si", "{0}/pseudos/Si".format(path)
    result = epitaxial(vasp, structure, outdir=str(tmpdir), epiconv=1e-4, comm=default_comm)
    assert result.success
|
pylada/pylada-light
|
tests/vasp/test_runepidoc.py
|
Python
|
gpl-3.0
| 2,135
|
[
"CRYSTAL",
"VASP"
] |
469ca8b5ab2720a9f85275bd99bd20440d515722f3906da79b22e7a9220b1f1f
|
# -*- coding: utf-8 -*-
"""
@author: adosch <adam@wisehippy.com>
@author: SodaPhish <sodaphish@protonmail.ch>
"""
import sys,os,logging.config
from logging import handlers
try:
    from sp.base import Exceptions
# Narrowed from a bare ``except:`` -- only a missing splib should trigger the
# fail-fast exit, not unrelated errors raised during import.
except ImportError:
    print("must have splib installed in sys.path()")
    sys.exit(1)
class LoggerConfig(object):
    """
    LoggerConfig class object to setup and define root-level logging
    inside a script or application setting in dictionary form to be
    used as input back into the 'logging' module (logging.config.dictConfig).
    """
    def __init__(self, loglevel, logfile=None, logfile_rotate=0, logfile_maxsize=0, logfile_level=logging.INFO,
                 syslog=None, syslog_host="/dev/log", syslog_port=514, syslog_facility=logging.handlers.SysLogHandler.LOG_USER,
                 syslog_level=logging.INFO, console=False, console_level=logging.DEBUG, *args, **kwargs):
        """Build the dictConfig-style configuration dictionary.

        Every option may also be supplied through **kwargs using a dotted key
        (e.g. ``kwargs['logging.loglevel']``); the dotted value takes
        precedence over the corresponding plain argument.
        """
        self.log = None
        self.handler_list = []  # names of the handlers actually enabled
        self.config = {}        # final dictionary for logging.config.dictConfig
        self.loglevel = self._validate_loglevel(kwargs.get('logging.loglevel', loglevel))
        self.logfile = self._check_dirpath(kwargs.get('logging.logfile', logfile))
        self.logfile_rotate = int(kwargs.get('logging.logfile_rotate', logfile_rotate))
        self.logfile_maxsize = int(kwargs.get('logging.logfile_maxsize', logfile_maxsize))
        self.logfile_level = self._validate_loglevel(kwargs.get('logging.logfile_level', logfile_level))
        self.syslog = kwargs.get('logging.syslog', syslog)
        self.syslog_host = kwargs.get('logging.syslog_host', syslog_host)
        self.syslog_port = kwargs.get('logging.syslog_port', syslog_port)
        self.syslog_facility = self._validate_syslogfacility(kwargs.get('logging.syslog_facility', syslog_facility))
        self.syslog_level = self._validate_sysloglevel(kwargs.get('logging.syslog_level', syslog_level))
        self.console = kwargs.get('logging.console', console)
        self.console_level = self._validate_loglevel(kwargs.get('logging.console_level', console_level))
        self.version = 1
        self.disable_existing_loggers = False
        self.config['version'] = self.version
        self.config['disable_existing_loggers'] = self.disable_existing_loggers
        # For more formatting types, visit: https://docs.python.org/2/library/logging.html
        # This should really be done in a lookup fashion of some sort?
        self.config['formatters'] = {}
        self.config['formatters']['standard'] = {}
        self.config['formatters']['standard']['format'] = "[%(asctime)s] %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(message)s"
        self.config['formatters']['standard']['datefmt'] = "%Y-%m-%d %H:%M:%S"
        self.config['formatters']['syslog'] = {}
        self.config['formatters']['syslog']['format'] = "%(module)s[%(process)d]: %(levelname)s %(message)s"
        self.config['handlers'] = self._process_handlers()
        self.config['loggers'] = self._process_root_logger()
    def _return_formatstring(self, handlername):
        # Placeholder for per-handler format-string lookup (not implemented).
        pass
    def _check_dirpath(self, filename):
        """Return ``filename`` if its directory is readable and writable.

        Returns None when ``filename`` is None (no logfile requested);
        raises Exceptions.DirectoryAccessError when the directory exists but
        cannot be read or written.
        """
        try:
            _dirname = os.path.dirname(filename)
            if not _dirname:
                _dirname = os.path.curdir
            if all([os.access(_dirname, os.R_OK), os.access(_dirname, os.W_OK)]):
                return filename
            else:
                raise Exceptions.DirectoryAccessError("Unable to read or write to directory location '%s'" % _dirname)
        # BUG FIX: a None filename raises AttributeError on Python 2 but
        # TypeError on Python 3; catch both so "no logfile" keeps working.
        except (AttributeError, TypeError):
            return None
    def _validate_loglevel(self, level):
        """Map a textual level name (e.g. 'INFO') to its numeric value.

        Numeric levels pass through unchanged; unknown names raise
        Exceptions.InvalidLogLevelType.
        """
        _level = level
        if isinstance(_level, str):
            result = logging.getLevelName(_level)
            if isinstance(result, int):
                return result
            else:
                raise Exceptions.InvalidLogLevelType("'%s' in not a valid log level" % _level)
        else:
            return _level
    def _validate_sysloglevel(self, level):
        """Map a textual syslog priority name to its numeric value.

        Numeric levels pass through unchanged; unknown names raise
        Exceptions.InvalidLogLevelType.
        """
        _level = level
        if isinstance(_level, str):
            result = logging.handlers.SysLogHandler.priority_names.get(_level.lower())
            if result:
                return result
            else:
                raise Exceptions.InvalidLogLevelType("'%s' is not a valid Syslog level" % _level)
        else:
            return _level
    def _validate_syslogfacility(self, level):
        """Map a textual syslog facility name to its numeric value.

        Numeric facilities pass through unchanged; unknown names raise
        Exceptions.InvalidSyslogFacilityType.
        """
        _level = level
        if isinstance(_level, str):
            result = logging.handlers.SysLogHandler.facility_names.get(_level.lower())
            if result:
                return result
            else:
                raise Exceptions.InvalidSyslogFacilityType("'%s' is not a valid Syslog facility level" % _level)
        else:
            return _level
    def _create_console_handler(self, console, level):
        # Placeholder for factored-out handler creation (not implemented).
        pass
    def _create_logfile_handler(self, loglevel, logfile, logfile_rotate, logfile_maxsize, logfile_level):
        # Placeholder for factored-out handler creation (not implemented).
        pass
    def _create_syslog_handler(self, syslog, syslog_host, syslog_port, syslog_facility, syslog_level):
        # Placeholder for factored-out handler creation (not implemented).
        pass
    def _process_handlers(self):
        """Build the 'handlers' sub-dictionary for the enabled destinations.

        Also records each enabled handler's name in ``self.handler_list`` so
        the root logger can reference it.
        """
        _handlers = {}
        if self.logfile:
            self.handler_list.append('logfile')
            _handlers['logfile'] = {}
            _handlers['logfile']['level'] = self.logfile_level
            _handlers['logfile']['formatter'] = 'standard'
            _handlers['logfile']['class'] = 'logging.handlers.RotatingFileHandler'
            _handlers['logfile']['filename'] = self.logfile
            _handlers['logfile']['maxBytes'] = self.logfile_maxsize
            _handlers['logfile']['backupCount'] = self.logfile_rotate
        if self.console:
            self.handler_list.append('console')
            _handlers['console'] = {}
            _handlers['console']['level'] = self.console_level
            _handlers['console']['formatter'] = 'standard'
            _handlers['console']['class'] = 'logging.StreamHandler'
            _handlers['console']['stream'] = sys.stdout  # '/dev/stdout'
        if self.syslog:
            self.handler_list.append('syslog')
            _handlers['syslog'] = {}
            _handlers['syslog']['level'] = self.syslog_level
            _handlers['syslog']['formatter'] = 'syslog'
            _handlers['syslog']['class'] = 'logging.handlers.SysLogHandler'
            # BUG FIX: replaced the Python-2-only ``<>`` operator with ``!=``
            # (identical semantics on Python 2, valid on Python 3).
            # A non-default host means a remote syslog server -> (host, port).
            if self.syslog_host != "/dev/log":
                _address = (self.syslog_host, self.syslog_port)
            else:
                _address = self.syslog_host
            _handlers['syslog']['address'] = _address
            _handlers['syslog']['facility'] = self.syslog_facility
        return _handlers
    def _process_root_logger(self):
        """Attach every enabled handler to the root logger ('')."""
        _loggername = ''
        _loggers = {}
        _loggers[_loggername] = {}
        _loggers[_loggername]['handlers'] = self.handler_list
        _loggers[_loggername]['level'] = self.loglevel
        return _loggers
|
sodaphish/break
|
sp/base/Logging.py
|
Python
|
mit
| 7,145
|
[
"VisIt"
] |
044df0b753cf55b922178d71bb1d1b90aab0282b31ad2298830bcf94c87814a0
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module provides classes to create phase diagrams.
"""
from six.moves import filter
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "2.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Nov 25, 2012"
import collections
import numpy as np
from pyhull.simplex import Simplex
from pymatgen.serializers.json_coders import PMGSONable, MontyDecoder
try:
# If scipy ConvexHull exists, use it because it is faster for large hulls.
# This requires scipy >= 0.12.0.
from scipy.spatial import ConvexHull
HULL_METHOD = "scipy"
except ImportError:
# Fall back to pyhull if scipy >= 0.12.0 does not exist.
from pyhull.convex_hull import ConvexHull
HULL_METHOD = "pyhull"
from pymatgen.core.periodic_table import get_el_sp
from pymatgen.core.composition import Composition
from pymatgen.phasediagram.entries import GrandPotPDEntry, TransformedPDEntry
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.core.periodic_table import DummySpecie, Element
from pymatgen.analysis.reaction_calculator import Reaction, ReactionError
class PhaseDiagram (PMGSONable):
    """
    Simple phase diagram class taking in elements and entries as inputs.
    The algorithm is based on the work in the following papers:
    1. S. P. Ong, L. Wang, B. Kang, and G. Ceder, Li-Fe-P-O2 Phase Diagram from
    First Principles Calculations. Chem. Mater., 2008, 20(5), 1798-1807.
    doi:10.1021/cm702327g
    2. S. P. Ong, A. Jain, G. Hautier, B. Kang, G. Ceder, Thermal stabilities
    of delithiated olivine MPO4 (M=Fe, Mn) cathodes investigated using first
    principles calculations. Electrochem. Comm., 2010, 12(3), 427-430.
    doi:10.1016/j.elecom.2010.01.010
    .. attribute: elements:
    Elements in the phase diagram.
    ..attribute: all_entries
    All entries provided for Phase Diagram construction. Note that this
    does not mean that all these entries are actually used in the phase
    diagram. For example, this includes the positive formation energy
    entries that are filtered out before Phase Diagram construction.
    .. attribute: qhull_data
    Data used in the convex hull operation. This is essentially a matrix of
    composition data and energy per atom values created from qhull_entries.
    .. attribute: dim
    The dimensionality of the phase diagram.
    .. attribute: facets
    Facets of the phase diagram in the form of [[1,2,3],[4,5,6]...]
    .. attribute: el_refs:
    List of elemental references for the phase diagrams. These are
    entries corresponding to the lowest energy element entries for simple
    compositional phase diagrams.
    .. attribute: qhull_entries:
    Actual entries used in convex hull. Excludes all positive formation
    energy entries.
    """
    # Tolerance for determining if formation energy is positive.
    formation_energy_tol = 1e-11
    def __init__(self, entries, elements=None):
        """
        Standard constructor for phase diagram.
        Args:
            entries ([PDEntry]): A list of PDEntry-like objects having an
                energy, energy_per_atom and composition.
            elements ([Element]): Optional list of elements in the phase
                diagram. If set to None, the elements are determined from
                the the entries themselves.
        """
        if elements is None:
            # Default element set: union over all entry compositions.
            elements = set()
            for entry in entries:
                elements.update(entry.composition.elements)
        elements = list(elements)
        dim = len(elements)
        # Elemental references: lowest energy-per-atom entry for each element.
        el_refs = {}
        for el in elements:
            el_entries = list(filter(lambda e: e.composition.is_element and
                                     e.composition.elements[0] == el,
                                     entries))
            if len(el_entries) == 0:
                raise PhaseDiagramError(
                    "There are no entries associated with terminal {}."
                    .format(el))
            el_refs[el] = min(el_entries, key=lambda e: e.energy_per_atom)
        # One row per entry: atomic fraction of each element + energy per atom.
        data = []
        for entry in entries:
            comp = entry.composition
            row = [comp.get_atomic_fraction(el) for el in elements]
            row.append(entry.energy_per_atom)
            data.append(row)
        data = np.array(data)
        # NOTE(review): the first composition column is dropped here —
        # presumably because fractions sum to 1 so one column is redundant;
        # confirm before relying on column meaning.
        self.all_entries_hulldata = data[:, 1:]
        #use only entries with negative formation energy
        vec = [el_refs[el].energy_per_atom for el in elements] + [-1]
        form_e = -np.dot(data, vec)
        #make sure that if there are multiple entries at the same composition
        #within 1e-4 eV/atom of each other, only use the lower energy one.
        #This fixes the precision errors in the convex hull.
        #This is significantly faster than grouping by composition and then
        #taking the lowest energy of each group
        ind = []
        prev_c = []  # compositions within 1e-4 of current entry
        prev_e = []  # energies of those compositions
        for i in np.argsort([e.energy_per_atom for e in entries]):
            if form_e[i] > -self.formation_energy_tol:
                continue
            epa = entries[i].energy_per_atom
            #trim the front of the lists
            while prev_e and epa > 1e-4 + prev_e[0]:
                prev_c.pop(0)
                prev_e.pop(0)
            frac_comp = entries[i].composition.fractional_composition
            if frac_comp not in prev_c:
                ind.append(i)
                prev_e.append(epa)
                prev_c.append(frac_comp)
        #add the elemental references
        ind.extend([entries.index(el) for el in el_refs.values()])
        qhull_entries = [entries[i] for i in ind]
        qhull_data = data[ind][:, 1:]
        #add an extra point to enforce full dimensionality
        #this point will be present in all upper hull facets
        # (1/dim is true division: module has `from __future__ import division`)
        extra_point = np.zeros(dim) + 1 / dim
        extra_point[-1] = np.max(qhull_data) + 1
        qhull_data = np.concatenate([qhull_data, [extra_point]], axis=0)
        if dim == 1:
            # One element: the "hull" is simply the lowest-energy point.
            self.facets = [qhull_data.argmin(axis=0)]
        else:
            facets = get_facets(qhull_data)
            finalfacets = []
            for facet in facets:
                #skip facets that include the extra point
                if max(facet) == len(qhull_data)-1:
                    continue
                m = qhull_data[facet]
                m[:, -1] = 1
                # Discard degenerate (zero-volume) facets.
                if abs(np.linalg.det(m)) > 1e-14:
                    finalfacets.append(facet)
            self.facets = finalfacets
        self.simplices = [Simplex(qhull_data[f, :-1]) for f in self.facets]
        self.all_entries = entries
        self.qhull_data = qhull_data
        self.dim = dim
        self.el_refs = el_refs
        self.elements = elements
        self.qhull_entries = qhull_entries
    @property
    def unstable_entries(self):
        """
        Entries that are unstable in the phase diagram. Includes positive
        formation energy entries.
        """
        return [e for e in self.all_entries if e not in self.stable_entries]
    @property
    def stable_entries(self):
        """
        Returns the stable entries in the phase diagram.
        """
        # An entry is stable iff it is a vertex of at least one hull facet.
        stable_entries = set()
        for facet in self.facets:
            for vertex in facet:
                stable_entries.add(self.qhull_entries[vertex])
        return stable_entries
    def get_form_energy(self, entry):
        """
        Returns the formation energy for an entry (NOT normalized) from the
        elemental references.
        Args:
            entry: A PDEntry-like object.
        Returns:
            Formation energy from the elemental references.
        """
        comp = entry.composition
        energy = entry.energy - sum([comp[el] *
                                     self.el_refs[el].energy_per_atom
                                     for el in comp.elements])
        return energy
    def get_form_energy_per_atom(self, entry):
        """
        Returns the formation energy per atom for an entry from the
        elemental references.
        Args:
            entry: An PDEntry-like object
        Returns:
            Formation energy **per atom** from the elemental references.
        """
        comp = entry.composition
        return self.get_form_energy(entry) / comp.num_atoms
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        symbols = [el.symbol for el in self.elements]
        output = ["{} phase diagram".format("-".join(symbols)),
                  "{} stable phases: ".format(len(self.stable_entries)),
                  ", ".join([entry.name
                             for entry in self.stable_entries])]
        return "\n".join(output)
    def as_dict(self):
        """Return a JSON-serializable dict representation (PMGSONable)."""
        return {"@module": self.__class__.__module__,
                "@class": self.__class__.__name__,
                "all_entries": [e.as_dict() for e in self.all_entries],
                "elements": [e.as_dict() for e in self.elements]}
    @classmethod
    def from_dict(cls, d):
        """Reconstruct a PhaseDiagram from its as_dict() representation."""
        entries = [ComputedEntry.from_dict(dd) for dd in d["all_entries"]]
        elements = [Element.from_dict(dd) for dd in d["elements"]]
        return cls(entries, elements)
class GrandPotentialPhaseDiagram(PhaseDiagram):
    """
    A class representing a Grand potential phase diagram, i.e. a phase
    diagram that is open to one or more components. The relevant free
    energy is the grand potential, the Legendre transform of the Gibbs
    free energy:

    Grand potential = G - u\ :sub:`X` N\ :sub:`X`\

    The algorithm is based on the work in the following papers:
    1. S. P. Ong, L. Wang, B. Kang, and G. Ceder, Li-Fe-P-O2 Phase Diagram from
    First Principles Calculations. Chem. Mater., 2008, 20(5), 1798-1807.
    doi:10.1021/cm702327g
    2. S. P. Ong, A. Jain, G. Hautier, B. Kang, G. Ceder, Thermal stabilities
    of delithiated olivine MPO4 (M=Fe, Mn) cathodes investigated using first
    principles calculations. Electrochem. Comm., 2010, 12(3), 427-430.
    doi:10.1016/j.elecom.2010.01.010
    """
    def __init__(self, entries, chempots, elements=None):
        """
        Standard constructor for grand potential phase diagram.
        Args:
            entries ([PDEntry]): A list of PDEntry-like objects having an
                energy, energy_per_atom and composition.
            chempots {Element: float}: Chemical potentials of the open
                elements.
            elements ([Element]): Optional list of elements in the phase
                diagram. If set to None, the elements are determined from
                the entries themselves.
        """
        if elements is None:
            elements = {el for e in entries for el in e.composition.elements}
        # Normalize the chempot keys via get_el_sp.
        self.chempots = {get_el_sp(el): u for el, u in chempots.items()}
        # The open elements are removed from the compositional space.
        elements = set(elements).difference(self.chempots.keys())
        # Keep only entries that still contain at least one closed element,
        # wrapped so their energy is the grand potential.
        all_entries = [GrandPotPDEntry(e, self.chempots) for e in entries
                       if set(e.composition.elements).intersection(elements)]
        super(GrandPotentialPhaseDiagram, self).__init__(all_entries, elements)
    def __str__(self):
        chemsys = "-".join([el.symbol for el in self.elements])
        chempot_str = ", ".join(["u{}={}".format(el, v)
                                 for el, v in self.chempots.items()])
        lines = ["{} grand potential phase diagram with ".format(chemsys) + chempot_str,
                 "{} stable phases: ".format(len(self.stable_entries)),
                 ", ".join([entry.name for entry in self.stable_entries])]
        return "\n".join(lines)
    def as_dict(self):
        """Return a JSON-serializable dict representation (PMGSONable)."""
        d = {"@module": self.__class__.__module__,
             "@class": self.__class__.__name__}
        d["all_entries"] = [e.as_dict() for e in self.all_entries]
        d["chempots"] = self.chempots
        d["elements"] = [e.as_dict() for e in self.elements]
        return d
    @classmethod
    def from_dict(cls, d):
        """Reconstruct a GrandPotentialPhaseDiagram from as_dict() output."""
        dec = MontyDecoder()
        return cls(dec.process_decoded(d["all_entries"]),
                   d["chempots"],
                   dec.process_decoded(d["elements"]))
class CompoundPhaseDiagram(PhaseDiagram):
    """
    Generates phase diagrams from compounds as terminations instead of
    elements.
    """
    # Tolerance for determining if amount of a composition is positive.
    amount_tol = 1e-5
    def __init__(self, entries, terminal_compositions,
                 normalize_terminal_compositions=True):
        """
        Initializes a CompoundPhaseDiagram.
        Args:
            entries ([PDEntry]): Sequence of input entries. For example,
                if you want a Li2O-P2O5 phase diagram, you might have all
                Li-P-O entries as an input.
            terminal_compositions ([Composition]): Terminal compositions of
                phase space. In the Li2O-P2O5 example, these will be the
                Li2O and P2O5 compositions.
            normalize_terminal_compositions (bool): Whether to normalize the
                terminal compositions to a per atom basis. If normalized,
                the energy above hulls will be consistent
                for comparison across systems. Non-normalized terminals are
                more intuitive in terms of compositional breakdowns.
        """
        self.original_entries = entries
        self.terminal_compositions = terminal_compositions
        self.normalize_terminals = normalize_terminal_compositions
        (pentries, species_mapping) = \
            self.transform_entries(entries, terminal_compositions)
        self.species_mapping = species_mapping
        super(CompoundPhaseDiagram, self).__init__(
            pentries, elements=species_mapping.values())
    def transform_entries(self, entries, terminal_compositions):
        """
        Method to transform all entries to the composition coordinate in the
        terminal compositions. If the entry does not fall within the space
        defined by the terminal compositions, they are excluded. For example,
        Li3PO4 is mapped into a Li2O:1.5, P2O5:0.5 composition. The terminal
        compositions are represented by DummySpecies.
        Args:
            entries: Sequence of all input entries
            terminal_compositions: Terminal compositions of phase space.
        Returns:
            Sequence of TransformedPDEntries falling within the phase space.
        """
        new_entries = []
        if self.normalize_terminals:
            fractional_comp = [c.fractional_composition
                               for c in terminal_compositions]
        else:
            fractional_comp = terminal_compositions
        #Map terminal compositions to unique dummy species.
        # chr(102 + i) yields "f", "g", ... -> dummy species "Xf", "Xg", ...
        sp_mapping = collections.OrderedDict()
        for i, comp in enumerate(fractional_comp):
            sp_mapping[comp] = DummySpecie("X" + chr(102 + i))
        for entry in entries:
            try:
                rxn = Reaction(fractional_comp, [entry.composition])
                rxn.normalize_to(entry.composition)
                #We only allow reactions that have positive amounts of
                #reactants.
                if all([rxn.get_coeff(comp) <= CompoundPhaseDiagram.amount_tol
                        for comp in fractional_comp]):
                    newcomp = {sp_mapping[comp]: -rxn.get_coeff(comp)
                               for comp in fractional_comp}
                    newcomp = {k: v for k, v in newcomp.items()
                               if v > CompoundPhaseDiagram.amount_tol}
                    transformed_entry = \
                        TransformedPDEntry(Composition(newcomp), entry)
                    new_entries.append(transformed_entry)
            except ReactionError:
                #If the reaction can't be balanced, the entry does not fall
                #into the phase space. We ignore them.
                pass
        return new_entries, sp_mapping
    def as_dict(self):
        """Return a JSON-serializable dict representation (PMGSONable)."""
        return {
            "@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "original_entries": [e.as_dict() for e in self.original_entries],
            "terminal_compositions": [c.as_dict()
                                      for c in self.terminal_compositions],
            # BUG FIX: the flag is stored as ``self.normalize_terminals`` in
            # __init__; the previous code read the non-existent attribute
            # ``self.normalize_terminal_compositions`` and raised
            # AttributeError on every serialization.
            "normalize_terminal_compositions":
                self.normalize_terminals}
    @classmethod
    def from_dict(cls, d):
        """Reconstruct a CompoundPhaseDiagram from as_dict() output."""
        dec = MontyDecoder()
        entries = dec.process_decoded(d["original_entries"])
        terminal_compositions = dec.process_decoded(d["terminal_compositions"])
        return cls(entries, terminal_compositions,
                   d["normalize_terminal_compositions"])
class PhaseDiagramError(Exception):
    """Raised when a phase diagram cannot be generated."""
def get_facets(qhull_data, joggle=False, force_use_pyhull=False):
    """
    Get the simplex facets for the Convex hull.

    Args:
        qhull_data (np.ndarray): The data from which to construct the convex
            hull as a Nxd array (N being number of data points and d being the
            dimension)
        joggle (boolean): Whether to joggle the input to avoid precision
            errors.
        force_use_pyhull (boolean): Whether the pyhull algorithm is always
            used, even when scipy is present.

    Returns:
        List of simplices of the Convex Hull.
    """
    if force_use_pyhull or HULL_METHOD != "scipy":
        # pyhull fallback: scipy >= 0.12.0 unavailable, or explicitly forced.
        return ConvexHull(qhull_data, joggle=joggle).vertices
    options = "QJ i" if joggle else "Qt i"
    return ConvexHull(qhull_data, qhull_options=options).simplices
|
sonium0/pymatgen
|
pymatgen/phasediagram/pdmaker.py
|
Python
|
mit
| 18,442
|
[
"pymatgen"
] |
ad94c8d31568f7b54f8d76891850912b23ff08c735cc3d3be033f8feca0e031c
|
from __future__ import annotations
import importlib
import types
from typing import (
TYPE_CHECKING,
Sequence,
)
from pandas._config import get_option
from pandas._typing import IndexLabel
from pandas.util._decorators import (
Appender,
Substitution,
)
from pandas.core.dtypes.common import (
is_integer,
is_list_like,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
from pandas.core.base import PandasObject
if TYPE_CHECKING:
from pandas import DataFrame
def hist_series(
    self,
    by=None,
    ax=None,
    grid: bool = True,
    xlabelsize: int | None = None,
    xrot: float | None = None,
    ylabelsize: int | None = None,
    yrot: float | None = None,
    figsize: tuple[int, int] | None = None,
    bins: int | Sequence[int] = 10,
    backend: str | None = None,
    legend: bool = False,
    **kwargs,
):
    """
    Draw histogram of the input series using matplotlib.

    Parameters
    ----------
    by : object, optional
        If passed, then used to form histograms for separate groups.
    ax : matplotlib axis object
        If not passed, uses gca().
    grid : bool, default True
        Whether to show axis grid lines.
    xlabelsize : int, default None
        If specified changes the x-axis label size.
    xrot : float, default None
        Rotation of x axis labels.
    ylabelsize : int, default None
        If specified changes the y-axis label size.
    yrot : float, default None
        Rotation of y axis labels.
    figsize : tuple, default None
        Figure size in inches by default.
    bins : int or sequence, default 10
        Number of histogram bins to be used. If an integer is given, bins + 1
        bin edges are calculated and returned. If bins is a sequence, gives
        bin edges, including left edge of first bin and right edge of last
        bin. In this case, bins is returned unmodified.
    backend : str, default None
        Backend to use instead of the backend specified in the option
        ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
        specify the ``plotting.backend`` for the whole session, set
        ``pd.options.plotting.backend``.

        .. versionadded:: 1.0.0

    legend : bool, default False
        Whether to show the legend.

        .. versionadded:: 1.1.0

    **kwargs
        To be passed to the actual plotting function.

    Returns
    -------
    matplotlib.AxesSubplot
        A histogram plot.

    See Also
    --------
    matplotlib.axes.Axes.hist : Plot a histogram using matplotlib.
    """
    # Resolve the active plotting backend and forward all arguments to it.
    plot_backend = _get_plot_backend(backend)
    forwarded = {
        "by": by,
        "ax": ax,
        "grid": grid,
        "xlabelsize": xlabelsize,
        "xrot": xrot,
        "ylabelsize": ylabelsize,
        "yrot": yrot,
        "figsize": figsize,
        "bins": bins,
        "legend": legend,
    }
    return plot_backend.hist_series(self, **forwarded, **kwargs)
def hist_frame(
    data: DataFrame,
    column: IndexLabel = None,
    by=None,
    grid: bool = True,
    xlabelsize: int | None = None,
    xrot: float | None = None,
    ylabelsize: int | None = None,
    yrot: float | None = None,
    ax=None,
    sharex: bool = False,
    sharey: bool = False,
    figsize: tuple[int, int] | None = None,
    layout: tuple[int, int] | None = None,
    bins: int | Sequence[int] = 10,
    backend: str | None = None,
    legend: bool = False,
    **kwargs,
):
    """
    Make a histogram of the DataFrame's columns.

    A `histogram`_ is a representation of the distribution of data.
    This function calls :meth:`matplotlib.pyplot.hist`, on each series in
    the DataFrame, resulting in one histogram per column.

    .. _histogram: https://en.wikipedia.org/wiki/Histogram

    Parameters
    ----------
    data : DataFrame
        The pandas object holding the data.
    column : str or sequence, optional
        If passed, will be used to limit data to a subset of columns.
    by : object, optional
        If passed, then used to form histograms for separate groups.
    grid : bool, default True
        Whether to show axis grid lines.
    xlabelsize : int, default None
        If specified changes the x-axis label size.
    xrot : float, default None
        Rotation of x axis labels. For example, a value of 90 displays the
        x labels rotated 90 degrees clockwise.
    ylabelsize : int, default None
        If specified changes the y-axis label size.
    yrot : float, default None
        Rotation of y axis labels. For example, a value of 90 displays the
        y labels rotated 90 degrees clockwise.
    ax : Matplotlib axes object, default None
        The axes to plot the histogram on.
    sharex : bool, default True if ax is None else False
        In case subplots=True, share x axis and set some x axis labels to
        invisible; defaults to True if ax is None otherwise False if an ax
        is passed in.
        Note that passing in both an ax and sharex=True will alter all x axis
        labels for all subplots in a figure.
    sharey : bool, default False
        In case subplots=True, share y axis and set some y axis labels to
        invisible.
    figsize : tuple, optional
        The size in inches of the figure to create. Uses the value in
        `matplotlib.rcParams` by default.
    layout : tuple, optional
        Tuple of (rows, columns) for the layout of the histograms.
    bins : int or sequence, default 10
        Number of histogram bins to be used. If an integer is given, bins + 1
        bin edges are calculated and returned. If bins is a sequence, gives
        bin edges, including left edge of first bin and right edge of last
        bin. In this case, bins is returned unmodified.
    backend : str, default None
        Backend to use instead of the backend specified in the option
        ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
        specify the ``plotting.backend`` for the whole session, set
        ``pd.options.plotting.backend``.

        .. versionadded:: 1.0.0

    legend : bool, default False
        Whether to show the legend.

        .. versionadded:: 1.1.0

    **kwargs
        All other plotting keyword arguments to be passed to
        :meth:`matplotlib.pyplot.hist`.

    Returns
    -------
    matplotlib.AxesSubplot or numpy.ndarray of them

    See Also
    --------
    matplotlib.pyplot.hist : Plot a histogram using matplotlib.

    Examples
    --------
    This example draws a histogram based on the length and width of
    some animals, displayed in three bins

    .. plot::
        :context: close-figs

        >>> df = pd.DataFrame({
        ...     'length': [1.5, 0.5, 1.2, 0.9, 3],
        ...     'width': [0.7, 0.2, 0.15, 0.2, 1.1]
        ... }, index=['pig', 'rabbit', 'duck', 'chicken', 'horse'])
        >>> hist = df.hist(bins=3)
    """
    # Resolve the active plotting backend and forward all arguments to it.
    plot_backend = _get_plot_backend(backend)
    forwarded = {
        "column": column,
        "by": by,
        "grid": grid,
        "xlabelsize": xlabelsize,
        "xrot": xrot,
        "ylabelsize": ylabelsize,
        "yrot": yrot,
        "ax": ax,
        "sharex": sharex,
        "sharey": sharey,
        "figsize": figsize,
        "layout": layout,
        "legend": legend,
        "bins": bins,
    }
    return plot_backend.hist_frame(data, **forwarded, **kwargs)
_boxplot_doc = """
Make a box plot from DataFrame columns.
Make a box-and-whisker plot from DataFrame columns, optionally grouped
by some other columns. A box plot is a method for graphically depicting
groups of numerical data through their quartiles.
The box extends from the Q1 to Q3 quartile values of the data,
with a line at the median (Q2). The whiskers extend from the edges
of box to show the range of the data. By default, they extend no more than
`1.5 * IQR (IQR = Q3 - Q1)` from the edges of the box, ending at the farthest
data point within that interval. Outliers are plotted as separate dots.
For further details see
Wikipedia's entry for `boxplot <https://en.wikipedia.org/wiki/Box_plot>`_.
Parameters
----------
column : str or list of str, optional
Column name or list of names, or vector.
Can be any valid input to :meth:`pandas.DataFrame.groupby`.
by : str or array-like, optional
Column in the DataFrame to :meth:`pandas.DataFrame.groupby`.
One box-plot will be done per value of columns in `by`.
ax : object of class matplotlib.axes.Axes, optional
The matplotlib axes to be used by boxplot.
fontsize : float or str
Tick label font size in points or as a string (e.g., `large`).
rot : int or float, default 0
The rotation angle of labels (in degrees)
with respect to the screen coordinate system.
grid : bool, default True
Setting this to True will show the grid.
figsize : A tuple (width, height) in inches
The size of the figure to create in matplotlib.
layout : tuple (rows, columns), optional
For example, (3, 5) will display the subplots
using 3 columns and 5 rows, starting from the top-left.
return_type : {'axes', 'dict', 'both'} or None, default 'axes'
The kind of object to return. The default is ``axes``.
* 'axes' returns the matplotlib axes the boxplot is drawn on.
* 'dict' returns a dictionary whose values are the matplotlib
Lines of the boxplot.
* 'both' returns a namedtuple with the axes and dict.
* when grouping with ``by``, a Series mapping columns to
``return_type`` is returned.
If ``return_type`` is `None`, a NumPy array
of axes with the same shape as ``layout`` is returned.
%(backend)s\
**kwargs
All other plotting keyword arguments to be passed to
:func:`matplotlib.pyplot.boxplot`.
Returns
-------
result
See Notes.
See Also
--------
Series.plot.hist: Make a histogram.
matplotlib.pyplot.boxplot : Matplotlib equivalent plot.
Notes
-----
The return type depends on the `return_type` parameter:
* 'axes' : object of class matplotlib.axes.Axes
* 'dict' : dict of matplotlib.lines.Line2D objects
* 'both' : a namedtuple with structure (ax, lines)
For data grouped with ``by``, return a Series of the above or a numpy
array:
* :class:`~pandas.Series`
* :class:`~numpy.array` (for ``return_type = None``)
Use ``return_type='dict'`` when you want to tweak the appearance
of the lines after plotting. In this case a dict containing the Lines
making up the boxes, caps, fliers, medians, and whiskers is returned.
Examples
--------
Boxplots can be created for every column in the dataframe
by ``df.boxplot()`` or indicating the columns to be used:
.. plot::
:context: close-figs
>>> np.random.seed(1234)
>>> df = pd.DataFrame(np.random.randn(10, 4),
... columns=['Col1', 'Col2', 'Col3', 'Col4'])
>>> boxplot = df.boxplot(column=['Col1', 'Col2', 'Col3'])
Boxplots of variables distributions grouped by the values of a third
variable can be created using the option ``by``. For instance:
.. plot::
:context: close-figs
>>> df = pd.DataFrame(np.random.randn(10, 2),
... columns=['Col1', 'Col2'])
>>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A',
... 'B', 'B', 'B', 'B', 'B'])
>>> boxplot = df.boxplot(by='X')
A list of strings (i.e. ``['X', 'Y']``) can be passed to boxplot
in order to group the data by combination of the variables in the x-axis:
.. plot::
:context: close-figs
>>> df = pd.DataFrame(np.random.randn(10, 3),
... columns=['Col1', 'Col2', 'Col3'])
>>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A',
... 'B', 'B', 'B', 'B', 'B'])
>>> df['Y'] = pd.Series(['A', 'B', 'A', 'B', 'A',
... 'B', 'A', 'B', 'A', 'B'])
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], by=['X', 'Y'])
The layout of boxplot can be adjusted giving a tuple to ``layout``:
.. plot::
:context: close-figs
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
... layout=(2, 1))
Additional formatting can be done to the boxplot, like suppressing the grid
(``grid=False``), rotating the labels in the x-axis (i.e. ``rot=45``)
or changing the fontsize (i.e. ``fontsize=15``):
.. plot::
:context: close-figs
>>> boxplot = df.boxplot(grid=False, rot=45, fontsize=15)
The parameter ``return_type`` can be used to select the type of element
returned by `boxplot`. When ``return_type='axes'`` is selected,
the matplotlib axes on which the boxplot is drawn are returned:
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], return_type='axes')
>>> type(boxplot)
<class 'matplotlib.axes._subplots.AxesSubplot'>
When grouping with ``by``, a Series mapping columns to ``return_type``
is returned:
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
... return_type='axes')
>>> type(boxplot)
<class 'pandas.core.series.Series'>
If ``return_type`` is `None`, a NumPy array of axes with the same shape
as ``layout`` is returned:
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
... return_type=None)
>>> type(boxplot)
<class 'numpy.ndarray'>
"""
_backend_doc = """\
backend : str, default None
Backend to use instead of the backend specified in the option
``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
specify the ``plotting.backend`` for the whole session, set
``pd.options.plotting.backend``.
.. versionadded:: 1.0.0
"""
_bar_or_line_doc = """
Parameters
----------
x : label or position, optional
Allows plotting of one column versus another. If not specified,
the index of the DataFrame is used.
y : label or position, optional
Allows plotting of one column versus another. If not specified,
all numerical columns are used.
color : str, array-like, or dict, optional
The color for each of the DataFrame's columns. Possible values are:
- A single color string referred to by name, RGB or RGBA code,
for instance 'red' or '#a98d19'.
- A sequence of color strings referred to by name, RGB or RGBA
code, which will be used for each column recursively. For
instance ['green','yellow'] each column's %(kind)s will be filled in
green or yellow, alternatively. If there is only a single column to
be plotted, then only the first color from the color list will be
used.
- A dict of the form {column name : color}, so that each column will be
colored accordingly. For example, if your columns are called `a` and
`b`, then passing {'a': 'green', 'b': 'red'} will color %(kind)ss for
column `a` in green and %(kind)ss for column `b` in red.
.. versionadded:: 1.1.0
**kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or np.ndarray of them
An ndarray is returned with one :class:`matplotlib.axes.Axes`
per column when ``subplots=True``.
"""
@Substitution(backend="")
@Appender(_boxplot_doc)
def boxplot(
    data,
    column=None,
    by=None,
    ax=None,
    fontsize=None,
    rot=0,
    grid=True,
    figsize=None,
    layout=None,
    return_type=None,
    **kwargs,
):
    # The module-level boxplot always uses the matplotlib backend,
    # regardless of the ``plotting.backend`` option.
    backend_module = _get_plot_backend("matplotlib")
    return backend_module.boxplot(
        data, column=column, by=by, ax=ax, fontsize=fontsize, rot=rot,
        grid=grid, figsize=figsize, layout=layout, return_type=return_type,
        **kwargs,
    )
@Substitution(backend=_backend_doc)
@Appender(_boxplot_doc)
def boxplot_frame(
    self,
    column=None,
    by=None,
    ax=None,
    fontsize=None,
    rot=0,
    grid=True,
    figsize=None,
    layout=None,
    return_type=None,
    backend=None,
    **kwargs,
):
    # Resolve the plotting backend (``None`` means the configured default)
    # and hand the whole call straight over to it.
    backend_module = _get_plot_backend(backend)
    return backend_module.boxplot_frame(
        self, column=column, by=by, ax=ax, fontsize=fontsize, rot=rot,
        grid=grid, figsize=figsize, layout=layout, return_type=return_type,
        **kwargs,
    )
def boxplot_frame_groupby(
    grouped,
    subplots=True,
    column=None,
    fontsize=None,
    rot=0,
    grid=True,
    ax=None,
    figsize=None,
    layout=None,
    sharex=False,
    sharey=True,
    backend=None,
    **kwargs,
):
    """
    Make box plots from DataFrameGroupBy data.
    Parameters
    ----------
    grouped : Grouped DataFrame
    subplots : bool
        * ``False`` - no subplots will be used
        * ``True`` - create a subplot for each group.
    column : column name or list of names, or vector
        Can be any valid input to groupby.
    fontsize : int or str
    rot : label rotation angle
    grid : Setting this to True will show the grid
    ax : Matplotlib axis object, default None
    figsize : A tuple (width, height) in inches
    layout : tuple (optional)
        The layout of the plot: (rows, columns).
    sharex : bool, default False
        Whether x-axes will be shared among subplots.
    sharey : bool, default True
        Whether y-axes will be shared among subplots.
    backend : str, default None
        Backend to use instead of the backend specified in the option
        ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
        specify the ``plotting.backend`` for the whole session, set
        ``pd.options.plotting.backend``.
        .. versionadded:: 1.0.0
    **kwargs
        All other plotting keyword arguments to be passed to
        matplotlib's boxplot function.
    Returns
    -------
    dict of key/value = group key/DataFrame.boxplot return value
    or DataFrame.boxplot return value in case subplots=figures=False
    Examples
    --------
    You can create boxplots for grouped data and show them as separate subplots:
    .. plot::
        :context: close-figs
        >>> import itertools
        >>> tuples = [t for t in itertools.product(range(1000), range(4))]
        >>> index = pd.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1'])
        >>> data = np.random.randn(len(index),4)
        >>> df = pd.DataFrame(data, columns=list('ABCD'), index=index)
        >>> grouped = df.groupby(level='lvl1')
        >>> grouped.boxplot(rot=45, fontsize=12, figsize=(8,10))
    The ``subplots=False`` option shows the boxplots in a single figure.
    .. plot::
        :context: close-figs
        >>> grouped.boxplot(subplots=False, rot=45, fontsize=12)
    """
    # Delegate to whichever backend the caller (or the global option) chose.
    backend_module = _get_plot_backend(backend)
    return backend_module.boxplot_frame_groupby(
        grouped, subplots=subplots, column=column, fontsize=fontsize,
        rot=rot, grid=grid, ax=ax, figsize=figsize, layout=layout,
        sharex=sharex, sharey=sharey, **kwargs,
    )
class PlotAccessor(PandasObject):
"""
Make plots of Series or DataFrame.
Uses the backend specified by the
option ``plotting.backend``. By default, matplotlib is used.
Parameters
----------
data : Series or DataFrame
The object for which the method is called.
x : label or position, default None
Only used if data is a DataFrame.
y : label, position or list of label, positions, default None
Allows plotting of one column versus another. Only used if data is a
DataFrame.
kind : str
The kind of plot to produce:
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
- 'scatter' : scatter plot (DataFrame only)
- 'hexbin' : hexbin plot (DataFrame only)
ax : matplotlib axes object, default None
An axes of the current figure.
subplots : bool, default False
Make separate subplots for each column.
sharex : bool, default True if ax is None else False
In case ``subplots=True``, share x axis and set some x axis labels
to invisible; defaults to True if ax is None otherwise False if
an ax is passed in; Be aware, that passing in both an ax and
``sharex=True`` will alter all x axis labels for all axis in a figure.
sharey : bool, default False
In case ``subplots=True``, share y axis and set some y axis labels to invisible.
layout : tuple, optional
(rows, columns) for the layout of subplots.
figsize : a tuple (width, height) in inches
Size of a figure object.
use_index : bool, default True
Use index as ticks for x axis.
title : str or list
Title to use for the plot. If a string is passed, print the string
at the top of the figure. If a list is passed and `subplots` is
True, print each item in the list above the corresponding subplot.
grid : bool, default None (matlab style default)
Axis grid lines.
legend : bool or {'reverse'}
Place legend on axis subplots.
style : list or dict
The matplotlib line style per column.
logx : bool or 'sym', default False
Use log scaling or symlog scaling on x axis.
.. versionchanged:: 0.25.0
logy : bool or 'sym' default False
Use log scaling or symlog scaling on y axis.
.. versionchanged:: 0.25.0
loglog : bool or 'sym', default False
Use log scaling or symlog scaling on both x and y axes.
.. versionchanged:: 0.25.0
xticks : sequence
Values to use for the xticks.
yticks : sequence
Values to use for the yticks.
xlim : 2-tuple/list
Set the x limits of the current axes.
ylim : 2-tuple/list
Set the y limits of the current axes.
xlabel : label, optional
Name to use for the xlabel on x-axis. Default uses index name as xlabel, or the
x-column name for planar plots.
.. versionadded:: 1.1.0
.. versionchanged:: 1.2.0
Now applicable to planar plots (`scatter`, `hexbin`).
ylabel : label, optional
Name to use for the ylabel on y-axis. Default will show no ylabel, or the
y-column name for planar plots.
.. versionadded:: 1.1.0
.. versionchanged:: 1.2.0
Now applicable to planar plots (`scatter`, `hexbin`).
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal
plots).
fontsize : int, default None
Font size for xticks and yticks.
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that
name from matplotlib.
colorbar : bool, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin'
plots).
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5
(center).
table : bool, Series or DataFrame, default False
If True, draw a table using the data in the DataFrame and the data
will be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a
table.
yerr : DataFrame, Series, array-like, dict and str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for
detail.
xerr : DataFrame, Series, array-like, dict and str
Equivalent to yerr.
stacked : bool, default False in line and bar plots, and True in area plot
If True, create stacked plot.
sort_columns : bool, default False
Sort column names to determine plot ordering.
secondary_y : bool or sequence, default False
Whether to plot on the secondary y-axis if a list/tuple, which
columns to plot on secondary y-axis.
mark_right : bool, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend.
include_bool : bool, default is False
If True, boolean values can be plotted.
backend : str, default None
Backend to use instead of the backend specified in the option
``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
specify the ``plotting.backend`` for the whole session, set
``pd.options.plotting.backend``.
.. versionadded:: 1.0.0
**kwargs
Options to pass to matplotlib plotting method.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
If the backend is not the default matplotlib one, the return value
will be the object returned by the backend.
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5
(center)
"""
    # Plot kinds available for both Series and DataFrame inputs.
    _common_kinds = ("line", "bar", "barh", "kde", "density", "area", "hist", "box")
    # Kinds that only make sense for a Series (or a single selected column).
    _series_kinds = ("pie",)
    # Kinds that need two columns, hence a DataFrame.
    _dataframe_kinds = ("scatter", "hexbin")
    # Aliases resolved before dispatching to the backend.
    _kind_aliases = {"density": "kde"}
    _all_kinds = _common_kinds + _series_kinds + _dataframe_kinds
    def __init__(self, data):
        # ``data`` is the Series/DataFrame this accessor was created from.
        self._parent = data
    @staticmethod
    def _get_call_args(backend_name, data, args, kwargs):
        """
        This function makes calls to this accessor `__call__` method compatible
        with the previous `SeriesPlotMethods.__call__` and
        `DataFramePlotMethods.__call__`. Those had slightly different
        signatures, since `DataFramePlotMethods` accepted `x` and `y`
        parameters.
        """
        # ``arg_def`` lists (name, default) pairs in the positional order of
        # the legacy Series/DataFrame plot signatures; it is used both to map
        # positional ``args`` to keywords and to fill in defaults below.
        if isinstance(data, ABCSeries):
            arg_def = [
                ("kind", "line"),
                ("ax", None),
                ("figsize", None),
                ("use_index", True),
                ("title", None),
                ("grid", None),
                ("legend", False),
                ("style", None),
                ("logx", False),
                ("logy", False),
                ("loglog", False),
                ("xticks", None),
                ("yticks", None),
                ("xlim", None),
                ("ylim", None),
                ("rot", None),
                ("fontsize", None),
                ("colormap", None),
                ("table", False),
                ("yerr", None),
                ("xerr", None),
                ("label", None),
                ("secondary_y", False),
                ("xlabel", None),
                ("ylabel", None),
            ]
        elif isinstance(data, ABCDataFrame):
            arg_def = [
                ("x", None),
                ("y", None),
                ("kind", "line"),
                ("ax", None),
                ("subplots", False),
                ("sharex", None),
                ("sharey", False),
                ("layout", None),
                ("figsize", None),
                ("use_index", True),
                ("title", None),
                ("grid", None),
                ("legend", True),
                ("style", None),
                ("logx", False),
                ("logy", False),
                ("loglog", False),
                ("xticks", None),
                ("yticks", None),
                ("xlim", None),
                ("ylim", None),
                ("rot", None),
                ("fontsize", None),
                ("colormap", None),
                ("table", False),
                ("yerr", None),
                ("xerr", None),
                ("secondary_y", False),
                ("sort_columns", False),
                ("xlabel", None),
                ("ylabel", None),
            ]
        else:
            raise TypeError(
                f"Called plot accessor for type {type(data).__name__}, "
                "expected Series or DataFrame"
            )
        # Positional arguments on Series.plot are disallowed; the error
        # message shows the equivalent keyword-only call.
        if args and isinstance(data, ABCSeries):
            positional_args = str(args)[1:-1]
            keyword_args = ", ".join(
                [f"{name}={repr(value)}" for (name, _), value in zip(arg_def, args)]
            )
            msg = (
                "`Series.plot()` should not be called with positional "
                "arguments, only keyword arguments. The order of "
                "positional arguments will change in the future. "
                f"Use `Series.plot({keyword_args})` instead of "
                f"`Series.plot({positional_args})`."
            )
            raise TypeError(msg)
        # Map any remaining positional args (DataFrame case) onto their names.
        pos_args = {name: value for (name, _), value in zip(arg_def, args)}
        if backend_name == "pandas.plotting._matplotlib":
            # The builtin backend expects every known option to be present,
            # so start from the defaults in ``arg_def``; later dicts win.
            kwargs = dict(arg_def, **pos_args, **kwargs)
        else:
            # Third-party backends only receive what the caller passed.
            kwargs = dict(pos_args, **kwargs)
        x = kwargs.pop("x", None)
        y = kwargs.pop("y", None)
        kind = kwargs.pop("kind", "line")
        return x, y, kind, kwargs
    def __call__(self, *args, **kwargs):
        # Resolve backend first: an explicit ``backend=`` kwarg wins over the
        # global ``plotting.backend`` option.
        plot_backend = _get_plot_backend(kwargs.pop("backend", None))
        x, y, kind, kwargs = self._get_call_args(
            plot_backend.__name__, self._parent, args, kwargs
        )
        kind = self._kind_aliases.get(kind, kind)
        # when using another backend, get out of the way
        if plot_backend.__name__ != "pandas.plotting._matplotlib":
            return plot_backend.plot(self._parent, x=x, y=y, kind=kind, **kwargs)
        if kind not in self._all_kinds:
            raise ValueError(f"{kind} is not a valid plot kind")
        # The original data structured can be transformed before passed to the
        # backend. For example, for DataFrame is common to set the index as the
        # `x` parameter, and return a Series with the parameter `y` as values.
        data = self._parent.copy()
        if isinstance(data, ABCSeries):
            kwargs["reuse_plot"] = True
        if kind in self._dataframe_kinds:
            # scatter/hexbin require two columns, hence a DataFrame.
            if isinstance(data, ABCDataFrame):
                return plot_backend.plot(data, x=x, y=y, kind=kind, **kwargs)
            else:
                raise ValueError(f"plot kind {kind} can only be used for data frames")
        elif kind in self._series_kinds:
            # pie: reduce a DataFrame to the single column ``y`` unless
            # ``subplots=True`` draws one pie per column.
            if isinstance(data, ABCDataFrame):
                if y is None and kwargs.get("subplots") is False:
                    raise ValueError(
                        f"{kind} requires either y column or 'subplots=True'"
                    )
                elif y is not None:
                    if is_integer(y) and not data.columns.holds_integer():
                        y = data.columns[y]
                    # converted to series actually. copy to not modify
                    data = data[y].copy()
                    data.index.name = y
        elif isinstance(data, ABCDataFrame):
            data_cols = data.columns
            if x is not None:
                # ``x`` becomes the index; integer positions are resolved to
                # labels unless the columns themselves hold integers.
                if is_integer(x) and not data.columns.holds_integer():
                    x = data_cols[x]
                elif not isinstance(data[x], ABCSeries):
                    raise ValueError("x must be a label or position")
                data = data.set_index(x)
            if y is not None:
                # check if we have y as int or list of ints
                int_ylist = is_list_like(y) and all(is_integer(c) for c in y)
                int_y_arg = is_integer(y) or int_ylist
                if int_y_arg and not data.columns.holds_integer():
                    y = data_cols[y]
                label_kw = kwargs["label"] if "label" in kwargs else False
                # Resolve string/positional xerr/yerr to the matching column.
                for kw in ["xerr", "yerr"]:
                    if kw in kwargs and (
                        isinstance(kwargs[kw], str) or is_integer(kwargs[kw])
                    ):
                        try:
                            kwargs[kw] = data[kwargs[kw]]
                        except (IndexError, KeyError, TypeError):
                            pass
                # don't overwrite
                data = data[y].copy()
                if isinstance(data, ABCSeries):
                    label_name = label_kw or y
                    data.name = label_name
                else:
                    match = is_list_like(label_kw) and len(label_kw) == len(y)
                    if label_kw and not match:
                        raise ValueError(
                            "label should be list-like and same length as y"
                        )
                    label_name = label_kw or data.columns
                    data.columns = label_name
        return plot_backend.plot(data, kind=kind, **kwargs)
    # Expose the class docstring on the call operator as well.
    __call__.__doc__ = __doc__
@Appender(
"""
See Also
--------
matplotlib.pyplot.plot : Plot y versus x as lines and/or markers.
Examples
--------
.. plot::
:context: close-figs
>>> s = pd.Series([1, 3, 2])
>>> s.plot.line()
.. plot::
:context: close-figs
The following example shows the populations for some animals
over the years.
>>> df = pd.DataFrame({
... 'pig': [20, 18, 489, 675, 1776],
... 'horse': [4, 25, 281, 600, 1900]
... }, index=[1990, 1997, 2003, 2009, 2014])
>>> lines = df.plot.line()
.. plot::
:context: close-figs
An example with subplots, so an array of axes is returned.
>>> axes = df.plot.line(subplots=True)
>>> type(axes)
<class 'numpy.ndarray'>
.. plot::
:context: close-figs
Let's repeat the same example, but specifying colors for
each column (in this case, for each animal).
>>> axes = df.plot.line(
... subplots=True, color={"pig": "pink", "horse": "#742802"}
... )
.. plot::
:context: close-figs
The following example shows the relationship between both
populations.
>>> lines = df.plot.line(x='pig', y='horse')
"""
)
@Substitution(kind="line")
@Appender(_bar_or_line_doc)
def line(self, x=None, y=None, **kwargs):
"""
Plot Series or DataFrame as lines.
This function is useful to plot lines using DataFrame's values
as coordinates.
"""
return self(kind="line", x=x, y=y, **kwargs)
@Appender(
"""
See Also
--------
DataFrame.plot.barh : Horizontal bar plot.
DataFrame.plot : Make plots of a DataFrame.
matplotlib.pyplot.bar : Make a bar plot with matplotlib.
Examples
--------
Basic plot.
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})
>>> ax = df.plot.bar(x='lab', y='val', rot=0)
Plot a whole dataframe to a bar plot. Each column is assigned a
distinct color, and each row is nested in a group along the
horizontal axis.
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.bar(rot=0)
Plot stacked bar charts for the DataFrame
.. plot::
:context: close-figs
>>> ax = df.plot.bar(stacked=True)
Instead of nesting, the figure can be split by column with
``subplots=True``. In this case, a :class:`numpy.ndarray` of
:class:`matplotlib.axes.Axes` are returned.
.. plot::
:context: close-figs
>>> axes = df.plot.bar(rot=0, subplots=True)
>>> axes[1].legend(loc=2) # doctest: +SKIP
If you don't like the default colours, you can specify how you'd
like each column to be colored.
.. plot::
:context: close-figs
>>> axes = df.plot.bar(
... rot=0, subplots=True, color={"speed": "red", "lifespan": "green"}
... )
>>> axes[1].legend(loc=2) # doctest: +SKIP
Plot a single column.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(y='speed', rot=0)
Plot only selected categories for the DataFrame.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(x='lifespan', rot=0)
"""
)
@Substitution(kind="bar")
@Appender(_bar_or_line_doc)
def bar(self, x=None, y=None, **kwargs):
"""
Vertical bar plot.
A bar plot is a plot that presents categorical data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
"""
return self(kind="bar", x=x, y=y, **kwargs)
@Appender(
"""
See Also
--------
DataFrame.plot.bar: Vertical bar plot.
DataFrame.plot : Make plots of DataFrame using matplotlib.
matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib.
Examples
--------
Basic example
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]})
>>> ax = df.plot.barh(x='lab', y='val')
Plot a whole DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh()
Plot stacked barh charts for the DataFrame
.. plot::
:context: close-figs
>>> ax = df.plot.barh(stacked=True)
We can specify colors for each column
.. plot::
:context: close-figs
>>> ax = df.plot.barh(color={"speed": "red", "lifespan": "green"})
Plot a column of the DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh(y='speed')
Plot DataFrame versus the desired column
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh(x='lifespan')
"""
)
@Substitution(kind="bar")
@Appender(_bar_or_line_doc)
def barh(self, x=None, y=None, **kwargs):
"""
Make a horizontal bar plot.
A horizontal bar plot is a plot that presents quantitative data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
"""
return self(kind="barh", x=x, y=y, **kwargs)
def box(self, by=None, **kwargs):
r"""
Make a box plot of the DataFrame columns.
A box plot is a method for graphically depicting groups of numerical
data through their quartiles.
The box extends from the Q1 to Q3 quartile values of the data,
with a line at the median (Q2). The whiskers extend from the edges
of box to show the range of the data. The position of the whiskers
is set by default to 1.5*IQR (IQR = Q3 - Q1) from the edges of the
box. Outlier points are those past the end of the whiskers.
For further details see Wikipedia's
entry for `boxplot <https://en.wikipedia.org/wiki/Box_plot>`__.
A consideration when using this chart is that the box and the whiskers
can overlap, which is very common when plotting small sets of data.
Parameters
----------
by : str or sequence
Column in the DataFrame to group by.
**kwargs
Additional keywords are documented in
:meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
See Also
--------
DataFrame.boxplot: Another method to draw a box plot.
Series.plot.box: Draw a box plot from a Series object.
matplotlib.pyplot.boxplot: Draw a box plot in matplotlib.
Examples
--------
Draw a box plot from a DataFrame with four columns of randomly
generated data.
.. plot::
:context: close-figs
>>> data = np.random.randn(25, 4)
>>> df = pd.DataFrame(data, columns=list('ABCD'))
>>> ax = df.plot.box()
"""
return self(kind="box", by=by, **kwargs)
def hist(self, by=None, bins=10, **kwargs):
"""
Draw one histogram of the DataFrame's columns.
A histogram is a representation of the distribution of data.
This function groups the values of all given Series in the DataFrame
into bins and draws all bins in one :class:`matplotlib.axes.Axes`.
This is useful when the DataFrame's Series are in a similar scale.
Parameters
----------
by : str or sequence, optional
Column in the DataFrame to group by.
bins : int, default 10
Number of histogram bins to be used.
**kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
class:`matplotlib.AxesSubplot`
Return a histogram plot.
See Also
--------
DataFrame.hist : Draw histograms per DataFrame's Series.
Series.hist : Draw a histogram with Series' data.
Examples
--------
When we draw a dice 6000 times, we expect to get each value around 1000
times. But when we draw two dices and sum the result, the distribution
is going to be quite different. A histogram illustrates those
distributions.
.. plot::
:context: close-figs
>>> df = pd.DataFrame(
... np.random.randint(1, 7, 6000),
... columns = ['one'])
>>> df['two'] = df['one'] + np.random.randint(1, 7, 6000)
>>> ax = df.plot.hist(bins=12, alpha=0.5)
"""
return self(kind="hist", by=by, bins=bins, **kwargs)
def kde(self, bw_method=None, ind=None, **kwargs):
"""
Generate Kernel Density Estimate plot using Gaussian kernels.
In statistics, `kernel density estimation`_ (KDE) is a non-parametric
way to estimate the probability density function (PDF) of a random
variable. This function uses Gaussian kernels and includes automatic
bandwidth determination.
.. _kernel density estimation:
https://en.wikipedia.org/wiki/Kernel_density_estimation
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable.
If None (default), 'scott' is used.
See :class:`scipy.stats.gaussian_kde` for more information.
ind : NumPy array or int, optional
Evaluation points for the estimated PDF. If None (default),
1000 equally spaced points are used. If `ind` is a NumPy array, the
KDE is evaluated at the points passed. If `ind` is an integer,
`ind` number of equally spaced points are used.
**kwargs
Additional keyword arguments are documented in
:meth:`pandas.%(this-datatype)s.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray of them
See Also
--------
scipy.stats.gaussian_kde : Representation of a kernel-density
estimate using Gaussian kernels. This is the function used
internally to estimate the PDF.
Examples
--------
Given a Series of points randomly sampled from an unknown
distribution, estimate its PDF using KDE with automatic
bandwidth determination and plot the results, evaluating them at
1000 equally spaced points (default):
.. plot::
:context: close-figs
>>> s = pd.Series([1, 2, 2.5, 3, 3.5, 4, 5])
>>> ax = s.plot.kde()
A scalar bandwidth can be specified. Using a small bandwidth value can
lead to over-fitting, while using a large bandwidth value may result
in under-fitting:
.. plot::
:context: close-figs
>>> ax = s.plot.kde(bw_method=0.3)
.. plot::
:context: close-figs
>>> ax = s.plot.kde(bw_method=3)
Finally, the `ind` parameter determines the evaluation points for the
plot of the estimated PDF:
.. plot::
:context: close-figs
>>> ax = s.plot.kde(ind=[1, 2, 3, 4, 5])
For DataFrame, it works in the same way:
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'x': [1, 2, 2.5, 3, 3.5, 4, 5],
... 'y': [4, 4, 4.5, 5, 5.5, 6, 6],
... })
>>> ax = df.plot.kde()
A scalar bandwidth can be specified. Using a small bandwidth value can
lead to over-fitting, while using a large bandwidth value may result
in under-fitting:
.. plot::
:context: close-figs
>>> ax = df.plot.kde(bw_method=0.3)
.. plot::
:context: close-figs
>>> ax = df.plot.kde(bw_method=3)
Finally, the `ind` parameter determines the evaluation points for the
plot of the estimated PDF:
.. plot::
:context: close-figs
>>> ax = df.plot.kde(ind=[1, 2, 3, 4, 5, 6])
"""
return self(kind="kde", bw_method=bw_method, ind=ind, **kwargs)
density = kde
def area(self, x=None, y=None, **kwargs):
"""
Draw a stacked area plot.
An area plot displays quantitative data visually.
This function wraps the matplotlib area function.
Parameters
----------
x : label or position, optional
Coordinates for the X axis. By default uses the index.
y : label or position, optional
Column to plot. By default uses all columns.
stacked : bool, default True
Area plots are stacked by default. Set to False to create a
unstacked plot.
**kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray
Area plot, or array of area plots if subplots is True.
See Also
--------
DataFrame.plot : Make plots of DataFrame using matplotlib / pylab.
Examples
--------
Draw an area plot based on basic business metrics:
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'sales': [3, 2, 3, 9, 10, 6],
... 'signups': [5, 5, 6, 12, 14, 13],
... 'visits': [20, 42, 28, 62, 81, 50],
... }, index=pd.date_range(start='2018/01/01', end='2018/07/01',
... freq='M'))
>>> ax = df.plot.area()
Area plots are stacked by default. To produce an unstacked plot,
pass ``stacked=False``:
.. plot::
:context: close-figs
>>> ax = df.plot.area(stacked=False)
Draw an area plot for a single column:
.. plot::
:context: close-figs
>>> ax = df.plot.area(y='sales')
Draw with a different `x`:
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'sales': [3, 2, 3],
... 'visits': [20, 42, 28],
... 'day': [1, 2, 3],
... })
>>> ax = df.plot.area(x='day')
"""
return self(kind="area", x=x, y=y, **kwargs)
def pie(self, **kwargs):
    """
    Generate a pie plot.

    A pie plot is a proportional representation of the numerical data in a
    column. This function wraps :meth:`matplotlib.pyplot.pie` for the
    specified column. If no column reference is passed and
    ``subplots=True`` a pie plot is drawn for each numerical column
    independently.

    Parameters
    ----------
    y : int or label, optional
        Label or position of the column to plot.
        If not provided, ``subplots=True`` argument must be passed.
    **kwargs
        Keyword arguments to pass on to :meth:`DataFrame.plot`.

    Returns
    -------
    matplotlib.axes.Axes or np.ndarray of them
        A NumPy array is returned when `subplots` is True.

    See Also
    --------
    Series.plot.pie : Generate a pie plot for a Series.
    DataFrame.plot : Make plots of a DataFrame.

    Examples
    --------
    In the example below we have a DataFrame with the information about
    planet's mass and radius. We pass the 'mass' column to the
    pie function to get a pie plot.

    .. plot::
        :context: close-figs

        >>> df = pd.DataFrame({'mass': [0.330, 4.87 , 5.97],
        ...                    'radius': [2439.7, 6051.8, 6378.1]},
        ...                   index=['Mercury', 'Venus', 'Earth'])
        >>> plot = df.plot.pie(y='mass', figsize=(5, 5))

    .. plot::
        :context: close-figs

        >>> plot = df.plot.pie(subplots=True, figsize=(11, 6))
    """
    # A DataFrame pie is ambiguous without either a single target column
    # (y=...) or subplots=True (one pie per numeric column), so reject
    # that combination up front.
    if (
        isinstance(self._parent, ABCDataFrame)
        and kwargs.get("y", None) is None
        and not kwargs.get("subplots", False)
    ):
        raise ValueError("pie requires either y column or 'subplots=True'")
    return self(kind="pie", **kwargs)
def scatter(self, x, y, s=None, c=None, **kwargs):
    """
    Create a scatter plot with varying marker point size and color.

    The coordinates of each point are defined by two dataframe columns and
    filled circles are used to represent each point. This kind of plot is
    useful to see complex correlations between two variables. Points could
    be for instance natural 2D coordinates like longitude and latitude in
    a map or, in general, any pair of metrics that can be plotted against
    each other.

    Parameters
    ----------
    x : int or str
        The column name or column position to be used as horizontal
        coordinates for each point.
    y : int or str
        The column name or column position to be used as vertical
        coordinates for each point.
    s : str, scalar or array-like, optional
        The size of each point. Possible values are:

        - A string with the name of the column to be used for marker's size.
        - A single scalar so all points have the same size.
        - A sequence of scalars, which will be used for each point's size
          recursively. For instance, when passing [2,14] all points size
          will be either 2 or 14, alternatively.

        .. versionchanged:: 1.1.0

    c : str, int or array-like, optional
        The color of each point. Possible values are:

        - A single color string referred to by name, RGB or RGBA code,
          for instance 'red' or '#a98d19'.
        - A sequence of color strings referred to by name, RGB or RGBA
          code, which will be used for each point's color recursively. For
          instance ['green','yellow'] all points will be filled in green or
          yellow, alternatively.
        - A column name or position whose values will be used to color the
          marker points according to a colormap.

    **kwargs
        Keyword arguments to pass on to :meth:`DataFrame.plot`.

    Returns
    -------
    :class:`matplotlib.axes.Axes` or numpy.ndarray of them

    See Also
    --------
    matplotlib.pyplot.scatter : Scatter plot using multiple input data
        formats.

    Examples
    --------
    Let's see how to draw a scatter plot using coordinates from the values
    in a DataFrame's columns.

    .. plot::
        :context: close-figs

        >>> df = pd.DataFrame([[5.1, 3.5, 0], [4.9, 3.0, 0], [7.0, 3.2, 1],
        ...                    [6.4, 3.2, 1], [5.9, 3.0, 2]],
        ...                   columns=['length', 'width', 'species'])
        >>> ax1 = df.plot.scatter(x='length',
        ...                       y='width',
        ...                       c='DarkBlue')

    And now with the color determined by a column as well.

    .. plot::
        :context: close-figs

        >>> ax2 = df.plot.scatter(x='length',
        ...                       y='width',
        ...                       c='species',
        ...                       colormap='viridis')
    """
    # Delegate to the accessor's __call__ with the "scatter" plot kind.
    return self(kind="scatter", x=x, y=y, s=s, c=c, **kwargs)
def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None, **kwargs):
    """
    Generate a hexagonal binning plot.

    Generate a hexagonal binning plot of `x` versus `y`. If `C` is `None`
    (the default), this is a histogram of the number of occurrences
    of the observations at ``(x[i], y[i])``.

    If `C` is specified, specifies values at given coordinates
    ``(x[i], y[i])``. These values are accumulated for each hexagonal
    bin and then reduced according to `reduce_C_function`,
    having as default the NumPy's mean function (:meth:`numpy.mean`).
    (If `C` is specified, it must also be a 1-D sequence
    of the same length as `x` and `y`, or a column label.)

    Parameters
    ----------
    x : int or str
        The column label or position for x points.
    y : int or str
        The column label or position for y points.
    C : int or str, optional
        The column label or position for the value of `(x, y)` point.
    reduce_C_function : callable, default `np.mean`
        Function of one argument that reduces all the values in a bin to
        a single number (e.g. `np.mean`, `np.max`, `np.sum`, `np.std`).
    gridsize : int or tuple of (int, int), default 100
        The number of hexagons in the x-direction.
        The corresponding number of hexagons in the y-direction is
        chosen in a way that the hexagons are approximately regular.
        Alternatively, gridsize can be a tuple with two elements
        specifying the number of hexagons in the x-direction and the
        y-direction.
    **kwargs
        Additional keyword arguments are documented in
        :meth:`DataFrame.plot`.

    Returns
    -------
    matplotlib.AxesSubplot
        The matplotlib ``Axes`` on which the hexbin is plotted.

    See Also
    --------
    DataFrame.plot : Make plots of a DataFrame.
    matplotlib.pyplot.hexbin : Hexagonal binning plot using matplotlib,
        the matplotlib function that is used under the hood.

    Examples
    --------
    The following examples are generated with random data from
    a normal distribution.

    .. plot::
        :context: close-figs

        >>> n = 10000
        >>> df = pd.DataFrame({'x': np.random.randn(n),
        ...                    'y': np.random.randn(n)})
        >>> ax = df.plot.hexbin(x='x', y='y', gridsize=20)

    The next example uses `C` and `np.sum` as `reduce_C_function`.
    Note that `'observations'` values ranges from 1 to 5 but the result
    plot shows values up to more than 25. This is because of the
    `reduce_C_function`.

    .. plot::
        :context: close-figs

        >>> n = 500
        >>> df = pd.DataFrame({
        ...     'coord_x': np.random.uniform(-3, 3, size=n),
        ...     'coord_y': np.random.uniform(30, 50, size=n),
        ...     'observations': np.random.randint(1,5, size=n)
        ...     })
        >>> ax = df.plot.hexbin(x='coord_x',
        ...                     y='coord_y',
        ...                     C='observations',
        ...                     reduce_C_function=np.sum,
        ...                     gridsize=10,
        ...                     cmap="viridis")
    """
    # Only forward these options when the caller set them explicitly, so
    # the plotting backend's own defaults apply otherwise.
    if reduce_C_function is not None:
        kwargs["reduce_C_function"] = reduce_C_function
    if gridsize is not None:
        kwargs["gridsize"] = gridsize
    return self(kind="hexbin", x=x, y=y, C=C, **kwargs)
# Cache of already-imported plotting backend modules, keyed by backend name.
_backends: dict[str, types.ModuleType] = {}
def _load_backend(backend: str) -> types.ModuleType:
"""
Load a pandas plotting backend.
Parameters
----------
backend : str
The identifier for the backend. Either an entrypoint item registered
with pkg_resources, "matplotlib", or a module name.
Returns
-------
types.ModuleType
The imported backend.
"""
from importlib.metadata import entry_points
if backend == "matplotlib":
# Because matplotlib is an optional dependency and first-party backend,
# we need to attempt an import here to raise an ImportError if needed.
try:
module = importlib.import_module("pandas.plotting._matplotlib")
except ImportError:
raise ImportError(
"matplotlib is required for plotting when the "
'default backend "matplotlib" is selected.'
) from None
return module
found_backend = False
eps = entry_points()
if "pandas_plotting_backends" in eps:
for entry_point in eps["pandas_plotting_backends"]:
found_backend = entry_point.name == backend
if found_backend:
module = entry_point.load()
break
if not found_backend:
# Fall back to unregistered, module name approach.
try:
module = importlib.import_module(backend)
found_backend = True
except ImportError:
# We re-raise later on.
pass
if found_backend:
if hasattr(module, "plot"):
# Validate that the interface is implemented when the option is set,
# rather than at plot time.
return module
raise ValueError(
f"Could not find plotting backend '{backend}'. Ensure that you've "
f"installed the package providing the '{backend}' entrypoint, or that "
"the package has a top-level `.plot` method."
)
def _get_plot_backend(backend: str | None = None):
    """
    Return the plotting backend to use (e.g. `pandas.plotting._matplotlib`).

    pandas plots with matplotlib by default, but any third-party module that
    exposes a top-level `.plot` method can serve as a backend. The backend
    name comes from the `backend` keyword argument when given, otherwise from
    the `pandas.options.plotting.backend` option. The module is imported
    lazily, since matplotlib is a soft dependency and pandas works without it.

    Notes
    -----
    Modifies `_backends` with imported backend as a side effect.
    """
    backend_str = backend or get_option("plotting.backend")

    # Serve from the cache when this backend was already imported.
    cached = _backends.get(backend_str)
    if cached is not None:
        return cached

    module = _load_backend(backend_str)
    _backends[backend_str] = module
    return module
|
gfyoung/pandas
|
pandas/plotting/_core.py
|
Python
|
bsd-3-clause
| 61,841
|
[
"Gaussian"
] |
e85704e89256685fd50c89d95938ead3b84b2475413872b23f3c0fd48e6d68a8
|
"""
Created on 6/05/2013
@author: thom
"""
from evaluator import Evaluator
from molecule import Molecule
from molecular_population import MolecularPopulation
import logging
import os
from rdkit.Chem import Draw
from rdkit.Chem import AllChem as Chem
class DrawMolecules(Evaluator):

    """Draw to a file the longest molecule found in the final population."""

    def get_result_titles(self):
        """Return result column titles for this evaluator (none)."""
        return []

    @classmethod
    def evaluate(cls, results_filename, **kwargs):
        """Find the largest molecule in the final population and draw it.

        The drawing is written as a PNG next to the results file, with a
        ``-molecule.png`` suffix.

        Parameters
        ----------
        results_filename : str
            Path to a results file loadable by ``Evaluator.load_results``.

        Raises
        ------
        ValueError
            If no molecule has a positive quantity in the final population.
        """
        results = Evaluator.load_results(results_filename)
        population = MolecularPopulation(population=results['initial_population'],
                                         reactions=results['reactions'], size=100)
        final_items = set(item for item in population.get_items()
                          if population.get_quantity(item) > 0)
        # Guard against an empty population: the original manual scan left
        # max_mol unbound (NameError) in that case.
        if not final_items:
            raise ValueError("No molecules with positive quantity in the final population")
        max_mol = max((Molecule(smiles) for smiles in final_items),
                      key=lambda mol: mol.GetNumAtoms())
        logging.info("Longest molecule found is {}".format(Chem.MolToSmiles(max_mol)))
        Chem.Compute2DCoords(max_mol)
        Draw.MolToFile(max_mol, os.path.splitext(results_filename)[0] + "-molecule.png",
                       fitImage=True, size=(2000, 2000))
|
th0mmeke/toyworld
|
evaluators/draw_molecules.py
|
Python
|
gpl-3.0
| 1,244
|
[
"RDKit"
] |
025e90a07061b6516d800b9af1471bb040b91aec0be2738013caf1a188c4edf9
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Creates the Location lookup model and alters Visit.location to be a
    # foreign key pointing at it.

    dependencies = [
        ('visit', '0009_auto_20150429_0450'),
    ]

    operations = [
        # New table of uniquely-named locations, ordered alphabetically.
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=255)),
            ],
            options={
                'ordering': ('name',),
            },
        ),
        # Re-point the existing Visit.location field at the new model;
        # reverse lookups become visit_instance.location.visits.
        migrations.AlterField(
            model_name='visit',
            name='location',
            field=models.ForeignKey(related_name='visits', to='visit.Location'),
        ),
    ]
|
koebbe/homeworks
|
visit/migrations/0010_auto_20150429_1453.py
|
Python
|
mit
| 803
|
[
"VisIt"
] |
a84f12a687eec561e1fc485fc2d1fd42715b06dad72ee9011146f2515e19bebc
|
""" Functions to run full experiment """
import contour_utils as cc
import experiment_utils as eu
import mv_gaussian as mv
import clf_utils as cu
import generate_melody as gm
import pandas as pd
import numpy as np
import random
import json
import os
from contour_utils import getFeatureInfo
from sklearn.externals import joblib
def run_glassceiling_experiment(meltype):
    """Compute per-track "glass ceiling" scores for extracted pitch contours.

    For every track listed in ``melody_trackids_orch.json``, locates the
    contour file and ground-truth melody annotation and scores them with
    ``cc.contour_glass_ceiling``. Returns a dict mapping track id to score.

    Parameters
    ----------
    meltype : int
        Melody annotation type; used to build MedleyDB annotation filenames
        (only relevant for the MedleyDB configurations below).
    """

    def get_fpaths(trackid, meltype):
        # NOTE(review): `trackid` is never used in this body; every block
        # below reads the enclosing loop variable `test_track` instead.
        # They hold the same value at call time, but confirm before
        # refactoring the closure away.
        #
        # Each configuration block below overwrites the previous one's
        # variables, so only the LAST block (Orchset + SIMM) takes effect.
        # The earlier blocks are kept as ready-made dataset/extractor
        # configurations to re-enable by reordering.
        contour_suffix = \
            "MIX_vamp_melodia-contours_melodia-contours_contoursall.csv"
        contours_output_path = "melodia_contours"
        contour_suffix = "MIX.pitch.ctr"
        contours_output_path = "/Users/jjb/Documents/PhD/data/MedleyDB/Conv_mu-1_G-0_LHSF-0_pC-27.56_pDTh-1.2_pFTh-0.9_tC-75_mD-100_vxTol-1_Pchvx-1_wNoteTrans-1_wContourTrans-1_wInstrTrans-5_scale-1_-_scaleSurr-1"
        annot_suffix = "MELODY%s.csv" % str(meltype)
        mel_dir = "MELODY%s" % str(meltype)
        annot_path = os.path.join(os.environ['MEDLEYDB_PATH'], 'Annotations',
                                  'Melody_Annotations', mel_dir)
        contour_fname = "%s_%s" % (test_track, contour_suffix)
        contour_fpath = os.path.join(contours_output_path, contour_fname)
        annot_fname = "%s_%s" % (test_track, annot_suffix)
        annot_fpath = os.path.join(annot_path, annot_fname)
        # For MEDLEY with SIMM -------------------------
        contour_suffix = "MIX.pitch.ctr"
        contours_output_path = "/Users/jjb/Google Drive/PhD/conferences/ISMIR2016/SIMM-PC/MedleyDB/C4-Contours/Conv_mu-1_G-0_LHSF-0_pC-27.56_pDTh-0.9_pFTh-0.9_tC-50_mD-100"
        annot_suffix = "MELODY%s.csv" % str(meltype)
        mel_dir = "MELODY%s" % str(meltype)
        annot_path = os.path.join(os.environ['MEDLEYDB_PATH'], 'Annotations',
                                  'Melody_Annotations', mel_dir)
        contour_fname = "%s_%s" % (test_track, contour_suffix)
        contour_fpath = os.path.join(contours_output_path, contour_fname)
        annot_fname = "%s_%s" % (test_track, annot_suffix)
        annot_fpath = os.path.join(annot_path, annot_fname)
        # Fot ORCHSET with SIMM --------------------------
        contour_suffix = "pitch.ctr"
        contours_output_path = "/Users/jjb/Google Drive/PhD/conferences/ISMIR2016/SIMM-PC/Orchset/C4-Contours/Conv_mu-1_G-0_LHSF-0_pC-27.56_pDTh-1.3_pFTh-0.9_tC-50_mD-100"
        annot_suffix = "mel"
        annot_path = os.path.join('/Users/jjb/Google Drive/data/segments/excerpts/GT')
        contour_fname = "%s.%s" % (test_track, contour_suffix)
        contour_fpath = os.path.join(contours_output_path, contour_fname)
        annot_fname = "%s.%s" % (test_track, annot_suffix)
        annot_fpath = os.path.join(annot_path, annot_fname)
        # For ORCHSET with MELODIA (BIT)--------------------------
        annot_path = os.path.join('/Users/jjb/Google Drive/data/segments/excerpts/GT')
        contour_suffix = \
            "_vamp_melodia-contours_melodia-contours_contoursall.csv"
        contours_output_path = "/Users/jjb/Google Drive/PhD/conferences/ISMIR2016/SIMM-PC/Orchset/BIT"
        annot_suffix = "mel"
        contour_fname = "%s%s" % (test_track, contour_suffix)
        contour_fpath = os.path.join(contours_output_path, contour_fname)
        annot_fname = "%s.%s" % (test_track, annot_suffix)
        annot_fpath = os.path.join(annot_path, annot_fname)
        # Fot ORCHSET with SIMM --------------------------
        contour_suffix = "pitch.ctr"
        contours_output_path = "/Users/jjb/Google Drive/PhD/conferences/ISMIR2016/SIMM-PC/Orchset/C4-Contours/Conv_mu-1_G-0_LHSF-0_pC-27.56_pDTh-0.9_pFTh-0.9_tC-50_mD-100"
        #contours_output_path = "/Users/jjb/Google Drive/PhD/Tests/Orchset/ScContours/"
        annot_suffix = "mel"
        annot_path = os.path.join('/Users/jjb/Google Drive/data/segments/excerpts/GT')
        contour_fname = "%s.%s" % (test_track, contour_suffix)
        contour_fpath = os.path.join(contours_output_path, contour_fname)
        annot_fname = "%s.%s" % (test_track, annot_suffix)
        annot_fpath = os.path.join(annot_path, annot_fname)
        # ----------------------------
        return contour_fpath, annot_fpath

    # Compute Overlap with Annotation MEDLEY
    # with open('melody_trackids.json', 'r') as fhandle:
    #     track_list = json.load(fhandle)
    # EDIT Compute Overlap with Annotation Orchset
    with open('melody_trackids_orch.json', 'r') as fhandle:
        track_list = json.load(fhandle)
    track_list = track_list['tracks']

    overlap_results = {}
    for test_track in track_list:
        print test_track
        cfpath, afpath = get_fpaths(test_track, meltype=meltype)
        print cfpath
        print afpath
        overlap_results[test_track] = \
            cc.contour_glass_ceiling(cfpath, afpath)
    return overlap_results
def run_experiments(mel_type, outdir, olaps='all', decode='viterbi'):
if not os.path.exists(outdir):
os.mkdir(outdir)
# Compute Overlap with Annotation
# For MEDLEYDB
#with open('melody_trackids.json', 'r') as fhandle:
# track_list = json.load(fhandle)
# For Orchset
with open('melody_trackids_orch.json', 'r') as fhandle:
track_list = json.load(fhandle)
track_list = track_list['tracks']
dset_contour_dict, dset_annot_dict = \
eu.compute_all_overlaps(track_list, meltype=mel_type)
mdb_files, splitter = eu.create_splits(test_size=0.25)
split_num = 1
for train, test in splitter:
print "="*80
print "Processing split number %s" % split_num
print "="*80
outdir2 = os.path.join(outdir, 'splitnum_%s' % split_num)
if not os.path.exists(outdir2):
os.mkdir(outdir2)
outdir2 = os.path.join(outdir2)
split_num = split_num + 1
random.shuffle(train)
n_train = len(train) - (len(test)/2)
train_tracks = mdb_files[train[:n_train]]
valid_tracks = mdb_files[train[n_train:]]
test_tracks = mdb_files[test]
train_contour_dict = {k: dset_contour_dict[k] for k in train_tracks}
valid_contour_dict = {k: dset_contour_dict[k] for k in valid_tracks}
test_contour_dict = {k: dset_contour_dict[k] for k in test_tracks}
#train_annot_dict = {k: dset_annot_dict[k] for k in train_tracks}
valid_annot_dict = {k: dset_annot_dict[k] for k in valid_tracks}
test_annot_dict = {k: dset_annot_dict[k] for k in test_tracks}
anyContourDataFrame = dset_contour_dict[dset_contour_dict.keys()[0]]
feats, idxStartFeatures, idxEndFeatures = getFeatureInfo(anyContourDataFrame)
olap_stats, _ = eu.olap_stats(train_contour_dict)
fpath = os.path.join(outdir2, 'olap_stats.csv')
olap_stats.to_csv(fpath)
if olaps == 'all':
olap_list = np.arange(0, 1, 0.1)
else:
if mel_type == 1:
olap_list = [0.5]
else:
olap_list = [0.4]
for olap_thresh in olap_list:
try:
print '='*40
print "overlap threshold = %s" % olap_thresh
print '='*40
outdir3 = os.path.join(outdir2, 'olap_%s' % olap_thresh)
if not os.path.exists(outdir3):
os.mkdir(outdir3)
outdir3 = os.path.join(outdir3)
print "computing labels"
x_train, y_train, x_valid, y_valid, \
x_test, y_test, test_contour_dict = \
compute_labels(train_contour_dict, valid_contour_dict, \
test_contour_dict, olap_thresh)
print "training and scoring classifier"
clf, best_thresh = classifier(x_train, y_train, x_valid, y_valid,
x_test, y_test, outdir3)
#print "computing melody output"
#melody_output(clf, best_thresh, decode,
# valid_contour_dict, valid_annot_dict,
# test_contour_dict, test_annot_dict, outdir3, idxStartFeatures, idxEndFeatures)
# EDIT
#print "scoring with multivariate gaussian"
#multivariate_gaussian(x_train, y_train, x_test, y_test, outdir3)
except:
print "Error in run_experiments"
def compute_labels(train_contour_dict, valid_contour_dict,
                   test_contour_dict, olap_thresh):
    """Label contours by overlap threshold and vectorise them for sklearn.

    Contours whose annotation overlap meets ``olap_thresh`` are labelled as
    melody; the labelled dicts are then converted to feature/label arrays.
    Returns (x_train, y_train, x_valid, y_valid, x_test, y_test,
    test_contour_dict).
    """
    # Attach labels to every contour DataFrame at the requested threshold.
    labelled = eu.label_all_contours(train_contour_dict, valid_contour_dict,
                                     test_contour_dict, olap_thresh=olap_thresh)
    train_contour_dict, valid_contour_dict, test_contour_dict = labelled

    # Convert each labelled dict into sklearn-ready arrays.
    x_train, y_train = cc.pd_to_sklearn(train_contour_dict)
    x_valid, y_valid = cc.pd_to_sklearn(valid_contour_dict)
    x_test, y_test = cc.pd_to_sklearn(test_contour_dict)
    return x_train, y_train, x_valid, y_valid, x_test, y_test, test_contour_dict
def multivariate_gaussian(x_train, y_train, x_test, y_test, outdir):
    """Score contours with a multivariate-Gaussian melodiness model.

    Box-Cox transforms the features, fits one Gaussian per class, computes
    melodiness scores for train and test, and writes the resulting metrics
    to ``outdir/melodiness_scores.csv``.
    """
    # Score with Multivariate Gaussian
    # Transform data using boxcox transform, and fit multivariate gaussians.
    x_train_boxcox, x_test_boxcox = mv.transform_features(x_train, x_test)
    rv_pos, rv_neg = mv.fit_gaussians(x_train_boxcox, y_train)

    # Compute melodiness scores on train and test set
    m_train, m_test = mv.compute_all_melodiness(x_train_boxcox, x_test_boxcox,
                                                rv_pos, rv_neg)

    # Compute various metrics based on melodiness scores.
    melodiness_scores = mv.melodiness_metrics(m_train, m_test, y_train, y_test)
    best_thresh, max_fscore, thresh_plot_data = \
        eu.get_best_threshold(y_test, m_test)  # THIS SHOULD PROBABLY BE VALIDATION NUMBERS...
    # thresh_plot_data = pd.DataFrame(np.array(thresh_plot_data).transpose(),
    #                                 columns=['recall', 'precision',
    #                                          'thresh', 'f1'])
    # fpath = os.path.join(outdir, 'thresh_plot_data.csv')
    # thresh_plot_data.to_csv(fpath)
    melodiness_scores = pd.DataFrame.from_dict(melodiness_scores)
    fpath = os.path.join(outdir, 'melodiness_scores.csv')
    melodiness_scores.to_csv(fpath)
    print "Melodiness best thresh = %s" % best_thresh
    print "Melodiness max f1 score = %s" % max_fscore
    print "overall melodiness scores:"
    print melodiness_scores
def classifier(x_train, y_train, x_valid, y_valid, x_test, y_test, outdir):
""" Train Classifier
"""
# Cross Validation
best_depth, _, cv_plot_data = cu.cross_val_sweep(x_train, y_train)
print "Classifier best depth = %s" % best_depth
cv_plot_data = pd.DataFrame(np.array(cv_plot_data).transpose(),
columns=['max depth', 'accuracy', 'std'])
fpath = os.path.join(outdir, 'cv_plot_data.csv')
cv_plot_data.to_csv(fpath)
# Training
clf = cu.train_clf(x_train, y_train, best_depth)
# Predict and Score
p_train, p_valid, p_test = cu.clf_predictions(x_train, x_valid, x_test, clf)
clf_scores = cu.clf_metrics(p_train, p_test, y_train, y_test)
print "Classifier scores:"
print clf_scores
# Get threshold that maximizes F1 score
best_thresh, max_fscore, thresh_plot_data = \
eu.get_best_threshold(y_valid, p_valid)
# thresh_plot_data = pd.DataFrame(np.array(thresh_plot_data).transpose(),
# columns=['recall', 'precision',
# 'thresh', 'f1'])
# fpath = os.path.join(outdir, 'thresh_plot_data.csv')
# thresh_plot_data.to_csv(fpath)
clf_scores = pd.DataFrame.from_dict(clf_scores)
fpath = os.path.join(outdir, 'classifier_scores.csv')
clf_scores.to_csv(fpath)
clf_outdir = os.path.join(outdir, 'classifier')
if not os.path.exists(clf_outdir):
os.mkdir(clf_outdir)
clf_fpath = os.path.join(clf_outdir, 'rf_clf.pkl')
joblib.dump(clf, clf_fpath)
print "Classifier best threshold = %s" % best_thresh
print "Classifier maximum f1 score = %s" % max_fscore
return clf, best_thresh
def melody_output(clf, best_thresh, decode,
valid_contour_dict, valid_annot_dict,
test_contour_dict, test_annot_dict, outdir,idxStartFeatures=0,idxEndFeatures=11):
""" Generate Melody Output
"""
# Add predicted melody probabilites to validation set contour data
for key in valid_contour_dict.keys():
valid_contour_dict[key] = eu.contour_probs(clf, valid_contour_dict[key],idxStartFeatures,idxEndFeatures)
# Add predicted melody probabilites to test set contour data
for key in test_contour_dict.keys():
test_contour_dict[key] = eu.contour_probs(clf, test_contour_dict[key],idxStartFeatures,idxEndFeatures)
meldir = os.path.join(outdir, 'melody_output')
if not os.path.exists(meldir):
os.mkdir(meldir)
meldir = os.path.join(meldir)
# Generate melody output using predictions
print "Generating Validation Melodies"
mel_valid_dict = {}
for key in valid_contour_dict.keys():
print key
mel_valid_dict[key] = gm.melody_from_clf(valid_contour_dict[key],
prob_thresh=best_thresh,
method=decode)
fpath = os.path.join(meldir, "%s_pred.csv" % key)
mel_valid_dict[key].to_csv(fpath, header=False, index=True)
# Score Melody Output
mel_scores = gm.score_melodies(mel_valid_dict, valid_annot_dict)
overall_scores = \
pd.DataFrame(columns=['VR', 'VFA', 'RPA', 'RCA', 'OA'],
index=mel_scores.keys())
overall_scores['VR'] = \
[mel_scores[key]['Voicing Recall'] for key in mel_scores.keys()]
overall_scores['VFA'] = \
[mel_scores[key]['Voicing False Alarm'] for key in mel_scores.keys()]
overall_scores['RPA'] = \
[mel_scores[key]['Raw Pitch Accuracy'] for key in mel_scores.keys()]
overall_scores['RCA'] = \
[mel_scores[key]['Raw Chroma Accuracy'] for key in mel_scores.keys()]
overall_scores['OA'] = \
[mel_scores[key]['Overall Accuracy'] for key in mel_scores.keys()]
scores_fpath = os.path.join(outdir, "validate_mel_scores.csv")
overall_scores.to_csv(scores_fpath)
score_summary = os.path.join(outdir, "validate_mel_score_summary.csv")
overall_scores.describe().to_csv(score_summary)
# Generate melody output using predictions
print "Generating Test Melodies"
mel_test_dict = {}
for key in test_contour_dict.keys():
print key
mel_test_dict[key] = gm.melody_from_clf(test_contour_dict[key],
prob_thresh=best_thresh,
method=decode)
fpath = os.path.join(meldir, "%s_pred.csv" % key)
mel_test_dict[key].to_csv(fpath, header=False, index=True)
# Score Melody Output
mel_scores = gm.score_melodies(mel_test_dict, test_annot_dict)
overall_scores = \
pd.DataFrame(columns=['VR', 'VFA', 'RPA', 'RCA', 'OA'],
index=mel_scores.keys())
overall_scores['VR'] = \
[mel_scores[key]['Voicing Recall'] for key in mel_scores.keys()]
overall_scores['VFA'] = \
[mel_scores[key]['Voicing False Alarm'] for key in mel_scores.keys()]
overall_scores['RPA'] = \
[mel_scores[key]['Raw Pitch Accuracy'] for key in mel_scores.keys()]
overall_scores['RCA'] = \
[mel_scores[key]['Raw Chroma Accuracy'] for key in mel_scores.keys()]
overall_scores['OA'] = \
[mel_scores[key]['Overall Accuracy'] for key in mel_scores.keys()]
scores_fpath = os.path.join(outdir, "all_mel_scores.csv")
overall_scores.to_csv(scores_fpath)
score_summary = os.path.join(outdir, "mel_score_summary.csv")
overall_scores.describe().to_csv(score_summary)
|
georgid/SourceFilterContoursMelody
|
src/contour_classification/run_experiments.py
|
Python
|
gpl-3.0
| 16,043
|
[
"Gaussian"
] |
2f9edbc854b75f3635cab64657f78d9402ae43d6672da98e6c16ac57621180a9
|
from robot import Robot
from math import copysign
class TheRobot(Robot):
    '''Strategy:
    Stay near the middle of the field,
    Move if being attacked.
    '''

    def initialize(self):
        """Set up per-match state."""
        self.health = 100
        self._flee = 15          # countdown of turns left in "flee" mode
        self._goto = 0           # index into the waypoint/heading tables below
        self._moveto_choices = [7, 7, -7, -7]      # target coordinate per waypoint
        self._turnto_choices = [0, 90, 180, -90]   # target heading per waypoint

    def respond(self):
        """Per-turn behaviour: scan and shoot, then steer (fleeing if hit)."""
        self.scan_and_fire()

        # Move away if damaged: a change in health means we were hit, so
        # advance to the next waypoint and sprint there for 30 turns.
        health = self.sensors['HEALTH']
        if health != self.health and not self._flee:
            self._flee = 30
            self._goto += 1
            self._goto = self._goto % 4
        self.health = health

        self.turnto()
        self.moveto()

    def closest_turn(self, a):
        '''return the smallest angle to turn to get to absolute angle a
        should never return turn < -180 or turn > 180
        '''
        target = a % 360
        current = self.sensors['GYRO']
        turn = target - current
        # Normalise into (-180, 180] so we always take the short way round.
        if turn > 180:
            turn -= 360
        elif turn < -180:
            turn += 360
        return turn

    def turnto(self):
        """Apply torque toward the current waypoint heading."""
        a = self._turnto_choices[self._goto]
        err = -self.closest_turn(a)
        if self._flee:
            gain = 50   # much stiffer steering while fleeing
        else:
            gain = 1.5
        self.torque(-gain * err)

    def moveto(self):
        # Move to the position set in self._moveto_choices
        moveto = self._moveto_choices[self._goto % 4]
        pos = self.sensors['POS']
        if self._flee:
            maxspeed = 100
            gain = -16
            self._flee -= 1
        else:
            maxspeed = 50
            gain = 6
        coord = pos[self._goto % 2]            # alternate between the two axes
        sign = [-1, -1, 1, 1][self._goto % 4]
        error = coord - moveto
        # Proportional control, clamped to +/- maxspeed.
        force = max(min(maxspeed, sign * gain * error), -maxspeed)
        self.force(force)

    def scan_and_fire(self):
        # Move the turret around, look for stuff and shoot it
        self.turret(-75)
        self.ping()
        kind, angle, dist = self.sensors['PING']
        # Bug fix: the original tested `kind in 'r'`, which is also True for
        # an empty string; only robot contacts ('r') should be targeted.
        if kind == 'r':
            if dist > 4:
                # Try not to blast yourself
                self.fire(dist)
            else:
                self.fire()
|
jav/pybotwar
|
robots/examples/robot07.py
|
Python
|
gpl-3.0
| 2,223
|
[
"BLAST"
] |
c1df1cd634d324e6c311c79c25e651aaa9aa0129a5dd2d99509f2430530ba60e
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Global debug switch for the d() helper below.
DEBUG = True

# count of edges not cells
W = 20 + 1


def d(args):
    """Print *args* when the module-level DEBUG flag is set.

    Reading a module global needs no ``global`` declaration, so the
    original's redundant ``global DEBUG`` statement has been dropped.
    """
    if DEBUG:
        print(args)
def printBoard(b):
    """Print the board values in rows of W cells, tab separated."""
    for idx, value in enumerate(b):
        print(value, " \t", end='')
        # End the row after every W-th cell.
        if (idx + 1) % W == 0:
            print('')
def xmain():
    """Debug entry point: dump the visiting order for a 21-wide board."""
    order = list(walk(21))
    print(order)
def main():
    """Count lattice paths (Project Euler 15) by dynamic programming.

    Fills a W x W board in the anti-diagonal order produced by walk(); each
    cell ends up holding the number of right/down paths from that vertex to
    the bottom-right corner, then the board is printed.

    NOTE: the original declared ``global W, H``, but ``H`` is never defined
    anywhere in the file and ``W`` is only read, so the declaration was
    removed.
    """
    board = [0 for i in range(W * W)]
    board[W * W - 1] = 1          # one (empty) path from the target to itself
    for i in walk(W):
        cnt = 0
        if (i % W) < (W - 1):
            cnt = cnt + board[i + 1]     # paths continuing to the right
        if i < (W * (W - 1)):
            cnt = cnt + board[i + W]     # paths continuing downward
        d('setting %i to %i' % (i, cnt))
        board[i] = cnt
    printBoard(board)
def walk(W):
    """Yield the indices of a W x W grid in reverse anti-diagonal order.

    Starting next to the bottom-right corner and finishing at index 0, each
    yielded cell's right and down neighbours have already been visited —
    exactly the order a paths-to-corner DP needs. The corner itself
    (W*W - 1) is not yielded. For W=5 the order is:
    23 19 22 18 14 21 17 13 9 20 16 12 8 4 15 11 7 3 10 6 2 5 1 0
    """
    idx = W * W - 1        # current cell; start at the bottom-right corner
    x = 0                  # diagonal step counters used to jump between rows
    y = -1
    dy = 1                 # direction the diagonals sweep (flips at top-right)
    for _ in range(W * W - 1):
        col = idx % W
        if idx == W - 1:
            # Top-right corner reached: reverse the sweep direction.
            dy = -1
            idx += y * W - x
        elif idx < W - 1:
            # Top row: step left and drop onto the next diagonal.
            x -= 1
            y += dy
            idx += y * W - x
        elif col == W - 1:
            # Right column: step down to start the next diagonal.
            x += 1
            y += dy
            idx += y * W - x
        else:
            # Interior cell: move one step up-right along the diagonal.
            idx -= W - 1
        yield idx
if __name__ =='__main__':main()
|
anomen-s/programming-challenges
|
projecteuler.net/0015-Lattice_paths/solve.py
|
Python
|
gpl-2.0
| 1,347
|
[
"VisIt"
] |
10706785b87b2cc827911cb853013376d7bdeb727d4e4c96876b42652543e88f
|
"""Dirac notation for states."""
from __future__ import print_function, division
from sympy import (cacheit, conjugate, Expr, Function, integrate, oo, sqrt,
Tuple)
from sympy.core.compatibility import range
from sympy.printing.pretty.stringpict import stringPict
from sympy.physics.quantum.qexpr import QExpr, dispatch_method
# Public API of this module.
__all__ = [
    'KetBase',
    'BraBase',
    'StateBase',
    'State',
    'Ket',
    'Bra',
    'TimeDepState',
    'TimeDepBra',
    'TimeDepKet',
    'Wavefunction'
]

#-----------------------------------------------------------------------------
# States, bras and kets.
#-----------------------------------------------------------------------------

# ASCII brackets, used when pretty-printing without unicode support.
_lbracket = "<"
_rbracket = ">"
_straight_bracket = "|"

# Unicode brackets
# MATHEMATICAL ANGLE BRACKETS
_lbracket_ucode = u"\N{MATHEMATICAL LEFT ANGLE BRACKET}"
_rbracket_ucode = u"\N{MATHEMATICAL RIGHT ANGLE BRACKET}"
# LIGHT VERTICAL BAR
_straight_bracket_ucode = u"\N{LIGHT VERTICAL BAR}"

# Other options for unicode printing of <, > and | for Dirac notation.
# LEFT-POINTING ANGLE BRACKET
# _lbracket = u"\u2329"
# _rbracket = u"\u232A"
# LEFT ANGLE BRACKET
# _lbracket = u"\u3008"
# _rbracket = u"\u3009"
# VERTICAL LINE
# _straight_bracket = u"\u007C"
class StateBase(QExpr):
"""Abstract base class for general abstract states in quantum mechanics.
All other state classes defined will need to inherit from this class. It
carries the basic structure for all other states such as dual, _eval_adjoint
and label.
This is an abstract base class and you should not instantiate it directly,
instead use State.
"""
@classmethod
def _operators_to_state(cls, ops, **options):
    """ Returns the eigenstate instance for the passed operators.

    This method should be overridden in subclasses. It will handle being
    passed either an Operator instance or set of Operator instances. It
    should return the corresponding state INSTANCE or simply raise a
    NotImplementedError. See cartesian.py for an example.
    """
    # First parameter renamed from the misleading `self` to the
    # conventional `cls`: this is a classmethod and receives the class.
    raise NotImplementedError("Cannot map operators to states in this class. Method not implemented!")
def _state_to_operators(self, op_classes, **options):
    """ Returns the operators which this state instance is an eigenstate
    of.

    This method should be overridden in subclasses. It will be called on
    state instances and be passed the operator classes that we wish to make
    into instances. The state instance will then transform the classes
    appropriately, or raise a NotImplementedError if it cannot return
    operator instances. See cartesian.py for examples.
    """
    # Abstract hook: the base class has no operator mapping of its own.
    raise NotImplementedError(
        "Cannot map this state to operators. Method not implemented!")
@property
def operators(self):
    """Return the operator(s) that this state is an eigenstate of."""
    from .operatorset import state_to_operators  # import internally to avoid circular import errors
    # Delegate to the operatorset registry, which subclasses hook into via
    # _state_to_operators/_operators_to_state.
    return state_to_operators(self)
def _enumerate_state(self, num_states, **options):
    # Abstract hook for subclasses; the base class cannot enumerate states.
    raise NotImplementedError("Cannot enumerate this state!")
def _represent_default_basis(self, **options):
    # By default, represent this state in the basis of the operators it is
    # an eigenstate of (see the `operators` property).
    return self._represent(basis=self.operators)
#-------------------------------------------------------------------------
# Dagger/dual
#-------------------------------------------------------------------------
@property
def dual(self):
    """Return the dual state of this one."""
    # Construct the dual from the same Hilbert space and arguments, using
    # the subclass-provided dual_class (e.g. a Ket's dual is a Bra).
    return self.dual_class()._new_rawargs(self.hilbert_space, *self.args)
@classmethod
def dual_class(self):
    """Return the class used to construct the dual."""
    # NOTE(review): the first argument is named `self` although this is a
    # classmethod; left unchanged here to keep the code byte-identical.
    raise NotImplementedError(
        'dual_class must be implemented in a subclass'
    )
def _eval_adjoint(self):
    """Compute the dagger of this state using the dual."""
    # The adjoint (dagger) of a state is its dual (bra <-> ket).
    return self.dual
#-------------------------------------------------------------------------
# Printing
#-------------------------------------------------------------------------
def _pretty_brackets(self, height, use_unicode=True):
    """Build left/right bracket stringPicts of the given height.

    Returns a 2-tuple of ``stringPict`` objects for the left and right
    brackets. Ideally this could be done by ``pform.parens``, but it
    does not support the angled < and > characters.
    """
    # Setup for unicode vs ascii drawing characters.
    if use_unicode:
        lbracket, rbracket = self.lbracket_ucode, self.rbracket_ucode
        slash, bslash, vert = u'\N{BOX DRAWINGS LIGHT DIAGONAL UPPER RIGHT TO LOWER LEFT}', \
            u'\N{BOX DRAWINGS LIGHT DIAGONAL UPPER LEFT TO LOWER RIGHT}', \
            u'\N{BOX DRAWINGS LIGHT VERTICAL}'
    else:
        lbracket, rbracket = self.lbracket, self.rbracket
        slash, bslash, vert = '/', '\\', '|'
    # If height is 1, just return single-character brackets.
    if height == 1:
        return stringPict(lbracket), stringPict(rbracket)
    # Make height even so the bracket point sits between two rows.
    height += (height % 2)
    brackets = []
    for bracket in lbracket, rbracket:
        # Angled left bracket '<': upper half slants down-left, lower half
        # slants back down-right.
        if bracket in {_lbracket, _lbracket_ucode}:
            bracket_args = [' ' * (height//2 - i - 1) +
                            slash for i in range(height // 2)]
            bracket_args.extend(
                [' ' * i + bslash for i in range(height // 2)])
        # Angled right bracket '>': mirror image of the above.
        elif bracket in {_rbracket, _rbracket_ucode}:
            bracket_args = [' ' * i + bslash for i in range(height // 2)]
            bracket_args.extend([' ' * (
                height//2 - i - 1) + slash for i in range(height // 2)])
        # Straight bracket '|': a column of vertical bars.
        elif bracket in {_straight_bracket, _straight_bracket_ucode}:
            bracket_args = [vert for i in range(height)]
        else:
            raise ValueError(bracket)
        # Baseline at mid-height so the bracket centers on the contents.
        brackets.append(
            stringPict('\n'.join(bracket_args), baseline=height//2))
    return brackets
def _sympystr(self, printer, *args):
    """Plain string form: the printed contents wrapped in this state's brackets."""
    body = self._print_contents(printer, *args)
    return '%s%s%s' % (self.lbracket, body, self.rbracket)
def _pretty(self, printer, *args):
    """2D pretty form: contents flanked by full-height brackets."""
    from sympy.printing.pretty.stringpict import prettyForm
    # Print the contents first so the brackets can match its height.
    pform = self._print_contents_pretty(printer, *args)
    lbracket, rbracket = self._pretty_brackets(
        pform.height(), printer._use_unicode)
    # Splice brackets onto either side of the contents.
    pform = prettyForm(*pform.left(lbracket))
    pform = prettyForm(*pform.right(rbracket))
    return pform
def _latex(self, printer, *args):
    """LaTeX form: contents wrapped in this state's LaTeX brackets."""
    contents = self._print_contents_latex(printer, *args)
    # The extra {} brackets are needed to get matplotlib's latex
    # rendered to render this properly.
    return '{%s%s%s}' % (self.lbracket_latex, contents, self.rbracket_latex)
class KetBase(StateBase):
    """Base class for Kets.

    This class defines the dual property and the brackets for printing. This is
    an abstract base class and you should not instantiate it directly, instead
    use Ket.
    """

    # Bracket characters used by the various printers: |psi>
    lbracket = _straight_bracket
    rbracket = _rbracket
    lbracket_ucode = _straight_bracket_ucode
    rbracket_ucode = _rbracket_ucode
    lbracket_latex = r'\left|'
    rbracket_latex = r'\right\rangle '

    @classmethod
    def default_args(self):
        return ("psi",)

    @classmethod
    def dual_class(self):
        return BraBase

    def __mul__(self, other):
        """KetBase*other"""
        from sympy.physics.quantum.operator import OuterProduct
        # |k><b| is an outer product; anything else falls back to Expr.
        if not isinstance(other, BraBase):
            return Expr.__mul__(self, other)
        return OuterProduct(self, other)

    def __rmul__(self, other):
        """other*KetBase"""
        from sympy.physics.quantum.innerproduct import InnerProduct
        # <b|k> is an inner product; anything else falls back to Expr.
        if not isinstance(other, BraBase):
            return Expr.__rmul__(self, other)
        return InnerProduct(other, self)

    #-------------------------------------------------------------------------
    # _eval_* methods
    #-------------------------------------------------------------------------

    def _eval_innerproduct(self, bra, **hints):
        """Evaluate the inner product between this ket and a bra.

        This is called to compute <bra|ket>, where the ket is ``self``.

        This method will dispatch to sub-methods having the format::

            ``def _eval_innerproduct_BraClass(self, **hints):``

        Subclasses should define these methods (one for each BraClass) to
        teach the ket how to take inner products with bras.
        """
        return dispatch_method(self, '_eval_innerproduct', bra, **hints)

    def _apply_operator(self, op, **options):
        """Apply an Operator to this Ket.

        This method will dispatch to methods having the format::

            ``def _apply_operator_OperatorName(op, **options):``

        Subclasses should define these methods (one for each OperatorName) to
        teach the Ket how operators act on it.

        Parameters
        ==========

        op : Operator
            The Operator that is acting on the Ket.
        options : dict
            A dict of key/value pairs that control how the operator is applied
            to the Ket.
        """
        return dispatch_method(self, '_apply_operator', op, **options)
class BraBase(StateBase):
    """Base class for Bras.

    This class defines the dual property and the brackets for printing. This
    is an abstract base class and you should not instantiate it directly,
    instead use Bra.
    """

    # Bracket characters used by the various printers: <psi|
    lbracket = _lbracket
    rbracket = _straight_bracket
    lbracket_ucode = _lbracket_ucode
    rbracket_ucode = _straight_bracket_ucode
    lbracket_latex = r'\left\langle '
    rbracket_latex = r'\right|'

    @classmethod
    def _operators_to_state(self, ops, **options):
        # Build the corresponding ket, then take its dual.
        return self.dual_class().operators_to_state(ops, **options).dual

    def _state_to_operators(self, op_classes, **options):
        # Delegate to the dual ket's implementation.
        return self.dual._state_to_operators(op_classes, **options)

    def _enumerate_state(self, num_states, **options):
        # Enumerate on the ket side and dualize each result.
        return [ket.dual
                for ket in self.dual._enumerate_state(num_states, **options)]

    @classmethod
    def default_args(self):
        return self.dual_class().default_args()

    @classmethod
    def dual_class(self):
        return KetBase

    def __mul__(self, other):
        """BraBase*other"""
        from sympy.physics.quantum.innerproduct import InnerProduct
        # <b|k> is an inner product; anything else falls back to Expr.
        if not isinstance(other, KetBase):
            return Expr.__mul__(self, other)
        return InnerProduct(self, other)

    def __rmul__(self, other):
        """other*BraBase"""
        from sympy.physics.quantum.operator import OuterProduct
        # |k><b| is an outer product; anything else falls back to Expr.
        if not isinstance(other, KetBase):
            return Expr.__rmul__(self, other)
        return OuterProduct(other, self)

    def _represent(self, **options):
        """A default represent that uses the Ket's version."""
        from sympy.physics.quantum.dagger import Dagger
        ket_rep = self.dual._represent(**options)
        return Dagger(ket_rep)
class State(StateBase):
    """General abstract quantum state used as a base class for Ket and Bra.

    Carries no behavior of its own; it only marks the time-independent
    branch of the state hierarchy.
    """
    pass
class Ket(State, KetBase):
    """A general time-independent Ket in quantum mechanics.

    Inherits from State and KetBase. This class should be used as the base
    class for all physical, time-independent Kets in a system. This class
    and its subclasses will be the main classes that users will use for
    expressing Kets in Dirac notation [1]_.

    Parameters
    ==========

    args : tuple
        The list of numbers or parameters that uniquely specify the
        ket. This will usually be its symbol or its quantum numbers. For
        time-dependent state, this will include the time.

    Examples
    ========

    Create a simple Ket and look at its properties::

        >>> from sympy.physics.quantum import Ket, Bra
        >>> from sympy import symbols, I
        >>> k = Ket('psi')
        >>> k
        |psi>
        >>> k.hilbert_space
        H
        >>> k.is_commutative
        False
        >>> k.label
        (psi,)

    Kets know about their associated bra::

        >>> k.dual
        <psi|
        >>> k.dual_class()
        <class 'sympy.physics.quantum.state.Bra'>

    Take a linear combination of two kets::

        >>> k0 = Ket(0)
        >>> k1 = Ket(1)
        >>> 2*I*k0 - 4*k1
        2*I*|0> - 4*|1>

    Compound labels are passed as tuples::

        >>> n, m = symbols('n,m')
        >>> k = Ket(n,m)
        >>> k
        |nm>

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Bra-ket_notation
    """

    @classmethod
    def dual_class(self):
        return Bra
class Bra(State, BraBase):
    """A general time-independent Bra in quantum mechanics.

    Inherits from State and BraBase. A Bra is the dual of a Ket [1]_. This
    class and its subclasses will be the main classes that users will use for
    expressing Bras in Dirac notation.

    Parameters
    ==========

    args : tuple
        The list of numbers or parameters that uniquely specify the
        ket. This will usually be its symbol or its quantum numbers. For
        time-dependent state, this will include the time.

    Examples
    ========

    Create a simple Bra and look at its properties::

        >>> from sympy.physics.quantum import Ket, Bra
        >>> from sympy import symbols, I
        >>> b = Bra('psi')
        >>> b
        <psi|
        >>> b.hilbert_space
        H
        >>> b.is_commutative
        False

    Bras know about their dual Kets::

        >>> b.dual
        |psi>
        >>> b.dual_class()
        <class 'sympy.physics.quantum.state.Ket'>

    Like Kets, Bras can have compound labels and be manipulated in a similar
    manner::

        >>> n, m = symbols('n,m')
        >>> b = Bra(n,m) - I*Bra(m,n)
        >>> b
        -I*<mn| + <nm|

    Symbols in a Bra can be substituted using ``.subs``::

        >>> b.subs(n,m)
        <mm| - I*<mm|

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Bra-ket_notation
    """

    @classmethod
    def dual_class(self):
        return Ket
#-----------------------------------------------------------------------------
# Time dependent states, bras and kets.
#-----------------------------------------------------------------------------
class TimeDepState(StateBase):
    """Base class for a general time-dependent quantum state.

    This class is used as a base class for any time-dependent state. The main
    difference between this class and the time-independent state is that this
    class takes a second argument that is the time in addition to the usual
    label argument.

    Parameters
    ==========

    args : tuple
        The list of numbers or parameters that uniquely specify the ket. This
        will usually be its symbol or its quantum numbers. For time-dependent
        state, this will include the time as the final argument.
    """

    #-------------------------------------------------------------------------
    # Initialization
    #-------------------------------------------------------------------------

    @classmethod
    def default_args(self):
        return ("psi", "t")

    #-------------------------------------------------------------------------
    # Properties
    #-------------------------------------------------------------------------

    @property
    def label(self):
        """The label of the state."""
        # Everything except the trailing time argument.
        return self.args[:-1]

    @property
    def time(self):
        """The time of the state."""
        # The time is always the final argument.
        return self.args[-1]

    #-------------------------------------------------------------------------
    # Printing
    #-------------------------------------------------------------------------

    def _print_time(self, printer, *args):
        return printer._print(self.time, *args)

    # repr and latex forms of the time are identical to the str form.
    _print_time_repr = _print_time
    _print_time_latex = _print_time

    def _print_time_pretty(self, printer, *args):
        return printer._print(self.time, *args)

    def _print_contents(self, printer, *args):
        return '%s;%s' % (self._print_label(printer, *args),
                          self._print_time(printer, *args))

    def _print_label_repr(self, printer, *args):
        return '%s,%s' % (self._print_sequence(self.label, ',', printer, *args),
                          self._print_time_repr(printer, *args))

    def _print_contents_pretty(self, printer, *args):
        label = self._print_label_pretty(printer, *args)
        time = self._print_time_pretty(printer, *args)
        return printer._print_seq((label, time), delimiter=';')

    def _print_contents_latex(self, printer, *args):
        label = self._print_sequence(
            self.label, self._label_separator, printer, *args)
        return '%s;%s' % (label, self._print_time_latex(printer, *args))
class TimeDepKet(TimeDepState, KetBase):
    """General time-dependent Ket in quantum mechanics.

    This inherits from ``TimeDepState`` and ``KetBase`` and is the main class
    that should be used for Kets that vary with time. Its dual is a
    ``TimeDepBra``.

    Parameters
    ==========

    args : tuple
        The list of numbers or parameters that uniquely specify the ket. This
        will usually be its symbol or its quantum numbers. For time-dependent
        state, this will include the time as the final argument.

    Examples
    ========

    Create a TimeDepKet and look at its attributes::

        >>> from sympy.physics.quantum import TimeDepKet
        >>> k = TimeDepKet('psi', 't')
        >>> k
        |psi;t>
        >>> k.time
        t
        >>> k.label
        (psi,)
        >>> k.hilbert_space
        H

    TimeDepKets know about their dual bra::

        >>> k.dual
        <psi;t|
        >>> k.dual_class()
        <class 'sympy.physics.quantum.state.TimeDepBra'>
    """

    @classmethod
    def dual_class(self):
        return TimeDepBra
class TimeDepBra(TimeDepState, BraBase):
    """General time-dependent Bra in quantum mechanics.

    This inherits from TimeDepState and BraBase and is the main class that
    should be used for Bras that vary with time. Its dual is a TimeDepKet.

    Parameters
    ==========

    args : tuple
        The list of numbers or parameters that uniquely specify the ket. This
        will usually be its symbol or its quantum numbers. For time-dependent
        state, this will include the time as the final argument.

    Examples
    ========

        >>> from sympy.physics.quantum import TimeDepBra
        >>> from sympy import symbols, I
        >>> b = TimeDepBra('psi', 't')
        >>> b
        <psi;t|
        >>> b.time
        t
        >>> b.label
        (psi,)
        >>> b.hilbert_space
        H
        >>> b.dual
        |psi;t>
    """

    @classmethod
    def dual_class(self):
        return TimeDepKet
class Wavefunction(Function):
    """Class for representations in continuous bases.

    This class takes an expression and coordinates in its constructor. It can
    be used to easily calculate normalizations and probabilities.

    Parameters
    ==========

    expr : Expr
        The expression representing the functional form of the w.f.
    coords : Symbol or tuple
        The coordinates to be integrated over, and their bounds

    Examples
    ========

    Particle in a box, specifying bounds in the more primitive way of using
    Piecewise:

        >>> from sympy import Symbol, Piecewise, pi, N
        >>> from sympy.functions import sqrt, sin
        >>> from sympy.physics.quantum.state import Wavefunction
        >>> x = Symbol('x', real=True)
        >>> n = 1
        >>> L = 1
        >>> g = Piecewise((0, x < 0), (0, x > L), (sqrt(2//L)*sin(n*pi*x/L), True))
        >>> f = Wavefunction(g, x)
        >>> f.norm
        1
        >>> f.is_normalized
        True
        >>> p = f.prob()
        >>> p(0)
        0
        >>> p(L)
        0
        >>> p(0.5)
        2
        >>> p(0.85*L)
        2*sin(0.85*pi)**2
        >>> N(p(0.85*L))
        0.412214747707527

    Additionally, you can specify the bounds of the function and the indices in
    a more compact way:

        >>> from sympy import symbols, pi, diff
        >>> from sympy.functions import sqrt, sin
        >>> from sympy.physics.quantum.state import Wavefunction
        >>> x, L = symbols('x,L', positive=True)
        >>> n = symbols('n', integer=True, positive=True)
        >>> g = sqrt(2/L)*sin(n*pi*x/L)
        >>> f = Wavefunction(g, (x, 0, L))
        >>> f.norm
        1
        >>> f(L+1)
        0
        >>> f(L-1)
        sqrt(2)*sin(pi*n*(L - 1)/L)/sqrt(L)
        >>> f(-1)
        0
        >>> f(0.85)
        sqrt(2)*sin(0.85*pi*n/L)/sqrt(L)
        >>> f(0.85, n=1, L=1)
        sqrt(2)*sin(0.85*pi)
        >>> f.is_commutative
        False

    All arguments are automatically sympified, so you can define the variables
    as strings rather than symbols:

        >>> expr = x**2
        >>> f = Wavefunction(expr, 'x')
        >>> type(f.variables[0])
        <class 'sympy.core.symbol.Symbol'>

    Derivatives of Wavefunctions will return Wavefunctions:

        >>> diff(f, x)
        Wavefunction(2*x, x)
    """

    # Any passed tuples for coordinates and their bounds need to be
    # converted to Tuples before Function's constructor is called, to
    # avoid errors from calling is_Float in the constructor
    def __new__(cls, *args, **options):
        # Each coordinate is either a bare symbol or a (symbol, lower, upper)
        # tuple; normalize the tuples to sympy Tuples.
        new_args = [Tuple(*arg) if isinstance(arg, tuple) else arg
                    for arg in args]
        return super(Wavefunction, cls).__new__(cls, *new_args, **options)

    def __call__(self, *args, **options):
        """Evaluate the wavefunction at the given coordinate values.

        Returns 0 if any value lies outside its declared bounds; keyword
        arguments substitute for free symbols of the expression (e.g.
        ``f(2, n=1)``).
        """
        var = self.variables
        if len(args) != len(var):
            raise NotImplementedError(
                "Incorrect number of arguments to function!")
        # If a passed value is outside the specified bounds, return 0.
        # BUGFIX: enumerate keeps the argument index paired with its
        # variable; the original code incremented a counter at the loop
        # end, which 'continue' skipped, so later variables were compared
        # against the wrong argument.
        for ct, v in enumerate(var):
            lower, upper = self.limits[v]
            # Do the comparison to limits only if the passed symbol is
            # actually a symbol present in the limits;
            # Had problems with a comparison of x > L
            if isinstance(args[ct], Expr) and \
                    not (lower in args[ct].free_symbols
                         or upper in args[ct].free_symbols):
                continue
            if (args[ct] < lower) == True or (args[ct] > upper) == True:
                return 0
        expr = self.expr
        # Allows user to make a call like f(2, 4, m=1, n=1)
        for symbol in list(expr.free_symbols):
            if str(symbol) in options.keys():
                val = options[str(symbol)]
                expr = expr.subs(symbol, val)
        return expr.subs(zip(var, args))

    def _eval_derivative(self, symbol):
        # Differentiate the functional form; keep the same coordinates.
        expr = self.expr
        deriv = expr._eval_derivative(symbol)
        return Wavefunction(deriv, *self.args[1:])

    def _eval_conjugate(self):
        return Wavefunction(conjugate(self.expr), *self.args[1:])

    def _eval_transpose(self):
        # A scalar-valued function is its own transpose.
        return self

    @property
    def free_symbols(self):
        return self.expr.free_symbols

    @property
    def is_commutative(self):
        """
        Override Function's is_commutative so that order is preserved in
        represented expressions
        """
        return False

    @classmethod
    def eval(self, *args):
        # Never auto-evaluate; a Wavefunction is kept unevaluated.
        return None

    @property
    def variables(self):
        """
        Return the coordinates which the wavefunction depends on

        Examples
        ========

            >>> from sympy.physics.quantum.state import Wavefunction
            >>> from sympy import symbols
            >>> x,y = symbols('x,y')
            >>> f = Wavefunction(x*y, x, y)
            >>> f.variables
            (x, y)
            >>> g = Wavefunction(x*y, x)
            >>> g.variables
            (x,)
        """
        # Coordinates may be bare symbols or (symbol, lower, upper) Tuples.
        var = [g[0] if isinstance(g, Tuple) else g for g in self._args[1:]]
        return tuple(var)

    @property
    def limits(self):
        """
        Return the limits of the coordinates which the w.f. depends on If no
        limits are specified, defaults to ``(-oo, oo)``.

        Examples
        ========

            >>> from sympy.physics.quantum.state import Wavefunction
            >>> from sympy import symbols
            >>> x, y = symbols('x, y')
            >>> f = Wavefunction(x**2, (x, 0, 1))
            >>> f.limits
            {x: (0, 1)}
            >>> f = Wavefunction(x**2, x)
            >>> f.limits
            {x: (-oo, oo)}
            >>> f = Wavefunction(x**2 + y**2, x, (y, -1, 2))
            >>> f.limits
            {x: (-oo, oo), y: (-1, 2)}
        """
        limits = [(g[1], g[2]) if isinstance(g, Tuple) else (-oo, oo)
                  for g in self._args[1:]]
        return dict(zip(self.variables, tuple(limits)))

    @property
    def expr(self):
        """
        Return the expression which is the functional form of the Wavefunction

        Examples
        ========

            >>> from sympy.physics.quantum.state import Wavefunction
            >>> from sympy import symbols
            >>> x, y = symbols('x, y')
            >>> f = Wavefunction(x**2, x)
            >>> f.expr
            x**2
        """
        return self._args[0]

    @property
    def is_normalized(self):
        """
        Returns true if the Wavefunction is properly normalized

        Examples
        ========

            >>> from sympy import symbols, pi
            >>> from sympy.functions import sqrt, sin
            >>> from sympy.physics.quantum.state import Wavefunction
            >>> x, L = symbols('x,L', positive=True)
            >>> n = symbols('n', integer=True, positive=True)
            >>> g = sqrt(2/L)*sin(n*pi*x/L)
            >>> f = Wavefunction(g, (x, 0, L))
            >>> f.is_normalized
            True
        """
        return (self.norm == 1.0)

    @property
    @cacheit
    def norm(self):
        """
        Return the normalization of the specified functional form.

        This function integrates over the coordinates of the Wavefunction, with
        the bounds specified.

        Examples
        ========

            >>> from sympy import symbols, pi
            >>> from sympy.functions import sqrt, sin
            >>> from sympy.physics.quantum.state import Wavefunction
            >>> x, L = symbols('x,L', positive=True)
            >>> n = symbols('n', integer=True, positive=True)
            >>> g = sqrt(2/L)*sin(n*pi*x/L)
            >>> f = Wavefunction(g, (x, 0, L))
            >>> f.norm
            1
            >>> g = sin(n*pi*x/L)
            >>> f = Wavefunction(g, (x, 0, L))
            >>> f.norm
            sqrt(2)*sqrt(L)/2
        """
        # norm = sqrt( integral of |psi|^2 over all coordinates ).
        exp = self.expr*conjugate(self.expr)
        var = self.variables
        limits = self.limits
        for v in var:
            curr_limits = limits[v]
            exp = integrate(exp, (v, curr_limits[0], curr_limits[1]))
        return sqrt(exp)

    def normalize(self):
        """
        Return a normalized version of the Wavefunction

        Examples
        ========

            >>> from sympy import symbols, pi
            >>> from sympy.functions import sqrt, sin
            >>> from sympy.physics.quantum.state import Wavefunction
            >>> x = symbols('x', real=True)
            >>> L = symbols('L', positive=True)
            >>> n = symbols('n', integer=True, positive=True)
            >>> g = sin(n*pi*x/L)
            >>> f = Wavefunction(g, (x, 0, L))
            >>> f.normalize()
            Wavefunction(sqrt(2)*sin(pi*n*x/L)/sqrt(L), (x, 0, L))
        """
        const = self.norm
        if const is oo:
            raise NotImplementedError("The function is not normalizable!")
        else:
            return Wavefunction((const)**(-1)*self.expr, *self.args[1:])

    def prob(self):
        r"""
        Return the absolute magnitude of the w.f., `|\psi(x)|^2`

        Examples
        ========

            >>> from sympy import symbols, pi
            >>> from sympy.functions import sqrt, sin
            >>> from sympy.physics.quantum.state import Wavefunction
            >>> x, L = symbols('x,L', real=True)
            >>> n = symbols('n', integer=True)
            >>> g = sin(n*pi*x/L)
            >>> f = Wavefunction(g, (x, 0, L))
            >>> f.prob()
            Wavefunction(sin(pi*n*x/L)**2, x)
        """
        # NOTE: the bounds are intentionally dropped here (only the bare
        # variables are passed), matching the documented output above.
        return Wavefunction(self.expr*conjugate(self.expr), *self.variables)
|
kaushik94/sympy
|
sympy/physics/quantum/state.py
|
Python
|
bsd-3-clause
| 29,163
|
[
"DIRAC"
] |
399ea47599be53c4a7dcaba058c2ec4c8b16af77a1a78edfdd6e4df40871ad31
|
#!/usr/bin/python
"""Image smoothing filter."""
# build-in modules
# third-party modules
import numpy
from scipy.ndimage.filters import gaussian_filter
# path changes
# own modules
# information
__author__ = "Oskar Maier and others (see below)"
__version__ = "r0.3, 2013-08-23"
__email__ = "oskar.maier@googlemail.com"
__status__ = "Release"
__description__ = "Image smoothing filters."
# code
def gauss_xminus1d(img, sigma, dim=2):
    """
    Applies an (X-1)D Gaussian smoothing to a copy of an XD image, slicing it
    along ``dim``.

    Essentially uses ``scipy.ndimage.filters.gaussian_filter``, but applies it
    to one dimension less than the image has.

    Parameters
    ----------
    img : array_like
        The image to smooth.
    sigma : number
        The sigma, i.e. Gaussian kernel size, in pixels.
    dim : int
        The dimension along which to slice (this dimension itself is not
        smoothed).

    Returns
    -------
    ndarray
        The smoothed image.
    """
    # asarray avoids a copy when possible; numpy.array(..., copy=False)
    # raises a ValueError in NumPy >= 2.0 whenever a copy would be required.
    img = numpy.asarray(img)
    return __xminus1d(img, gaussian_filter, dim, sigma=sigma)
def anisotropic_diffusion(img, niter=1, kappa=50, gamma=0.1, voxelspacing=None, option=1):
    """
    XD Anisotropic diffusion.

    Usage:
        out = anisotropic_diffusion(img, niter, kappa, gamma, voxelspacing, option)

    Arguments:
        img          - input image (will be cast to numpy.float32)
        niter        - number of iterations
        kappa        - conduction coefficient 20-100 ?
        gamma        - max value of .25 for stability
        voxelspacing - tuple, the distance between adjacent pixels in all img.ndim directions
        option       - 1 Perona Malik diffusion equation No 1
                       2 Perona Malik diffusion equation No 2

    Returns:
        out - diffused image.

    kappa controls conduction as a function of gradient. If kappa is low
    small intensity gradients are able to block conduction and hence diffusion
    across step edges. A large value reduces the influence of intensity
    gradients on conduction.

    gamma controls speed of diffusion (you usually want it at a maximum of
    0.25).

    voxelspacing is used to scale the gradients in case the spacing between
    adjacent pixels differs in the x,y and/or z axes.

    Diffusion equation 1 favours high contrast edges over low contrast ones.
    Diffusion equation 2 favours wide regions over smaller ones.

    Reference:
    P. Perona and J. Malik.
    Scale-space and edge detection using ansotropic diffusion.
    IEEE Transactions on Pattern Analysis and Machine Intelligence,
    12(7):629-639, July 1990.

    Original MATLAB code by Peter Kovesi, translated to Python and optimised
    by Alistair Muldal, adapted to arbitrary dimensionality and added to the
    MedPy library by Oskar Maier.
    """
    # define the conduction-gradient function for the chosen equation
    if option == 1:
        def condgradient(delta, spacing):
            return numpy.exp(-(delta/kappa)**2.)/float(spacing)
    elif option == 2:
        def condgradient(delta, spacing):
            return 1./(1.+(delta/kappa)**2.)/float(spacing)
    else:
        # the original code silently produced a NameError later on
        raise ValueError('option must be 1 or 2, got {}'.format(option))

    # initialize output array (float32 working copy of the input)
    out = numpy.array(img, dtype=numpy.float32, copy=True)

    # set default voxel spacing if not supplied
    if voxelspacing is None:
        voxelspacing = tuple([1.] * out.ndim)

    # initialize some internal variables
    deltas = [numpy.zeros_like(out) for _ in range(out.ndim)]

    for _ in range(niter):
        # calculate the forward differences along every axis
        for i in range(out.ndim):
            # NOTE: indexing with a tuple of slices; a plain list is an
            # error in modern NumPy (removed in 1.24)
            slicer = tuple(slice(None, -1) if j == i else slice(None)
                           for j in range(out.ndim))
            deltas[i][slicer] = numpy.diff(out, axis=i)

        # conduction-weighted gradients per axis
        matrices = [condgradient(delta, spacing) * delta
                    for delta, spacing in zip(deltas, voxelspacing)]

        # subtract a copy that has been shifted ('Up/North/West' in the 3D
        # case) by one pixel, yielding the divergence term
        for i in range(out.ndim):
            slicer = tuple(slice(1, None) if j == i else slice(None)
                           for j in range(out.ndim))
            matrices[i][slicer] = numpy.diff(matrices[i], axis=i)

        # update the image
        out += gamma * (numpy.sum(matrices, axis=0))

    return out
def __xminus1d(img, fun, dim, *args, **kwargs):
    """
    Applies the function fun along all X-1D dimensional volumes of the image
    img's dimension dim.

    E.g. you want to apply a gauss filter to each slice of a 3D MRI brain
    image, simply supply the function as fun, the image as img and the
    dimension along which to iterate as dim.

    With *args and **kwargs, arguments can be passed to the function fun.
    """
    slicer = [slice(None)] * img.ndim
    output = []
    for slid in range(img.shape[dim]):
        slicer[dim] = slice(slid, slid + 1)
        # tuple() is required: indexing an ndarray with a list of slices
        # was deprecated in NumPy 1.15 and removed in 1.24
        output.append(fun(numpy.squeeze(img[tuple(slicer)]), *args, **kwargs))
    # stack the processed slices and move the stacking axis back to dim
    return numpy.rollaxis(numpy.asarray(output), 0, dim + 1)
|
kleinfeld/medpy
|
medpy/filter/smoothing.py
|
Python
|
gpl-3.0
| 5,384
|
[
"Gaussian"
] |
11270f0101e07b33c317ac97d082b6c2b915113f532b95e575e7fb013236a1f2
|
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Set up a Lennard-Jones fluid maintained at a fixed temperature by a
Langevin thermostat. The particles in the system are of two types:
type 0 and type 1. Type 0 particles interact with each other via a
repulsive WCA interaction. Type 1 particles neither interact with
themselves nor with type 0 particles. The distribution of minimum
distances between particles of type 0 and type 1 is recorded with
:meth:`~espressomd.analyze.Analysis.distribution`.
See :ref:`Particle distribution`.
"""
import numpy as np
import espressomd
# Abort early (with a clear message) if the core lacks Lennard-Jones support.
required_features = ["LENNARD_JONES"]
espressomd.assert_features(required_features)

print("""
=======================================================
= lj_liquid_distribution.py =
=======================================================
""")

# System parameters
#############################################################
box_l = 10.7437
density = 0.7

# Interaction parameters (repulsive Lennard-Jones)
#############################################################
lj_eps = 1.0
lj_sig = 1.0
lj_cut = 2.5 * lj_sig

# Integration parameters
#############################################################
system = espressomd.System(box_l=[box_l] * 3)
# Fixed seed so particle placement (and thus the run) is reproducible.
np.random.seed(seed=42)
system.time_step = 0.01
system.cell_system.skin = 0.4

# warmup integration (steepest descent)
warm_steps = 20
warm_n_times = 10
# convergence criterion (particles are separated by at least 90% sigma)
min_dist = 0.9 * lj_sig

# integration
int_steps = 1000
int_n_times = 5

#############################################################
# Setup System                                              #
#############################################################

# distribution file
# Parameters for analysis.distribution(): histogram the minimum distances
# between type-0 and type-1 particles into distr_r_bins radial bins.
distr_type_list_a = [0]
distr_type_list_b = [1]
distr_r_min = 0.1
distr_r_max = box_l / 2.0
distr_r_bins = 200
distr_log_flag = False
distr_int_flag = True
distr_r = np.zeros(distr_r_bins)
distr_values = np.zeros(distr_r_bins)

# Interaction setup
#############################################################
# Only the 0-0 pair interacts; type 1 is left non-interacting on purpose.
system.non_bonded_inter[0, 0].lennard_jones.set_params(
    epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, shift="auto")
print("LJ-parameters:")
print(system.non_bonded_inter[0, 0].lennard_jones.get_params())

# Particle setup
#############################################################
volume = box_l**3
n_part = int(volume * density)
# First half of the particles are type 0, second half type 1.
for i in range(n_part):
    if i < n_part / 2.0:
        system.part.add(type=0, pos=np.random.random(3) * system.box_l)
    else:
        system.part.add(type=1, pos=np.random.random(3) * system.box_l)
print(
    f"Simulate {n_part} particles in a cubic box of length {box_l} at density {density}.")
print("Interactions:\n")
act_min_dist = system.analysis.min_dist()
print(f"Start with minimal distance {act_min_dist}")

#############################################################
# Warmup Integration                                        #
#############################################################
print(f"""\
Start warmup integration:
At maximum {warm_n_times} times {warm_steps} steps
Stop if minimal distance is larger than {min_dist}""")
print(system.non_bonded_inter[0, 0].lennard_jones)

# minimize energy using min_dist as the convergence criterion
system.integrator.set_steepest_descent(f_max=0, gamma=1e-3,
                                       max_displacement=lj_sig / 100)
i = 0
while i < warm_n_times and system.analysis.min_dist() < min_dist:
    print(f"minimization: {system.analysis.energy()['total']:+.2e}")
    system.integrator.run(warm_steps)
    i += 1
print(f"minimization: {system.analysis.energy()['total']:+.2e}")
print()
# Switch back to velocity-Verlet for the production run.
system.integrator.set_vv()

# activate thermostat
system.thermostat.set_langevin(kT=1.0, gamma=1.0, seed=42)

# Just to see what else we may get from the C++ core
import pprint
pprint.pprint(system.cell_system.get_state(), width=1)
# pprint.pprint(system.part.__getstate__(), width=1)
pprint.pprint(system.__getstate__())

#############################################################
# Integration                                               #
#############################################################
print(f"\nStart integration: run {int_n_times} times {int_steps} steps")
# Accumulate the type-0/type-1 distance distribution over all runs.
for i in range(int_n_times):
    print(f"run {i} at time={system.time:.2f}")
    system.integrator.run(int_steps)
    r, dist = system.analysis.distribution(
        type_list_a=distr_type_list_a, type_list_b=distr_type_list_b,
        r_min=distr_r_min, r_max=distr_r_max, r_bins=distr_r_bins,
        log_flag=distr_log_flag, int_flag=distr_int_flag)
    distr_r = r
    distr_values += dist
    energies = system.analysis.energy()
    print(energies['total'])
    linear_momentum = system.analysis.linear_momentum()
    print(linear_momentum)

# rescale distribution values and write out data
distr_values /= int_n_times
table = np.column_stack([distr_r, distr_values])
np.savetxt("pylj_liquid_distribution.tsv", table, delimiter='\t',
           fmt='%.5e', header="r,distribution")
# reload: distr_r, distr_values = np.loadtxt("pylj_liquid_distribution.tsv").T

# terminate program
print("\nFinished.")
|
espressomd/espresso
|
samples/lj_liquid_distribution.py
|
Python
|
gpl-3.0
| 5,841
|
[
"ESPResSo"
] |
42ff0ed3d64f153f2103e605f5c684048ca79d1e511bfe4dd4c3ae994ca215e9
|
# encoding: utf-8
"""nanoparticle.py - Window for setting up crystalline nanoparticles.
"""
import gtk
from gettext import gettext as _
from copy import copy
from ase.gui.widgets import pack, cancel_apply_ok, oops, help
from ase.gui.setupwindow import SetupWindow
from ase.gui.pybutton import PyButton
import ase
import ase.data
import numpy as np
# Delayed imports:
# ase.cluster.data
from ase.cluster.cubic import FaceCenteredCubic, BodyCenteredCubic, SimpleCubic
from ase.cluster.hexagonal import HexagonalClosedPacked, Graphite
from ase.cluster import wulff_construction
# Text shown at the top of the nanoparticle setup window (gettext-wrapped).
introtext = _("""\
Create a nanoparticle either by specifying the number of layers, or using the
Wulff construction. Please press the [Help] button for instructions on how to
specify the directions.
WARNING: The Wulff construction currently only works with cubic crystals!
""")
# Text displayed by the window's [Help] button (gettext-wrapped).
helptext = _("""
The nanoparticle module sets up a nano-particle or a cluster with a given
crystal structure.
1) Select the element, the crystal structure and the lattice constant(s).
The [Get structure] button will find the data for a given element.
2) Choose if you want to specify the number of layers in each direction, or if
you want to use the Wulff construction. In the latter case, you must specify
surface energies in each direction, and the size of the cluster.
How to specify the directions:
------------------------------
First time a direction appears, it is interpreted as the entire family of
directions, i.e. (0,0,1) also covers (1,0,0), (-1,0,0) etc. If one of these
directions is specified again, the second specification overrules that specific
direction. For this reason, the order matters and you can rearrange the
directions with the [Up] and [Down] keys. You can also add a new direction,
remember to press [Add] or it will not be included.
Example: (1,0,0) (1,1,1), (0,0,1) would specify the {100} family of directions,
the {111} family and then the (001) direction, overruling the value given for
the whole family of directions.
""")
# %-format template for the generated Python code when the cluster is
# specified by layer counts. Placeholders: import, surfaces, layers,
# latconst, factory, element.
py_template_layers = """
import ase
%(import)s
surfaces = %(surfaces)s
layers = %(layers)s
lc = %(latconst)s
atoms = %(factory)s('%(element)s', surfaces, layers, latticeconstant=lc)
# OPTIONAL: Cast to ase.Atoms object, discarding extra information:
# atoms = ase.Atoms(atoms)
"""
# %-format template for the generated Python code when the cluster is built
# via the Wulff construction. Placeholders: surfaces, energies, latconst,
# natoms, element, structure, rounding.
py_template_wulff = """
import ase
from ase.cluster import wulff_construction
surfaces = %(surfaces)s
esurf = %(energies)s
lc = %(latconst)s
size = %(natoms)s # Number of atoms
atoms = wulff_construction('%(element)s', surfaces, esurf, size, '%(structure)s',
rounding='%(rounding)s', latticeconstant=lc)
# OPTIONAL: Cast to ase.Atoms object, discarding extra information:
# atoms = ase.Atoms(atoms)
"""
class SetupNanoparticle(SetupWindow):
    "Window for setting up a nanoparticle."
    # Structures: Abbreviation, name, 4-index (boolean), two lattice const (bool), factory
    structure_data = (('fcc', _('Face centered cubic (fcc)'), False, False, FaceCenteredCubic),
                      ('bcc', _('Body centered cubic (bcc)'), False, False, BodyCenteredCubic),
                      ('sc', _('Simple cubic (sc)'), False, False, SimpleCubic),
                      ('hcp', _('Hexagonal closed-packed (hcp)'), True, True, HexagonalClosedPacked),
                      ('graphite', _('Graphite'), True, True, Graphite),
                      )
    #NB: HCP is broken!
    # A list of import statements for the Python window.
    import_names = {'fcc': 'from ase.cluster.cubic import FaceCenteredCubic',
                    'bcc': 'from ase.cluster.cubic import BodyCenteredCubic',
                    'sc': 'from ase.cluster.cubic import SimpleCubic',
                    'hcp': 'from ase.cluster.hexagonal import HexagonalClosedPacked',
                    'graphite': 'from ase.cluster.hexagonal import Graphite',
                    }
    # Default layer specifications for the different structures.
    default_layers = {'fcc': [( (1,0,0), 6),
                              ( (1,1,0), 9),
                              ( (1,1,1), 5)],
                      'bcc': [( (1,0,0), 6),
                              ( (1,1,0), 9),
                              ( (1,1,1), 5)],
                      'sc': [( (1,0,0), 6),
                             ( (1,1,0), 9),
                             ( (1,1,1), 5)],
                      'hcp': [( (0,0,0,1), 5),
                              ( (1,0,-1,0), 5)],
                      'graphite': [( (0,0,0,1), 5),
                                   ( (1,0,-1,0), 5)]
                      }

    def __init__(self, gui):
        """Build the window; `gui` is the main ASE-GUI object that receives
        the created Atoms via gui.new_atoms()."""
        SetupWindow.__init__(self)
        self.set_title(_("Nanoparticle"))
        self.atoms = None
        # Suppress update() callbacks while widgets are being created.
        self.no_update = True
        vbox = gtk.VBox()
        # Intoductory text
        self.packtext(vbox, introtext)
        # Choose the element
        label = gtk.Label(_("Element: "))
        label.set_alignment(0.0, 0.2)
        element = gtk.Entry(max=3)
        self.element = element
        lattice_button = gtk.Button(_("Get structure"))
        lattice_button.connect('clicked', self.set_structure_data)
        self.elementinfo = gtk.Label(" ")
        pack(vbox, [label, element, self.elementinfo, lattice_button], end=True)
        self.element.connect('activate', self.update)
        self.legal_element = False
        # The structure and lattice constant
        label = gtk.Label(_("Structure: "))
        self.structure = gtk.combo_box_new_text()
        self.list_of_structures = []
        self.needs_4index = {}
        self.needs_2lat = {}
        self.factory = {}
        for abbrev, name, n4, c, factory in self.structure_data:
            self.structure.append_text(name)
            self.list_of_structures.append(abbrev)
            self.needs_4index[abbrev] = n4
            self.needs_2lat[abbrev] = c
            self.factory[abbrev] = factory
        self.structure.set_active(0)
        self.fourindex = self.needs_4index[self.list_of_structures[0]]
        self.structure.connect('changed', self.update_structure)
        label2 = gtk.Label(_("Lattice constant: a ="))
        self.lattice_const_a = gtk.Adjustment(3.0, 0.0, 1000.0, 0.01)
        self.lattice_const_c = gtk.Adjustment(5.0, 0.0, 1000.0, 0.01)
        self.lattice_box_a = gtk.SpinButton(self.lattice_const_a, 10.0, 3)
        self.lattice_box_c = gtk.SpinButton(self.lattice_const_c, 10.0, 3)
        self.lattice_box_a.numeric = True
        self.lattice_box_c.numeric = True
        self.lattice_label_c = gtk.Label(" c =")
        pack(vbox, [label, self.structure])
        pack(vbox, [label2, self.lattice_box_a,
                    self.lattice_label_c, self.lattice_box_c])
        # The c lattice constant is only shown for hcp/graphite structures.
        self.lattice_label_c.hide()
        self.lattice_box_c.hide()
        self.lattice_const_a.connect('value-changed', self.update)
        self.lattice_const_c.connect('value-changed', self.update)
        # Choose specification method
        label = gtk.Label(_("Method: "))
        self.method = gtk.combo_box_new_text()
        for meth in (_("Layer specification"), _("Wulff construction")):
            self.method.append_text(meth)
        self.method.set_active(0)
        self.method.connect('changed', self.update_gui_method)
        pack(vbox, [label, self.method])
        pack(vbox, gtk.Label(""))
        self.old_structure = None
        frame = gtk.Frame()
        pack(vbox, frame)
        framebox = gtk.VBox()
        frame.add(framebox)
        framebox.show()
        self.layerlabel = gtk.Label("Missing text")  # Filled in later
        pack(framebox, [self.layerlabel])
        # This box will contain a single table that is replaced when
        # the list of directions is changed.
        self.direction_table_box = gtk.VBox()
        pack(framebox, self.direction_table_box)
        pack(self.direction_table_box,
             gtk.Label(_("Dummy placeholder object")))
        pack(framebox, gtk.Label(""))
        pack(framebox, [gtk.Label(_("Add new direction:"))])
        # Widgets for entering a new (possibly 4-index) direction.
        self.newdir_label = []
        self.newdir_box = []
        self.newdir_index = []
        packlist = []
        for txt in ('(', ', ', ', ', ', '):
            self.newdir_label.append(gtk.Label(txt))
            adj = gtk.Adjustment(0, -100, 100, 1)
            self.newdir_box.append(gtk.SpinButton(adj, 1, 0))
            self.newdir_index.append(adj)
            packlist.append(self.newdir_label[-1])
            packlist.append(self.newdir_box[-1])
        self.newdir_layers = gtk.Adjustment(5, 0, 100, 1)
        self.newdir_layers_box = gtk.SpinButton(self.newdir_layers, 1, 0)
        self.newdir_esurf = gtk.Adjustment(1.0, 0, 1000.0, 0.1)
        self.newdir_esurf_box = gtk.SpinButton(self.newdir_esurf, 10, 3)
        addbutton = gtk.Button(_("Add"))
        addbutton.connect('clicked', self.row_add)
        packlist.extend([gtk.Label("): "),
                         self.newdir_layers_box,
                         self.newdir_esurf_box,
                         gtk.Label(" "),
                         addbutton])
        pack(framebox, packlist)
        # NOTE(review): this button is created and connected but never packed
        # into a container - confirm whether it should be visible somewhere.
        self.defaultbutton = gtk.Button(_("Set all directions to default "
                                          "values"))
        self.defaultbutton.connect('clicked', self.default_direction_table)
        self.default_direction_table()
        # Extra widgets for the Wulff construction
        self.wulffbox = gtk.VBox()
        pack(vbox, self.wulffbox)
        label = gtk.Label(_("Particle size: "))
        self.size_n_radio = gtk.RadioButton(None, _("Number of atoms: "))
        self.size_n_radio.set_active(True)
        self.size_n_adj = gtk.Adjustment(100, 1, 100000, 1)
        self.size_n_spin = gtk.SpinButton(self.size_n_adj, 0, 0)
        # NOTE(review): the spin button holds a diameter in Angstrom (see
        # update_size_n/update_size_dia) although the label says "Volume"
        # with a cubed unit - confirm against upstream.
        self.size_dia_radio = gtk.RadioButton(self.size_n_radio,
                                              _("Volume: "))
        self.size_dia_adj = gtk.Adjustment(1.0, 0, 100.0, 0.1)
        self.size_dia_spin = gtk.SpinButton(self.size_dia_adj, 10.0, 2)
        pack(self.wulffbox, [label, self.size_n_radio, self.size_n_spin,
                             gtk.Label(" "), self.size_dia_radio, self.size_dia_spin,
                             gtk.Label(_(u"ų"))])
        self.size_n_radio.connect("toggled", self.update_gui_size)
        self.size_dia_radio.connect("toggled", self.update_gui_size)
        self.size_n_adj.connect("value-changed", self.update_size_n)
        self.size_dia_adj.connect("value-changed", self.update_size_dia)
        label = gtk.Label(_("Rounding: If exact size is not possible, "
                            "choose the size"))
        pack(self.wulffbox, [label])
        self.round_above = gtk.RadioButton(None, _("above  "))
        self.round_below = gtk.RadioButton(self.round_above, _("below  "))
        self.round_closest = gtk.RadioButton(self.round_above, _("closest  "))
        self.round_closest.set_active(True)
        butbox = gtk.HButtonBox()
        self.smaller_button = gtk.Button(_("Smaller"))
        self.larger_button = gtk.Button(_("Larger"))
        self.smaller_button.connect('clicked', self.wulff_smaller)
        self.larger_button.connect('clicked', self.wulff_larger)
        pack(butbox, [self.smaller_button, self.larger_button])
        buts = [self.round_above, self.round_below, self.round_closest]
        for b in buts:
            b.connect("toggled", self.update)
        buts.append(butbox)
        pack(self.wulffbox, buts, end=True)
        # Information
        pack(vbox, gtk.Label(""))
        infobox = gtk.VBox()
        label1 = gtk.Label(_("Number of atoms: "))
        self.natoms_label = gtk.Label("-")
        label2 = gtk.Label(_("   Approx. diameter: "))
        self.dia1_label = gtk.Label("-")
        pack(infobox, [label1, self.natoms_label, label2, self.dia1_label])
        pack(infobox, gtk.Label(""))
        infoframe = gtk.Frame(_("Information about the created cluster:"))
        infoframe.add(infobox)
        infobox.show()
        pack(vbox, infoframe)
        # Buttons
        self.pybut = PyButton(_("Creating a nanoparticle."))
        self.pybut.connect('clicked', self.makeatoms)
        helpbut = help(helptext)
        buts = cancel_apply_ok(cancel=lambda widget: self.destroy(),
                               apply=self.apply,
                               ok=self.ok)
        pack(vbox, [self.pybut, helpbut, buts], end=True, bottom=True)
        self.auto = gtk.CheckButton(_("Automatic Apply"))
        fr = gtk.Frame()
        fr.add(self.auto)
        fr.show_all()
        pack(vbox, [fr], end=True, bottom=True)
        # Finalize setup
        self.update_structure()
        self.update_gui_method()
        self.add(vbox)
        vbox.show()
        self.show()
        self.gui = gui
        self.no_update = False

    def default_direction_table(self, widget=None):
        "Set default directions and values for the current crystal structure."
        self.direction_table = []
        struct = self.get_structure()
        for direction, layers in self.default_layers[struct]:
            # adj1 holds the number of layers, adj2 the surface energy.
            adj1 = gtk.Adjustment(layers, -100, 100, 1)
            adj2 = gtk.Adjustment(1.0, -1000.0, 1000.0, 0.1)
            adj1.connect("value-changed", self.update)
            adj2.connect("value-changed", self.update)
            self.direction_table.append([direction, adj1, adj2])
        self.update_direction_table()

    def update_direction_table(self):
        "Update the part of the GUI containing the table of directions."
        # Discard old table
        oldwidgets = self.direction_table_box.get_children()
        assert len(oldwidgets) == 1
        oldwidgets[0].hide()
        self.direction_table_box.remove(oldwidgets[0])
        del oldwidgets  # It should now be gone
        tbl = gtk.Table(len(self.direction_table)+1, 7)
        pack(self.direction_table_box, [tbl])
        for i, data in enumerate(self.direction_table):
            tbl.attach(gtk.Label("%s: " % (str(data[0]),)),
                       0, 1, i, i+1)
            if self.method.get_active():
                # Wulff construction: show the surface-energy spin button.
                spin = gtk.SpinButton(data[2], 1.0, 3)
            else:
                # Layers: show the layer-count spin button.
                spin = gtk.SpinButton(data[1], 1, 0)
            tbl.attach(spin, 1, 2, i, i+1)
            tbl.attach(gtk.Label(" "), 2, 3, i, i+1)
            but = gtk.Button(_("Up"))
            but.connect("clicked", self.row_swap_next, i-1)
            if i == 0:
                but.set_sensitive(False)
            tbl.attach(but, 3, 4, i, i+1)
            but = gtk.Button(_("Down"))
            but.connect("clicked", self.row_swap_next, i)
            if i == len(self.direction_table)-1:
                but.set_sensitive(False)
            tbl.attach(but, 4, 5, i, i+1)
            but = gtk.Button(_("Delete"))
            but.connect("clicked", self.row_delete, i)
            if len(self.direction_table) == 1:
                # Never allow deleting the last direction.
                but.set_sensitive(False)
            tbl.attach(but, 5, 6, i, i+1)
        tbl.show_all()
        self.update()

    def get_structure(self):
        "Returns the crystal structure chosen by the user."
        return self.list_of_structures[self.structure.get_active()]

    def update_structure(self, widget=None):
        "Called when the user changes the structure."
        s = self.get_structure()
        if s != self.old_structure:
            old4 = self.fourindex
            self.fourindex = self.needs_4index[s]
            if self.fourindex != old4:
                # The table of directions is invalid.
                self.default_direction_table()
            self.old_structure = s
            if self.needs_2lat[s]:
                self.lattice_label_c.show()
                self.lattice_box_c.show()
            else:
                self.lattice_label_c.hide()
                self.lattice_box_c.hide()
            if self.fourindex:
                self.newdir_label[3].show()
                self.newdir_box[3].show()
            else:
                self.newdir_label[3].hide()
                self.newdir_box[3].hide()
        self.update()

    def update_gui_method(self, widget=None):
        "Switch between layer specification and Wulff construction."
        self.update_direction_table()
        if self.method.get_active():
            self.wulffbox.show()
            self.layerlabel.set_text(_("Surface energies (as energy/area, "
                                       "NOT per atom):"))
            self.newdir_layers_box.hide()
            self.newdir_esurf_box.show()
        else:
            self.wulffbox.hide()
            self.layerlabel.set_text(_("Number of layers:"))
            self.newdir_layers_box.show()
            self.newdir_esurf_box.hide()
        self.update()

    def wulff_smaller(self, widget=None):
        "Make a smaller Wulff construction."
        n = len(self.atoms)
        self.size_n_radio.set_active(True)
        self.size_n_adj.value = n-1
        self.round_below.set_active(True)
        self.apply()

    def wulff_larger(self, widget=None):
        "Make a larger Wulff construction."
        n = len(self.atoms)
        self.size_n_radio.set_active(True)
        self.size_n_adj.value = n+1
        self.round_above.set_active(True)
        self.apply()

    def row_add(self, widget=None):
        "Add a row to the list of directions."
        if self.fourindex:
            n = 4
        else:
            n = 3
        idx = tuple( [int(a.value) for a in self.newdir_index[:n]] )
        if not np.array(idx).any():
            oops(_("At least one index must be non-zero"))
            return
        if n == 4 and np.array(idx)[:3].sum() != 0:
            # BUGFIX: the original passed two arguments to gettext's _(),
            # which raises TypeError; oops() takes the two strings instead.
            oops(_("Invalid hexagonal indices"),
                 _("The sum of the first three numbers must be zero"))
            return
        adj1 = gtk.Adjustment(self.newdir_layers.value, -100, 100, 1)
        adj2 = gtk.Adjustment(self.newdir_esurf.value, -1000.0, 1000.0, 0.1)
        adj1.connect("value-changed", self.update)
        adj2.connect("value-changed", self.update)
        self.direction_table.append([idx, adj1, adj2])
        self.update_direction_table()

    def row_delete(self, widget, row):
        "Delete a row from the list of directions."
        del self.direction_table[row]
        self.update_direction_table()

    def row_swap_next(self, widget, row):
        "Swap a row with the following one (used by [Up]/[Down])."
        dt = self.direction_table
        dt[row], dt[row+1] = dt[row+1], dt[row]
        self.update_direction_table()

    def update_gui_size(self, widget=None):
        "Update gui when the cluster size specification changes."
        self.size_n_spin.set_sensitive(self.size_n_radio.get_active())
        self.size_dia_spin.set_sensitive(self.size_dia_radio.get_active())

    def update_size_n(self, widget=None):
        "Recompute the diameter field from the number of atoms."
        if not self.size_n_radio.get_active():
            return
        at_vol = self.get_atomic_volume()
        # Diameter of a sphere containing size_n atoms of volume at_vol.
        dia = 2.0 * (3 * self.size_n_adj.value * at_vol / (4 * np.pi))**(1.0/3)
        self.size_dia_adj.value = dia
        self.update()

    def update_size_dia(self, widget=None):
        "Recompute the number of atoms from the diameter field."
        if not self.size_dia_radio.get_active():
            return
        at_vol = self.get_atomic_volume()
        # Number of atoms in a sphere of the given diameter.
        n = round(np.pi / 6 * self.size_dia_adj.value**3 / at_vol)
        self.size_n_adj.value = n
        self.update()

    def update(self, *args):
        "Re-validate the element and (if auto-apply is on) rebuild the atoms."
        if self.no_update:
            return
        self.update_element()
        if self.auto.get_active():
            self.makeatoms()
            if self.atoms is not None:
                self.gui.new_atoms(self.atoms)
        else:
            self.clearatoms()
        self.makeinfo()

    def set_structure_data(self, *args):
        "Called when the user presses [Get structure]."
        if not self.update_element():
            oops(_("Invalid element."))
            return
        z = ase.data.atomic_numbers[self.legal_element]
        ref = ase.data.reference_states[z]
        if ref is None:
            structure = None
        else:
            structure = ref['symmetry']
        if ref is None or not structure in self.list_of_structures:
            # BUGFIX: the original passed two arguments to gettext's _(),
            # which raises TypeError; oops() takes the two strings instead.
            oops(_("Unsupported or unknown structure"),
                 _("Element = %s, structure = %s") % (self.legal_element,
                                                     structure))
            return
        for i, s in enumerate(self.list_of_structures):
            if structure == s:
                self.structure.set_active(i)
        a = ref['a']
        self.lattice_const_a.set_value(a)
        self.fourindex = self.needs_4index[structure]
        if self.fourindex:
            try:
                c = ref['c']
            except KeyError:
                c = ref['c/a'] * a
            self.lattice_const_c.set_value(c)
            self.lattice_label_c.show()
            self.lattice_box_c.show()
        else:
            self.lattice_label_c.hide()
            self.lattice_box_c.hide()

    def makeatoms(self, *args):
        "Make the atoms according to the current specification."
        if not self.update_element():
            self.clearatoms()
            self.makeinfo()
            return False
        assert self.legal_element is not None
        struct = self.list_of_structures[self.structure.get_active()]
        if self.needs_2lat[struct]:
            # a and c lattice constants
            lc = {'a': self.lattice_const_a.value,
                  'c': self.lattice_const_c.value}
            lc_str = str(lc)
        else:
            lc = self.lattice_const_a.value
            lc_str = "%.5f" % (lc,)
        if self.method.get_active() == 0:
            # Layer-by-layer specification
            surfaces = [x[0] for x in self.direction_table]
            layers = [int(x[1].value) for x in self.direction_table]
            self.atoms = self.factory[struct](self.legal_element, copy(surfaces),
                                              layers, latticeconstant=lc)
            imp = self.import_names[struct]
            self.pybut.python = py_template_layers % {'import': imp,
                                                      'element': self.legal_element,
                                                      'surfaces': str(surfaces),
                                                      'layers': str(layers),
                                                      'latconst': lc_str,
                                                      'factory': imp.split()[-1]
                                                      }
        else:
            # Wulff construction
            assert self.method.get_active() == 1
            surfaces = [x[0] for x in self.direction_table]
            surfaceenergies = [x[2].value for x in self.direction_table]
            self.update_size_dia()
            if self.round_above.get_active():
                rounding = "above"
            elif self.round_below.get_active():
                rounding = "below"
            elif self.round_closest.get_active():
                rounding = "closest"
            else:
                raise RuntimeError("No rounding!")
            self.atoms = wulff_construction(self.legal_element, surfaces,
                                            surfaceenergies,
                                            self.size_n_adj.value,
                                            self.factory[struct],
                                            rounding, lc)
            self.pybut.python = py_template_wulff % {'element': self.legal_element,
                                                     'surfaces': str(surfaces),
                                                     'energies': str(surfaceenergies),
                                                     'latconst': lc_str,
                                                     'natoms': self.size_n_adj.value,
                                                     'structure': struct,
                                                     'rounding': rounding
                                                     }
        self.makeinfo()

    def clearatoms(self):
        "Discard the current atoms and the associated Python code."
        self.atoms = None
        self.pybut.python = None

    def get_atomic_volume(self):
        "Return the volume per atom for the chosen structure (Angstrom^3)."
        s = self.list_of_structures[self.structure.get_active()]
        a = self.lattice_const_a.value
        c = self.lattice_const_c.value
        if s == 'fcc':
            return a**3 / 4
        elif s == 'bcc':
            return a**3 / 2
        elif s == 'sc':
            return a**3
        elif s == 'hcp':
            return np.sqrt(3.0)/2 * a * a * c / 2
        elif s == 'graphite':
            return np.sqrt(3.0)/2 * a * a * c / 4
        else:
            raise RuntimeError("Unknown structure: "+s)

    def makeinfo(self):
        """Fill in information field about the atoms.
        Also turns the Wulff construction buttons [Larger] and
        [Smaller] on and off.
        """
        if self.atoms is None:
            self.natoms_label.set_label("-")
            self.dia1_label.set_label("-")
            self.smaller_button.set_sensitive(False)
            self.larger_button.set_sensitive(False)
        else:
            self.natoms_label.set_label(str(len(self.atoms)))
            at_vol = self.get_atomic_volume()
            dia = 2 * (3 * len(self.atoms) * at_vol / (4 * np.pi))**(1.0/3.0)
            self.dia1_label.set_label(_(u"%.1f Å") % (dia,))
            self.smaller_button.set_sensitive(True)
            self.larger_button.set_sensitive(True)

    def apply(self, *args):
        "Build the atoms and hand them to the main GUI; report failure."
        self.makeatoms()
        if self.atoms is not None:
            self.gui.new_atoms(self.atoms)
            return True
        else:
            oops(_("No valid atoms."),
                 _("You have not (yet) specified a consistent set of "
                   "parameters."))
            return False

    def ok(self, *args):
        "Apply and close the window on success."
        if self.apply():
            self.destroy()
|
grhawk/ASE
|
tools/ase/gui/nanoparticle.py
|
Python
|
gpl-2.0
| 25,943
|
[
"ASE",
"CRYSTAL"
] |
6507914c5490a71c5600f7c3d0ae8273188ff7d0ce6da93e19a128a93985396e
|
"""
local path implementation.
"""
from __future__ import with_statement
from contextlib import contextmanager
import sys, os, re, atexit, io
import py
from py._path import common
from py._path.common import iswin32
from stat import S_ISLNK, S_ISDIR, S_ISREG
from os.path import abspath, normpath, isabs, exists, isdir, isfile, islink
if sys.version_info > (3,0):
    def map_as_list(func, iter):
        """Apply *func* to every item and return a list (py3 map is lazy)."""
        return [func(item) for item in iter]
else:
    # On Python 2 the builtin already returns a list.
    map_as_list = map
class Stat(object):
    """Wrap an os.stat()/os.lstat() result for a path.

    Attribute access without the ``st_`` prefix is forwarded to the
    underlying stat result (e.g. ``stat.size`` -> ``st_size``).
    """
    def __getattr__(self, name):
        # Forward e.g. .size/.mode/.uid to st_size/st_mode/st_uid.
        return getattr(self._osstatresult, "st_" + name)

    def __init__(self, path, osstatresult):
        self.path = path
        self._osstatresult = osstatresult

    @property
    def owner(self):
        """ return owner name of file. """
        if iswin32:
            raise NotImplementedError("XXX win32")
        import pwd
        entry = py.error.checked_call(pwd.getpwuid, self.uid)
        return entry[0]

    @property
    def group(self):
        """ return group name of file. """
        if iswin32:
            raise NotImplementedError("XXX win32")
        import grp
        entry = py.error.checked_call(grp.getgrgid, self.gid)
        return entry[0]

    def isdir(self):
        return S_ISDIR(self._osstatresult.st_mode)

    def isfile(self):
        return S_ISREG(self._osstatresult.st_mode)

    def islink(self):
        # BUGFIX: the original also issued self.path.lstat() and discarded
        # the result - a wasteful extra syscall (and a crash source when
        # the path disappears meanwhile).  Only the wrapped mode matters:
        # it reports a link exactly when this Stat came from lstat().
        return S_ISLNK(self._osstatresult.st_mode)
class PosixPath(common.PathBase):
    # POSIX-only mix-in: ownership and link operations for LocalPath.

    def chown(self, user, group, rec=0):
        """ change ownership to the given user and group.
            user and group may be specified by a number or
            by a name.  if rec is True change ownership
            recursively.
        """
        uid = getuserid(user)
        gid = getgroupid(group)
        if rec:
            # chown children first; links themselves are skipped
            # (check(link=0) is False for symlinks).
            for x in self.visit(rec=lambda x: x.check(link=0)):
                if x.check(link=0):
                    py.error.checked_call(os.chown, str(x), uid, gid)
        py.error.checked_call(os.chown, str(self), uid, gid)

    def readlink(self):
        """ return value of a symbolic link. """
        return py.error.checked_call(os.readlink, self.strpath)

    def mklinkto(self, oldname):
        """ posix style hard link to another name. """
        py.error.checked_call(os.link, str(oldname), str(self))

    def mksymlinkto(self, value, absolute=1):
        """ create a symbolic link with the given value (pointing to another name). """
        if absolute:
            py.error.checked_call(os.symlink, str(value), self.strpath)
        else:
            base = self.common(value)
            # with posix local paths '/' is always a common base
            relsource = self.__class__(value).relto(base)
            reldest = self.relto(base)
            # climb up from the link location to the common base,
            # then descend to the target.
            n = reldest.count(self.sep)
            target = self.sep.join(('..', )*n + (relsource, ))
            py.error.checked_call(os.symlink, target, self.strpath)
def getuserid(user):
    """Return the numeric uid for *user*, which may be a name or a uid."""
    import pwd
    if isinstance(user, int):
        return user
    return pwd.getpwnam(user)[2]
def getgroupid(group):
    """Return the numeric gid for *group*, which may be a name or a gid."""
    import grp
    if isinstance(group, int):
        return group
    return grp.getgrnam(group)[2]
FSBase = not iswin32 and PosixPath or common.PathBase
class LocalPath(FSBase):
""" object oriented interface to os.path and other local filesystem
related information.
"""
    class ImportMismatchError(ImportError):
        """ raised on pyimport() if there is a mismatch of __file__'s"""
    # platform path separator used when composing strpath values
    sep = os.sep
    class Checkers(common.Checkers):
        # Backend for LocalPath.check(); caches one stat() result per
        # Checkers instance so multiple keyword tests cost one syscall.
        def _stat(self):
            try:
                return self._statcache
            except AttributeError:
                try:
                    self._statcache = self.path.stat()
                except py.error.ELOOP:
                    # symlink loop: fall back to lstat so the entry
                    # can still be inspected
                    self._statcache = self.path.lstat()
                return self._statcache
        def dir(self):
            # directory test on the followed (stat) result
            return S_ISDIR(self._stat().mode)
        def file(self):
            # regular-file test on the followed (stat) result
            return S_ISREG(self._stat().mode)
        def exists(self):
            # truthy Stat object if present; stat() raises when missing
            return self._stat()
        def link(self):
            # must use lstat so the link itself is examined, not its target
            st = self.path.lstat()
            return S_ISLNK(st.mode)
def __init__(self, path=None, expanduser=False):
""" Initialize and return a local Path instance.
Path can be relative to the current directory.
If path is None it defaults to the current working directory.
If expanduser is True, tilde-expansion is performed.
Note that Path instances always carry an absolute path.
Note also that passing in a local path object will simply return
the exact same path object. Use new() to get a new copy.
"""
if path is None:
self.strpath = py.error.checked_call(os.getcwd)
elif isinstance(path, common.PathBase):
self.strpath = path.strpath
elif isinstance(path, py.builtin._basestring):
if expanduser:
path = os.path.expanduser(path)
self.strpath = abspath(path)
else:
raise ValueError("can only pass None, Path instances "
"or non-empty strings to LocalPath")
def __hash__(self):
return hash(self.strpath)
def __eq__(self, other):
s1 = self.strpath
s2 = getattr(other, "strpath", other)
if iswin32:
s1 = s1.lower()
try:
s2 = s2.lower()
except AttributeError:
return False
return s1 == s2
def __ne__(self, other):
return not (self == other)
def __lt__(self, other):
return self.strpath < getattr(other, "strpath", other)
def __gt__(self, other):
return self.strpath > getattr(other, "strpath", other)
    def samefile(self, other):
        """ return True if 'other' references the same file as 'self'.
        """
        other = getattr(other, "strpath", other)
        # normalize to an absolute string path before comparing
        if not isabs(other):
            other = abspath(other)
        if self == other:
            return True
        if iswin32:
            return False  # there is no samefile
        return py.error.checked_call(
                os.path.samefile, self.strpath, other)
    def remove(self, rec=1, ignore_errors=False):
        """ remove a file or directory (or a directory tree if rec=1).
        if ignore_errors is True, errors while removing directories will
        be ignored.
        """
        if self.check(dir=1, link=0):
            # real directory (not a symlink to one)
            if rec:
                # force remove of readonly files on windows
                if iswin32:
                    self.chmod(448, rec=1)  # octcal 0700
                py.error.checked_call(py.std.shutil.rmtree, self.strpath,
                    ignore_errors=ignore_errors)
            else:
                # non-recursive: only succeeds on an empty directory
                py.error.checked_call(os.rmdir, self.strpath)
        else:
            # file or symlink
            if iswin32:
                self.chmod(448)  # octcal 0700
            py.error.checked_call(os.remove, self.strpath)
    def computehash(self, hashtype="md5", chunksize=524288):
        """ return hexdigest of hashvalue for this file. """
        try:
            try:
                import hashlib as mod
            except ImportError:
                # pre-2.5 Pythons: fall back to the md5/sha modules
                if hashtype == "sha1":
                    hashtype = "sha"
                mod = __import__(hashtype)
            hash = getattr(mod, hashtype)()
        except (AttributeError, ImportError):
            raise ValueError("Don't know how to compute %r hash" %(hashtype,))
        f = self.open('rb')
        try:
            # stream the file in chunks to bound memory use
            while 1:
                buf = f.read(chunksize)
                if not buf:
                    return hash.hexdigest()
                hash.update(buf)
        finally:
            f.close()
    def new(self, **kw):
        """ create a modified version of this path.
            the following keyword arguments modify various path parts::
              a:/some/path/to/a/file.ext
              xx                           drive
              xxxxxxxxxxxxxxxxx            dirname
                                xxxxxxxx   basename
                                xxxx       purebasename
                                     xxx   ext
        """
        obj = object.__new__(self.__class__)
        if not kw:
            # no changes requested: plain copy
            obj.strpath = self.strpath
            return obj
        drive, dirname, basename, purebasename,ext = self._getbyspec(
             "drive,dirname,basename,purebasename,ext")
        if 'basename' in kw:
            # basename conflicts with purebasename/ext (it is their union)
            if 'purebasename' in kw or 'ext' in kw:
                raise ValueError("invalid specification %r" % kw)
        else:
            pb = kw.setdefault('purebasename', purebasename)
            try:
                ext = kw['ext']
            except KeyError:
                pass
            else:
                # normalize a caller-supplied extension to start with '.'
                if ext and not ext.startswith('.'):
                    ext = '.' + ext
            kw['basename'] = pb + ext
        if ('dirname' in kw and not kw['dirname']):
            # empty dirname means "at the drive root"
            kw['dirname'] = drive
        else:
            kw.setdefault('dirname', dirname)
        kw.setdefault('sep', self.sep)
        obj.strpath = normpath(
            "%(dirname)s%(sep)s%(basename)s" % kw)
        return obj
    def _getbyspec(self, spec):
        """ see new for what 'spec' can be. """
        # spec is a comma-separated list of part names; returns the
        # corresponding values in the same order.
        res = []
        parts = self.strpath.split(self.sep)
        args = filter(None, spec.split(',') )
        append = res.append
        for name in args:
            if name == 'drive':
                # first component ('' on posix, 'c:' on windows)
                append(parts[0])
            elif name == 'dirname':
                append(self.sep.join(parts[:-1]))
            else:
                basename = parts[-1]
                if name == 'basename':
                    append(basename)
                else:
                    # split basename at the LAST dot into stem + extension
                    i = basename.rfind('.')
                    if i == -1:
                        purebasename, ext = basename, ''
                    else:
                        purebasename, ext = basename[:i], basename[i:]
                    if name == 'purebasename':
                        append(purebasename)
                    elif name == 'ext':
                        append(ext)
                    else:
                        raise ValueError("invalid part specification %r" % name)
        return res
    def join(self, *args, **kwargs):
        """ return a new path by appending all 'args' as path
        components.  if abs=1 is used restart from root if any
        of the args is an absolute path.
        """
        sep = self.sep
        strargs = [getattr(arg, "strpath", arg) for arg in args]
        strpath = self.strpath
        if kwargs.get('abs'):
            # restart from the LAST absolute argument (scan from the right)
            newargs = []
            for arg in reversed(strargs):
                if isabs(arg):
                    strpath = arg
                    strargs = newargs
                    break
                newargs.insert(0, arg)
        for arg in strargs:
            arg = arg.strip(sep)
            if iswin32:
                # allow unix style paths even on windows.
                arg = arg.strip('/')
                arg = arg.replace('/', sep)
            strpath = strpath + sep + arg
        # bypass __init__ (no abspath/validation needed: already absolute)
        obj = object.__new__(self.__class__)
        obj.strpath = normpath(strpath)
        return obj
    def open(self, mode='r', ensure=False, encoding=None):
        """ return an opened file with the given mode.
        If ensure is True, create parent directories if needed.
        """
        if ensure:
            self.dirpath().ensure(dir=1)
        if encoding:
            # io.open supports the encoding keyword on both py2 and py3
            return py.error.checked_call(io.open, self.strpath, mode, encoding=encoding)
        # 'open' here is the builtin, not this method (method scope)
        return py.error.checked_call(open, self.strpath, mode)

    def _fastjoin(self, name):
        # cheap single-component join: skips normpath/validation,
        # used by listdir() for speed
        child = object.__new__(self.__class__)
        child.strpath = self.strpath + self.sep + name
        return child
    def islink(self):
        # True if the path itself is a symbolic link
        return islink(self.strpath)

    def check(self, **kw):
        # fast paths for the common no-kw / single dir/file cases;
        # everything else is delegated to the generic Checkers machinery
        if not kw:
            return exists(self.strpath)
        if len(kw) == 1:
            if "dir" in kw:
                # 'not a ^ b' == (bool(a) == bool(b)): honors dir=0 as
                # "is NOT a directory"
                return not kw["dir"] ^ isdir(self.strpath)
            if "file" in kw:
                return not kw["file"] ^ isfile(self.strpath)
        return super(LocalPath, self).check(**kw)
    # characters that mark a string filter as an fnmatch pattern
    _patternchars = set("*?[" + os.path.sep)
    def listdir(self, fil=None, sort=None):
        """ list directory contents, possibly filter by the given fil func
            and possibly sorted.
        """
        if fil is None and sort is None:
            # fast path: no filtering, preserve os.listdir order
            names = py.error.checked_call(os.listdir, self.strpath)
            return map_as_list(self._fastjoin, names)
        if isinstance(fil, py.builtin._basestring):
            if not self._patternchars.intersection(fil):
                # plain name (no wildcards): a single existence check
                # beats scanning the whole directory
                child = self._fastjoin(fil)
                if exists(child.strpath):
                    return [child]
                return []
            fil = common.FNMatcher(fil)
        names = py.error.checked_call(os.listdir, self.strpath)
        res = []
        for name in names:
            child = self._fastjoin(name)
            if fil is None or fil(child):
                res.append(child)
        self._sortlist(res, sort)
        return res
    def size(self):
        """ return size of the underlying file object """
        # st_size in bytes (follows symlinks)
        return self.stat().size

    def mtime(self):
        """ return last modification time of the path. """
        # st_mtime as seconds since the epoch
        return self.stat().mtime
    def copy(self, target, mode=False):
        """ copy path to target.

        If mode is True the file permission bits are copied as well.
        """
        if self.check(file=1):
            # single file: copying onto a directory places it inside
            if target.check(dir=1):
                target = target.join(self.basename)
            assert self!=target
            copychunked(self, target)
            if mode:
                copymode(self, target)
        else:
            # directory tree: walk without following symlinks
            def rec(p):
                return p.check(link=0)
            for x in self.visit(rec=rec):
                relpath = x.relto(self)
                newx = target.join(relpath)
                newx.dirpath().ensure(dir=1)
                if x.check(link=1):
                    # recreate symlinks instead of copying their targets
                    newx.mksymlinkto(x.readlink())
                    continue
                elif x.check(file=1):
                    copychunked(x, newx)
                elif x.check(dir=1):
                    newx.ensure(dir=1)
                if mode:
                    copymode(x, newx)
    def rename(self, target):
        """ rename this path to target. """
        target = getattr(target, "strpath", target)
        return py.error.checked_call(os.rename, self.strpath, target)

    def dump(self, obj, bin=1):
        """ pickle object into path location"""
        # bin=1 selects the binary pickle protocol (protocol 1)
        f = self.open('wb')
        try:
            py.error.checked_call(py.std.pickle.dump, obj, f, bin)
        finally:
            f.close()

    def mkdir(self, *args):
        """ create & return the directory joined with args. """
        p = self.join(*args)
        py.error.checked_call(os.mkdir, getattr(p, "strpath", p))
        return p
    def write_binary(self, data, ensure=False):
        """ write binary data into path.   If ensure is True create
        missing parent directories.
        """
        if ensure:
            self.dirpath().ensure(dir=1)
        with self.open('wb') as f:
            f.write(data)

    def write_text(self, data, encoding, ensure=False):
        """ write text data into path using the specified encoding.
        If ensure is True create missing parent directories.
        """
        if ensure:
            self.dirpath().ensure(dir=1)
        with self.open('w', encoding=encoding) as f:
            f.write(data)
    def write(self, data, mode='w', ensure=False):
        """ write data into path.   If ensure is True create
        missing parent directories.
        """
        if ensure:
            self.dirpath().ensure(dir=1)
        if 'b' in mode:
            # binary modes require bytes - no implicit conversion
            if not py.builtin._isbytes(data):
                raise ValueError("can only process bytes")
        else:
            # text mode: coerce non-text input to text
            if not py.builtin._istext(data):
                if not py.builtin._isbytes(data):
                    data = str(data)
                else:
                    data = py.builtin._totext(data, sys.getdefaultencoding())
        f = self.open(mode)
        try:
            f.write(data)
        finally:
            f.close()
    def _ensuredirs(self):
        # create this directory and any missing ancestors; returns self
        parent = self.dirpath()
        if parent == self:
            # reached the filesystem root
            return self
        if parent.check(dir=0):
            parent._ensuredirs()
        if self.check(dir=0):
            try:
                self.mkdir()
            except py.error.EEXIST:
                # race condition: file/dir created by another thread/process.
                # complain if it is not a dir
                if self.check(dir=0):
                    raise
        return self
    def ensure(self, *args, **kwargs):
        """ ensure that an args-joined path exists (by default as
        a file). if you specify a keyword argument 'dir=True'
        then the path is forced to be a directory path.
        """
        p = self.join(*args)
        if kwargs.get('dir', 0):
            return p._ensuredirs()
        else:
            p.dirpath()._ensuredirs()
            if not p.check(file=1):
                # create (touch) an empty file
                p.open('w').close()
            return p
    def stat(self, raising=True):
        """ Return an os.stat() tuple.

        With raising=False, return None instead of raising on failure.
        """
        # NOTE(review): '== True' (not truthiness) - raising=1 still raises
        # but other truthy values silently take the non-raising branch;
        # confirm before tightening.
        if raising == True:
            return Stat(self, py.error.checked_call(os.stat, self.strpath))
        try:
            return Stat(self, os.stat(self.strpath))
        except KeyboardInterrupt:
            raise
        except Exception:
            return None

    def lstat(self):
        """ Return an os.lstat() tuple. """
        # does not follow symlinks - the link itself is stat'ed
        return Stat(self, py.error.checked_call(os.lstat, self.strpath))
    def setmtime(self, mtime=None):
        """ set modification time for the given path.  if 'mtime' is None
        (the default) then the file's mtime is set to current time.

        Note that the resolution for 'mtime' is platform dependent.
        """
        if mtime is None:
            # os.utime(path, None) sets both times to "now"
            return py.error.checked_call(os.utime, self.strpath, mtime)
        try:
            # -1 atime is accepted by most platforms to mean "unchanged"
            return py.error.checked_call(os.utime, self.strpath, (-1, mtime))
        except py.error.EINVAL:
            # fall back to explicitly preserving the current atime
            return py.error.checked_call(os.utime, self.strpath, (self.atime(), mtime))
    def chdir(self):
        """ change directory to self and return old current directory """
        try:
            old = self.__class__()
        except py.error.ENOENT:
            # previous cwd no longer exists; callers get None back
            old = None
        py.error.checked_call(os.chdir, self.strpath)
        return old
@contextmanager
def as_cwd(self):
""" return context manager which changes to current dir during the
managed "with" context. On __enter__ it returns the old dir.
"""
old = self.chdir()
try:
yield old
finally:
old.chdir()
    def realpath(self):
        """ return a new path which contains no symbolic links."""
        return self.__class__(os.path.realpath(self.strpath))

    def atime(self):
        """ return last access time of the path. """
        # st_atime as seconds since the epoch
        return self.stat().atime

    def __repr__(self):
        return 'local(%r)' % self.strpath

    def __str__(self):
        """ return string representation of the Path. """
        return self.strpath
    def pypkgpath(self, pkgname=None):
        """ return the Python package path by looking for a
        pkgname.  If pkgname is None look for the last
        directory upwards which still contains an __init__.py
        and whose basename is python-importable.
        Return None if a pkgpath can not be determined.
        """
        pkgpath = None
        for parent in self.parts(reverse=True):
            if pkgname is None:
                if parent.check(file=1):
                    # skip the file itself; only directories can be packages
                    continue
                if not isimportable(parent.basename):
                    # a non-importable directory name ends the package chain
                    break
                if parent.join('__init__.py').check():
                    # still inside a package: remember it and keep walking up
                    pkgpath = parent
                    continue
                # first directory without __init__.py terminates the search
                return pkgpath
            else:
                # explicit pkgname: return the first matching ancestor
                if parent.basename == pkgname:
                    return parent
        return pkgpath
def _prependsyspath(self, path):
s = str(path)
if s != sys.path[0]:
#print "prepending to sys.path", s
sys.path.insert(0, s)
def chmod(self, mode, rec=0):
""" change permissions to the given mode. If mode is an
integer it directly encodes the os-specific modes.
if rec is True perform recursively.
"""
if not isinstance(mode, int):
raise TypeError("mode %r must be an integer" % (mode,))
if rec:
for x in self.visit(rec=rec):
py.error.checked_call(os.chmod, str(x), mode)
py.error.checked_call(os.chmod, str(self), mode)
    def pyimport(self, modname=None, ensuresyspath=True):
        """ return path as an imported python module.
        if modname is None, look for the containing package
        and construct an according module name.
        The module will be put/looked up in sys.modules.
        """
        if not self.check():
            raise py.error.ENOENT(self)
        pkgpath = None
        if modname is None:
            # derive the dotted module name from the enclosing package
            pkgpath = self.pypkgpath()
            if pkgpath is not None:
                if ensuresyspath:
                    self._prependsyspath(pkgpath.dirpath())
                __import__(pkgpath.basename)
                pkg = sys.modules[pkgpath.basename]
                # build the dotted name from the path relative to the
                # package's parent directory
                names = self.new(ext='').relto(pkgpath.dirpath())
                names = names.split(self.sep)
                if names and names[-1] == "__init__":
                    names.pop()
                modname = ".".join(names)
            else:
                # no package scope, still make it possible
                if ensuresyspath:
                    self._prependsyspath(self.dirpath())
                modname = self.purebasename
            __import__(modname)
            mod = sys.modules[modname]
            if self.basename == "__init__.py":
                return mod # we don't check anything as we might
                       # we in a namespace package ... too icky to check
            modfile = mod.__file__
            if modfile[-4:] in ('.pyc', '.pyo'):
                # normalize compiled files back to the .py source name
                modfile = modfile[:-1]
            elif modfile.endswith('$py.class'):
                # Jython compiled class file
                modfile = modfile[:-9] + '.py'
            if modfile.endswith(os.path.sep + "__init__.py"):
                if self.basename != "__init__.py":
                    modfile = modfile[:-12]
            try:
                issame = self.samefile(modfile)
            except py.error.ENOENT:
                issame = False
            if not issame:
                # the module in sys.modules comes from a different file
                # than self: refuse the ambiguous import
                raise self.ImportMismatchError(modname, modfile, self)
            return mod
        else:
            try:
                return sys.modules[modname]
            except KeyError:
                # we have a custom modname, do a pseudo-import
                mod = py.std.types.ModuleType(modname)
                mod.__file__ = str(self)
                sys.modules[modname] = mod
                try:
                    py.builtin.execfile(str(self), mod.__dict__)
                except:
                    # undo the sys.modules registration so a later retry
                    # starts from a clean state
                    del sys.modules[modname]
                    raise
                return mod
    def sysexec(self, *argv, **popen_opts):
        """ return stdout text from executing a system child process,
        where the 'self' path points to executable.
        The process is directly invoked and not through a system shell.

        Raises py.process.cmdexec.Error when the child exits non-zero.
        """
        from subprocess import Popen, PIPE
        argv = map_as_list(str, argv)
        # capture both streams; caller only receives stdout on success
        popen_opts['stdout'] = popen_opts['stderr'] = PIPE
        proc = Popen([str(self)] + argv, **popen_opts)
        stdout, stderr = proc.communicate()
        ret = proc.wait()
        if py.builtin._isbytes(stdout):
            stdout = py.builtin._totext(stdout, sys.getdefaultencoding())
        if ret != 0:
            if py.builtin._isbytes(stderr):
                stderr = py.builtin._totext(stderr, sys.getdefaultencoding())
            # NOTE(review): 'ret' is passed twice (status and exit code
            # fields of cmdexec.Error) -- looks intentional but confirm
            # against the py.process.cmdexec API.
            raise py.process.cmdexec.Error(ret, ret, str(self),
                                           stdout, stderr,)
        return stdout
    def sysfind(cls, name, checker=None, paths=None):
        """ return a path object found by looking at the systems
        underlying PATH specification. If the checker is not None
        it will be invoked to filter matching paths.  If a binary
        cannot be found, None is returned
        Note: This is probably not working on plain win32 systems
        but may work on cygwin.
        """
        if isabs(name):
            # absolute names bypass the PATH search entirely
            p = py.path.local(name)
            if p.check(file=1):
                return p
        else:
            if paths is None:
                if iswin32:
                    paths = py.std.os.environ['Path'].split(';')
                    if '' not in paths and '.' not in paths:
                        # windows implicitly searches the current directory
                        paths.append('.')
                    try:
                        systemroot = os.environ['SYSTEMROOT']
                    except KeyError:
                        pass
                    else:
                        # expand literal %SystemRoot% placeholders in PATH
                        paths = [re.sub('%SystemRoot%', systemroot, path)
                                 for path in paths]
                else:
                    paths = py.std.os.environ['PATH'].split(':')
            tryadd = []
            if iswin32:
                # also try each executable extension (.exe, .bat, ...)
                tryadd += os.environ['PATHEXT'].split(os.pathsep)
            tryadd.append("")
            for x in paths:
                for addext in tryadd:
                    p = py.path.local(x).join(name, abs=True) + addext
                    try:
                        if p.check(file=1):
                            if checker:
                                if not checker(p):
                                    continue
                            return p
                    except py.error.EACCES:
                        # unreadable PATH entries are silently skipped
                        pass
        return None
    sysfind = classmethod(sysfind)
def _gethomedir(cls):
try:
x = os.environ['HOME']
except KeyError:
try:
x = os.environ["HOMEDRIVE"] + os.environ['HOMEPATH']
except KeyError:
return None
return cls(x)
_gethomedir = classmethod(_gethomedir)
#"""
#special class constructors for local filesystem paths
#"""
    def get_temproot(cls):
        """ Return the system's temporary directory as a local path
        (where tempfiles are usually created in).
        """
        return py.path.local(py.std.tempfile.gettempdir())
    get_temproot = classmethod(get_temproot)
    def mkdtemp(cls, rootdir=None):
        """ return a Path object pointing to a fresh new temporary directory
        (which we created ourself).

        If 'rootdir' is None the system temp root is used.
        """
        import tempfile
        if rootdir is None:
            rootdir = cls.get_temproot()
        # tempfile.mkdtemp guarantees a unique, freshly-created directory
        return cls(py.error.checked_call(tempfile.mkdtemp, dir=str(rootdir)))
    mkdtemp = classmethod(mkdtemp)
    def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3,
                          lock_timeout = 172800):   # two days
        """ return unique directory with a number greater than the current
        maximum one.  The number is assumed to start directly after prefix.
        if keep is true directories with a number less than (maxnum-keep)
        will be removed.
        """
        if rootdir is None:
            rootdir = cls.get_temproot()
        def parse_num(path):
            """ parse the number out of a path (if it matches the prefix) """
            bn = path.basename
            if bn.startswith(prefix):
                try:
                    return int(bn[len(prefix):])
                except ValueError:
                    pass
        # compute the maximum number currently in use with the
        # prefix
        lastmax = None
        while True:
            maxnum = -1
            for path in rootdir.listdir():
                num = parse_num(path)
                if num is not None:
                    maxnum = max(maxnum, num)
            # make the new directory
            try:
                udir = rootdir.mkdir(prefix + str(maxnum+1))
            except py.error.EEXIST:
                # race condition: another thread/process created the dir
                # in the meantime.  Try counting again
                if lastmax == maxnum:
                    # the count did not change between attempts, so the
                    # collision is not explained by a racing creator
                    raise
                lastmax = maxnum
                continue
            break
        # put a .lock file in the new directory that will be removed at
        # process exit
        if lock_timeout:
            lockfile = udir.join('.lock')
            mypid = os.getpid()
            # prefer a symlink (atomic, holds the pid as the link target)
            if hasattr(lockfile, 'mksymlinkto'):
                lockfile.mksymlinkto(str(mypid))
            else:
                lockfile.write(str(mypid))
            def try_remove_lockfile():
                # in a fork() situation, only the last process should
                # remove the .lock, otherwise the other processes run the
                # risk of seeing their temporary dir disappear.  For now
                # we remove the .lock in the parent only (i.e. we assume
                # that the children finish before the parent).
                if os.getpid() != mypid:
                    return
                try:
                    lockfile.remove()
                except py.error.Error:
                    pass
            atexit.register(try_remove_lockfile)
        # prune old directories
        if keep:
            for path in rootdir.listdir():
                num = parse_num(path)
                if num is not None and num <= (maxnum - keep):
                    lf = path.join('.lock')
                    try:
                        t1 = lf.lstat().mtime
                        t2 = lockfile.lstat().mtime
                        if not lock_timeout or abs(t2-t1) < lock_timeout:
                            continue   # skip directories still locked
                    except py.error.Error:
                        pass   # assume that it means that there is no 'lf'
                    try:
                        path.remove(rec=1)
                    except KeyboardInterrupt:
                        raise
                    except: # this might be py.error.Error, WindowsError ...
                        pass
        # make a convenience symlink "<prefix>-<username>" pointing at
        # the newest numbered directory
        try:
            username = os.environ['USER']           #linux, et al
        except KeyError:
            try:
                username = os.environ['USERNAME']   #windows
            except KeyError:
                username = 'current'
        src  = str(udir)
        dest = src[:src.rfind('-')] + '-' + username
        try:
            os.unlink(dest)
        except OSError:
            pass
        try:
            os.symlink(src, dest)
        except (OSError, AttributeError, NotImplementedError):
            # platforms without symlinks (or without permission) just
            # skip the convenience link
            pass
        return udir
    make_numbered_dir = classmethod(make_numbered_dir)
def copymode(src, dest):
    """ Copy the permission bits of *src* onto *dest*. """
    source_path = str(src)
    target_path = str(dest)
    py.std.shutil.copymode(source_path, target_path)
def copychunked(src, dest):
    """ Copy file contents from *src* to *dest* in fixed-size chunks.

    Both arguments only need to provide an ``open(mode)`` method that
    returns a file object (e.g. py.path.local or pathlib.Path), so the
    whole file is never held in memory at once.
    """
    chunksize = 524288  # half a meg of bytes per read keeps memory bounded
    # context managers replace the original try/finally boilerplate and
    # guarantee both handles are closed even if a write fails
    with src.open('rb') as fsrc:
        with dest.open('wb') as fdest:
            while True:
                buf = fsrc.read(chunksize)
                if not buf:
                    break
                fdest.write(buf)
def isimportable(name):
    """ Return True if *name* can be used as a Python import name:
    it starts with a letter or underscore and the remaining characters
    are alphanumeric or underscores.

    Fix: the original implicitly returned None for an empty/falsy name;
    now the function consistently returns a bool (still falsy, so all
    truthiness-based callers behave identically).
    """
    if not name:
        return False
    if not (name[0].isalpha() or name[0] == '_'):
        return False
    # underscores are allowed anywhere; strip them before the alnum check
    rest = name.replace("_", '')
    return not rest or rest.isalnum()
|
jessekl/flixr
|
venv/lib/python2.7/site-packages/py/_path/local.py
|
Python
|
mit
| 31,843
|
[
"VisIt"
] |
adc9852207fb520b6f9f1e2aefd6299aced993639231c4dfe2a4c4031916ec8f
|
"""
This module contains code for adding queries from a file. It should take a filepath
and type, a reference to an open database, and some other basic information in order
to parse the file correctly and then add the resulting queries to the database.
For now, start with just FASTA files, but eventually should accommodate other file
types and also MSA-based files (although here the file itself might be the sole
reference to the query itself).
"""
#from queries import query_obj
from queries import query_objects
class QueryFile():
    """Base class representing a file from which queries are loaded.

    Concrete formats (FASTA, HMM, ...) subclass this and implement
    parse()/add_queries().  NB: previously had a 'filetype' attribute to
    signal subclass usage; a separate factory class that instantiates a
    subclass based on the type is likely the better long-term design.
    """

    def __init__(self, filepath, db_type, record=None, self_blast=None):
        self.filepath = filepath
        self.db_type = db_type
        self.record = record
        self.self_blast = self_blast

    def parse(self):
        """Parse the underlying file; subclasses must override."""
        pass

    def add_queries(self):
        """Add the parsed queries to the database; subclasses must override."""
        pass
class FastaFile(QueryFile):
    """QueryFile subclass for FASTA-formatted query files."""

    def parse(self):
        """Return a lazy BioPython iterator over the FASTA records."""
        from Bio import SeqIO
        return SeqIO.parse(self.filepath, "fasta")

    def get_queries(self):
        """Return a list of [identity, SeqQuery] pairs, one per record."""
        return [
            [record.id,
             query_objects.SeqQuery(
                 identity=record.id,
                 name=record.name,
                 description=record.description,
                 location=self.filepath,
                 alphabet=self.db_type,
                 sequence=record.seq,
                 record=self.record,
                 racc_mode=self.self_blast)]
            for record in self.parse()
        ]
class HMMFile(QueryFile):
    """HMM-format class for adding HMM queries."""

    def parse(self):
        """Return the full text of the HMM file.

        Somewhat naive: assumes the file holds exactly one HMM profile.
        """
        # Fix: mode 'U' was deprecated and removed in Python 3.11
        # (raises ValueError there); default text mode already performs
        # universal-newline translation.  'with' also guarantees the
        # handle is closed.
        with open(self.filepath) as f:
            return f.read()

    def get_query(self):
        """Return a (name, HMMQuery) tuple for the single profile.

        The name is taken from the NAME field of the HMM header.
        """
        import re
        hmm_text = self.parse()
        m = re.search(r'NAME\s+([\w.]+)', hmm_text)  # matches NAME field of header
        name = m.group(1)
        qobj = query_objects.HMMQuery(
            identity=name,
            name=name,
            description=name,
            location=self.filepath,
            alphabet=self.db_type,
            sequence=hmm_text)
        return (name, qobj)
|
chris-klinger/Goat
|
queries/query_file.py
|
Python
|
gpl-3.0
| 2,895
|
[
"Biopython"
] |
bf32c4d85baf67b17669abf96c0e9492009150f85d4d408e26f0a6ab7734722a
|
from .test_base import TestBase
from werkzeug.exceptions import NotFound, Forbidden
from app.exceptions import ConflictError, ValidationError
class TestBucketlistItems(TestBase):
    """ Test users' Bucketlist items.

    NOTE(review): several tests call ``self.assertTrue(x, msg)`` where an
    equality check against *msg* appears to be intended --
    ``assertTrue`` treats the second argument as a failure message, so
    those assertions pass for any truthy value.  Flagged below; fixing
    would require confirming the server's exact message strings.
    """

    def test_add_bucketlist_item(self):
        """ Test for new item creation """
        res, json = self.client.post('/api/v1/bucketlists/1/items/',
                                     data={
                                         'name': 'Prepare for launch',
                                         'description': 'check FTL engine',
                                         'done': 1
                                     })
        self.assertEqual(res.status_code, 201)
        # NOTE(review): second arg is a failure message, not an expected
        # value -- probably meant assertEqual
        self.assertTrue(
            json['message'],
            "Bucketlist item successfuly created"
        )
        # the Location header should point at the newly created item
        location = res.headers['Location']
        res1, json1 = self.client.get(location)
        self.assertEqual(res1.status_code, 200)
        self.assertIn('Prepare', json1['name'])
        self.assertEqual(json1['self_url'], location)
        self.assertTrue(json1['description'] == 'check FTL engine')

    def test_add_bucketlist_item_with_empty_name_string_or_no_name(self):
        # empty name and missing name field must both be rejected
        with self.assertRaises(ValidationError):
            self.client.post('/api/v1/bucketlists/1/items/', data={'name': ''})
        with self.assertRaises(ValidationError):
            self.client.post('/api/v1/bucketlists/1/items/', data={'pass': ''})

    def test_update_bucketlist_item(self):
        """ Test for updating an item """
        res, json = self.client.put('/api/v1/bucketlists/1/items/1',
                                    data={"name": "Edited blist item name"})
        self.assertEqual(res.status_code, 200)
        # NOTE(review): assertTrue used with a message arg (see class note)
        self.assertTrue(
            json['message'],
            "Bucketlist item successfuly updated"
        )
        # partial update: description and done flag only
        res1, json1 = self.client.put('/api/v1/bucketlists/1/items/1',
                                      data={
                                          'description': 'Edited item desc',
                                          'done': 1
                                      })
        self.assertEqual(res1.status_code, 200)
        self.assertTrue(
            json1['message'],
            "Bucketlist item successfuly updated"
        )

    def test_delete_bucketlist_item(self):
        """ Test deletion of a bucketlist item """
        res, json = self.client.delete('/api/v1/bucketlists/1/items/1')
        self.assertEqual(res.status_code, 200)
        # NOTE(review): assertTrue used with a message arg (see class note)
        self.assertTrue(
            json['message'],
            "Your bucketlist item was successfuly deleted"
        )

    def test_get_bucketlist_item(self):
        """ Test that we can fetch a specific bucket list item """
        # Get bucket list whose ID is 1
        res, json = self.client.get('/api/v1/bucketlists/1/items/1')
        self.assertEqual(res.status_code, 200)
        self.assertIn('Build a Time Machine', json['name'])
        self.assertTrue(json['description'] == 'Pay Neil deGrasse a visit')

    def test_get_bucketlist_items(self):
        """ Test that all bucketlist items are returned """
        res, json = self.client.get('/api/v1/bucketlists/1/items/')
        self.assertEqual(res.status_code, 200)

    def test_operations_on_invalid_bucketlist_item(self):
        """
        Tests to cover all invalid bucketlist items scenarios
        """
        with self.assertRaises(NotFound):
            self.client.get('/api/v1/bucketlists/1/items/233')
        """ Test editing a bucketlist item that doesn't exist """
        with self.assertRaises(NotFound):
            self.client.put('/api/v1/bucketlists/1/items/221',
                            data={
                                "name": "ndoo5",
                                "description": "no desc"
                            })
        """ Test deletion of a bucketlist item that does not exist """
        with self.assertRaises(NotFound):
            self.client.delete('/api/v1/bucketlists/1/items/221')

    def test_add_duplicate_bucketlist_item(self):
        """ Test creation of a bucketlist item with an existing name """
        with self.assertRaises(ConflictError):
            self.client.post('/api/v1/bucketlists/1/items/',
                             data={"name": "Build a Time Machine"})

    def test_non_existent_bucketlists_and_items(self):
        """
        Tests to cover all invalid bucketlists scenarios
        """
        with self.assertRaises(NotFound):
            self.client.get('/api/v1/bucketlists/198/items/1')
        """ Test editing a bucketlist that doesn't exist """
        with self.assertRaises(NotFound):
            self.client.put('/api/v1/bucketlists/456/items/1',
                            data={"name": "ndoo5", "description": "no desc"})
        """ Test deletion of a bucketlist that does not exist """
        with self.assertRaises(NotFound):
            self.client.delete('/api/v1/bucketlists/61/items/1')

    def test_bucketlist_item_operations_on_another_users_bucketlist(self):
        """ Test that users cannot access other users' bucketlist items """
        # Attempt to get another user's bucketlist item
        with self.assertRaises(Forbidden):
            self.client2.get('/api/v1/bucketlists/1/items/')
        # Attempt to update another user's bucketlist item
        with self.assertRaises(Forbidden):
            self.client2.put('/api/v1/bucketlists/1/items/1',
                             data={"name": "ndoo6", "description": "desc"})
        """ Test deletion of another user's bucketlist item"""
        with self.assertRaises(Forbidden):
            self.client2.delete('/api/v1/bucketlists/1/items/1')
        """ Test creation of an item in another user's bucketlist """
        with self.assertRaises(Forbidden):
            self.client2.post('/api/v1/bucketlists/1/items/',
                              data={"name": "New Item 123"})
|
andela-akhenda/maisha-goals
|
tests/test_bucketlist_items.py
|
Python
|
mit
| 5,947
|
[
"VisIt"
] |
1dbedf9732726fb09cb8d489a067e73ebf92383dc84c9cf7d959a5ee9661ed6a
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-07 02:14
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration: creates the Blinkotron model.

    Auto-generated by Django 1.9.7 -- do not hand-edit retroactively;
    create a follow-up migration for schema changes instead.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Blinkotron',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # 'visit' defaults to the time the row is created
                ('visit', models.DateTimeField(default=django.utils.timezone.now)),
            ],
        ),
    ]
|
iannesbitt/iannesbitt.org
|
blinker/migrations/0001_initial.py
|
Python
|
mpl-2.0
| 607
|
[
"VisIt"
] |
ffaad4bd751275b24bfd36891a88a958d8ec1aa40976670c27e0e51bacd1b2f9
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>

# NOTE(review): this is a Python 2 notebook export (print statements,
# list-returning range()).  Cells depend on interactive execution order;
# several names are reused/rebound across cells -- see notes below.

# <codecell>

import sys
sys.path.append('/home/will/PySeqUtils/')
from GeneralSeqTools import fasta_reader, fasta_writer
import os
os.chdir('/home/will/PySeqUtils/TransToolStuff/')

# <codecell>

# preview the first 20 trimmed env sequences to sanity-check the slice
from itertools import islice
start = 806
stop = -1
path = 'HIV1_ALL_2012_env_PRO.fasta'
outpath = 'HIV1_ALL_2012_gp41_PRO.fasta'
with open(path) as handle:
    for name, seq in islice(fasta_reader(handle), 20):
        tseq = seq[start:stop]
        print tseq[:5], tseq[-5:]

# <codecell>

# write the gp41 region (slice of env) for all sequences
seqs = []
with open(path) as handle:
    for name, seq in fasta_reader(handle):
        seqs.append((name, seq[start:stop]))
with open(outpath, 'w') as handle:
    fasta_writer(handle, seqs)

# <codecell>

# fetch a batch of GenBank records by GI number
from Bio import Entrez
from Bio import SeqIO
ids = '544451412,544451410,544451408,544451406,544451404,544451402,544451400,544451398,544451396'
fetch_handle = Entrez.efetch(db="nucleotide", rettype="gb", retmode="text",
                             id=ids)
records = list(SeqIO.parse(fetch_handle, "gb"))

# <codecell>

rec = records[0]

# <codecell>

rec.annotations['gi']

# <codecell>

# build (start, stop) index pairs for batched processing
batch_size = 1000
num_res = 1300
inds = range(0, num_res, batch_size)+[num_res]
start_inds = inds
stop_inds = inds[1:]
zip(start_inds, stop_inds)

# <codecell>

# strip alignment gaps from the LTR sequences and re-write them
from collections import defaultdict
counts = defaultdict(int)
seqs = []
with open('/home/will/WLAHDB_data/RegionDBs/LTR/HIV1_ALL_2012_ltr_DNA.fasta') as handle:
    for name, seq in fasta_reader(handle):
        seqs.append((name, seq.replace('-', '')))
with open('/home/will/WLAHDB_data/RegionDBs/LTR/LTR.fasta', 'w') as handle:
    fasta_writer(handle, seqs)

# <codecell>

max(len(s) for _, s in seqs)

# <codecell>

from Bio import SeqIO
from Bio.Alphabet import generic_dna
with open('/home/will/WLAHDB_data/SubtypeDB/HIV1_genome_DNA.fasta') as handle:
    seqs = list(SeqIO.parse(handle, 'fasta', generic_dna))

# <codecell>

# count records across all downloaded GenBank files
import glob
files = glob.glob('/home/will/WLAHDB_data/GenbankDL/*.gb')
counts = []
for fnum, f in enumerate(files):
    if fnum % 10000 == 0:
        print fnum, sum(counts)
    with open(f) as handle:
        for num, seq in enumerate(SeqIO.parse(handle, 'gb'), 1):
            pass
    counts.append(num)

# <codecell>

# BLAST 500bp windows of a genome against the subtype database
from Bio.Blast import NCBIXML
from Bio.Blast.Applications import NcbiblastxCommandline, NcbiblastnCommandline
from tempfile import NamedTemporaryFile
from StringIO import StringIO
window_size = 500
# NOTE(review): 'seq' is referenced here before the assignment on the
# next line -- relies on a value left over from an earlier notebook run
inds = range(0, len(seq), window_size) + [len(seq)]
seq = seqs[0]
blocks = []
for start, stop in zip(inds, inds[1:]):
    blocks.append(seq[start:stop])
with NamedTemporaryFile(suffix='.fasta', delete=False) as handle:
    with NamedTemporaryFile() as ohandle:
        SeqIO.write(blocks, handle, 'fasta')
        handle.flush()
        os.fsync(handle.fileno())
        cline = NcbiblastnCommandline(db='/home/will/WLAHDB_data/SubtypeDB/HIV1_genome_DNA.fasta',
                                      query=handle.name,
                                      out=ohandle.name,
                                      outfmt=5)
        _, _ = cline()
        records = list(NCBIXML.parse(ohandle))

# <codecell>

rec = records[0]
align = rec.alignments[0]

# <codecell>

rec.query

# <codecell>

align.hit_def

# <codecell>
|
JudoWill/ResearchNotebooks
|
Untitled1.py
|
Python
|
mit
| 3,297
|
[
"BLAST"
] |
0edcfc18ba989aa81adc5010e188a407e832ff2555012d327e20df654d36cc0b
|
from neat import (NetworkSpec, GeneSpec,
                  NumericParamSpec as PS, NominalParamSpec as NPS,
                  Mutator, NEAT, neuron, connection)

#### CONFIG #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### ####
neuron_sigma = 0.25  # mutation sigma for neuron params
conn_sigma = 1.0     # mutation sigma for connection params

conf = dict(
    pop_size = 5,                       # population size
    elite_size = 1,                     # size of the elite club
    tournament_size = 4,                # size of the selection subsample (must be in the range [2, pop_size])
    neuron_param_mut_proba = 0.5,       # probability to mutate each single neuron in the genome
    connection_param_mut_proba = 0.5,   # probability to mutate each single connection in the genome
    structural_augmentation_proba = 0.8,# probability to augment the topology of a newly created genome
    structural_removal_proba = 0.0,     # probability to diminish the topology of a newly created genome
    speciation_threshold = 0.005        # genomes that are more similar than this value will be considered the same species
)
#### ###### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### ####

# network specification: one immutable input neuron type and one sigmoid
# hidden-neuron type; connections carry a single mutable 'weight' param
net_spec = NetworkSpec(
    [
        GeneSpec('input',
            NPS('layer', ['input'], mutable=False)
        ),
        GeneSpec('sigmoid',
            PS('bias', -1., 1., neuron_sigma, mutable=True),
            PS('gain', 0, 1., neuron_sigma, mutable=True),
            NPS('layer', ['hidden'], mutable=False)
        )
    ],
    [
        GeneSpec('default',
            PS('weight', mutation_sigma=conn_sigma, mean_value = 0., mutable=True))
    ]
)

mut = Mutator(net_spec, allowed_neuron_types = ['sigmoid'])

neat_obj = NEAT(mutator = mut, **conf)

# seed genome: two protected inputs fully connected to one protected output
genome = neat_obj.get_init_genome(
        in1=neuron('input', protected=True, layer='input'),
        in2=neuron('input', protected=True, layer='input'),
        out1=neuron('sigmoid', protected=True, layer='output'),
        connections=[
            connection('default', protected=False, src='in1', dst='out1', weight = 0.33433),
            connection('default', protected=False, src='in2', dst='out1', weight = -0.77277)
        ]
)

# persist the seed genome so later runs can start from it
with open('init_genome.yaml', 'w+') as outfile:
    outfile.write(genome.to_yaml())
|
egdman/neat-lite
|
examples/init_example.py
|
Python
|
mit
| 2,364
|
[
"NEURON"
] |
ee99dede93efa88e926de1e29c198b02eed295de7decc925cc6c25effd059427
|
# NOTE(review): 'Atoms', 'MDMin', 'PickleTrajectory' and 'sqrt' all come
# from the wildcard 'from ase import *' -- confirm against the installed
# ase version, as newer releases moved/renamed several of these.
from ase import *
from ase.constraints import StrainFilter

# FCC copper: build a 6x6x6 supercell from the primitive cell
a = 3.6
b = a / 2
cu = Atoms('Cu', cell=[(0,b,b),(b,0,b),(b,b,0)], pbc=1) * (6, 6, 6)

# use the Asap calculator when available; otherwise skip the relaxation
try:
    import Asap
except ImportError:
    pass
else:
    cu.set_calculator(ASAP())
    # relax only the three diagonal strain components
    f = StrainFilter(cu, [1, 1, 1, 0, 0, 0])
    opt = MDMin(f, dt=0.01)
    t = PickleTrajectory('Cu.traj', 'w', cu)
    opt.attach(t)
    opt.run(0.001)

# HCP:
from ase.lattice.surface import hcp0001
cu = hcp0001('Cu', (1, 1, 2), a=a / sqrt(2))
# introduce a small shear so the strain relaxation has work to do
cu.cell[1,0] += 0.05
cu *= (6, 6, 3)

try:
    import Asap
except ImportError:
    pass
else:
    cu.set_calculator(ASAP())
    # relax all six strain components this time
    f = StrainFilter(cu)
    opt = MDMin(f, dt=0.01)
    t = PickleTrajectory('Cu.traj', 'w', cu)
    opt.attach(t)
    opt.run(0.01)
|
freephys/python_ase
|
ase/test/strain.py
|
Python
|
gpl-3.0
| 737
|
[
"ASE"
] |
21566fd2e7cdb9e597ea253c8b18ae369c297b830524a952c8ca0affae85f5c5
|
# coding: utf-8
#
# Copyright 2015 NAMD-EMAP-FGV
#
# This file is part of PyPLN. You can get more information at: http://pypln.org/.
#
# PyPLN is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyPLN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyPLN. If not, see <http://www.gnu.org/licenses/>.
from bson import ObjectId
from gridfs import GridFS
import pymongo
from pypln.backend.celery_task import PyPLNTask
from pypln.backend import config
class GridFSDataRetriever(PyPLNTask):
    """Task that fetches a document's raw contents from MongoDB GridFS."""

    def process(self, document):
        """Look up ``document['file_id']`` in GridFS and return its metadata
        plus the full file contents."""
        mongo_conf = config.MONGODB_CONFIG
        client = pymongo.MongoClient(host=mongo_conf['host'],
                                     port=mongo_conf['port'])
        database = client[mongo_conf['database']]
        storage = GridFS(database, mongo_conf['gridfs_collection'])
        file_data = storage.get(ObjectId(document['file_id']))
        return {
            'length': file_data.length,
            'md5': file_data.md5,
            'filename': file_data.filename,
            'upload_date': file_data.upload_date,
            'contents': file_data.read(),
        }
|
fccoelho/pypln.backend
|
pypln/backend/workers/gridfs_data_retriever.py
|
Python
|
gpl-3.0
| 1,547
|
[
"NAMD"
] |
e8e2074a4af4671280c8ea4a361ab8a77b3c8c7a4ad823032b79282ef1cf9463
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import biom
import pandas as pd
import skbio.diversity
# We should consider moving these functions to scikit-bio. They're part of
# the private API here for now.
def phylogenetic_metrics():
    """Return the names of the supported phylogenetic alpha diversity
    metrics."""
    supported = {'faith_pd'}
    return supported
def non_phylogenetic_metrics():
    """Return the names of the supported non-phylogenetic alpha diversity
    metrics."""
    supported = {
        'ace', 'chao1', 'chao1_ci',
        'berger_parker_d', 'brillouin_d', 'dominance', 'doubles',
        'enspie', 'esty_ci', 'fisher_alpha', 'goods_coverage',
        'heip_e', 'kempton_taylor_q', 'margalef',
        'mcintosh_d', 'mcintosh_e', 'menhinick',
        'michaelis_menten_fit', 'observed_otus', 'osd',
        'pielou_e', 'robbins', 'shannon',
        'simpson', 'simpson_e', 'singles', 'strong',
        'gini_index', 'lladser_pe', 'lladser_ci',
    }
    return supported
def alpha_phylogenetic(table: biom.Table, phylogeny: skbio.TreeNode,
                       metric: str) -> pd.Series:
    """Compute a phylogenetic alpha diversity metric for every sample.

    Returns a pd.Series named after the metric, indexed by sample id.
    Raises ValueError for an unknown metric or an empty table.
    """
    if metric not in phylogenetic_metrics():
        raise ValueError("Unknown phylogenetic metric: %s" % metric)
    if table.is_empty():
        raise ValueError("The provided table object is empty")

    # skbio expects samples as rows, hence the transpose
    counts = table.matrix_data.toarray().astype(int).T
    sample_ids = table.ids(axis='sample')
    feature_ids = table.ids(axis='observation')

    try:
        result = skbio.diversity.alpha_diversity(metric=metric,
                                                 counts=counts,
                                                 ids=sample_ids,
                                                 otu_ids=feature_ids,
                                                 tree=phylogeny)
    except skbio.tree.MissingNodeError as e:
        # translate skbio's parameter names into QIIME 2 terminology
        # before surfacing the error to the user
        message = str(e).replace('otu_ids', 'feature_ids')
        message = message.replace('tree', 'phylogeny')
        raise skbio.tree.MissingNodeError(message)
    result.name = metric
    return result
def alpha(table: biom.Table, metric: str) -> pd.Series:
    """Compute a non-phylogenetic alpha diversity metric for every sample.

    Returns a pd.Series named after the metric, indexed by sample id.
    Raises ValueError for an unknown metric or an empty table.
    """
    if metric not in non_phylogenetic_metrics():
        raise ValueError("Unknown metric: %s" % metric)
    if table.is_empty():
        raise ValueError("The provided table object is empty")

    sample_ids = table.ids(axis='sample')
    # skbio expects samples as rows, hence the transpose
    observation_matrix = table.matrix_data.toarray().astype(int).T
    series = skbio.diversity.alpha_diversity(metric=metric,
                                             counts=observation_matrix,
                                             ids=sample_ids)
    series.name = metric
    return series
|
maxvonhippel/q2-diversity
|
q2_diversity/_alpha/_method.py
|
Python
|
bsd-3-clause
| 2,667
|
[
"scikit-bio"
] |
2236537edcb3075c22184187b6bb6d310987aaeac10793a707a38755b3cd271a
|
#!/usr/bin/env python
# @package vfnow
# \author Ed Bueler and Constantine Khroulev, University of Alaska Fairbanks, USA
# \brief A script for verification of numerical schemes in PISM.
# \details It specifies a refinement path for each of Tests ABCDEFGIJKL and runs
# pismv accordingly.
# Copyright (C) 2007--2013 Ed Bueler and Constantine Khroulev
##
# Organizes the process of verifying PISM. It specifies standard refinement paths for each of the tests described in the user manual. It runs the tests, times them, and summarizes the numerical errors reported at the end.
##
# Examples:
# - \verbatim vfnow.py \endverbatim use one processor and do three levels of refinement; this command is equivalent to \verbatim vfnow.py -n 2 -l 2 -t CGIJ \endverbatim,
# - \verbatim vfnow.py -n 8 -l 5 -t J --prefix=bin/ --mpido='aprun -n' \endverbatim will use \verbatim aprun -n 8 bin/pismv \endverbatim as the command and do five levels (the maximum) of refinement only on test J,
# - \verbatim vfnow.py -n 2 -l 3 -t CEIJGKLO \endverbatim uses two processers (cores) and runs in about an hour,
# - \verbatim vfnow.py -n 40 -l 5 -t ABCDEFGIJKLO \endverbatim will use forty processors to do all possible verification as managed by \c vfnow.py; don't run this unless you have a big computer and you are prepared to wait.
# For a list of options do \verbatim test/vfnow.py --help \endverbatim.
# Timing information is given in the \c vfnow.py output so performance, including parallel performance, can be assessed along with accuracy.
import sys
import time
import commands
from numpy import array
# A class describing a refinement path and command-line options
# for a particular PISM verification test.
# NOTE(review): this file is Python 2 (print statements, string-based
# formatting of zipped tuples); documented as-is.
class PISMVerificationTest:
    """Describes one PISM verification test: its refinement path and the
    command-line options needed to run pismv for it."""

    # max number of levels that will work with
    N = 50
    # one-letter test name
    name = ""
    # test description
    test = ""
    # description of the refinement path
    path = ""
    # per-level horizontal grid sizes
    Mx = []
    My = []
    # 31 levels in the ice
    Mz = [31] * N
    # no bedrock by default
    Mbz = [1] * N
    # extra options (such as -y, -ys, -ssa_rtol)
    opts = ""
    executable = "pismv"

    def build_command(self, exec_prefix, level):
        """Return the full pismv command line for refinement level
        *level* (1-based), or "" if the level is out of range."""
        M = zip(self.Mx, self.My, self.Mz, self.Mbz)

        if level > len(M):
            print "Test %s: Invalid refinement level: %d (only %d are available)" % (
                self.name, level, len(M))
            return ""

        # grid sizes for the requested level (levels are 1-based)
        grid_options = "-Mx %d -My %d -Mz %d -Mbz %d" % M[level - 1]
        return "%s%s -test %s %s %s" % (exec_prefix, self.executable, self.name, grid_options, self.opts)
def run_test(executable, name, level, extra_options="", debug=False):
    """Run one refinement level of a verification test, time it, and
    print the numerical-error report extracted from pismv's output.

    With debug=True only the command line is printed.
    """
    # NOTE(review): bare 'except:' -- only tests[name] can raise here,
    # so 'except KeyError:' would be the precise form; confirm before
    # narrowing.
    try:
        test = tests[name]
    except:
        print "Test %s not found." % name
        return

    if level == 1:
        print "# ++++  TEST %s: verifying with %s exact solution  ++++\n# %s" % (
            test.name, test.test, test.path)
    else:
        # append to the output file produced by the previous level
        extra_options += " -append"

    command = test.build_command(executable, level) + " " + extra_options

    if debug:
        print '# L%d\n%s' % (level, command)
        return
    else:
        print ' L%d: trying "%s"' % (level, command)

    # run PISM:
    try:
        lasttime = time.time()
        (status, output) = commands.getstatusoutput(command)
        elapsetime = time.time() - lasttime
    except KeyboardInterrupt:
        sys.exit(2)
    if status:
        sys.exit(status)

    print ' finished in %7.4f seconds; reported numerical errors as follows:' % elapsetime

    # process the output: extract the block between the NUMERICAL ERRORS
    # marker and the NUM ERRORS DONE marker, re-printing it line by line
    position = output.find('NUMERICAL ERRORS')
    if position >= 0:
        report = output[position:output.find('NUM ERRORS DONE')]
        endline = report.find('\n')
        print '    ' + report[0:endline]
        report = report[endline + 1:]
        while (len(report) > 1) and (endline > 0):
            endline = report.find('\n')
            if endline == -1:
                endline = len(report)
            print '   #' + report[0:endline]
            report = report[endline + 1:]
            endline = report.find('\n')
            if endline == -1:
                endline = len(report)
            print '   |' + report[0:endline]
            report = report[endline + 1:]
    else:
        print " ERROR: can't find reported numerical error"
        sys.exit(99)
def define_refinement_paths(KSPRTOL, SSARTOL):
    """Build the dict of supported verification tests and refinement paths.

    KSPRTOL and SSARTOL are solver tolerances interpolated into the extra
    options of the SSA-based tests (I, J, M).  Returns a dict keyed by the
    one-letter test name, plus '*_userman' entries with denser refinement
    paths used to produce User's Manual figures.
    """
    # Define all the supported refinement paths:
    tests = {}
    # A
    A = PISMVerificationTest()
    A.name = "A"
    A.test = "steady, marine margin isothermal SIA"
    A.path = "(refine dx=53.33,40,26.67,20,13.33,km, dx=dy and Mx=My=31,41,61,81,121)"
    A.Mx = [31, 41, 61, 81, 121]
    A.My = A.Mx
    A.opts = "-y 25000.0"
    tests['A'] = A
    # B
    B = PISMVerificationTest()
    B.name = "B"
    B.test = "moving margin isothermal SIA (Halfar)"
    B.path = "(refine dx=80,60,40,30,20,km, dx=dy and Mx=My=31,41,61,81,121)"
    B.Mx = [31, 41, 61, 81, 121]
    B.My = B.Mx
    B.opts = "-ys 422.45 -y 25000.0"
    tests['B'] = B
    # C
    C = PISMVerificationTest()
    C.name = "C"
    C.test = "non-zero accumulation moving margin isothermal SIA"
    C.path = "(refine dx=50,33.33,25,20,16,km, dx=dy and Mx=My=41,61,81,101,121)"
    C.Mx = [41, 61, 81, 101, 121]
    C.My = C.Mx
    C.opts = "-y 15208.0"
    tests['C'] = C
    # D
    D = PISMVerificationTest()
    D.name = "D"
    D.test = "time-dependent isothermal SIA"
    D.path = "(refine dx=50,33.33,25,20,16.67,km, dx=dy and Mx=My=41,61,81,101,121)"
    D.Mx = [41, 61, 81, 101, 121]
    D.My = D.Mx
    D.opts = "-y 25000.0"
    tests['D'] = D
    # E
    E = PISMVerificationTest()
    E.name = "E"
    E.test = "steady sliding marine margin isothermal SIA"
    E.path = "(refine dx=53.33,40,26.67,20,13.33,km, dx=dy and Mx=My=31,41,61,81,121)"
    E.Mx = [31, 41, 61, 81, 121]
    E.My = E.Mx
    E.opts = "-y 25000.0"
    tests['E'] = E
    # F (thermocoupled: refine vertically as well, Mz tracks Mx)
    F = PISMVerificationTest()
    F.name = "F"
    F.test = "steady thermomechanically-coupled SIA"
    F.path = "(refine dx=30,20,15,10,7.5,km, dx=dy, dz=66.67,44.44,33.33,22.22,16.67 m and Mx=My=Mz=61,91,121,181,241)"
    F.Mx = [61, 91, 121, 181, 241]
    F.My = F.Mx
    F.Mz = F.Mx
    F.opts = "-y 25000.0"
    tests['F'] = F
    # G
    G = PISMVerificationTest()
    G.name = "G"
    G.test = "time-dependent thermomechanically-coupled SIA"
    G.path = "(refine dx=30,20,15,10,7.5,km, dx=dy, dz=66.67,44.44,33.33,22.22,16.67 m and Mx=My=Mz=61,91,121,181,241)"
    G.Mx = [61, 91, 121, 181, 241]
    G.My = G.Mx
    G.Mz = G.Mx
    G.opts = "-y 25000.0"
    tests['G'] = G
    # H
    H = PISMVerificationTest()
    H.name = "H"
    H.test = "moving margin, isostatic bed, isothermal SIA"
    H.path = "(refine dx=80,60,40,30,20,km, dx=dy and Mx=My=31,41,61,81,121)"
    H.Mx = [31, 41, 61, 81, 121]
    H.My = H.Mx
    H.opts = "-bed_def iso -y 60000.0"
    tests['H'] = H
    # I (SSA test: refines in y only, run by a separate executable)
    I = PISMVerificationTest()
    I.executable = "ssa_testi"
    I.name = "I"
    I.test = "plastic till ice stream (SSA)"
    I.path = "(refine dy=5000,1250,312.5,78.13,19.53,m, My=49,193,769,3073,12289)"
    I.Mx = [5] * 5
    I.My = [49, 193, 769, 3073, 12289]
    # NOTE(review): I.executable is assigned a second time here; the
    # duplicate assignment is harmless but redundant.
    I.executable = "ssa_testi"
    I.opts = "-ssa_method fd -ssa_rtol %1.e -ksp_rtol %1.e" % (SSARTOL, KSPRTOL)
    tests['I'] = I
    # J
    J = PISMVerificationTest()
    J.executable = "ssa_testj"
    J.name = "J"
    J.test = "periodic ice shelf (linearized SSA)"
    J.path = "(refine dy=5000,1250,312.5,78.13,19.53,m, Mx=49,193,769,3073,12289)"
    # NOTE(review): the path string above advertises Mx=49,193,769,... but
    # the actual refinement below uses 49,98,196,... -- TODO confirm intent.
    J.Mx = [49, 98, 196, 392, 784]
    J.My = J.Mx
    J.Mz = [11] * 5
    J.executable = "ssa_testj"
    J.opts = "-ssa_method fd -pc_type asm -sub_pc_type lu -ksp_rtol %1.e" % KSPRTOL
    tests['J'] = J
    # K
    K = PISMVerificationTest()
    K.name = "K"
    K.test = "pure conduction problem in ice and bedrock"
    K.path = "(refine dz=100,50,25,12.5,6.25,m, Mz=41,81,161,321,641)"
    K.Mx = [8] * 5
    K.My = K.Mx
    K.Mz = array([41, 81, 161, 321, 641])
    # NOTE(review): relies on Python-2 integer division over a numpy int
    # array; under Python 3 this yields floats.
    K.Mbz = (K.Mz - 1) / 4 + 1
    K.opts = "-y 130000.0 -Lbz 1000 -z_spacing equal"
    tests['K'] = K
    # L
    L = PISMVerificationTest()
    L.name = "L"
    L.test = "non-flat bed stead isothermal SIA"
    L.path = "(refine dx=60,30,20,15,10,km, dx=dy and Mx=My=31,61,91,121,181)"
    L.Mx = [31, 61, 91, 121, 181]
    L.My = L.Mx
    L.opts = "-y 25000.0"
    tests['L'] = L
    # M
    M = PISMVerificationTest()
    M.name = "M"
    M.test = "annular ice shelf with a calving front (SSA)"
    M.path = "(refine dx=50,25,16.666,12.5,8.333 km; dx=dy and My=31,61,91,121,181)"
    M.Mx = [31, 61, 91, 121, 181]
    M.My = M.Mx
    M.Mz = [11] * 5
    M.opts = "-ssa_rtol %1.e -ksp_rtol %1.e" % (SSARTOL, KSPRTOL)
    tests['M'] = M
    # O
    O = PISMVerificationTest()
    O.name = "O"
    O.test = "basal melt rate from conduction problem in ice and bedrock"
    O.path = "(refine dz=100,50,25,12.5,6.25,m, Mz=41,81,161,321,641)"
    O.Mx = [8] * 5
    O.My = O.Mx
    O.Mz = array([41, 81, 161, 321, 641])
    O.Mbz = (O.Mz - 1) / 4 + 1
    O.opts = "-z_spacing equal -zb_spacing equal -Lbz 1000 -y 1000 -no_mass"
    tests['O'] = O
    # test K (for a figure in the User's Manual)
    K = PISMVerificationTest()
    K.name = "K"
    K.test = "pure conduction problem in ice and bedrock"
    K.path = "(lots of levels)"
    K.Mz = array([101, 121, 141, 161, 181, 201, 221, 241, 261, 281, 301, 321])
    K.Mbz = (K.Mz - 1) / 4 + 1
    K.Mx = [8] * len(K.Mz)
    K.My = K.Mx
    K.opts = "-y 130000.0 -Lbz 1000"
    tests['K_userman'] = K
    # test B (for a figure in the User's Manual)
    B = PISMVerificationTest()
    B.name = "B"
    B.test = "moving margin isothermal SIA (Halfar)"
    B.path = "(lots of levels)"
    B.Mx = [31, 41, 51, 61, 71, 81, 91, 101, 111, 121]
    B.My = B.Mx
    B.Mz = [31] * len(B.Mx)
    B.Mbz = [1] * len(B.Mx)
    B.opts = "-ys 422.45 -y 25000.0"
    tests['B_userman'] = B
    # test G (for a figure in the User's Manual)
    G = PISMVerificationTest()
    G.name = "G"
    G.test = "time-dependent thermomechanically-coupled SIA"
    G.path = "(lots of levels)"
    G.Mx = [61, 71, 81, 91, 101, 111, 121, 151, 181]
    G.My = G.Mx
    G.Mz = G.Mx
    G.opts = "-y 25000.0"
    tests['G_userman'] = G
    # test I (for a figure in the User's Manual)
    I = PISMVerificationTest()
    I.executable = "ssa_testi"
    I.name = "I"
    I.test = "plastic till ice stream (SSA)"
    I.path = "(lots of levels)"
    I.My = [51, 101, 151, 201, 401, 601, 801, 1001, 1501, 2001, 2501, 3073]
    I.Mx = [5] * len(I.My)
    I.opts = "-ssa_method fd -ssa_rtol %1.e -ksp_rtol %1.e" % (SSARTOL, KSPRTOL)
    tests['I_userman'] = I
    return tests
# ---------------- command-line driver ----------------
from argparse import ArgumentParser
parser = ArgumentParser()
parser.description = """PISM verification script"""
parser.add_argument("--eta", dest="eta", action="store_true",
                    help="to add '-eta' option to pismv call")
parser.add_argument("-l", dest="levels", type=int, default=2,
                    help="number of levels of verification; '-l 1' fast, '-l 5' slowest")
parser.add_argument("--mpido", dest="mpido", default="mpiexec -np",
                    help="specify MPI executable (e.g. 'mpirun -np' or 'aprun -n')")
parser.add_argument("-n", dest="n", type=int, default=2,
                    help="number of processors to use")
parser.add_argument("--prefix", dest="prefix", default="",
                    help="path prefix to pismv executable")
parser.add_argument("-r", dest="report_file", default="",
                    help="name of the NetCDF error report file")
parser.add_argument("-t", dest="tests", nargs="+",
                    help="verification tests to use (A,B,C,D,E,F,G,H,I,J,K,L,M,O); specify a space-separated list", default=['C', 'G', 'I', 'J'])
parser.add_argument("-u", dest="unequal", action="store_true",
                    help="use quadratic vertical grid spacing")
parser.add_argument("--debug", dest="debug", action="store_true",
                    help="just print commands in sequence (do not run pismv)")
parser.add_argument("--userman", dest="userman", action="store_true",
                    help="run tests necessary to produce figures in the User's Manual")
options = parser.parse_args()
# translate flags into extra pismv command-line options
extra_options = ""
if options.eta:
    extra_options += " -eta"
if options.unequal:
    extra_options += " -z_spacing quadratic"
if options.report_file:
    extra_options += " -report_file %s" % options.report_file
# MPI launcher prefix (only when more than one processor requested)
predo = ""
if options.n > 1:
    predo = "%s %d " % (options.mpido, options.n)
exec_prefix = predo + options.prefix
# solver tolerances used by the SSA-based tests
KSPRTOL = 1e-12 # for tests I, J, M
SSARTOL = 5e-7 # ditto
tests = define_refinement_paths(KSPRTOL, SSARTOL)
userman_tests = ["B_userman", "G_userman", "K_userman", "I_userman"]
if options.userman:
    # User's Manual mode runs a fixed list of tests over *all* their levels
    print "# VFNOW.PY: test(s) %s, using '%s...'\n" % (userman_tests, exec_prefix) + \
        "# and ignoring options -t and -l"
    for test in userman_tests:
        N = len(tests[test].Mx)
        for j in range(1, N + 1):
            run_test(exec_prefix, test, j, extra_options,
                     options.debug)
else:
    print "# VFNOW.PY: test(s) %s, %d refinement level(s), using '%s...'" % (
        options.tests, options.levels, exec_prefix)
    for test in options.tests:
        for j in range(1, options.levels + 1):
            run_test(exec_prefix, test, j, extra_options,
                     options.debug)
|
talbrecht/pism_pik07
|
test/vfnow.py
|
Python
|
gpl-3.0
| 13,396
|
[
"NetCDF"
] |
7ab58d5db6a27164a1d12e1110aa7051669cfb10a8969acebe54e810218e0a5c
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import collections
from typing import Dict, List, Union
import numpy as np
from qcelemental.models import AtomicInput
import qcengine as qcng
from psi4 import core
from psi4.driver import p4util
from psi4.driver import driver_findif
from psi4.driver.p4util.exceptions import ValidationError
# Ordered map: dispersion engine -> dispersion levels it can compute.
# Insertion order matters: for a given level, the first capable engine
# listed here becomes the default (see EmpiricalDispersion.__init__).
_engine_can_do = collections.OrderedDict([('libdisp', ['d1', 'd2', 'chg', 'das2009', 'das2010']),
                                          ('dftd3', ['d2', 'd3zero', 'd3bj', 'd3mzero', 'd3mbj']),
                                          ('nl', ['nl']),
                                          ('mp2d', ['dmp2']),
                                          ("dftd4", ["d4bjeeqatm"]),
                                          ]) # yapf: disable
# Inverse map: dispersion level -> engines able to compute it, preserving
# the preference order established by _engine_can_do.
_capable_engines_for_disp = collections.defaultdict(list)
for eng, disps in _engine_can_do.items():
    for disp in disps:
        _capable_engines_for_disp[disp].append(eng)
class EmpiricalDispersion(object):
    """Lightweight unification of empirical dispersion calculation modes.

    Attributes
    ----------
    dashlevel : str
        {'d1', 'd2', 'd3zero', 'd3bj', 'd3mzero', 'd3mbj', 'chg', 'das2009', 'das2010', 'nl', 'dmp2', "d4bjeeqatm"}
        Name of dispersion correction to be applied. Resolved
        from `name_hint` and/or `level_hint` into a key of
        `empirical_dispersion_resources.dashcoeff`.
    dashparams : dict
        Complete set of parameter values defining the flexible parts
        of :py:attr:`dashlevel`. Number and parameter names vary by
        :py:attr:`dashlevel`. Resolved into a complete set (keys of
        dashcoeff[dashlevel]['default']) from `name_hint` and/or
        `dashcoeff_supplement` and/or user `param_tweaks`.
    fctldash : str
        If :py:attr:`dashparams` for :py:attr:`dashlevel` corresponds to a defined,
        named, untweaked "functional-dashlevel" set, then that
        functional. Otherwise, empty string.
    description : str
        Tagline for dispersion :py:attr:`dashlevel`.
    dashlevel_citation : str
        Literature reference for dispersion :py:attr:`dashlevel` in general,
        *not necessarily* for :py:attr:`dashparams`.
    dashparams_citation : str
        Literature reference for dispersion parameters, if :py:attr:`dashparams`
        corresponds to a defined, named, untweaked "functional-dashlevel"
        set with a citation. Otherwise, empty string.
    dashcoeff_supplement : dict
        See description in `qcengine.programs.empirical_dispersion_resources.from_arrays`. Used
        here to "bless" the dispersion definitions attached to
        the procedures/dft/<rung>_functionals-defined dictionaries
        as legit, non-custom, and of equal validity to
        `qcengine.programs.empirical_dispersion_resources.dashcoeff` itself for purposes of
        validating :py:attr:`fctldash`.
    engine : str
        {'libdisp', 'dftd3', 'nl', 'mp2d', "dftd4"}
        Compute engine for dispersion. One of Psi4's internal libdisp
        library, external Grimme or Beran projects, or nl.
    disp : Dispersion
        Only present for :py:attr:`engine` `=libdisp`. Psi4 class instance prepared
        to compute dispersion.
    ordered_params : list
        Fixed-order list of relevant parameters for :py:attr:`dashlevel`. Matches
        :rst:psivar:`DISPERSION CORRECTION ENERGY` ordering. Used for printing.

    Parameters
    ----------
    name_hint
        Name of functional (func only, func & disp, or disp only) for
        which to compute dispersion (e.g., blyp, BLYP-D2, blyp-d3bj,
        blyp-d3(bj), hf+d). Any or all parameters initialized from
        ``dashcoeff[dashlevel][functional-without-dashlevel]`` or
        ``dashcoeff_supplement[dashlevel][functional-with-dashlevel]``
        can be overwritten via `param_tweaks`.
    level_hint
        Name of dispersion correction to be applied (e.g., d, D2,
        d3(bj), das2010). Must be key in `dashcoeff` or "alias" or
        "formal" to one.
    param_tweaks
        Values for the same keys as `dashcoeff[dashlevel]['default']`
        (and same order if list) used to override any or all values
        initialized by `name_hint`. Extra parameters will error.
    engine
        Override which code computes dispersion. See above for allowed
        values. Really only relevant for -D2, which can be computed by
        libdisp or dftd3.
    """

    def __init__(self, *, name_hint: str = None, level_hint: str = None, param_tweaks: Union[Dict, List] = None, engine: str = None, save_pairwise_disp=False):
        from .dft import dashcoeff_supplement
        self.dashcoeff_supplement = dashcoeff_supplement
        self.save_pairwise_disp = save_pairwise_disp
        # Resolve hints + tweaks into canonical (fctldash, dashlevel, dashparams).
        resolved = qcng.programs.empirical_dispersion_resources.from_arrays(
            name_hint=name_hint,
            level_hint=level_hint,
            param_tweaks=param_tweaks,
            dashcoeff_supplement=self.dashcoeff_supplement)
        self.fctldash = resolved['fctldash']
        self.dashlevel = resolved['dashlevel']
        self.dashparams = resolved['dashparams']
        self.description = qcng.programs.empirical_dispersion_resources.dashcoeff[self.dashlevel]['description']
        self.ordered_params = qcng.programs.empirical_dispersion_resources.dashcoeff[self.dashlevel]['default'].keys()
        self.dashlevel_citation = qcng.programs.empirical_dispersion_resources.dashcoeff[self.dashlevel]['citation']
        self.dashparams_citation = resolved['dashparams_citation']
        # Choose engine: caller's choice if capable of this dashlevel,
        # otherwise the first capable engine in _engine_can_do order.
        if engine is None:
            self.engine = _capable_engines_for_disp[self.dashlevel][0]
        else:
            if self.dashlevel in _engine_can_do[engine]:
                self.engine = engine
            else:
                raise ValidationError("""This little engine ({}) can't ({})""".format(engine, self.dashlevel))
        # libdisp runs in-process; prepare the Psi4 Dispersion object now.
        if self.engine == 'libdisp':
            self.disp = core.Dispersion.build(self.dashlevel, **resolved['dashparams'])

    def print_out(self):
        """Format dispersion parameters of `self` for output file."""
        text = []
        text.append(" => {}: Empirical Dispersion <=".format(
            (self.fctldash.upper() if self.fctldash.upper() else 'Custom')))
        text.append('')
        text.append(self.description)
        text.append(self.dashlevel_citation.rstrip())
        if self.dashparams_citation:
            text.append(" Parametrisation from:{}".format(self.dashparams_citation.rstrip()))
        text.append('')
        # parameters printed in the fixed dashcoeff 'default' key order
        for op in self.ordered_params:
            text.append(" %6s = %14.6f" % (op, self.dashparams[op]))
        text.append('\n')
        core.print_out('\n'.join(text))

    def compute_energy(self, molecule: core.Molecule, wfn: core.Wavefunction = None) -> float:
        """Compute dispersion energy based on engine, dispersion level, and parameters in `self`.

        Parameters
        ----------
        molecule
            System for which to compute empirical dispersion correction.
        wfn
            Location to set QCVariables

        Returns
        -------
        float
            Dispersion energy [Eh].

        Notes
        -----
        :psivar:`DISPERSION CORRECTION ENERGY`
            Disp always set. Overridden in SCF finalization, but that only changes for "-3C" methods.
        :psivar:`fctl DISPERSION CORRECTION ENERGY`
            Set if :py:attr:`fctldash` nonempty.
        """
        if self.engine in ['dftd3', 'mp2d', "dftd4"]:
            # External engines are driven through QCEngine on an AtomicInput.
            resi = AtomicInput(
                **{
                    'driver': 'energy',
                    'model': {
                        'method': self.fctldash,
                        'basis': '(auto)',
                    },
                    'keywords': {
                        'level_hint': self.dashlevel,
                        'params_tweaks': self.dashparams,
                        'dashcoeff_supplement': self.dashcoeff_supplement,
                        'pair_resolved': self.save_pairwise_disp,
                        'verbose': 1,
                    },
                    'molecule': molecule.to_schema(dtype=2),
                    'provenance': p4util.provenance_stamp(__name__),
                })
            jobrec = qcng.compute(
                resi,
                self.engine,
                raise_error=True,
                local_options={"scratch_directory": core.IOManager.shared_object().get_default_path()})
            dashd_part = float(jobrec.extras['qcvars']['DISPERSION CORRECTION ENERGY'])
            if wfn is not None:
                for k, qca in jobrec.extras['qcvars'].items():
                    if ("CURRENT" not in k) and ("PAIRWISE" not in k):
                        wfn.set_variable(k, float(qca) if isinstance(qca, str) else qca)
                # Pass along the pairwise dispersion decomposition if we need it
                if self.save_pairwise_disp is True:
                    wfn.set_variable("PAIRWISE DISPERSION CORRECTION ANALYSIS",
                                     jobrec.extras['qcvars']["2-BODY PAIRWISE DISPERSION CORRECTION ANALYSIS"])
            # "-3C" methods add a geometric counterpoise (gcp) term computed
            # from the same input.
            if self.fctldash in ['hf3c', 'pbeh3c']:
                jobrec = qcng.compute(
                    resi,
                    "gcp",
                    raise_error=True,
                    local_options={"scratch_directory": core.IOManager.shared_object().get_default_path()})
                gcp_part = jobrec.return_result
                dashd_part += gcp_part
            return dashd_part
        else:
            # libdisp: computed in-process by the prepared core.Dispersion.
            ene = self.disp.compute_energy(molecule)
            core.set_variable('DISPERSION CORRECTION ENERGY', ene)
            if self.fctldash:
                core.set_variable(f"{self.fctldash} DISPERSION CORRECTION ENERGY", ene)
            return ene

    def compute_gradient(self,
                         molecule: core.Molecule,
                         wfn: core.Wavefunction = None) -> core.Matrix:
        """Compute dispersion gradient based on engine, dispersion level, and parameters in `self`.

        Parameters
        ----------
        molecule
            System for which to compute empirical dispersion correction.
        wfn
            Location to set QCVariables

        Returns
        -------
        Matrix
            (nat, 3) dispersion gradient [Eh/a0].
        """
        if self.engine in ['dftd3', 'mp2d', "dftd4"]:
            # External engines are driven through QCEngine on an AtomicInput.
            resi = AtomicInput(
                **{
                    'driver': 'gradient',
                    'model': {
                        'method': self.fctldash,
                        'basis': '(auto)',
                    },
                    'keywords': {
                        'level_hint': self.dashlevel,
                        'params_tweaks': self.dashparams,
                        'dashcoeff_supplement': self.dashcoeff_supplement,
                        'verbose': 1,
                    },
                    'molecule': molecule.to_schema(dtype=2),
                    'provenance': p4util.provenance_stamp(__name__),
                })
            jobrec = qcng.compute(
                resi,
                self.engine,
                raise_error=True,
                local_options={"scratch_directory": core.IOManager.shared_object().get_default_path()})
            dashd_part = core.Matrix.from_array(jobrec.extras['qcvars']['DISPERSION CORRECTION GRADIENT'])
            if wfn is not None:
                for k, qca in jobrec.extras['qcvars'].items():
                    if "CURRENT" not in k:
                        wfn.set_variable(k, float(qca) if isinstance(qca, str) else qca)
            # "-3C" methods add the gcp gradient on top.
            if self.fctldash in ['hf3c', 'pbeh3c']:
                jobrec = qcng.compute(
                    resi,
                    "gcp",
                    raise_error=True,
                    local_options={"scratch_directory": core.IOManager.shared_object().get_default_path()})
                gcp_part = core.Matrix.from_array(jobrec.return_result)
                dashd_part.add(gcp_part)
            return dashd_part
        else:
            return self.disp.compute_gradient(molecule)

    def compute_hessian(self,
                        molecule: core.Molecule,
                        wfn: core.Wavefunction = None) -> core.Matrix:
        """Compute dispersion Hessian based on engine, dispersion level, and parameters in `self`.
        Uses finite difference, as no dispersion engine has analytic second derivatives.

        Parameters
        ----------
        molecule
            System for which to compute empirical dispersion correction.
        wfn
            Location to set QCVariables

        Returns
        -------
        Matrix
            (3*nat, 3*nat) dispersion Hessian [Eh/a0/a0].
        """
        optstash = p4util.OptionsState(['PRINT'], ['PARENT_SYMMETRY'])
        core.set_global_option('PRINT', 0)
        core.print_out("\n\n Analytical Dispersion Hessians are not supported by any engine.\n")
        core.print_out(" Computing the Hessian through finite difference of gradients.\n\n")
        # Setup the molecule
        molclone = molecule.clone()
        molclone.reinterpret_coordentry(False)
        molclone.fix_orientation(True)
        molclone.fix_com(True)
        # Record undisplaced symmetry for projection of displaced point groups
        core.set_global_option("PARENT_SYMMETRY", molecule.schoenflies_symbol())
        findif_meta_dict = driver_findif.hessian_from_gradients_geometries(molclone, -1)
        # Evaluate the dispersion gradient at every displaced geometry.
        for displacement in findif_meta_dict["displacements"].values():
            geom_array = np.reshape(displacement["geometry"], (-1, 3))
            molclone.set_geometry(core.Matrix.from_array(geom_array))
            molclone.update_geometry()
            displacement["gradient"] = self.compute_gradient(molclone).np.ravel().tolist()
        H = driver_findif.assemble_hessian_from_gradients(findif_meta_dict, -1)
        if wfn is not None:
            wfn.set_variable('DISPERSION CORRECTION HESSIAN', H)
        optstash.restore()
        return core.Matrix.from_array(H)
|
jturney/psi4
|
psi4/driver/procrouting/empirical_dispersion.py
|
Python
|
lgpl-3.0
| 15,011
|
[
"Psi4"
] |
d4f6bbafd682b5fde527a91263e0d313ba6e54322d69f63f7bdb06d8ca7141dc
|
import lan
import copy
import stringstream
import exchange
import collect_device as cd
import collect_gen as cg
import collect_id as ci
import collect_array as ca
import cgen
def print_dict_sorted(mydict):
    """Render *mydict* as a dict literal with keys in sorted order.

    Matches the historical format: no space between entries, values
    rendered with repr().
    """
    rendered = ["'%s': %r" % (key, mydict[key]) for key in sorted(mydict)]
    return "{" + ",".join(rendered) + "}"
class SnippetGen(object):
    """Rewrites a host-side AST into an OpenCL kernel string stream."""

    def __init__(self, ast):
        self.KernelStringStream = list()
        self.ast = ast

    def generate_kernel_ss(self, ast, kernelstringname):
        """Rewrite *ast* for the device and wrap it as a kernel string stream."""
        self.rewrite_to_device_c_release(ast)
        ss_generator = stringstream.SSGenerator()
        ss_generator.create_kernel_string_stream(ast, kernelstringname)
        return ss_generator.newast

    def in_source_kernel(self, ast, filename, kernelstringname):
        """Emit the kernel string stream as C source into *filename*."""
        kernel_ss_ast = self.generate_kernel_ss(ast, kernelstringname)
        cgen.CGenerator().write_ast_to_file(kernel_ss_ast, filename=filename)

    def rewrite_to_device_c_release(self, ast):
        """Replace *ast*'s contents with a device-ready kernel definition."""
        signature_args = self._create_arg_list()
        self._swap_local_array_id()
        kernel_body = self._create_kernel()
        kernel_typeid = self._create_function_name()
        wrapper = NewAST()
        wrapper.add_list_statement(copy.deepcopy(cd.get_includes(ast)))
        # fp64 pragma is required whenever any kernel argument is double
        if self._arg_has_type_double(signature_args):
            wrapper.enable_double_precision()
        wrapper.add_statement(lan.FuncDecl(kernel_typeid, lan.ArgList(signature_args), kernel_body))
        ast.ext = list()
        ast.ext.append(wrapper.ast)

    def _create_arg_list(self):
        """Build the kernel signature from the collected kernel arguments."""
        kernel_args = cg.get_kernel_args(self.ast)
        signature = list()
        for arg_name in sorted(kernel_args):
            arg_type = copy.deepcopy(kernel_args[arg_name])
            # OpenCL C has no size_t in kernel signatures
            if arg_type[0] == 'size_t':
                arg_type[0] = 'unsigned'
            # pointer arguments live in __global memory
            if len(arg_type) == 2:
                arg_type.insert(0, '__global')
            signature.append(lan.TypeId(arg_type, lan.Id(arg_name)))
        return signature

    def _swap_local_array_id(self):
        """Rewrite loop-array references through the local-swap mapping."""
        swapper = exchange.ExchangeArrayId(ci.get_local_swap(self.ast))
        for array_refs in ca.get_loop_arrays(self.ast).values():
            for ref in array_refs:
                swapper.visit(ref)

    def _create_kernel(self):
        """Produce the kernel body with device-side indices and types."""
        kernel = copy.deepcopy(cd.get_kernel(self.ast))
        exchange.RewriteArrayRef(ca.get_num_array_dims(self.ast),
                                 cg.get_array_id_to_dim_name(self.ast)).visit(kernel)
        idx_collector = cg.GenIdxToThreadId()
        idx_collector.collect(self.ast)
        # map loop indices onto OpenCL thread ids
        exchange.ExchangeId(idx_collector.IndexToThreadId).visit(kernel)
        exchange.ExchangeTypes().visit(kernel)
        return kernel

    def _create_function_name(self):
        """Return the device function's TypeId with the __kernel qualifier."""
        finder = cd.FindFunction()
        finder.visit(self.ast)
        kernel_typeid = copy.deepcopy(finder.typeid)
        kernel_typeid.type.insert(0, '__kernel')
        return kernel_typeid

    def _arg_has_type_double(self, arglist):
        """True if any argument in *arglist* is (or points to) double."""
        def _is_double(type_tokens):
            if len(type_tokens) == 3:
                return type_tokens[1] == 'double'
            return type_tokens[0] == 'double'
        return any(_is_double(arg.type) for arg in arglist)
class NewAST(object):
    """Thin builder around a lan.FileAST whose body list is kept handy."""

    def __init__(self):
        self.ext = list()
        self.ast = lan.FileAST(self.ext)

    def add_list_statement(self, statement):
        """Append every statement of the iterable to the file body."""
        self.ext.extend(statement)

    def add_statement(self, statement):
        """Append a single statement to the file body."""
        self.ext.append(statement)

    def enable_double_precision(self):
        """Prepend the OpenCL fp64 extension pragma to the file body."""
        pragma = lan.Id("#pragma OPENCL EXTENSION cl_khr_fp64: enable")
        self.ext.insert(0, lan.Compound([pragma]))
|
dikujepsen/OpenTran
|
v2.0/framework/Matmul/snippetgen.py
|
Python
|
mit
| 4,161
|
[
"VisIt"
] |
b3ceac8ff4ddce02c2dea47b520431607246db8ecffaa075a145342cfed2ae7a
|
#!/usr/bin/env python
"""
Module for various statistics utilities.
"""
import copy
from collections import OrderedDict as odict
import numpy as np
import numpy.lib.recfunctions as recfuncs
import scipy.special
import scipy.stats
# These should probably live in this file
from ugali.utils.bayesian_efficiency import bayesianInterval, binomialInterval
from ugali.utils import mlab
_alpha = 0.32
_nbins = 300
_npoints = 500
def mad_clip(data, mad=None, mad_lower=None, mad_upper=None):
    """Clip *data* to a window around its median.

    Bug fix: the previous version unconditionally overwrote the `mad`
    parameter with the computed median absolute deviation (so the
    `if mad is not None` check was always true) and then returned None,
    making the function a no-op.

    Parameters
    ----------
    data : array-like 1d sample
    mad : half-width to use for both bounds; computed as the median
        absolute deviation of *data* when not given
    mad_lower : lower half-width (defaults to *mad*)
    mad_upper : upper half-width (defaults to *mad*)

    Returns
    -------
    ndarray of the elements within [median - mad_lower, median + mad_upper].
    """
    data = np.asarray(data)
    med = np.median(data)
    if mad is None:
        mad = np.median(np.fabs(med - data))
    if mad_lower is None:
        mad_lower = mad
    if mad_upper is None:
        mad_upper = mad
    keep = (data >= med - mad_lower) & (data <= med + mad_upper)
    return data[keep]
def interval(best, lo=np.nan, hi=np.nan):
    """
    Pythonized interval for easy output to yaml
    """
    best, lo, hi = (float(v) for v in (best, lo, hi))
    return [best, [lo, hi]]
def mean_interval(data, alpha=_alpha):
    """
    Gaussian credible interval: mean +/- z(alpha) * std.
    """
    mu = np.mean(data)
    halfwidth = scipy.stats.norm.ppf(1 - alpha / 2.) * np.std(data)
    return interval(mu, mu - halfwidth, mu + halfwidth)
def median_interval(data, alpha=_alpha):
    """
    Median with equal-tailed bayesian credible interval from percentiles.
    """
    lo, med, hi = np.percentile(data, [100 * alpha / 2., 50, 100 * (1 - alpha / 2.)])
    return interval(med, lo, hi)
def peak(data, bins=_nbins):
    """
    Bin the distribution and find the mode

    Parameters:
    -----------
    data : The 1d data sample
    bins : Number of bins

    Returns
    -------
    peak : center of the fullest histogram bin
    """
    counts, edges = np.histogram(data, bins=bins)
    mids = (edges[1:] + edges[:-1]) / 2.
    return mids[np.argmax(counts)]
def kde_peak(data, npoints=_npoints, clip=5.0):
    """
    Identify the sample mode with a Gaussian kernel density estimator.

    Parameters:
    -----------
    data : The 1d data sample
    npoints : The number of kde points to evaluate
    clip : NMAD to clip

    Returns
    -------
    peak : peak of the kde
    """
    mode, _density = kde(data, npoints, clip)
    return mode
def kde(data, npoints=_npoints, clip=5.0):
    """
    Gaussian kernel density estimate of the sample mode.

    Parameters:
    -----------
    data : The 1d data sample
    npoints : The number of kde points to evaluate
    clip : NMAD to clip

    Returns
    -------
    (peak, density at peak)
    """
    # Clip severe outliers so the KDE samples concentrate in the
    # parameter range of interest.
    nmad = np.median(np.fabs(np.median(data) - data))
    if clip > 0:
        keep = (data > np.median(data) - clip * nmad)
        keep &= (data < np.median(data) + clip * nmad)
        sample = data[keep]
    else:
        sample = data
    estimator = scipy.stats.gaussian_kde(sample)
    # A finer evaluation grid costs only computation time.
    grid = np.linspace(np.min(sample), np.max(sample), npoints)
    mode = grid[np.argmax(estimator.evaluate(grid))]
    return mode, estimator.evaluate(mode)
def peak_interval(data, alpha=_alpha, npoints=_npoints):
    """Minimum-width interval containing the KDE peak of the posterior.

    Parameters
    ----------
    data : the 1d data sample
    alpha : the confidence interval
    npoints: number of kde points to evaluate

    Returns
    -------
    interval : the minimum interval containing the peak
    """
    mode = kde_peak(data, npoints)
    x = np.sort(data.flat)
    n = len(x)
    # number of sample points each candidate interval must contain
    window = int(np.rint((1.0 - alpha) * n))
    # all candidate intervals, then keep only those containing the peak
    starts, ends = x[:n - window], x[window:]
    contains_peak = (mode >= starts) & (mode <= ends)
    widths = (ends - starts)[contains_peak]
    if len(widths) == 0:
        raise ValueError('Too few elements for interval calculation')
    narrowest = np.argmin(widths)
    lo = starts[contains_peak][narrowest]
    hi = ends[contains_peak][narrowest]
    return interval(mode, lo, hi)
def min_interval(data, alpha=_alpha):
    """Shortest interval containing a 1-alpha fraction of the posterior.

    Note: the interval is *not* required to contain the peak of the
    posterior; the reported best value is the interval center.

    Parameters
    ----------
    data : the 1d data sample
    alpha : the confidence interval

    Returns
    -------
    interval : the minimum interval
    """
    x = np.sort(data.flat)
    n = len(x)
    # number of sample points the interval must contain
    window = int(np.rint((1.0 - alpha) * n))
    starts, ends = x[:n - window], x[window:]
    widths = ends - starts
    if len(widths) == 0:
        raise ValueError('Too few elements for interval calculation')
    narrowest = np.argmin(widths)
    lo, hi = starts[narrowest], ends[narrowest]
    return interval((hi + lo) / 2., lo, hi)
def norm_cdf(x):
    """Standard normal CDF via erf; faster than scipy.stats.norm.cdf.

    https://en.wikipedia.org/wiki/Normal_distribution
    """
    return (1 + scipy.special.erf(x / np.sqrt(2))) * 0.5
def random_pdf(value, pdf, size=None):
    """Draw random samples from a tabulated 1-d pdf by inverse transform.

    Parameters
    ----------
    value : array of sample values (same length as *pdf*)
    pdf : unnormalized probability for each value
    size : number of draws (defaults to 1)

    Returns
    -------
    Array of entries of *value* drawn with probability ~ *pdf*.

    Notes
    -----
    Fix: the previous version called ``scipy.interpolate.interp1d`` even
    though this module never imports ``scipy.interpolate``, and interp1d
    raised ValueError whenever a uniform draw fell below the first cdf
    value.  ``np.interp`` performs the same linear interpolation but
    clamps at the boundaries.
    """
    if size is None:
        size = 1
    value = np.asarray(value)
    cdf = np.cumsum(pdf)
    # normalize without in-place division (avoids dtype issues for int pdf)
    cdf = cdf / float(cdf[-1])
    draws = np.random.uniform(size=size)
    index = np.rint(np.interp(draws, cdf, np.arange(len(cdf)))).astype(int)
    return value[index]
def sky(lon=None, lat=None, size=1):
    """
    Sample points uniformly on the sphere, optionally restricted to
    [0 < lon < 360] & [-90 < lat < 90] (scalar pins the value, a pair
    gives a range).  Returns (lon, lat) in degrees.
    """
    if lon is None:
        umin, umax = 0, 1
    else:
        lon = np.asarray(lon)
        lon = np.radians(lon + 360. * (lon < 0))
        if lon.size == 1:
            umin = umax = lon / (2 * np.pi)
        elif lon.size == 2:
            umin, umax = lon / (2 * np.pi)
        else:
            raise Exception('...')
    if lat is None:
        vmin, vmax = -1, 1
    else:
        lat = np.asarray(lat)
        lat = np.radians(90 - lat)
        if lat.size == 1:
            vmin = vmax = np.cos(lat)
        elif lat.size == 2:
            vmin, vmax = np.cos(lat)
        else:
            raise Exception('...')
    # azimuth drawn uniformly; polar angle via arcsin for uniform area
    azimuth = 2 * np.pi * np.random.uniform(umin, umax, size=size)
    polar = np.arcsin(np.random.uniform(vmin, vmax, size=size))
    return np.degrees(azimuth), np.degrees(polar)
class Samples(np.recarray):
"""
Wrapper class for recarray to deal with MCMC samples.
A nice summary of various bayesian credible intervals can be found here:
http://www.sumsar.net/blog/2014/10/probable-points-and-credible-intervals-part-one/
"""
_alpha = 0.10
_nbins = 300
_npoints = 250
def __new__(cls, input, names=None):
# Load the array from file
if not isinstance(input,np.ndarray):
obj = np.load(input).view(cls)
else:
obj = np.asarray(input).view(cls)
# (re)set the column names
if names is not None:
if obj.dtype.names is None:
obj = np.rec.fromarrays(obj,names=names).view(cls)
else:
obj.dtype.names = names
return obj
def __array_wrap__(self, out_arr, context=None):
return np.ndarray.__array_wrap__(self,out_arr,context)
@property
def names(self):
return self.dtype.names
@property
def ndarray(self):
# atleast_2d is for
if len(self.dtype) == 1:
return np.expand_dims(self.view((float,len(self.dtype))),1)
else:
return self.view((float,len(self.dtype)))
def supplement(self,coordsys='gal'):
""" Add some supplemental columns """
from ugali.utils.projector import gal2cel, gal2cel_angle
from ugali.utils.projector import cel2gal, cel2gal_angle
coordsys = coordsys.lower()
kwargs = dict(usemask=False, asrecarray=True)
out = copy.deepcopy(self)
if ('lon' in out.names) and ('lat' in out.names):
# Ignore entries that are all zero
zeros = np.all(self.ndarray==0,axis=1)
if coordsys == 'gal':
ra,dec = gal2cel(out.lon,out.lat)
glon,glat = out.lon,out.lat
else:
ra,dec = out.lon,out.lat
glon,glat = cel2gal(out.lon,out.lat)
ra[zeros] = 0; dec[zeros] = 0
glon[zeros] = 0; glat[zeros] = 0
names = ['ra','dec','glon','glat']
arrs = [ra,dec,glon,glat]
out = mlab.rec_append_fields(out,names,arrs).view(Samples)
#out = recfuncs.append_fields(out,names,arrs,**kwargs).view(Samples)
if 'position_angle' in out.names:
if coordsys == 'gal':
pa_gal = out.position_angle
pa_cel = gal2cel_angle(out.lon,out.lat,out.position_angle)
pa_cel = pa_cel - 180.*(pa_cel > 180.)
else:
pa_gal = cel2gal_angle(out.lon,out.lat,out.position_angle)
pa_cel = out.position_angle
pa_gal = pa_gal - 180.*(pa_gal > 180.)
pa_gal[zeros] = 0; pa_cel[zeros] = 0
names = ['position_angle_gal','position_angle_cel']
arrs = [pa_gal,pa_cel]
out = recfuncs.append_fields(out,names,arrs,**kwargs).view(Samples)
return out
def get(self, names=None, burn=None, clip=None):
if names is None: names = list(self.dtype.names)
names = np.array(names,ndmin=1)
missing = names[~np.in1d(names,self.dtype.names)]
if len(missing):
msg = "field(s) named %s not found"%(missing)
raise ValueError(msg)
#idx = np.where(np.in1d(self.dtype.names,names))[0]
idx = np.array([self.dtype.names.index(n) for n in names])
# Remove zero entries
zsel = ~np.all(self.ndarray==0,axis=1)
# Remove burn entries
bsel = np.zeros(len(self),dtype=bool)
bsel[slice(burn,None)] = 1
data = self.ndarray[:,idx][bsel&zsel]
if clip is not None:
from astropy.stats import sigma_clip
mask = sigma_clip(data,sigma=clip,copy=False,axis=0).mask
data = data[np.where(~mask.any(axis=1))]
return data
@classmethod
def _interval(cls, best, lo, hi):
    """Pythonize an interval (best value plus bounds) for easy yaml output."""
    return interval(best, lo, hi)
def mean(self, name, **kwargs):
    """Arithmetic mean of the sampled distribution of `name`."""
    data = self.get(name, **kwargs)
    return np.mean(data)
def mean_interval(self, name, alpha=_alpha, **kwargs):
    """Confidence interval assuming a Gaussian posterior."""
    return mean_interval(self.get(name, **kwargs), alpha)
def median(self, name, **kwargs):
    """Median (50th percentile) of the distribution."""
    return np.percentile(self.get(name, **kwargs), [50])
def median_interval(self, name, alpha=_alpha, **kwargs):
    """Median together with the Bayesian credible interval."""
    return median_interval(self.get(name, **kwargs), alpha)
def peak(self, name, bins=_nbins, **kwargs):
    """Histogram-based peak of the distribution."""
    return peak(self.get(name, **kwargs), bins=bins)
def kde_peak(self, name, npoints=_npoints, **kwargs):
    """Peak of the kernel density estimate of the parameter."""
    return kde_peak(self.get(name, **kwargs), npoints)
def kde(self, name, npoints=_npoints, **kwargs):
    """Kernel density estimate of the parameter distribution."""
    return kde(self.get(name, **kwargs), npoints)
def peak_interval(self, name, alpha=_alpha, npoints=_npoints, **kwargs):
    """Credible interval centered on the KDE peak of the parameter."""
    return peak_interval(self.get(name, **kwargs), alpha, npoints)
def min_interval(self, name, alpha=_alpha, **kwargs):
    """Minimum-width credible interval of the parameter."""
    return min_interval(self.get(name, **kwargs), alpha)
def results(self, names=None, alpha=_alpha, mode='peak', **kwargs):
    """Calculate `<mode>_interval` for each requested parameter.

    Returns an ordered dict mapping parameter name -> interval.
    """
    if names is None:
        names = self.names
    interval_method = '%s_interval' % mode
    out = odict()
    for parameter in names:
        out[parameter] = getattr(self, interval_method)(parameter, **kwargs)
    return out
if __name__ == "__main__":
import argparse
description = "python script"
parser = argparse.ArgumentParser(description=description)
parser.add_argument('args',nargs=argparse.REMAINDER)
opts = parser.parse_args(); args = opts.args
import pylab as plt
ax=plt.subplot(221,projection='aitoff')
ax.grid(True)
lon,lat = sky(size=1e3)
lon,lat= np.radians([lon-360.*(lon>180),lat])
ax.scatter(lon,lat,marker='.',s=2)
ax=plt.subplot(222,projection='aitoff')
ax.grid(True)
lon,lat = sky(size=1e3,lat=[30,45])
lon,lat= np.radians([lon-360.*(lon>180),lat])
ax.scatter(lon,lat,marker='.',s=2)
ax=plt.subplot(223,projection='aitoff')
ax.grid(True)
lon,lat = sky(size=1e3,lon=[30,45])
lon,lat= np.radians([lon-360.*(lon>180),lat])
ax.scatter(lon,lat,marker='.',s=2)
ax=plt.subplot(224,projection='aitoff')
ax.grid(True)
lon,lat = sky(size=1e3,lon=[0,45],lat=[30,45])
lon,lat= np.radians([lon-360.*(lon>180),lat])
ax.scatter(lon,lat,marker='.',s=2)
|
DarkEnergySurvey/ugali
|
ugali/utils/stats.py
|
Python
|
mit
| 13,490
|
[
"Gaussian"
] |
c4f3f2ec70fe7f621b063d4498cf88ba59e99379e04b1f8e8d4eb83649ae40a0
|
"""
General-purpose functions that map R -> [0,1].
These functions work as closures.
The inner function uses the variables of the outer function.
These functions work in two steps: prime and call.
In the first step the function is constructed, initialized and
constants pre-evaluated. In the second step the actual value
is passed into the function, using the arguments of the first step.
Definitions
-----------
These functions are used to determine the *membership* of a value x in a fuzzy-
set. Thus, the 'height' is the variable 'm' in general.
In a normal set there is at least one m with m == 1. This is the default.
In a non-normal set, the global maximum and minimum is skewed.
The following definitions are for normal sets.
The intervals with non-zero m are called 'support', short s_m
The intervals with m == 1 are called 'core', short c_m
The intervals with max(m) are called 'height'.
The intervals m != 1 and m != 0 are called 'boundary'.
The intervals with m == 0 are called 'unsupported', short no_m
In a fuzzy set with one and only one m == 1, this element is called 'prototype'.
"""
from math import exp, log, sqrt, isinf, isnan
#####################
# SPECIAL FUNCTIONS #
#####################
def inv(g):
    """Invert the given function within the unit-interval.

    For sets, the ~ operator uses this. It is equivalent to the
    TRUTH value of FALSE.
    """
    def inverted(x):
        return 1 - g(x)
    return inverted
def noop():
    """Return a function that passes its argument through unchanged.

    Useful for testing.
    """
    def identity(x):
        return x
    return identity
def constant(c):
    """Return always the same value, no matter the input.

    Useful for testing.

    >>> f = constant(1)
    >>> f(0)
    1
    """
    def always(x):
        return c
    return always
def alpha(*, floor=0, ceiling=1, func,
          floor_clip=None, ceiling_clip=None):
    """Clip a function's values.

    Used to cut off the upper or lower part of a graph.
    Actually, this is more like a hedge but doesn't make sense for sets.
    """
    assert floor <= ceiling
    assert 0 <= floor
    assert ceiling <= 1

    # The clip values default to the thresholds themselves.
    if floor_clip is None:
        floor_clip = floor
    if ceiling_clip is None:
        ceiling_clip = ceiling

    def clipped(x):
        m = func(x)
        if m >= ceiling:
            return ceiling_clip
        if m <= floor:
            return floor_clip
        return m
    return clipped
def normalize(height, func):
    """Map [0,1] to [0,1] so that max(array) == 1."""
    assert 0 < height <= 1

    def scaled(x):
        return func(x) / height
    return scaled
def moderate(func):
    """Map [0,1] -> [0,1] with bias towards 0.5.

    For instance this is needed to dampen extremes.
    """
    def damped(x):
        m = func(x)
        return 1 / 2 + 4 * (m - 1 / 2) ** 3
    return damped
########################
# MEMBERSHIP FUNCTIONS #
########################
def singleton(p, *, no_m=0, c_m=1):
    """A single spike.

    >>> f = singleton(2)
    >>> f(1)
    0
    >>> f(2)
    1
    """
    assert 0 <= no_m < c_m <= 1

    def spike(x):
        # Full membership exactly at the prototype, baseline elsewhere.
        return c_m if x == p else no_m
    return spike
def linear(m: float = 0, b: float = 0) -> callable:
    """A textbook linear function with y-axis section and gradient.

    f(x) = m*x + b — but clipped to the unit interval.

    >>> f = linear(1, -1)
    >>> f(-2)   # should be -3 but clipped
    0
    >>> f(0)    # should be -1 but clipped
    0
    >>> f(1)
    0
    >>> f(1.5)
    0.5
    >>> f(2)
    1
    >>> f(3)    # should be 2 but clipped
    1
    """
    def clipped_line(x) -> float:
        y = m * x + b
        if y <= 0:
            return 0
        if y >= 1:
            return 1
        return y
    return clipped_line
def bounded_linear(low, high, *, c_m=1, no_m=0, inverse=False):
    """Variant of the linear function with gradient determined by bounds.

    The bounds determine minimum and maximum value-mappings, but also
    the gradient: [low, high] on the x-axis is mapped linearly onto
    [no_m, c_m] (swapped when ``inverse``), clipped to [0, 1] outside.

    >>> f = bounded_linear(2, 3)
    >>> f(1)
    0.0
    >>> f(2)
    0.0
    >>> f(2.5)
    0.5
    >>> f(3)
    1.0
    >>> f(4)
    1.0
    """
    assert low < high, "low must be less than high"
    assert c_m > no_m, "core_m must be greater than unsupported_m"

    if inverse:
        c_m, no_m = no_m, c_m
    gradient = (c_m - no_m) / (high - low)

    # Degenerate case found by hypothesis: gradient underflows to zero —
    # the best constant answer is the midpoint of the two memberships.
    if gradient == 0:
        def flat(x):
            return (c_m + no_m) / 2
        return flat

    # Degenerate case: infinite gradient — a step function at the center.
    if isinf(gradient):
        def step(x):
            asymptode = (high + low) / 2
            if x < asymptode:
                return no_m
            if x > asymptode:
                return c_m
            return (c_m + no_m) / 2
        return step

    def f(x):
        y = gradient * (x - low) + no_m
        if y < 0:
            return 0.
        return 1. if y > 1 else y
    return f
def R(low, high):
    """Simple alternative for bounded_linear().

    THIS FUNCTION ONLY CAN HAVE A POSITIVE SLOPE -
    USE THE S() FUNCTION FOR NEGATIVE SLOPE.
    """
    assert low < high, f"{low} >? {high}"

    def rising(x):
        # An infinite span degenerates to 0 everywhere.
        if x < low or isinf(high - low):
            return 0
        if low <= x <= high:
            return (x - low) / (high - low)
        if x > high:
            return 1
    return rising
def S(low, high):
    """Simple alternative for bounded_linear.

    THIS FUNCTION ONLY CAN HAVE A NEGATIVE SLOPE -
    USE THE R() FUNCTION FOR POSITIVE SLOPE.
    """
    assert low < high, f"{low}, {high}"

    def falling(x):
        if x <= low:
            return 1
        if low < x < high:
            # Factorized this way to avoid nan for extreme bounds.
            return high / (high - low) - x / (high - low)
        if high <= x:
            return 0
    return falling
def rectangular(low: float, high: float, *, c_m: float = 1, no_m: float = 0) -> callable:
    r"""Basic rectangular function that returns the core_y for the core else 0.

        ______
       |      |
    ___|      |___
    """
    assert low < high, f'{low}, {high}'

    def box(x: float) -> float:
        if x < low:
            return no_m
        if low <= x <= high:
            return c_m
        if high < x:
            return no_m
    return box
def triangular(low, high, *, c=None, c_m=1, no_m=0):
    r"""Basic triangular norm as combination of two linear functions.

         /\
    ____/  \___

    Parameters
    ----------
    low, high : x-values where the graph meets the baseline (no_m).
    c : x-value of the peak; defaults to the midpoint of low and high.
    c_m : membership value at the peak.
    no_m : membership value outside [low, high].

    Fix: the previous implementation ignored ``no_m`` when building the
    two slopes (hard-coded no_m=0) and, for ``c_m < 1``, the inverted
    right slope jumped to 1 at the peak instead of meeting ``c_m``.
    Both slopes now interpolate from ``no_m`` to ``c_m``; behavior with
    the default parameters (c_m=1, no_m=0) is unchanged.
    """
    assert low < high, 'low must be less than high.'
    assert no_m < c_m
    c = c if c is not None else (low + high) / 2.
    assert low < c < high, "peak must be inbetween"

    # Precompute the two gradients once (closure constants).
    left_gradient = (c_m - no_m) / (c - low)
    right_gradient = (c_m - no_m) / (high - c)

    def f(x):
        if x <= low or x >= high:
            return no_m
        if x <= c:
            # Rising edge: no_m at `low`, c_m at the peak.
            return left_gradient * (x - low) + no_m
        # Falling edge: c_m at the peak, no_m at `high`.
        return c_m - right_gradient * (x - c)
    return f
def trapezoid(low, c_low, c_high, high, *, c_m=1, no_m=0):
    r"""Combination of rectangular and triangular, for convenience.

        ____
       /    \
    __/      \___
    """
    assert low < c_low <= c_high < high
    assert 0 <= no_m < c_m <= 1

    # Gradients of the rising and falling flanks (the falling one is the
    # 'inverse' bounded_linear, i.e. core and baseline swapped).
    grad_up = (c_m - no_m) / (c_low - low)
    grad_down = (no_m - c_m) / (high - c_high)

    def f(x):
        if x < low or high < x:
            return no_m
        elif x < c_low:
            y = grad_up * (x - low) + no_m
            return 0. if y < 0 else (1. if y > 1 else y)
        elif x > c_high:
            y = grad_down * (x - c_high) + c_m
            return 0. if y < 0 else (1. if y > 1 else y)
        else:
            return c_m
    return f
def sigmoid(L, k, x0):
    """Special logistic function.

    http://en.wikipedia.org/wiki/Logistic_function

    f(x) = L / (1 + e^(-k*(x-x0)))
    with
    x0 = x-value of the midpoint
    L = the curve's maximum value
    k = steepness
    """
    # Need to be really careful here, otherwise we end up in nan-land.
    assert 0 < L <= 1, 'L invalid.'

    def logistic(x):
        if isnan(k * x):
            # e^(0*inf) is taken to be 1 here.
            o = 1
        else:
            try:
                o = exp(-k * (x - x0))
            except OverflowError:
                o = float("inf")
        return L / (1 + o)
    return logistic
def bounded_sigmoid(low, high, inverse=False):
    """
    Calculate a weight based on the sigmoid function.

    Specify the lower limit where f(x) = 0.1 and the
    upper with f(x) = 0.9 and calculate the steepness and elasticity
    based on these. We don't need the general logistic function as we
    operate on [0,1].

    core idea:
    f(x) = 1. / (1. + exp(x * (4. * log(3)) / (low - high)) *
                9 * exp(low * -(4. * log(3)) / (low - high)))

    How I got this? IIRC I was playing around with linear equations and
    boundary conditions of sigmoid funcs on wolframalpha..

    previously factored to:
    k = -(4. * log(3)) / (low - high)
    o = 9 * exp(low * k)
    return 1 / (1 + exp(-k * x) * o)

    vars
    ----
    low: x-value with f(x) = 0.1
    for x < low: m -> 0
    high: x-value with f(x) = 0.9
    for x > high: m -> 1

    >>> f = bounded_sigmoid(0, 1)
    >>> f(0)
    0.1
    >>> round(f(1), 2)
    0.9
    >>> round(f(100000), 2)
    1.0
    >>> round(f(-100000), 2)
    0.0
    """
    assert low < high, 'low must be less than high'

    # Inverting swaps the roles of the two bounds (falling instead of rising).
    if inverse:
        low, high = high, low

    # Steepness chosen so that f(low) = 0.1 and f(high) = 0.9.
    k = (4. * log(3)) / (low - high)
    try:
        # if high - low underflows to 0..
        if isinf(k):
            p = 0
        # just in case k -> 0 and low -> inf
        elif isnan(-k * low):
            p = 1
        else:
            # Precompute the constant factor once; only q depends on x.
            p = exp(-k * low)
    except OverflowError:
        p = float("inf")

    def f(x):
        try:
            # e^(0*inf) = 1 for both -inf and +inf
            if (isinf(k) and x == 0) or (k == 0 and isinf(x)):
                q = 1
            else:
                q = exp(x * k)
        except OverflowError:
            q = float("inf")

        # e^(inf)*e^(-inf) = 1
        r = p * q
        if isnan(r):
            r = 1
        return 1 / (1 + 9 * r)
    return f
def bounded_exponential(k=0.1, limit=1):
    """Function that goes through the origin and approaches a limit.

    k determines the steepness. The function is defined on [0, +inf).
    Useful for quantities that can't be below 0 but may have no upper
    bound, like temperature or time, so values are always defined.

    f(x) = limit - limit/e^(k*x)

    Again: this function assumes x >= 0; there are no checks for it!
    """
    assert limit > 0
    assert k > 0

    def saturating(x):
        try:
            return limit - limit / exp(k * x)
        except OverflowError:
            # exp() overflowed, so the subtracted term is effectively 0.
            return limit
    return saturating
def simple_sigmoid(k=0.229756):
    """Sigmoid variant with only one parameter (steepness).

    The midpoint is 0. The slope is positive for positive k and
    negative k. f(x) is within [0,1] for any real k and x.

    >>> f = simple_sigmoid()
    >>> round(f(-1000), 2)
    0.0
    >>> f(0)
    0.5
    >>> round(f(1000), 2)
    1.0
    >>> round(f(-20), 2)
    0.01
    >>> round(f(20), 2)
    0.99
    """
    def squash(x):
        # Limit case: e^(0 * inf) is defined as 1 here, giving 1/2.
        if isinf(x) and k == 0:
            return 1 / 2
        try:
            return 1 / (1 + exp(-k * x))
        except OverflowError:
            return 0.
    return squash
def triangular_sigmoid(low, high, c=None):
    """Version of triangular using sigmoids instead of linear.

    THIS FUNCTION PEAKS AT 0.9

    >>> g = triangular_sigmoid(2, 4)
    >>> g(2)
    0.1
    >>> round(g(3), 2)
    0.9
    """
    assert low < high, "low must be less than high"
    c = c if c is not None else (low + high) / 2.
    assert low < c < high, "c must be inbetween"

    rising = bounded_sigmoid(low, c)
    falling = inv(bounded_sigmoid(c, high))

    def f(x):
        # Rising sigmoid up to the peak, inverted sigmoid after it.
        return rising(x) if x <= c else falling(x)
    return f
def gauss(c, b, *, c_m=1):
    """Defined by a*e^(-b(x-x0)^2), a gaussian distribution.

    Basically a triangular sigmoid function; it comes close to human
    perception.

    vars
    ----
    c_m (a)
        defines the maximum y-value of the graph
    b
        defines the steepness
    c (x0)
        defines the symmetry center/peak of the graph
    """
    assert 0 < c_m <= 1
    assert 0 < b, "b must be greater than 0"

    def bell(x):
        try:
            distance_sq = (x - c) ** 2
        except OverflowError:
            # x is astronomically far from the center: membership is 0.
            return 0
        return c_m * exp(-b * distance_sq)
    return bell
if __name__ == "__main__":
import doctest
doctest.testmod()
|
amogorkon/fuzzy
|
src/fuzzylogic/functions.py
|
Python
|
mit
| 12,953
|
[
"Gaussian"
] |
8c25ece14e30c55f047d9abcba82c9e014166de7c372d637a0bec4cf39f0460f
|
#!/usr/bin/env python
####################################################################################################
# @file command.py
# @package
# @author
# @date 2009/01/12
# @version 0.1
#
# @mainpage
#
####################################################################################################
import os
import sys
import platform
from octopus.core.enums.command import CMD_RUNNING
## This class represents a Command for the worker
#
class Command(object):
    """Represents a single command to be executed by a worker."""

    def __init__(
        self, id, runner, arguments=None,
        validationExpression="VAL_TRUE",
        taskName="",
        relativePathToLogDir="",
        message="",
        environment=None,
        runnerPackages=None,
        watcherPackages=None
    ):
        '''
        :param id: command id
        :param runner: name of the runner used to execute this command
        :param arguments: command arguments as a dict (defaults to empty)
        :param validationExpression: -
        :param taskName: a string representing the parent task name
        :param relativePathToLogDir: relative path to log
        :param message:
        :param environment: a dict of env vars which will be added to the
            current os.environ (defaults to empty)
        :param runnerPackages: REZ packages used when starting the runner
        :param watcherPackages: REZ packages used when starting the watcher
        '''
        # Bug fix: the mutable default arguments ({}) were replaced by
        # None so a single dict is never shared between instances.
        self.status = CMD_RUNNING
        self.id = id
        self.completion = 0
        self.arguments = {} if arguments is None else arguments.copy()
        self.runner = runner
        self.validationExpression = validationExpression
        self.validatorMessage = None
        self.errorInfos = None
        self.taskName = taskName
        self.relativePathToLogDir = relativePathToLogDir
        self.message = message
        # Start from the worker's own environment and layer the
        # command-specific variables on top.
        self.environment = os.environ.copy()
        if environment:
            self.environment.update(environment)
        # REZ packages to use when starting the runner and the command watcher.
        self.runnerPackages = runnerPackages
        self.watcherPackages = watcherPackages
|
mikrosimage/OpenRenderManagement
|
src/octopus/worker/model/command.py
|
Python
|
bsd-3-clause
| 1,873
|
[
"Octopus"
] |
1a98658fd2155ea124380bc6e18ada9924e43a439a19d5c3da584b865ff7c9fc
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
from sympy import init_printing
init_printing()

import numpy as np
import sympy as sy
import matplotlib.pyplot as plt

print(f"numpy version: {np.__version__}")
print(f"sympy version: {sy.__version__}")

# Define the simple functions in terms of the location parameter.
# We define the distributions according to the scipy.stats module.
x, y = sy.symbols("x y", real=True)
c = sy.symbols("c", real=True, finite=True, positive=True)
n, N = sy.symbols("n N", integer=True, finite=True)  # MP expansion indices
pdf = sy.Function("pdf")
cdf = sy.Function("cdf")
sf = sy.Function("sf")

# For the Methfessel-Paxton distribution (Hermite polynomial expansion).
hermite = sy.functions.special.polynomials.hermite
class Distribution:
    """Container for the symbolic pieces of a smearing distribution.

    Any field left as None (pdf, cdf, sf, entropy) is derived
    symbolically later. Equality compares against the *name* string so
    an instance can be located in a list via ``list.index("name")``.
    """

    def __init__(self, name, pdf=None, cdf=None, sf=None, entropy=None):
        self.name = name
        self.pdf = pdf
        self.cdf = cdf
        self.sf = sf
        self.entropy = entropy

    def __eq__(self, name):
        # Intentionally compares to a name string, not another instance.
        return self.name == name
# Build the list of distributions to analyze. Any field left as None
# (pdf/cdf/sf/entropy) is derived symbolically in the loop below.
distributions = [
    Distribution("fd",
                 pdf=(1-1/(sy.exp(x)+1)).diff(x),
                 sf=1/(sy.exp(x)+1)),
    Distribution("mp",
                 pdf=sy.exp(-(x)**2)/sy.sqrt(sy.pi)*sy.Sum(hermite(2*n, x), (n, 0, N)),
                 cdf=1/sy.sqrt(sy.pi)*sy.Sum(
                     (sy.exp(-(x)**2)*hermite(2*n, x))
                     .integrate(x)
                     .doit(simplify=True)
                     .expand()
                     .simplify(), (n, 0, N)),
                 entropy=-1/sy.sqrt(sy.pi)*sy.Sum(
                     (sy.exp(-(x)**2)*hermite(2*n, x) * x)
                     .integrate((x, -sy.oo, y)).subs(y, x)
                     .doit(simplify=True)
                     .expand()
                     .simplify(), (n, 0, N))),
    Distribution("gaussian",
                 pdf=sy.exp(-(x)**2/2)/sy.sqrt(2*sy.pi)),
    Distribution("cauchy",
                 pdf=1/(sy.pi*(1+(x)**2))),
    Distribution("cold",
                 pdf=1/sy.sqrt(sy.pi)*sy.exp(-(-x-1/sy.sqrt(2))**2)*(2+sy.sqrt(2)*x))
]

# Define plots: one panel each for the PDF, survival function and
# entropy term, evaluated on a common energy grid.
fig, axs = plt.subplots(3, 1)
E = np.linspace(-10, 10, 1001)
axs[0].set_title("PDF")
axs[1].set_title("theta|sf")
axs[2].set_title("entropy")

for dist in distributions:
    print(f"\nProcessing {dist.name}")

    # First fill the values
    if dist.pdf is None:
        dist.pdf = dist.cdf.diff(x)
    norm = sy.integrate(dist.pdf.subs(N, 0), (x, -sy.oo, sy.oo)).subs(c, 1).evalf()
    # Ensure normalization for consistency
    # For utilization in the scipy.stats module it should have normalization 1
    assert norm == 1, norm
    print(f" pdf|delta = {dist.pdf}")

    if dist.cdf is None:
        dist.cdf = sy.integrate(dist.pdf.expand(), x).doit(simplify=True).simplify()
        # Ensure that the cdf is 0 at -inf
        # The CDF is zero @ - inf
        # The CDF is 1 @ + inf
        # NOTE(review): indentation was lost in this copy; the shift by
        # cdf(-oo) is assumed to apply only to freshly derived cdfs —
        # confirm against upstream.
        cneg = dist.cdf.subs(x, -sy.oo)
        dist.cdf = (dist.cdf - cneg).expand().simplify()
    print(f" cdf = {dist.cdf}")
    #print(f" cdf*= {dist.cdf.subs(dist.pdf, pdf)}")

    # plot it...
    func = dist.pdf.subs(N, 0).subs(c, 1).expand().simplify()
    func = sy.lambdify(x, func, 'numpy')
    axs[0].plot(E, func(E), label=dist.name)

    if dist.sf is None:
        dist.sf = (1 - dist.cdf).expand().simplify()
    print(f" sf|theta = {dist.sf}")
    #print(f" d sf|theta = {-dist.pdf.expand().doit(simplify=True).simplify()}")
    #print(f" sf|theta*= {dist.sf.subs(dist.pdf, pdf).subs(dist.cdf, cdf)}")
    func = dist.sf.subs(N, 0).subs(c, 1).expand().simplify()
    func = sy.lambdify(x, func, 'numpy')
    try:
        # cold function may fail
        axs[1].plot(E, [func(e) for e in E], label=dist.name)
    except: pass

    if dist.entropy is None:
        dist.entropy = -(dist.pdf*x).integrate((x, -sy.oo, x)).doit(simplify=True).simplify()
    print(f" entropy = {dist.entropy}")
    func = dist.entropy.subs(N, 0).subs(c, 1).expand().simplify()
    func = sy.lambdify(x, func, 'numpy')
    try:
        # cold function may fail
        axs[2].plot(E, [func(e) for e in E], label=dist.name)
    except: pass

    # Second moment of the distribution (sanity/information output only).
    var = (dist.pdf*x*x).integrate((x, -sy.oo, sy.oo)).doit(simplify=True).simplify()
    print(f" variance = {var}")

# Check that the FD distribution is equivalent to what we find
ifd = distributions.index("fd")
fd = distributions[ifd]
# Check that it finds the same entropy (the analytic FD entropy formula).
fd_enpy = -(fd.sf * sy.log(fd.sf) + (1-fd.sf)*sy.log(1-fd.sf))
assert (fd_enpy - fd.entropy).simplify() == 0.

axs[0].legend()
axs[1].legend()
axs[2].legend()
plt.show()
|
zerothi/sisl
|
developments/distributions.py
|
Python
|
mpl-2.0
| 4,701
|
[
"Gaussian"
] |
9d7a4867516e1484ff885966560f8d9adb193dbee37e801eed59af57d0d64117
|
'''Screen
======
This module changes some environment and configuration variables
to match the density / dpi / screensize of a specific device.
To see a list of the available screenid's, just run::
python main.py -m screen
To simulate a medium-density screen such as the Motorola Droid 2::
python main.py -m screen:droid2
To simulate a high-density screen such as HTC One X, in portrait::
python main.py -m screen:onex,portrait
To simulate the iPad 2 screen::
python main.py -m screen:ipad
If the generated window is too large, you can specify a scale::
python main.py -m screen:note2,portrait,scale=.75
Note that to display your contents correctly on a scaled window you
must consistently use units 'dp' and 'sp' throughout your app. See
:mod:`~kivy.metrics` for more details.
'''
import sys
from os import environ
from kivy.config import Config
from kivy.logger import Logger
# taken from http://en.wikipedia.org/wiki/List_of_displays_by_pixel_density
# Catalog of simulated devices.
# Taken from http://en.wikipedia.org/wiki/List_of_displays_by_pixel_density
devices = {
    # device: (name, width, height, dpi, density) -- all in landscape
    'onex': ('HTC One X', 1280, 720, 312, 2),
    'one': ('HTC One', 1920, 1080, 468, 3),
    'onesv': ('HTC One SV', 800, 480, 216, 1.5),
    's3': ('Galaxy SIII', 1280, 720, 306, 2),
    'note2': ('Galaxy Note II', 1280, 720, 267, 2),
    'droid2': ('Motorola Droid 2', 854, 480, 240, 1.5),
    'xoom': ('Motorola Xoom', 1280, 800, 149, 1),
    'ipad': ('iPad (1 and 2)', 1024, 768, 132, 1),
    'ipad3': ('iPad 3', 2048, 1536, 264, 2),
    'iphone4': ('iPhone 4', 960, 640, 326, 2),
    'iphone5': ('iPhone 5', 1136, 640, 326, 2),
    'xperiae': ('Xperia E', 480, 320, 166, 1),
    'nexus4': ('Nexus 4', 1280, 768, 320, 2),
    'nexus7': ('Nexus 7 (2012 version)', 1280, 800, 216, 1.325),
    'nexus7.2': ('Nexus 7 (2013 version)', 1920, 1200, 323, 2),

    # taken from design.google.com/devices
    # please consider using another data instead of a dict for
    # autocompletion to work
    # these are all in landscape
    'phone_android_one': ('Android One', 854, 480, 218, 1.5),
    'phone_htc_one_m8': ('HTC One M8', 1920, 1080, 432, 3.0),
    'phone_htc_one_m9': ('HTC One M9', 1920, 1080, 432, 3.0),
    'phone_iphone': ('iPhone', 480, 320, 168, 1.0),
    'phone_iphone_4': ('iPhone 4', 960, 640, 320, 2.0),
    'phone_iphone_5': ('iPhone 5', 1136, 640, 320, 2.0),
    'phone_iphone_6': ('iPhone 6', 1334, 750, 326, 2.0),
    'phone_iphone_6_plus': ('iPhone 6 Plus', 1920, 1080, 400, 3.0),
    'phone_lg_g2': ('LG G2', 1920, 1080, 432, 3.0),
    'phone_lg_g3': ('LG G3', 2560, 1440, 533, 3.0),
    'phone_moto_g': ('Moto G', 1280, 720, 327, 2.0),
    'phone_moto_x': ('Moto X', 1280, 720, 313, 2.0),
    'phone_moto_x_2nd_gen': ('Moto X 2nd Gen', 1920, 1080, 432, 3.0),
    'phone_nexus_4': ('Nexus 4', 1280, 768, 240, 2.0),
    'phone_nexus_5': ('Nexus 5', 1920, 1080, 450, 3.0),
    'phone_nexus_5x': ('Nexus 5X', 1920, 1080, 432, 2.6),
    'phone_nexus_6': ('Nexus 6', 2560, 1440, 496, 3.5),
    'phone_nexus_6p': ('Nexus 6P', 2560, 1440, 514, 3.5),
    'phone_samsung_galaxy_note_4': ('Samsung Galaxy Note 4', 2560, 1440, 514, 3.0),
    'phone_samsung_galaxy_s5': ('Samsung Galaxy S5', 1920, 1080, 372, 3.0),
    'phone_samsung_galaxy_s6': ('Samsung Galaxy S6', 2560, 1440, 576, 4.0),
    'phone_sony_xperia_c4': ('Sony Xperia C4', 1920, 1080, 400, 2.0),
    'phone_sony_xperia_z_ultra': ('Sony Xperia Z Ultra', 1920, 1080, 348, 2.0),
    'phone_sony_xperia_z1_compact': ('Sony Xperia Z1 Compact', 1280, 720, 342, 2.0),
    'phone_sony_xperia_z2z3': ('Sony Xperia Z2/Z3', 1920, 1080, 432, 3.0),
    'phone_sony_xperia_z3_compact': ('Sony Xperia Z3 Compact', 1280, 720, 313, 2.0),
    'tablet_dell_venue_8': ('Dell Venue 8', 2560, 1600, 355, 2.0),
    'tablet_ipad': ('iPad', 1024, 768, 132, 1.0),
    'tablet_ipad_mini': ('iPad Mini', 1024, 768, 163, 1.0),
    'tablet_ipad_mini_retina': ('iPad Mini Retina', 2048, 1536, 326, 2.0),
    'tablet_ipad_pro': ('iPad Pro', 2732, 2048, 265, 2.0),
    'tablet_ipad_retina': ('iPad Retina', 2048, 1536, 264, 2.0),
    'tablet_nexus_10': ('Nexus 10', 2560, 1600, 297, 2.0),
    'tablet_nexus_7_12': ('Nexus 7 12', 1280, 800, 216, 1.3),
    'tablet_nexus_7_13': ('Nexus 7 13', 1920, 1200, 324, 2.0),
    'tablet_nexus_9': ('Nexus 9', 2048, 1536, 288, 2.0),
    'tablet_samsung_galaxy_tab_10': ('Samsung Galaxy Tab 10', 1280, 800, 148, 1.0),
    'tablet_sony_xperia_z3_tablet': ('Sony Xperia Z3 Tablet', 1920, 1200, 282, 2.0),
    'tablet_sony_xperia_z4_tablet': ('Sony Xperia Z4 Tablet', 2560, 1600, 297, 2.0)
}
def start(win, ctx):
    """Module entry point required by the Kivy module API; nothing to do."""
    pass
def stop(win, ctx):
    """Module exit point required by the Kivy module API; nothing to do."""
    pass
def apply_device(device, scale, orientation):
    """Apply the screen settings of `device` to the Kivy configuration.

    :param device: key into the module-level `devices` dict
    :param scale: window scale factor (string or number); falls back to 1
    :param orientation: 'portrait' swaps width/height, else landscape
    """
    name, width, height, dpi, density = devices[device]
    if orientation == 'portrait':
        width, height = height, width
    Logger.info('Screen: Apply screen settings for {0}'.format(name))
    Logger.info('Screen: size={0}x{1} dpi={2} density={3} '
                'orientation={4}'.format(width, height, dpi, density,
                                         orientation))
    try:
        scale = float(scale)
    # Bug fix: narrowed the former bare `except:` — only a missing (None)
    # or malformed scale should fall back to 1, not e.g. KeyboardInterrupt.
    except (TypeError, ValueError):
        scale = 1
    environ['KIVY_METRICS_DENSITY'] = str(density * scale)
    environ['KIVY_DPI'] = str(dpi * scale)
    Config.set('graphics', 'width', str(int(width * scale)))
    # simulate with the android bar
    # FIXME should be configurable
    Config.set('graphics', 'height', str(int(height * scale - 25 * density)))
    Config.set('graphics', 'fullscreen', '0')
    Config.set('graphics', 'show_mousecursor', '1')
def usage(device=None):
    """Print module usage and the device table, then exit with status 1.

    :param device: the unknown device id that triggered the message, if any
    """
    if device:
        # Bug fix: Logger.error uses lazy %-style interpolation, so the
        # str.format placeholder '{0}' was never filled in and the device
        # argument was dropped. Format explicitly instead.
        Logger.error(
            'Screen: The specified device ({0}) is unknown.'.format(device))
    print('\nModule usage: python main.py -m screen:deviceid[,orientation]\n')
    print('Available devices:\n')
    print('{0:12} {1:<22} {2:<8} {3:<8} {4:<5} {5:<8}'.format(
        'Device ID', 'Name', 'Width', 'Height', 'DPI', 'Density'))
    for device, info in devices.items():
        print('{0:12} {1:<22} {2:<8} {3:<8} {4:<5} {5:<8}'.format(
            device, *info))
    print('\n')
    print('Simulate a medium-density screen such as Motorola Droid 2:\n')
    print(' python main.py -m screen:droid2\n')
    print('Simulate a high-density screen such as HTC One X, in portrait:\n')
    print(' python main.py -m screen:onex,portrait\n')
    print('Simulate the iPad 2 screen\n')
    print(' python main.py -m screen:ipad\n')
    print('If the generated window is too large, you can specify a scale:\n')
    print(' python main.py -m screen:note2,portrait,scale=.75\n')
    sys.exit(1)
def configure(ctx):
    """Entry point called by Kivy's module system with the parsed context.

    `ctx` maps option tokens (device id, 'portrait'/'landscape', 'scale')
    to their values.
    """
    scale = ctx.pop('scale', None)
    orientation = 'landscape'
    ctx.pop('landscape', None)
    if ctx.pop('portrait', None):
        orientation = 'portrait'
    if not ctx:
        return usage(None)
    device = list(ctx.keys())[0]
    if device not in devices:
        # Bug fix: pass the actual unknown device id so usage() can report
        # it, instead of the former empty string.
        return usage(device)
    apply_device(device, scale, orientation)
if __name__ == "__main__":
for n in devices.values():
assert n[1] > n[2]
|
darkopevec/kivy
|
kivy/modules/screen.py
|
Python
|
mit
| 6,792
|
[
"Galaxy"
] |
6005884c70399ac6c2553d4e826f60d6ede0c3d489f5288bc103a3c04c938cda
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import sys
import time
import numpy as np
import psi4
try:
    from ipi.interfaces.clients import Client
    ipi_available = True
except ImportError:
    ipi_available = False

    # Define Client to enable testing of the Broker in the unittests
    # NOTE(review): indentation was lost in this copy; this fallback class
    # is assumed to live inside the except-branch so IPIBroker always has
    # a base class — confirm against upstream.
    class Client():
        pass
class IPIBroker(Client):
    """Psi4-side provider of forces/energies for the i-PI client interface.

    Wraps an i-PI ``Client`` (when available) and evaluates gradients with
    Psi4 at the requested level of theory (LOT).
    """

    def __init__(self, LOT, options=None, serverdata=False, molecule=None):
        # serverdata encodes the i-PI server location as "mode:address:port";
        # when falsy, the Client is created without a socket (driver mode).
        self.serverdata = serverdata
        if not ipi_available:
            psi4.core.print_out("i-pi is not available for import: ")
            psi4.core.print_out("The broker infrastructure will not be available!\n")
            super(IPIBroker, self).__init__()
        elif serverdata:
            mode, address, port = serverdata.split(":")
            mode = mode.lower()
            super(IPIBroker, self).__init__(address=address, port=port, mode=mode)
        else:
            super(IPIBroker, self).__init__(_socket=False)

        self.LOT = LOT
        self.options = options if options else {}
        if molecule is None:
            molecule = psi4.core.get_active_molecule()
        self.initial_molecule = molecule

        # i-PI drives the geometry, so Psi4 must not reorient or
        # symmetrize the molecule between steps.
        assert self.initial_molecule.orientation_fixed(), "Orientation must be fixed!"
        assert self.initial_molecule.point_group().symbol() == "c1", "Symmetry must be 'c1'!"

        names = [self.initial_molecule.symbol(i) for i in range(self.initial_molecule.natom())]
        psi4.core.print_out("Initial atoms %s\n" % names)
        self.atoms_list = names

        # Push user-supplied options into Psi4's global option space.
        psi4.core.print_out("Psi4 options:\n")
        for item, value in self.options.items():
            psi4.core.print_out("%s %s\n" % (item, value))
            psi4.core.set_global_option(item, value)
        psi4.core.IO.set_default_namespace("xwrapper")

        # Per-LOT wall-time bookkeeping, filled by calculate_gradient().
        self.timing = {}

        atoms = np.array(self.initial_molecule.geometry())
        psi4.core.print_out("Initial atoms %s\n" % atoms)
        psi4.core.print_out("Force:\n")
        self._positions = atoms
        self._callback = self.callback
        self._nat = np.int32(len(atoms))

    def calculate_force(self, pos=None, **kwargs):
        """Fetch force, energy of PSI.

        Arguments:
            - pos: positions of the atoms as array. If None, the positions
              of the current active molecule is used.
        """
        if pos is None:
            molecule = psi4.core.get_active_molecule()
            pos = np.array(molecule.geometry())
        self._force, self._potential = self.callback(pos, **kwargs)
        return self._force, self._potential

    def callback(self, pos, **kwargs):
        """Initialize psi with new positions and calculate force.

        Arguments:
            - pos: positions of the atoms as array.
        """
        self.initial_molecule.set_geometry(psi4.core.Matrix.from_array(pos))
        self.calculate_gradient(self.LOT, pos=pos, **kwargs)
        self._potential = psi4.variable('CURRENT ENERGY')
        # Force is the negative gradient; virial is zeroed (not computed).
        self._force = -np.array(self.grd)
        self._vir = np.array([[0.0,0.0,0.0],[0.0,0.0,0.0],[0.0,0.0,0.0]])
        return self._force, np.float64(self._potential)

    def calculate_gradient(self, LOT, bypass_scf=False, **kwargs):
        """Calculate the gradient with @LOT.

        When bypass_scf=True a hf energy calculation has been done before.
        """
        start = time.time()
        self.grd = psi4.gradient(LOT, bypass_scf=bypass_scf, **kwargs)
        time_needed = time.time() - start
        # Accumulate wall times per level of theory.
        self.timing[LOT] = self.timing.get(LOT, []) + [time_needed]
def ipi_broker(LOT, molecule=None, serverdata=False, options=None):
    """ Run IPIBroker to connect to i-pi

    Arguments:
        LOT: level of theory passed to psi4.gradient
        molecule: Initial molecule
        serverdata: Configuration where to connect to ipi
        options: any additional Psi4 options
    """
    b = IPIBroker(LOT, molecule=molecule, serverdata=serverdata, options=options)

    try:
        if b.serverdata:
            # Connected mode: enter the i-PI client loop (blocks).
            b.run()
        else:
            # Driver mode: hand the broker back to the caller.
            return b
    except KeyboardInterrupt:
        psi4.core.print_out("Killing IPIBroker\n")
        # Explicit destructor call to tear down the socket before exiting.
        b.__del__()  # lgtm [py/explicit-call-to-delete]
        sys.exit(1)
|
psi4/psi4
|
psi4/driver/ipi_broker.py
|
Python
|
lgpl-3.0
| 5,014
|
[
"Psi4"
] |
da024fbf5485df37ac2530b0f44975e49bffc235999ebabdd8c5c894b847d473
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Drop the free-text 'others' column from the three history tables."""
    # Deleting field 'PresentMedicalHistory.others'
    db.delete_column(u'patient_presentmedicalhistory', 'others')

    # Deleting field 'PastMedicalHistory.others'
    db.delete_column(u'patient_pastmedicalhistory', 'others')

    # Deleting field 'FamilyMedicalHistory.others'
    db.delete_column(u'patient_familymedicalhistory', 'others')
def backwards(self, orm):
# Adding field 'PresentMedicalHistory.others'
db.add_column(u'patient_presentmedicalhistory', 'others',
self.gf('django.db.models.fields.TextField')(default=''),
keep_default=False)
# Adding field 'PastMedicalHistory.others'
db.add_column(u'patient_pastmedicalhistory', 'others',
self.gf('django.db.models.fields.TextField')(default=''),
keep_default=False)
# Adding field 'FamilyMedicalHistory.others'
db.add_column(u'patient_familymedicalhistory', 'others',
self.gf('django.db.models.fields.TextField')(default=''),
keep_default=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'patient.additionalpatientinformation': {
'Meta': {'object_name': 'AdditionalPatientInformation'},
'alcohol': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'cigarettes': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'cooking_facilities': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'educational_level': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'literate': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'other_harmful_substances': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'psychological_stress': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'toilet_facilities': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'patient.familymedicalhistory': {
'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'Meta': {'object_name': 'FamilyMedicalHistory'},
'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.guardian': {
'Meta': {'object_name': 'Guardian'},
'contact_number': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'educational_level': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'home_address': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'relation': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.gynaecologicalhistory': {
'Meta': {'object_name': 'GynaecologicalHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_of_last_pap_smear': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'method_of_birth_control': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'previous_surgery': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PreviousSurgery']"}),
'result_pap_smear': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.immunizationhistory': {
'Meta': {'object_name': 'ImmunizationHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'tetanus_toxoid1': ('django.db.models.fields.DateTimeField', [], {}),
'tetanus_toxoid2': ('django.db.models.fields.DateTimeField', [], {}),
'tetanus_toxoid3': ('django.db.models.fields.DateTimeField', [], {}),
'vaccination': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.laboratorytest': {
'Meta': {'object_name': 'LaboratoryTest'},
'blood_group': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'hemoglobin': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'serological_test_for_syphilis': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'urinalysis': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.medicalhistory': {
'Meta': {'object_name': 'MedicalHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'family_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.FamilyMedicalHistory']"}),
'gynaecological_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.GynaecologicalHistory']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'immunization_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.ImmunizationHistory']"}),
'menstrual_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.MenstrualHistory']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'obstetric_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.ObstetricHistory']"}),
'past_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PastMedicalHistory']"}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'present_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PresentMedicalHistory']"})
},
u'patient.menstrualhistory': {
'Meta': {'object_name': 'MenstrualHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'day_of_visit': ('django.db.models.fields.DateField', [], {}),
'expected_date_of_delivery': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_menstrual_periods': ('django.db.models.fields.DateField', [], {}),
'menstrual_cycle': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'poa_by_lmp': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'patient.obstetrichistory': {
'Meta': {'object_name': 'ObstetricHistory'},
'check_if_you_have_been_miscarriages': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'check_if_you_have_been_pregnant': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'list_previous_obstetric_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PreviousObstetricHistory']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"})
},
u'patient.pastmedicalhistory': {
'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'Meta': {'object_name': 'PastMedicalHistory'},
'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.patientinformation': {
'Meta': {'object_name': 'PatientInformation'},
'address': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'marital_status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'operator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'telephone_number': ('django.db.models.fields.CharField', [], {'max_length': '15'})
},
u'patient.prescription': {
'Meta': {'object_name': 'Prescription'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_of_prescription': ('django.db.models.fields.TextField', [], {}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"})
},
u'patient.presentmedicalhistory': {
'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'Meta': {'object_name': 'PresentMedicalHistory'},
'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.previousobstetrichistory': {
'Meta': {'object_name': 'PreviousObstetricHistory'},
'age_of_baby': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'birth_weight': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length_of_pregnancy': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_of_baby': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'obstetrical_operation': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'periods_of_exclusive_feeding': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'problems': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'types_of_delivery': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.DateField', [], {})
},
u'patient.previoussurgery': {
'Meta': {'object_name': 'PreviousSurgery'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'endometriosis': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'fibrocystic_breasts': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others_please_state': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'ovarian_cysts': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'uterine_fibroids': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'patient.report': {
'Meta': {'object_name': 'Report'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'diabetis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hiv': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pregnancy': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.routinecheckup': {
'Meta': {'object_name': 'Routinecheckup'},
'abdominal_changes': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'blood_pressure': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'chest_and_heart_auscultation': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'fetal_movement': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'height': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_of_examiner': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'symptom_events': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'uterine_height': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'vaginal_examination': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'visit': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'weight': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'patient.signanaemia': {
'Meta': {'object_name': 'Signanaemia'},
'conjunctiva': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fingernails': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'oral_mucosa': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'others_please_state': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'pale_complexion': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'shortness_of_breath': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tip_of_tongue': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'patient.ultrasoundscanning': {
'AC': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'BPD': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'CRL': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'FL': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'HC': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'Meta': {'object_name': 'UltrasoundScanning'},
'amount_of_amniotic_fluid': ('django.db.models.fields.IntegerField', [], {'max_length': '10'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'gestation_age': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_examiner': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'position_of_the_baby': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'position_of_the_placenta': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'saved_ultrasound_image': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['patient']
|
aazhbd/medical_info01
|
patient/migrations/0012_auto__del_field_presentmedicalhistory_others__del_field_pastmedicalhis.py
|
Python
|
bsd-3-clause
| 30,034
|
[
"VisIt"
] |
52b8f659f6fde7c2047f27ac39bdbe7c3d93aafb708d55462cac9eff76389a6c
|
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Dendrogram helper functions and class'''
from copy import deepcopy
import sys
import numpy as np
from neurom.core import Tree, Neurite
from neurom.core.dataformat import COLS
def _n_terminations(tree):
'''Get the number of terminations in a tree'''
return sum(1 for _ in tree.ileaf())
def _max_recursion_depth(obj):
''' Estimate recursion depth, which is defined as the number of nodes in a tree
'''
neurites = obj.neurites if hasattr(obj, 'neurites') else [obj]
return max(sum(1 for _ in neu.iter_sections()) for neu in neurites)
def _total_rectangles(tree):
'''
Calculate the total number of segments that are required
for the dendrogram. There is a vertical line for each segment
and two horizontal line at each branching point
'''
return sum(len(sec.children) + sec.points.shape[0] - 1
for sec in tree.iter_sections())
def _n_rectangles(obj):
    '''Total rectangle count for *obj*.

    Sums over all neurites when *obj* is a neuron-like object, otherwise
    treats *obj* as a single tree.
    '''
    if hasattr(obj, 'neurites'):
        return sum(_total_rectangles(neu) for neu in obj.neurites)
    return _total_rectangles(obj)
def _square_segment(radius, origin):
'''Vertices for a square
'''
return np.array(((origin[0] - radius, origin[1] - radius),
(origin[0] - radius, origin[1] + radius),
(origin[0] + radius, origin[1] + radius),
(origin[0] + radius, origin[1] - radius)))
def _vertical_segment(old_offs, new_offs, spacing, radii):
'''Vertices for a vertical rectangle
'''
return np.array(((new_offs[0] - radii[0], old_offs[1] + spacing[1]),
(new_offs[0] - radii[1], new_offs[1]),
(new_offs[0] + radii[1], new_offs[1]),
(new_offs[0] + radii[0], old_offs[1] + spacing[1])))
def _horizontal_segment(old_offs, new_offs, spacing, diameter):
'''Vertices of a horizontal rectangle
'''
return np.array(((old_offs[0], old_offs[1] + spacing[1]),
(new_offs[0], old_offs[1] + spacing[1]),
(new_offs[0], old_offs[1] + spacing[1] - diameter),
(old_offs[0], old_offs[1] + spacing[1] - diameter)))
def _spacingx(node, max_dims, xoffset, xspace):
    '''Horizontal spacing of *node*, proportional to its leaf count.

    Side effect: widens max_dims[0] when this node needs more room.
    Returns the x coordinate at which the node's subtree starts.
    '''
    width = _n_terminations(node) * xspace
    max_dims[0] = max(max_dims[0], width)
    return xoffset - width / 2.
def _update_offsets(start_x, spacing, terminations, offsets, length):
'''Update the offsets
'''
return (start_x + spacing[0] * terminations / 2.,
offsets[1] + spacing[1] * 2. + length)
def _max_diameter(tree):
    '''Largest diameter (twice the largest point radius) in the tree.'''
    biggest_radius = max(max(node.points[:, COLS.R])
                         for node in tree.ipreorder())
    return 2. * biggest_radius
class Dendrogram(object):
'''Dendrogram
'''
def __init__(self, obj, show_diameters=True):
'''Create dendrogram
'''
# flag for diameters
self._show_diameters = show_diameters
# input object, tree, or neuron
self._obj = deepcopy(Neurite(obj) if isinstance(obj, Tree) else obj)
# counter/index for the storage of the rectangles.
# it is updated recursively
self._n = 0
# the maximum lengths in x and y that is occupied
# by a neurite. It is updated recursively.
self._max_dims = [0., 0.]
# stores indices that refer to the _rectangles array
# for each neurite
self._groups = []
# dims store the max dimensions for each neurite
# essential for the displacement in the plotting
self._dims = []
# initialize the number of rectangles
self._rectangles = np.zeros([_n_rectangles(self._obj), 4, 2])
# determine the maximum recursion depth for the given object
# which depends on the tree with the maximum number of nodes
self._max_rec_depth = _max_recursion_depth(self._obj)
def _generate_soma(self):
'''soma'''
radius = self._obj.soma.radius
return _square_segment(radius, (0., -radius))
    def generate(self):
        '''Generate dendrogram

        Fills self._rectangles via the recursive _generate_dendro walk,
        and records per-neurite index ranges (self._groups) and max
        dimensions (self._dims) for plotting.
        '''
        offsets = (0., 0.)
        n_previous = 0
        # set recursion limit with respect to
        # the max number of nodes on the trees
        old_depth = sys.getrecursionlimit()
        max_depth = old_depth if old_depth > self._max_rec_depth else self._max_rec_depth
        # TODO: This should be fixed so we don't set sys.setrecursionlimit at all
        sys.setrecursionlimit(max_depth)
        if isinstance(self._obj, Neurite):
            # Single neurite: wrap its root in a dummy parent so the
            # recursive walk sees it as a child.
            max_diameter = _max_diameter(self._obj.root_node)
            dummy_section = Tree()
            dummy_section.add_child(self._obj.root_node)
            self._generate_dendro(dummy_section, (max_diameter, 0.), offsets)
            # NOTE(review): start index is the float 0. here, while the
            # multi-neurite branch below uses the int n_previous — confirm
            # consumers of _groups tolerate the mixed types.
            self._groups.append((0., self._n))
            self._dims.append(self._max_dims)
        else:
            # Neuron: lay out each neurite independently.
            for neurite in self._obj.neurites:
                neurite = neurite.root_node
                max_diameter = _max_diameter(neurite)
                dummy_section = Tree()
                dummy_section.add_child(neurite)
                self._generate_dendro(dummy_section, (max_diameter, 0.), offsets)
                # store in trees the indices for the slice which corresponds
                # to the current neurite
                self._groups.append((n_previous, self._n))
                # store the max dims per neurite for view positioning
                self._dims.append(self._max_dims)
                # reset the max dimensions for the next tree in line
                self._max_dims = [0., 0.]
                # keep track of the next tree start index in list
                n_previous = self._n
        # set it back to its initial value
        sys.setrecursionlimit(old_depth)
def _generate_dendro(self, current_section, spacing, offsets):
'''Recursive function for dendrogram line computations
'''
max_dims = self._max_dims
start_x = _spacingx(current_section, max_dims, offsets[0], spacing[0])
for child in current_section.children:
segments = child.points
# number of leaves in child
terminations = _n_terminations(child)
# segement lengths
seg_lengths = np.linalg.norm(np.subtract(segments[:-1, COLS.XYZ],
segments[1:, COLS.XYZ]), axis=1)
# segment radii
radii = np.vstack((segments[:-1, COLS.R], segments[1:, COLS.R])).T \
if self._show_diameters else np.zeros((seg_lengths.shape[0], 2))
y_offset = offsets[1]
for i, slen in enumerate(seg_lengths):
# offset update for the vertical segments
new_offsets = _update_offsets(start_x, spacing, terminations,
(offsets[0], y_offset), slen)
# segments are drawn vertically, thus only y_offset changes from init offsets
self._rectangles[self._n] = _vertical_segment((offsets[0], y_offset),
new_offsets, spacing, radii[i, :])
self._n += 1
y_offset = new_offsets[1]
if y_offset + spacing[1] * 2 + sum(seg_lengths) > max_dims[1]:
max_dims[1] = y_offset + spacing[1] * 2. + sum(seg_lengths)
self._max_dims = max_dims
# recursive call to self.
self._generate_dendro(child, spacing, new_offsets)
# update the starting position for the next child
start_x += terminations * spacing[0]
# write the horizontal lines only for bifurcations, where the are actual horizontal
# lines and not zero ones
if offsets[0] != new_offsets[0]:
# horizontal segment. Thickness is either 0 if show_diameters is false
# or 1. if show_diameters is true
self._rectangles[self._n] = _horizontal_segment(offsets, new_offsets, spacing, 0.)
self._n += 1
@property
def data(self):
''' Returns the array with the rectangle collection
'''
return self._rectangles
@property
def groups(self):
''' Returns the list of the indices for the slicing of the
rectangle array wich correspond to each neurite
'''
return self._groups
@property
def dims(self):
''' Returns the list of the max dimensions for each neurite
'''
return self._dims
@property
def types(self):
''' Returns an iterator over the types of the neurites in the object.
If the object is a tree, then one value is returned.
'''
neurites = self._obj.neurites if hasattr(self._obj, 'neurites') else (self._obj,)
return (neu.type for neu in neurites)
@property
def soma(self):
''' Returns soma
'''
return self._generate_soma() if hasattr(self._obj, 'soma') else None
|
juanchopanza/NeuroM
|
neurom/view/_dendrogram.py
|
Python
|
bsd-3-clause
| 10,982
|
[
"NEURON"
] |
cb0eb96cfcc2d16c5415883537017b6a9917ae0d126dff69d3215eacdf8d30cc
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
The main routine of this package that aims at performing the
extraction of ROIs from multisubject dataset using the localization
and activation strength of extracted regions.
This has been published in:
- Thirion et al. High level group analysis of FMRI data based on
Dirichlet process mixture models, IPMI 2007
- Thirion et al.
Accurate Definition of Brain Regions Position Through the
Functional Landmark Approach, MICCAI 2010
Author : Bertrand Thirion, 2006-2013
"""
from __future__ import absolute_import
import numpy as np
import scipy.stats as st
from .structural_bfls import build_landmarks
from nipy.algorithms.graph import wgraph_from_coo_matrix
from ...algorithms.statistics.empirical_pvalue import \
NormalEmpiricalNull, three_classes_GMM_fit, gamma_gaussian_fit
from .hroi import HROI_as_discrete_domain_blobs
####################################################################
# Ancillary functions
####################################################################
def _stat_to_proba(test, learn=None, method='prior', alpha=0.01, verbose=0):
"""Convert a set of statistics to posterior probabilities of being
generated under H0
Parameters
----------
test: array of shape(n_samples),
data that is assessed
learn: array of shape(n_samples), optional,
data to learn a mixture model
defaults to learn
method: {'gauss_mixture', 'emp_null', 'gam_gauss', 'prior'}, optional,
'gauss_mixture' A Gaussian Mixture Model is used
'emp_null' a null mode is fitted to test
'gam_gauss' a Gamma-Gaussian mixture is used
'prior' a hard-coded function is used
alpha: float in the [0,1] range, optional,
parameter that yields the prior probability that a region is active
should be chosen close to 0
verbose: int, optional,
verbosity mode
Returns
-------
posterior_null: array of shape(n_samples)
an estimation of the probability that the observation
is generated under the null
"""
if method == 'gauss_mixture':
prior_strength = 100
fixed_scale = True
posterior_probas = three_classes_GMM_fit(
learn, test, alpha, prior_strength, verbose, fixed_scale)
posterior_null = posterior_probas[:, 1]
elif method == 'emp_null':
enn = NormalEmpiricalNull(learn)
enn.learn()
posterior_null = np.reshape(enn.fdr(test), np.size(test))
elif method == 'gam_gauss':
posterior_probas = gamma_gaussian_fit(learn, test, verbose)
posterior_null = posterior_probas[:, 1]
elif method == 'prior':
y0 = st.norm.pdf(test)
shape_, scale_ = 3., 2.
y1 = st.gamma.pdf(test, shape_, scale=scale_)
posterior_null = np.ravel(
(1 - alpha) * y0 / (alpha * y1 + (1 - alpha) * y0))
else:
raise ValueError('Unknown method')
return posterior_null
def _compute_individual_regions(domain, stats, threshold=3.0, smin=5,
method='gauss_mixture'):
""" Compute the individual regions that are real activation candidates
Parameters
----------
domain : StructuredDomain instance,
generic descriptor of the space domain
stats: an array of shape (n_voxels, n_subjects)
the multi-subject statistical maps
threshold: float, optional
first level threshold
smin: int, optional
minimal size of the regions to validate them
method: {'gauss_mixture', 'emp_null', 'gam_gauss', 'prior'}, optional,
'gauss_mixture' A Gaussian Mixture Model is used
'emp_null' a null mode is fitted to test
'gam_gauss' a Gamma-Gaussian mixture is used
'prior' a hard-coded function is used
Returns
-------
hrois: list of nipy.labs.spatial_models.hroi.HierrachicalROI instances
that represent individual ROIs
let nr be the number of terminal regions across subjects
prior_h0: array of shape (nr),
the mixture-based prior probability
that the terminal regions are false positives
subjects: array of shape (nr),
the subject index associated with the terminal regions
coords: array of shape (nr, coord.shape[1]),
the coordinates of the of the terminal regions
Fixme
-----
Should allow for subject specific domains
"""
hrois = []
coords = []
prior_h0 = []
subjects = []
n_subjects = stats.shape[1]
nvox = stats.shape[0]
for subject in range(n_subjects):
# description in terms of blobs
stats_ = np.reshape(stats[:, subject], (nvox, 1))
hroi = HROI_as_discrete_domain_blobs(
domain, stats_, threshold=threshold, smin=smin)
if hroi is not None and hroi.k > 0:
# get the leave regions (i.e. the local maxima)
leaves = [hroi.select_id(id) for id in hroi.get_leaves_id()]
# get the region mean statistical value
mean_val = hroi.representative_feature('signal', 'weighted mean')
mean_val = mean_val[leaves]
# get the regions position
mean_pos = np.asarray(
[np.mean(coord, 0) for coord in hroi.get_coord()])
hroi.set_roi_feature('position', mean_pos)
coords.append(mean_pos[leaves])
# compute the prior proba of being null
learning_set = np.squeeze(stats_[stats_ != 0])
prior_h0.append(_stat_to_proba(mean_val, learning_set, method))
subjects.append(subject * np.ones(mean_val.size).astype(np.int))
else:
subjects.append([])
prior_h0.append([])
coords.append(np.empty((0, domain.dim)))
hrois.append(hroi)
prior_h0 = np.concatenate(prior_h0)
subjects = np.concatenate(subjects)
coords = np.concatenate(coords)
return hrois, prior_h0, subjects, coords
def _dpmm(coords, alpha, null_density, dof, prior_precision, prior_h0,
subjects, sampling_coords=None, n_iter=1000, burnin=100,
co_clust=False):
"""Apply the dpmm analysis to compute clusters from regions coordinates
"""
from nipy.algorithms.clustering.imm import MixedIMM
dim = coords.shape[1]
migmm = MixedIMM(alpha, dim)
migmm.set_priors(coords)
migmm.set_constant_densities(
null_dens=null_density, prior_dens=null_density)
migmm._prior_dof = dof
migmm._prior_scale = np.diag(prior_precision[0] / dof)
migmm._inv_prior_scale_ = [np.diag(dof * 1. / (prior_precision[0]))]
migmm.sample(coords, null_class_proba=prior_h0, niter=burnin, init=False,
kfold=subjects)
# sampling
like, pproba, co_clustering = migmm.sample(
coords, null_class_proba=prior_h0, niter=n_iter, kfold=subjects,
sampling_points=sampling_coords, co_clustering=True)
if co_clust:
return like, 1 - pproba, co_clustering
else:
return like, 1 - pproba
def _update_hroi_labels(hrois, new_labels):
"""Update the labels of the hroisusing new_labels"""
for subject in range(len(hrois)):
if hrois[subject].k > 0:
us = hrois[subject].get_roi_feature('label')
us[us > - 1] = new_labels[us[us > - 1]]
hrois[subject].set_roi_feature('label', us)
def _bsa_dpmm(hrois, prior_h0, subjects, coords, sigma, prevalence_pval,
prevalence_threshold, dof=10, alpha=.5, n_iter=1000, burnin=100,
algorithm='density'):
""" Estimation of the population level model of activation density using
dpmm and inference
Parameters
----------
hrois: list of nipy.labs.spatial_models.hroi.HierarchicalROI instances
representing individual ROIs
Let nr be the number of terminal regions across subjects
prior_h0: array of shape (nr)
mixture-based prior probability
that the terminal regions are true positives
subjects: array of shape (nr)
subject index associated with the terminal regions
coords: array of shape (nr, coord.shape[1])
coordinates of the of the terminal regions
sigma: float > 0,
expected cluster scatter in the common space in units of coord
prevalence_pval: float in the [0,1] interval, optional
p-value of the prevalence test
prevalence_threshold: float in the rannge [0,nsubj]
null hypothesis on region prevalence
dof: float > 0, optional,
degrees of freedom of the prior
alpha: float > 0, optional,
creation parameter of the DPMM
niter: int, optional,
number of iterations of the DPMM
burnin: int, optional,
number of iterations of the DPMM
algorithm: {'density', 'co_occurrence'}, optional,
algorithm used in the DPMM inference
Returns
-------
landmarks: instance of sbf.LandmarkRegions
that describes the ROIs found in inter-subject inference
If no such thing can be defined landmarks is set to None
hrois: List of nipy.labs.spatial_models.hroi.HierarchicalROI instances
representing individual ROIs
"""
from nipy.algorithms.graph.field import field_from_coo_matrix_and_data
domain = hrois[0].domain
n_subjects = len(hrois)
landmarks = None
density = np.zeros(domain.size)
if len(subjects) < 1:
return landmarks, hrois
null_density = 1. / domain.local_volume.sum()
# prepare the DPMM
dim = domain.em_dim
prior_precision = 1. / (sigma ** 2) * np.ones((1, dim))
# n_iter = number of iterations to estimate density
if algorithm == 'density':
density, post_proba = _dpmm(
coords, alpha, null_density, dof, prior_precision, prior_h0,
subjects, domain.coord, n_iter=n_iter, burnin=burnin)
# associate labels with coords
Fbeta = field_from_coo_matrix_and_data(domain.topology, density)
_, label = Fbeta.custom_watershed(0, null_density)
midx = np.array([np.argmin(np.sum((domain.coord - coord_) ** 2, 1))
for coord_ in coords])
components = label[midx]
elif algorithm == 'co-occurrence':
post_proba, density, co_clustering = _dpmm(
coords, alpha, null_density, dof, prior_precision, prior_h0,
subjects, n_iter=n_iter, burnin=burnin, co_clust=True)
contingency_graph = wgraph_from_coo_matrix(co_clustering)
if contingency_graph.E > 0:
contingency_graph.remove_edges(contingency_graph.weights > .5)
components = contingency_graph.cc()
components[density < null_density] = components.max() + 1 +\
np.arange(np.sum(density < null_density))
else:
raise ValueError('Unknown algorithm')
# append some information to the hroi in each subject
for subject in range(n_subjects):
bfs = hrois[subject]
if bfs is None:
continue
if bfs.k == 0:
bfs.set_roi_feature('label', np.array([]))
continue
leaves_pos = [bfs.select_id(k) for k in bfs.get_leaves_id()]
# save posterior proba
post_proba_ = np.zeros(bfs.k)
post_proba_[leaves_pos] = post_proba[subjects == subject]
bfs.set_roi_feature('posterior_proba', post_proba_)
# save prior proba
prior_proba = np.zeros(bfs.k)
prior_proba[leaves_pos] = 1 - prior_h0[subjects == subject]
bfs.set_roi_feature('prior_proba', prior_proba)
# assign labels to ROIs
roi_label = - np.ones(bfs.k).astype(np.int)
roi_label[leaves_pos] = components[subjects == subject]
# when parent regions has similarly labelled children,
# include it also
roi_label = bfs.make_forest().propagate_upward(roi_label)
bfs.set_roi_feature('label', roi_label)
# derive the group-level landmarks
# with a threshold on the number of subjects
# that are represented in each one
landmarks, new_labels = build_landmarks(
domain, coords, subjects, np.array(components), 1 - prior_h0,
prevalence_pval, prevalence_threshold, sigma)
# relabel the regions
_update_hroi_labels(hrois, new_labels)
return landmarks, hrois
###########################################################################
# Main function
###########################################################################
def compute_landmarks(
domain, stats, sigma, prevalence_pval=0.5, prevalence_threshold=0,
threshold=3.0, smin=5, method='prior', algorithm='density', n_iter=1000,
burnin=100):
""" Compute the Bayesian Structural Activation patterns
Parameters
----------
domain: StructuredDomain instance,
Description of the spatial context of the data
stats: array of shape (nbnodes, subjects):
the multi-subject statistical maps
sigma: float > 0:
expected cluster std in the common space in units of coord
prevalence_pval: float in the [0,1] interval, optional
posterior significance threshold
prevalence_threshold: float, optional,
reference threshold for the prevalence value
threshold: float, optional,
first level threshold
smin: int, optional,
minimal size of the regions to validate them
method: {'gauss_mixture', 'emp_null', 'gam_gauss', 'prior'}, optional,
'gauss_mixture' A Gaussian Mixture Model is used
'emp_null' a null mode is fitted to test
'gam_gauss' a Gamma-Gaussian mixture is used
'prior' a hard-coded function is used
algorithm: string, one of ['density', 'co-occurrence'], optional
method used to compute the landmarks
niter: int, optional,
number of iterations of the DPMM
burnin: int, optional,
number of iterations of the DPMM
Returns
-------
landmarks: Instance of sbf.LandmarkRegions or None,
Describes the ROIs found in inter-subject inference
None if nothing can be defined
hrois: list of nipy.labs.spatial_models.hroi.Nroi instances
representing individual ROIs
"""
hrois, prior_h0, subjects, coords = _compute_individual_regions(
domain, stats, threshold, smin, method)
landmarks, hrois = _bsa_dpmm(
hrois, prior_h0, subjects, coords, sigma, prevalence_pval,
prevalence_threshold, algorithm=algorithm, n_iter=n_iter,
burnin=burnin)
return landmarks, hrois
|
alexis-roche/nipy
|
nipy/labs/spatial_models/bayesian_structural_analysis.py
|
Python
|
bsd-3-clause
| 14,865
|
[
"Gaussian"
] |
b7cc2b54c8db62dc2b47f31297ee976e3d1007016c32887263b808f783fa874d
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Numpy2Vtk documentation build configuration file, created by
# sphinx-quickstart on Sat Jun 20 19:11:32 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from mock import Mock
class MockModule(Mock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['vtk', 'numpy']
sys.modules.update((mod_name, MockModule()) for mod_name in MOCK_MODULES)
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Numpy2Vtk'
copyright = '2015, Stefan Lau'
author = 'Stefan Lau'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Numpy2Vtkdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Numpy2Vtk.tex', 'Numpy2Vtk Documentation',
'Stefan Lau', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'numpy2vtk', 'Numpy2Vtk Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Numpy2Vtk', 'Numpy2Vtk Documentation',
author, 'Numpy2Vtk', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
selaux/numpy2vtk
|
docs/conf.py
|
Python
|
lgpl-3.0
| 9,252
|
[
"VTK"
] |
231f88041769c58e1d99e22aeb81512f17efe0afff74f636f0a47c13604341b3
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Enumerator with the libxc identifiers.
This is a low level object, client code should not interact with LibxcFunc directly
but use the API provided by the Xcfunc object defined in core.xcfunc.py.
Part of this module is automatically generated so be careful when refactoring stuff.
Use the script ~pymatgen/dev_scripts/regen_libxcfunc.py to regenerate the enum values.
"""
import json
import os
from enum import Enum
from monty.json import MontyEncoder
# The libxc version used to generate this file!
libxc_version = "3.0.0"
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2016, The Materials Project"
__version__ = libxc_version
__maintainer__ = "Matteo Giantomassi"
__email__ = "gmatteo@gmail.com"
__status__ = "Production"
__date__ = "May 16, 2016"
# Loads libxc info from json file
with open(os.path.join(os.path.dirname(__file__), "libxc_docs.json")) as fh:
_all_xcfuncs = {int(k): v for k, v in json.load(fh).items()}
# @unique
class LibxcFunc(Enum):
"""
Enumerator with the identifiers. This object is used by Xcfunc
declared in xcfunc.py to create an internal representation of the XC functional.
This is a low level object, client code should not interact with LibxcFunc directly
but use the API provided by Xcfunc.
"""
# begin_include_dont_touch
LDA_C_1D_CSC = 18
LDA_C_1D_LOOS = 26
LDA_C_2D_AMGB = 15
LDA_C_2D_PRM = 16
LDA_C_GOMBAS = 24
LDA_C_HL = 4
LDA_C_GL = 5
LDA_C_vBH = 17
LDA_C_ML1 = 22
LDA_C_ML2 = 23
LDA_C_PW = 12
LDA_C_PW_MOD = 13
LDA_C_OB_PW = 14
LDA_C_PW_RPA = 25
LDA_C_PZ = 9
LDA_C_PZ_MOD = 10
LDA_C_OB_PZ = 11
LDA_C_RC04 = 27
LDA_C_RPA = 3
LDA_C_VWN = 7
LDA_C_VWN_1 = 28
LDA_C_VWN_2 = 29
LDA_C_VWN_3 = 30
LDA_C_VWN_4 = 31
LDA_C_VWN_RPA = 8
LDA_C_WIGNER = 2
LDA_K_TF = 50
LDA_K_LP = 51
LDA_X = 1
LDA_C_XALPHA = 6
LDA_X_1D = 21
LDA_X_2D = 19
LDA_XC_KSDT = 259
LDA_XC_TETER93 = 20
LDA_XC_ZLP = 43
GGA_C_AM05 = 135
GGA_C_FT97 = 88
GGA_C_LM = 137
GGA_C_LYP = 131
GGA_C_OP_B88 = 87
GGA_C_OP_PBE = 86
GGA_C_OP_G96 = 85
GGA_C_OP_PW91 = 262
GGA_C_OP_XALPHA = 84
GGA_C_OPTC = 200
GGA_C_P86 = 132
GGA_C_PBE = 130
GGA_C_PBE_SOL = 133
GGA_C_XPBE = 136
GGA_C_PBE_JRGX = 138
GGA_C_RGE2 = 143
GGA_C_APBE = 186
GGA_C_SPBE = 89
GGA_C_REGTPSS = 83
GGA_C_ZPBESOL = 63
GGA_C_PBEINT = 62
GGA_C_ZPBEINT = 61
GGA_C_PBELOC = 246
GGA_C_BGCP = 39
GGA_C_PBEFE = 258
GGA_C_PW91 = 134
GGA_C_Q2D = 47
GGA_C_SOGGA11 = 152
GGA_C_SOGGA11_X = 159
GGA_C_TCA = 100
GGA_C_REVTCA = 99
GGA_C_WI0 = 153
GGA_C_WI = 148
GGA_C_WL = 147
GGA_K_DK = 516
GGA_K_PERDEW = 517
GGA_K_VSK = 518
GGA_K_VJKS = 519
GGA_K_ERNZERHOF = 520
GGA_K_MEYER = 57
GGA_K_OL1 = 512
GGA_X_OL2 = 183
GGA_K_OL2 = 513
GGA_K_PEARSON = 511
GGA_K_TFVW = 52
GGA_K_VW = 500
GGA_K_GE2 = 501
GGA_K_GOLDEN = 502
GGA_K_YT65 = 503
GGA_K_BALTIN = 504
GGA_K_LIEB = 505
GGA_K_ABSP1 = 506
GGA_K_ABSP2 = 507
GGA_K_GR = 508
GGA_K_LUDENA = 509
GGA_K_GP85 = 510
GGA_X_2D_B86 = 128
GGA_X_2D_B86_MGC = 124
GGA_X_2D_B88 = 127
GGA_X_2D_PBE = 129
GGA_X_AIRY = 192
GGA_X_LAG = 193
GGA_X_AK13 = 56
GGA_X_AM05 = 120
GGA_X_B86 = 103
GGA_X_B86_MGC = 105
GGA_X_B86_R = 41
GGA_X_B88 = 106
GGA_X_OPTB88_VDW = 139
GGA_X_MB88 = 149
GGA_K_LLP = 522
GGA_K_FR_B88 = 514
GGA_K_THAKKAR = 523
GGA_X_BAYESIAN = 125
GGA_X_BPCCAC = 98
GGA_X_C09X = 158
GGA_X_CAP = 270
GGA_X_DK87_R1 = 111
GGA_X_DK87_R2 = 112
GGA_X_EV93 = 35
GGA_X_FT97_A = 114
GGA_X_FT97_B = 115
GGA_X_G96 = 107
GGA_X_HCTH_A = 34
GGA_X_HERMAN = 104
GGA_X_HJS_PBE = 525
GGA_X_HJS_PBE_SOL = 526
GGA_X_HJS_B88 = 527
GGA_X_HJS_B97X = 528
GGA_X_HJS_B88_V2 = 46
GGA_X_HTBS = 191
GGA_X_ITYH = 529
GGA_X_KT1 = 145
GGA_XC_KT2 = 146
GGA_X_LB = 160
GGA_X_LBM = 182
GGA_X_LG93 = 113
GGA_X_LV_RPW86 = 58
GGA_X_MPBE = 122
GGA_X_N12 = 82
GGA_X_GAM = 32
GGA_X_OPTX = 110
GGA_X_PBE = 101
GGA_X_PBE_R = 102
GGA_X_PBE_SOL = 116
GGA_X_XPBE = 123
GGA_X_PBE_JSJR = 126
GGA_X_PBEK1_VDW = 140
GGA_X_RGE2 = 142
GGA_X_APBE = 184
GGA_X_PBEINT = 60
GGA_X_PBE_TCA = 59
GGA_X_LAMBDA_LO_N = 45
GGA_X_LAMBDA_CH_N = 44
GGA_X_LAMBDA_OC2_N = 40
GGA_X_PBE_MOL = 49
GGA_X_BGCP = 38
GGA_X_PBEFE = 265
GGA_K_APBE = 185
GGA_K_REVAPBE = 55
GGA_K_TW1 = 187
GGA_K_TW2 = 188
GGA_K_TW3 = 189
GGA_K_TW4 = 190
GGA_K_APBEINT = 54
GGA_K_REVAPBEINT = 53
GGA_X_PBEA = 121
GGA_X_PW86 = 108
GGA_X_RPW86 = 144
GGA_K_FR_PW86 = 515
GGA_X_PW91 = 109
GGA_X_MPW91 = 119
GGA_K_LC94 = 521
GGA_X_Q2D = 48
GGA_X_RPBE = 117
GGA_X_SFAT = 530
GGA_X_SOGGA11 = 151
GGA_X_SSB_SW = 90
GGA_X_SSB = 91
GGA_X_SSB_D = 92
GGA_X_VMT_PBE = 71
GGA_X_VMT_GE = 70
GGA_X_VMT84_PBE = 69
GGA_X_VMT84_GE = 68
GGA_X_WC = 118
GGA_X_WPBEH = 524
GGA_XC_XLYP = 166
GGA_XC_PBE1W = 173
GGA_XC_MPWLYP1W = 174
GGA_XC_PBELYP1W = 175
GGA_XC_B97_D = 170
GGA_XC_HCTH_93 = 161
GGA_XC_HCTH_120 = 162
GGA_XC_HCTH_147 = 163
GGA_XC_HCTH_407 = 164
GGA_C_HCTH_A = 97
GGA_XC_B97_GGA1 = 96
GGA_XC_HCTH_P14 = 95
GGA_XC_HCTH_P76 = 94
GGA_XC_HCTH_407P = 93
GGA_C_N12 = 80
GGA_C_N12_SX = 79
GGA_C_GAM = 33
GGA_XC_EDF1 = 165
GGA_X_OPTPBE_VDW = 141
GGA_XC_MOHLYP = 194
GGA_XC_MOHLYP2 = 195
GGA_X_SOGGA = 150
GGA_XC_OBLYP_D = 67
GGA_XC_OPWLYP_D = 66
GGA_XC_OPBE_D = 65
GGA_XC_TH_FL = 196
GGA_XC_TH_FC = 197
GGA_XC_TH_FCFO = 198
GGA_XC_TH_FCO = 199
GGA_XC_TH1 = 154
GGA_XC_TH2 = 155
GGA_XC_TH3 = 156
GGA_XC_TH4 = 157
GGA_XC_VV10 = 255
HYB_GGA_XC_CAP0 = 477
HYB_GGA_X_N12_SX = 81
HYB_GGA_X_SOGGA11_X = 426
HYB_GGA_XC_B97 = 407
HYB_GGA_XC_B97_1 = 408
HYB_GGA_XC_B97_2 = 410
HYB_GGA_XC_B97_K = 413
HYB_GGA_XC_B97_3 = 414
HYB_GGA_XC_SB98_1a = 420
HYB_GGA_XC_SB98_1b = 421
HYB_GGA_XC_SB98_1c = 422
HYB_GGA_XC_SB98_2a = 423
HYB_GGA_XC_SB98_2b = 424
HYB_GGA_XC_SB98_2c = 425
HYB_GGA_XC_WB97 = 463
HYB_GGA_XC_WB97X = 464
HYB_GGA_XC_WB97X_V = 466
HYB_GGA_XC_WB97X_D = 471
HYB_GGA_XC_B97_1p = 266
HYB_GGA_XC_LC_VV10 = 469
HYB_GGA_XC_B1WC = 412
HYB_GGA_XC_B1LYP = 416
HYB_GGA_XC_B1PW91 = 417
HYB_GGA_XC_mPW1PW = 418
HYB_GGA_XC_mPW1K = 405
HYB_GGA_XC_BHANDH = 435
HYB_GGA_XC_BHANDHLYP = 436
HYB_GGA_XC_MPWLYP1M = 453
HYB_GGA_XC_B3PW91 = 401
HYB_GGA_XC_B3LYP = 402
HYB_GGA_XC_B3LYP5 = 475
HYB_GGA_XC_B3P86 = 403
HYB_GGA_XC_MPW3PW = 415
HYB_GGA_XC_MPW3LYP = 419
HYB_GGA_XC_MB3LYP_RC04 = 437
HYB_GGA_XC_REVB3LYP = 454
HYB_GGA_XC_B3LYPs = 459
HYB_GGA_XC_CAM_B3LYP = 433
HYB_GGA_XC_TUNED_CAM_B3LYP = 434
HYB_GGA_XC_CAMY_B3LYP = 470
HYB_GGA_XC_CAMY_BLYP = 455
HYB_GGA_XC_EDF2 = 476
HYB_GGA_XC_HSE03 = 427
HYB_GGA_XC_HSE06 = 428
HYB_GGA_XC_LRC_WPBEH = 465
HYB_GGA_XC_LRC_WPBE = 473
HYB_GGA_XC_HJS_PBE = 429
HYB_GGA_XC_HJS_PBE_SOL = 430
HYB_GGA_XC_HJS_B88 = 431
HYB_GGA_XC_HJS_B97X = 432
HYB_GGA_XC_LCY_BLYP = 468
HYB_GGA_XC_LCY_PBE = 467
HYB_GGA_XC_O3LYP = 404
HYB_GGA_XC_X3LYP = 411
HYB_GGA_XC_PBEH = 406
HYB_GGA_XC_PBE0_13 = 456
HYB_GGA_XC_HPBEINT = 472
MGGA_XC_TPSSLYP1W = 242
MGGA_C_BC95 = 240
MGGA_C_CC06 = 229
MGGA_C_CS = 72
MGGA_C_M08_HX = 78
MGGA_C_M08_SO = 77
MGGA_C_M11 = 76
MGGA_C_M11_L = 75
MGGA_C_MN12_L = 74
MGGA_C_MN12_SX = 73
MGGA_C_MN15_L = 261
MGGA_C_MN15 = 269
MGGA_C_PKZB = 239
MGGA_C_TPSS = 231
MGGA_C_REVTPSS = 241
MGGA_C_TPSSLOC = 247
MGGA_C_SCAN = 267
MGGA_C_M05 = 237
MGGA_C_M05_2X = 238
MGGA_C_VSXC = 232
MGGA_C_M06_L = 233
MGGA_C_M06_HF = 234
MGGA_C_M06 = 235
MGGA_C_M06_2X = 236
MGGA_C_DLDF = 37
MGGA_X_2D_PRHG07 = 210
MGGA_X_2D_PRHG07_PRP10 = 211
MGGA_X_BR89 = 206
MGGA_X_BJ06 = 207
MGGA_X_TB09 = 208
MGGA_X_RPP09 = 209
MGGA_X_GVT4 = 204
MGGA_X_LTA = 201
MGGA_X_M05 = 214
MGGA_X_M05_2X = 215
MGGA_X_M06_2X = 218
MGGA_X_M06_L = 203
MGGA_X_M06_HF = 216
MGGA_X_M06 = 217
MGGA_X_M08_HX = 219
MGGA_X_M08_SO = 220
MGGA_X_M11 = 225
MGGA_X_M11_L = 226
MGGA_X_MBEEF = 249
MGGA_X_MBEEFVDW = 250
MGGA_X_MK00 = 230
MGGA_X_MK00B = 243
MGGA_X_MN12_L = 227
MGGA_X_MN15_L = 260
MGGA_X_MS0 = 221
MGGA_X_MS1 = 222
MGGA_X_MS2 = 223
MGGA_X_MVS = 257
MGGA_X_PKZB = 213
MGGA_X_SCAN = 263
MGGA_X_TAU_HCTH = 205
MGGA_X_TPSS = 202
MGGA_X_MODTPSS = 245
MGGA_X_REVTPSS = 212
MGGA_X_BLOC = 244
MGGA_XC_B97M_V = 254
MGGA_XC_OTPSS_D = 64
MGGA_XC_ZLP = 42
HYB_MGGA_X_MVSH = 474
HYB_MGGA_XC_M05 = 438
HYB_MGGA_XC_M05_2X = 439
HYB_MGGA_XC_B88B95 = 440
HYB_MGGA_XC_B86B95 = 441
HYB_MGGA_XC_PW86B95 = 442
HYB_MGGA_XC_BB1K = 443
HYB_MGGA_XC_MPW1B95 = 445
HYB_MGGA_XC_MPWB1K = 446
HYB_MGGA_XC_X1B95 = 447
HYB_MGGA_XC_XB1K = 448
HYB_MGGA_XC_M06_HF = 444
HYB_MGGA_XC_M06 = 449
HYB_MGGA_XC_M06_2X = 450
HYB_MGGA_XC_PW6B95 = 451
HYB_MGGA_XC_PWB6K = 452
HYB_MGGA_XC_TPSSH = 457
HYB_MGGA_XC_REVTPSSH = 458
HYB_MGGA_X_DLDF = 36
HYB_MGGA_XC_M08_HX = 460
HYB_MGGA_XC_M08_SO = 461
HYB_MGGA_XC_M11 = 462
HYB_MGGA_X_MN12_SX = 248
HYB_MGGA_X_MN15 = 268
HYB_MGGA_X_MS2H = 224
HYB_MGGA_X_SCAN0 = 264
HYB_MGGA_XC_WB97M_V = 531
# end_include_dont_touch
def __init__(self, num):
    """
    Init.

    :param num: Number for the xc. This is the libxc integer id handed in
        by the Enum machinery; the lookup below uses ``self.value``, which
        the Enum base class has presumably already set to this same number
        — TODO confirm against the Enum member-creation protocol.
    """
    # Pull the functional's metadata entry (parsed from libxc_docs.json)
    # and cache the two fields used by the kind/family helper properties.
    info = _all_xcfuncs[self.value]
    self.kind = info["Kind"]
    self.family = info["Family"]
def __str__(self):
    """Short human-readable summary: name, kind and family of the functional."""
    template = "name={}, kind={}, family={}"
    return template.format(self.name, self.kind, self.family)
@staticmethod
def all_families():
    """
    Sorted list of the unique libxc family strings.

    The ``XC_FAMILY_`` prefix has already been stripped in the metadata,
    e.g. XC_FAMILY_LDA is reported as ``LDA``.
    """
    families = set()
    for entry in _all_xcfuncs.values():
        families.add(entry["Family"])
    return sorted(families)
@staticmethod
def all_kinds():
    """
    Sorted list of the unique libxc kind strings.

    The ``XC_`` prefix has already been stripped in the metadata,
    e.g. XC_CORRELATION is reported as ``CORRELATION``.
    """
    kinds = set()
    for entry in _all_xcfuncs.values():
        kinds.add(entry["Kind"])
    return sorted(kinds)
@property
def info_dict(self):
    """Dictionary with the raw libxc metadata for this functional. see libxc_docs.json"""
    # Keyed by the functional's integer libxc id (the Enum member value).
    return _all_xcfuncs[self.value]
@property
def is_x_kind(self):
    """True if this is an exchange-only functional"""
    # ``self.kind`` is the libxc "Kind" string cached in __init__.
    return self.kind == "EXCHANGE"
@property
def is_c_kind(self):
    """True if this is a correlation-only functional"""
    return self.kind == "CORRELATION"
@property
def is_k_kind(self):
    """True if this is a kinetic functional"""
    return self.kind == "KINETIC"
@property
def is_xc_kind(self):
    """True if this is a combined exchange+correlation functional"""
    return self.kind == "EXCHANGE_CORRELATION"
@property
def is_lda_family(self):
    """True if this functional belongs to the LDA family."""
    # ``self.family`` is the libxc "Family" string cached in __init__.
    return self.family == "LDA"
@property
def is_gga_family(self):
    """True if this functional belongs to the GGA family."""
    return self.family == "GGA"
@property
def is_mgga_family(self):
    """True if this functional belongs to the meta-GGA family."""
    return self.family == "MGGA"
@property
def is_hyb_gga_family(self):
    """True if this functional belongs to the hybrid + GGA family."""
    return self.family == "HYB_GGA"
@property
def is_hyb_mgga_family(self):
    """True if this functional belongs to the hybrid + meta-GGA family."""
    return self.family == "HYB_MGGA"
def as_dict(self):
    """
    Makes LibxcFunc obey the general json interface used in pymatgen for
    easier serialization. Only the member name plus the ``@module`` /
    ``@class`` markers are needed to round-trip through ``from_dict``.
    """
    d = {"name": self.name}
    d["@module"] = self.__class__.__module__
    d["@class"] = self.__class__.__name__
    return d
@staticmethod
def from_dict(d):
    """
    Makes LibxcFunc obey the general json interface used in pymatgen for
    easier serialization.
    """
    # Only the member name is needed to rebuild the Enum member;
    # the "@module"/"@class" keys written by as_dict are ignored here.
    return LibxcFunc[d["name"]]
def to_json(self):
    """
    Returns a json string representation of the MSONable object.
    """
    # MontyEncoder (monty library) serializes types the stock json encoder
    # rejects; it is presumably imported at module level — not visible in
    # this chunk, verify at the top of the file.
    return json.dumps(self.as_dict(), cls=MontyEncoder)
if __name__ == "__main__":
    # Quick smoke test: print one summary line per known libxc functional.
    for xc in LibxcFunc:
        print(xc)
|
vorwerkc/pymatgen
|
pymatgen/core/libxcfunc.py
|
Python
|
mit
| 13,095
|
[
"pymatgen"
] |
7d37b556700b8b26611c6691b430247342eeeae2b3bc1e4ab2532f5e7d5044fa
|
# -*- coding:utf-8 -*-
#
# Copyright © 2011-2012 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
"""
IPython v0.13+ client's widget
"""
# Fix for Issue 1356
from __future__ import absolute_import
# Stdlib imports
import os
import os.path as osp
from string import Template
import sys
import time
# Qt imports
from spyderlib.qt.QtGui import (QTextEdit, QKeySequence, QWidget, QMenu,
QHBoxLayout, QToolButton, QVBoxLayout,
QMessageBox)
from spyderlib.qt.QtCore import SIGNAL, Qt
from spyderlib import pygments_patch
pygments_patch.apply()
# IPython imports
from IPython.qt.console.rich_ipython_widget import RichIPythonWidget
from IPython.qt.console.ansi_code_processor import ANSI_OR_SPECIAL_PATTERN
from IPython.core.application import get_ipython_dir
from IPython.core.oinspect import call_tip
from IPython.config.loader import Config, load_pyconfig_files
# Local imports
from spyderlib.baseconfig import (get_conf_path, get_image_path,
get_module_source_path, _)
from spyderlib.config import CONF
from spyderlib.guiconfig import (create_shortcut, get_font, get_shortcut,
new_shortcut)
from spyderlib.utils.dochelpers import getargspecfromtext, getsignaturefromtext
from spyderlib.utils.qthelpers import (get_std_icon, create_toolbutton,
add_actions, create_action, get_icon,
restore_keyevent)
from spyderlib.utils import programs, sourcecode
from spyderlib.widgets.browser import WebView
from spyderlib.widgets.calltip import CallTipWidget
from spyderlib.widgets.mixins import (BaseEditMixin, InspectObjectMixin,
SaveHistoryMixin, TracebackLinksMixin)
#-----------------------------------------------------------------------------
# Templates
#-----------------------------------------------------------------------------
# Using the same css file from the Object Inspector for now. Maybe
# later it'll be a good idea to create a new one.
# Resolve the css/template locations relative to the spyderlib package.
UTILS_PATH = get_module_source_path('spyderlib', 'utils')
CSS_PATH = osp.join(UTILS_PATH, 'inspector', 'static', 'css')
TEMPLATES_PATH = osp.join(UTILS_PATH, 'ipython', 'templates')


def _read_template(filename):
    """Return the text of an HTML template, closing the file promptly.

    The previous ``open(...).read()`` one-liners leaked the file handles
    (they were only reclaimed by the garbage collector, raising
    ResourceWarning on modern interpreters).
    """
    with open(osp.join(TEMPLATES_PATH, filename)) as fobj:
        return fobj.read()


# HTML pages shown in the info widget while the kernel starts/fails.
BLANK = _read_template('blank.html')
LOADING = _read_template('loading.html')
KERNEL_ERROR = _read_template('kernel_error.html')
#-----------------------------------------------------------------------------
# Control widgets
#-----------------------------------------------------------------------------
class IPythonControlWidget(TracebackLinksMixin, InspectObjectMixin, QTextEdit,
                           BaseEditMixin):
    """
    Subclass of QTextEdit with features from Spyder's mixins to use as the
    control widget for IPython widgets
    """
    # NOTE(review): QT_CLASS is presumably consumed by the mixins to know
    # which Qt base they are grafted onto — confirm in spyderlib.widgets.mixins.
    QT_CLASS = QTextEdit

    def __init__(self, parent=None):
        QTextEdit.__init__(self, parent)
        BaseEditMixin.__init__(self)
        TracebackLinksMixin.__init__(self)
        InspectObjectMixin.__init__(self)
        self.found_results = []
        self.calltip_widget = CallTipWidget(self, hide_timer_on=True)
        # To not use Spyder calltips obtained through the monitor
        self.calltips = False

    def showEvent(self, event):
        """Reimplement Qt Method"""
        # Notify listeners (e.g. the plugin) that the widget became visible.
        self.emit(SIGNAL("visibility_changed(bool)"), True)

    def _key_question(self, text):
        """ Action for '?' and '(' """
        parent = self.parentWidget()
        self.current_prompt_pos = parent._prompt_pos
        if self.get_current_line_to_cursor():
            last_obj = self.get_last_obj()
            if last_obj and not last_obj.isdigit():
                # Show a calltip/help for the object left of the cursor.
                self.show_object_info(last_obj)
        # The typed character still has to reach the text buffer.
        self.insert_text(text)

    def keyPressEvent(self, event):
        """Reimplement Qt Method - Basic keypress event handler"""
        event, text, key, ctrl, shift = restore_keyevent(event)
        # '?' and '(' trigger object inspection, unless text is selected.
        if key == Qt.Key_Question and not self.has_selected_text():
            self._key_question(text)
        elif key == Qt.Key_ParenLeft and not self.has_selected_text():
            self._key_question(text)
        else:
            # Let the parent widget handle the key press event
            QTextEdit.keyPressEvent(self, event)

    def focusInEvent(self, event):
        """Reimplement Qt method to send focus change notification"""
        self.emit(SIGNAL('focus_changed()'))
        return super(IPythonControlWidget, self).focusInEvent(event)

    def focusOutEvent(self, event):
        """Reimplement Qt method to send focus change notification"""
        self.emit(SIGNAL('focus_changed()'))
        return super(IPythonControlWidget, self).focusOutEvent(event)
class IPythonPageControlWidget(QTextEdit, BaseEditMixin):
    """
    Subclass of QTextEdit with features from Spyder's mixins.BaseEditMixin to
    use as the paging widget for IPython widgets
    """
    QT_CLASS = QTextEdit

    def __init__(self, parent=None):
        QTextEdit.__init__(self, parent)
        BaseEditMixin.__init__(self)
        self.found_results = []

    def showEvent(self, event):
        """Reimplement Qt Method"""
        self.emit(SIGNAL("visibility_changed(bool)"), True)

    def keyPressEvent(self, event):
        """Reimplement Qt Method - Basic keypress event handler"""
        event, text, key, ctrl, shift = restore_keyevent(event)
        # '/' opens the find widget while paging, like in `less`.
        if key == Qt.Key_Slash and self.isVisible():
            self.emit(SIGNAL("show_find_widget()"))

    def focusInEvent(self, event):
        """Reimplement Qt method to send focus change notification"""
        self.emit(SIGNAL('focus_changed()'))
        return super(IPythonPageControlWidget, self).focusInEvent(event)

    def focusOutEvent(self, event):
        """Reimplement Qt method to send focus change notification"""
        self.emit(SIGNAL('focus_changed()'))
        return super(IPythonPageControlWidget, self).focusOutEvent(event)
#-----------------------------------------------------------------------------
# Shell widget
#-----------------------------------------------------------------------------
class IPythonShellWidget(RichIPythonWidget):
    """
    Spyder's IPython shell widget

    This class has custom control and page_control widgets, additional methods
    to provide missing functionality and a couple more keyboard shortcuts.
    """
    def __init__(self, *args, **kw):
        # To override the Qt widget used by RichIPythonWidget
        self.custom_control = IPythonControlWidget
        self.custom_page_control = IPythonPageControlWidget
        super(IPythonShellWidget, self).__init__(*args, **kw)
        self.set_background_color()

        # --- Spyder variables ---
        self.ipyclient = None

        # --- Keyboard shortcuts ---
        self.shortcuts = self.create_shortcuts()

        # --- IPython variables ---
        # To send an interrupt signal to the Spyder kernel
        self.custom_interrupt = True
        # To restart the Spyder kernel in case it dies
        self.custom_restart = True

    #---- Public API ----------------------------------------------------------
    def set_ipyclient(self, ipyclient):
        """Bind this shell widget to an IPython client one"""
        self.ipyclient = ipyclient
        self.exit_requested.connect(ipyclient.exit_callback)

    def long_banner(self):
        """Banner for IPython widgets with pylab message"""
        from IPython.core.usage import default_gui_banner
        banner = default_gui_banner
        # Append a pylab note only if matplotlib is importable AND both
        # pylab options are enabled in Spyder's config.
        pylab_o = CONF.get('ipython_console', 'pylab', True)
        autoload_pylab_o = CONF.get('ipython_console', 'pylab/autoload', True)
        mpl_installed = programs.is_module_installed('matplotlib')
        if mpl_installed and (pylab_o and autoload_pylab_o):
            pylab_message = ("\nPopulating the interactive namespace from "
                            "numpy and matplotlib")
            banner = banner + pylab_message
        sympy_o = CONF.get('ipython_console', 'symbolic_math', True)
        if sympy_o:
            lines = """
These commands were executed:
>>> from __future__ import division
>>> from sympy import *
>>> x, y, z, t = symbols('x y z t')
>>> k, m, n = symbols('k m n', integer=True)
>>> f, g, h = symbols('f g h', cls=Function)
"""
            banner = banner + lines
        return banner

    def short_banner(self):
        """Short banner with Python and IPython versions"""
        from IPython.core.release import version
        py_ver = '%d.%d.%d' % (sys.version_info[0], sys.version_info[1],
                               sys.version_info[2])
        banner = 'Python %s on %s -- IPython %s' % (py_ver, sys.platform,
                                                    version)
        return banner

    def clear_console(self):
        """Clear the whole console via IPython's %clear magic."""
        self.execute("%clear")

    def write_to_stdin(self, line):
        """Send raw characters to the IPython kernel through stdin"""
        # stdin_channel exists on older IPython versions only; fall back
        # to the newer direct input() API.
        try:
            self.kernel_client.stdin_channel.input(line)
        except AttributeError:
            self.kernel_client.input(line)

    def set_background_color(self):
        """Switch to the dark ('linux') color scheme when configured."""
        lightbg_o = CONF.get('ipython_console', 'light_color')
        if not lightbg_o:
            self.set_default_style(colors='linux')

    def create_shortcuts(self):
        """Create configurable + fixed keyboard shortcuts for this widget."""
        inspect = create_shortcut(self._control.inspect_current_object,
                                  context='Console',
                                  name='Inspect current object',
                                  parent=self)
        clear_console = create_shortcut(self.clear_console, context='Console',
                                        name='Clear shell', parent=self)
        # Fixed shortcuts
        new_shortcut("Ctrl+T", self,
                     lambda: self.emit(SIGNAL("new_ipyclient()")))
        return [inspect, clear_console]

    def get_signature(self, content):
        """Get signature from inspect reply content"""
        data = content.get('data', {})
        text = data.get('text/plain', '')
        if text:
            # Strip ANSI escapes before parsing the plain-text help.
            text = ANSI_OR_SPECIAL_PATTERN.sub('', text)
            line = self._control.get_current_line_to_cursor()
            name = line[:-1].split('.')[-1]
            argspec = getargspecfromtext(text)
            if argspec:
                # This covers cases like np.abs, whose docstring is
                # the same as np.absolute and because of that a proper
                # signature can't be obtained correctly
                signature = name + argspec
            else:
                signature = getsignaturefromtext(text, name)
            return signature
        else:
            return ''

    #---- IPython private methods ---------------------------------------------
    def _context_menu_make(self, pos):
        """Reimplement the IPython context menu"""
        menu = super(IPythonShellWidget, self)._context_menu_make(pos)
        return self.ipyclient.add_actions_to_context_menu(menu)

    def _banner_default(self):
        """
        Reimplement banner creation to let the user decide if he wants a
        banner or not
        """
        banner_o = CONF.get('ipython_console', 'show_banner', True)
        if banner_o:
            return self.long_banner()
        else:
            return self.short_banner()

    def _handle_object_info_reply(self, rep):
        """
        Reimplement call tips to only show signatures, using the same style
        from our Editor and External Console too
        Note: For IPython 2-
        """
        self.log.debug("oinfo: %s", rep.get('content', ''))
        cursor = self._get_cursor()
        info = self._request_info.get('call_tip')
        # Only show the tip if it matches the pending request and the
        # cursor has not moved since the request was sent.
        if info and info.id == rep['parent_header']['msg_id'] and \
                info.pos == cursor.position():
            content = rep['content']
            if content.get('ismagic', False):
                call_info, doc = None, None
            else:
                call_info, doc = call_tip(content, format_call=True)
                if call_info is None and doc is not None:
                    name = content['name'].split('.')[-1]
                    argspec = getargspecfromtext(doc)
                    if argspec:
                        # This covers cases like np.abs, whose docstring is
                        # the same as np.absolute and because of that a proper
                        # signature can't be obtained correctly
                        call_info = name + argspec
                    else:
                        call_info = getsignaturefromtext(doc, name)
            if call_info:
                self._control.show_calltip(_("Arguments"), call_info,
                                           signature=True, color='#2D62FF')

    def _handle_inspect_reply(self, rep):
        """
        Reimplement call tips to only show signatures, using the same style
        from our Editor and External Console too
        Note: For IPython 3+
        """
        cursor = self._get_cursor()
        info = self._request_info.get('call_tip')
        if info and info.id == rep['parent_header']['msg_id'] and \
                info.pos == cursor.position():
            content = rep['content']
            if content.get('status') == 'ok' and content.get('found', False):
                signature = self.get_signature(content)
                if signature:
                    self._control.show_calltip(_("Arguments"), signature,
                                               signature=True, color='#2D62FF')

    #---- Qt methods ----------------------------------------------------------
    def focusInEvent(self, event):
        """Reimplement Qt method to send focus change notification"""
        self.emit(SIGNAL('focus_changed()'))
        return super(IPythonShellWidget, self).focusInEvent(event)

    def focusOutEvent(self, event):
        """Reimplement Qt method to send focus change notification"""
        self.emit(SIGNAL('focus_changed()'))
        return super(IPythonShellWidget, self).focusOutEvent(event)
#-----------------------------------------------------------------------------
# Client widget
#-----------------------------------------------------------------------------
class IPythonClient(QWidget, SaveHistoryMixin):
    """
    IPython client or frontend for Spyder

    This is a widget composed of a shell widget (i.e. RichIPythonWidget
    + our additions = IPythonShellWidget) and an WebView info widget to
    print kernel error and other messages.
    """
    # Evaluated once at import time: session separator written to the
    # history file (presumably by SaveHistoryMixin — verify there).
    SEPARATOR = '%s##---(%s)---' % (os.linesep*2, time.ctime())

    def __init__(self, plugin, name, history_filename, connection_file=None,
                 hostname=None, sshkey=None, password=None,
                 kernel_widget_id=None, menu_actions=None):
        super(IPythonClient, self).__init__(plugin)
        SaveHistoryMixin.__init__(self)
        self.options_button = None

        # stop button and icon
        self.stop_button = None
        self.stop_icon = get_icon("stop.png")

        # Connection/credential info for local or remote (ssh) kernels.
        self.connection_file = connection_file
        self.kernel_widget_id = kernel_widget_id
        self.hostname = hostname
        self.sshkey = sshkey
        self.password = password
        self.name = name
        self.get_option = plugin.get_option
        self.shellwidget = IPythonShellWidget(config=self.shellwidget_config(),
                                              local_kernel=False)
        self.shellwidget.hide()
        self.infowidget = WebView(self)
        self.menu_actions = menu_actions
        self.history_filename = get_conf_path(history_filename)
        self.history = []
        self.namespacebrowser = None

        # Show the loading page until the kernel is actually up.
        self.set_infowidget_font()
        self.loading_page = self._create_loading_page()
        self.infowidget.setHtml(self.loading_page)

        # Layout: toolbar row on top, then shell and info widgets stacked.
        vlayout = QVBoxLayout()
        toolbar_buttons = self.get_toolbar_buttons()
        hlayout = QHBoxLayout()
        for button in toolbar_buttons:
            hlayout.addWidget(button)
        vlayout.addLayout(hlayout)
        vlayout.setContentsMargins(0, 0, 0, 0)
        vlayout.addWidget(self.shellwidget)
        vlayout.addWidget(self.infowidget)
        self.setLayout(vlayout)

        self.exit_callback = lambda: plugin.close_client(client=self)

    #------ Public API --------------------------------------------------------
    def show_shellwidget(self, give_focus=True):
        """Show shellwidget and configure it"""
        self.infowidget.hide()
        self.shellwidget.show()
        self.infowidget.setHtml(BLANK)
        if give_focus:
            self.get_control().setFocus()

        # Connect shellwidget to the client
        self.shellwidget.set_ipyclient(self)

        # To save history
        self.shellwidget.executing.connect(self.add_to_history)

        # For Mayavi to run correctly
        self.shellwidget.executing.connect(self.set_backend_for_mayavi)

        # To update history after execution
        self.shellwidget.executed.connect(self.update_history)

        # To update the Variable Explorer after execution
        self.shellwidget.executed.connect(self.auto_refresh_namespacebrowser)

        # To show a stop button, when executing a process
        self.shellwidget.executing.connect(self.enable_stop_button)

        # To hide a stop button after execution stopped
        self.shellwidget.executed.connect(self.disable_stop_button)

    def enable_stop_button(self):
        """Enable the stop button while the kernel is executing."""
        self.stop_button.setEnabled(True)

    def disable_stop_button(self):
        """Disable the stop button once execution has finished."""
        self.stop_button.setDisabled(True)

    def stop_button_click_handler(self):
        """Interrupt the kernel and grey out the button until next run."""
        self.stop_button.setDisabled(True)
        self.interrupt_kernel()

    def show_kernel_error(self, error):
        """Show kernel initialization errors in infowidget"""
        # Remove explanation about how to kill the kernel (doesn't apply to us)
        error = error.split('issues/2049')[-1]
        # Remove unneeded blank lines at the beginning
        eol = sourcecode.get_eol_chars(error)
        if eol:
            error = error.replace(eol, '<br>')
        while error.startswith('<br>'):
            error = error[4:]
        # Remove connection message
        if error.startswith('To connect another client') or \
                error.startswith('[IPKernelApp] To connect another client'):
            error = error.split('<br>')
            error = '<br>'.join(error[2:])
        # Don't break lines in hyphens
        # From http://stackoverflow.com/q/7691569/438386
        error = error.replace('-', '&#8209')
        # NOTE(review): "ocurred" typo below is part of the translatable
        # string; fixing it would invalidate existing translations.
        message = _("An error ocurred while starting the kernel")
        kernel_error_template = Template(KERNEL_ERROR)
        page = kernel_error_template.substitute(css_path=CSS_PATH,
                                                message=message,
                                                error=error)
        self.infowidget.setHtml(page)

    def show_restart_animation(self):
        """Show the loading page while the kernel restarts."""
        self.shellwidget.hide()
        self.infowidget.setHtml(self.loading_page)
        self.infowidget.show()

    def get_name(self):
        """Return client name"""
        return ((_("Console") if self.hostname is None else self.hostname)
                + " " + self.name)

    def get_control(self):
        """Return the text widget (or similar) to give focus to"""
        # page_control is the widget used for paging
        page_control = self.shellwidget._page_control
        if page_control and page_control.isVisible():
            return page_control
        else:
            return self.shellwidget._control

    def get_options_menu(self):
        """Return options menu"""
        restart_action = create_action(self, _("Restart kernel"),
                                       shortcut=QKeySequence("Ctrl+."),
                                       icon=get_icon('restart.png'),
                                       triggered=self.restart_kernel)
        restart_action.setShortcutContext(Qt.WidgetWithChildrenShortcut)

        # Main menu
        if self.menu_actions is not None:
            actions = [restart_action, None] + self.menu_actions
        else:
            actions = [restart_action]
        return actions

    def get_toolbar_buttons(self):
        """Return toolbar buttons list"""
        #TODO: Eventually add some buttons (Empty for now)
        # (see for example: spyderlib/widgets/externalshell/baseshell.py)
        buttons = []
        # Code to add the stop button
        if self.stop_button is None:
            self.stop_button = create_toolbutton(self, text=_("Stop"),
                                                 icon=self.stop_icon,
                                                 tip=_("Stop the current command"))
            self.disable_stop_button()
            # set click event handler
            self.stop_button.clicked.connect(self.stop_button_click_handler)
        if self.stop_button is not None:
            buttons.append(self.stop_button)

        if self.options_button is None:
            options = self.get_options_menu()
            if options:
                self.options_button = create_toolbutton(self,
                        text=_("Options"), icon=get_icon('tooloptions.png'))
                self.options_button.setPopupMode(QToolButton.InstantPopup)
                menu = QMenu(self)
                add_actions(menu, options)
                self.options_button.setMenu(menu)
        if self.options_button is not None:
            buttons.append(self.options_button)

        return buttons

    def add_actions_to_context_menu(self, menu):
        """Add actions to IPython widget context menu"""
        # See spyderlib/widgets/ipython.py for more details on this method
        inspect_action = create_action(self, _("Inspect current object"),
                                    QKeySequence(get_shortcut('console',
                                                    'inspect current object')),
                                    icon=get_std_icon('MessageBoxInformation'),
                                    triggered=self.inspect_object)
        clear_line_action = create_action(self, _("Clear line or block"),
                                          QKeySequence("Shift+Escape"),
                                          icon=get_icon('eraser.png'),
                                          triggered=self.clear_line)
        clear_console_action = create_action(self, _("Clear console"),
                                             QKeySequence(get_shortcut('console',
                                                               'clear shell')),
                                             icon=get_icon('clear.png'),
                                             triggered=self.clear_console)
        quit_action = create_action(self, _("&Quit"), icon='exit.png',
                                    triggered=self.exit_callback)
        add_actions(menu, (None, inspect_action, clear_line_action,
                           clear_console_action, None, quit_action))
        return menu

    def set_font(self, font):
        """Set IPython widget's font"""
        self.shellwidget._control.setFont(font)
        self.shellwidget.font = font

    def set_infowidget_font(self):
        """Apply the rich-text font configured for the Object Inspector."""
        font = get_font('inspector', 'rich_text')
        self.infowidget.set_font(font)

    def interrupt_kernel(self):
        """Interrupt the associated Spyder kernel if it's running"""
        self.shellwidget.request_interrupt_kernel()

    def restart_kernel(self):
        """Restart the associated Spyder kernel"""
        self.shellwidget.request_restart_kernel()

    def inspect_object(self):
        """Show how to inspect an object with our object inspector"""
        self.shellwidget._control.inspect_current_object()

    def clear_line(self):
        """Clear a console line"""
        self.shellwidget._keyboard_quit()

    def clear_console(self):
        """Clear the whole console"""
        self.shellwidget.execute("%clear")

    def if_kernel_dies(self, t):
        """
        Show a message in the console if the kernel dies.
        t is the time in seconds between the death and showing the message.
        """
        message = _("It seems the kernel died unexpectedly. Use "
                    "'Restart kernel' to continue using this console.")
        self.shellwidget._append_plain_text(message + '\n')

    def update_history(self):
        """Mirror the shell widget's command history on this client."""
        self.history = self.shellwidget._history

    def set_backend_for_mayavi(self, command):
        """Switch the GUI event loop to Qt when a command imports Mayavi."""
        calling_mayavi = False
        lines = command.splitlines()
        for l in lines:
            # Only look at non-comment lines for a mayavi import.
            if not l.startswith('#'):
                if 'import mayavi' in l or 'from mayavi' in l:
                    calling_mayavi = True
                    break
        if calling_mayavi:
            message = _("Changing backend to Qt for Mayavi")
            self.shellwidget._append_plain_text(message + '\n')
            self.shellwidget.execute("%gui inline\n%gui qt")

    def interrupt_message(self):
        """
        Print an interrupt message when the client is connected to an external
        kernel
        """
        message = _("Kernel process is either remote or unspecified. "
                    "Cannot interrupt")
        QMessageBox.information(self, "IPython", message)

    def restart_message(self):
        """
        Print a restart message when the client is connected to an external
        kernel
        """
        message = _("Kernel process is either remote or unspecified. "
                    "Cannot restart.")
        QMessageBox.information(self, "IPython", message)

    def set_namespacebrowser(self, namespacebrowser):
        """Set namespace browser widget"""
        self.namespacebrowser = namespacebrowser

    def auto_refresh_namespacebrowser(self):
        """Refresh namespace browser"""
        if self.namespacebrowser:
            self.namespacebrowser.refresh_table()

    def shellwidget_config(self):
        """Generate a Config instance for shell widgets using our config
        system

        This lets us create each widget with its own config (as opposed to
        IPythonQtConsoleApp, where all widgets have the same config)
        """
        # ---- IPython config ----
        # NOTE(review): bare except below swallows *any* failure while
        # reading the user's qtconsole config and silently falls back to
        # defaults — deliberate best-effort, but worth narrowing some day.
        try:
            profile_path = osp.join(get_ipython_dir(), 'profile_default')
            full_ip_cfg = load_pyconfig_files(['ipython_qtconsole_config.py'],
                                             profile_path)

            # From the full config we only select the IPythonWidget section
            # because the others have no effect here.
            ip_cfg = Config({'IPythonWidget': full_ip_cfg.IPythonWidget})
        except:
            ip_cfg = Config()

        # ---- Spyder config ----
        spy_cfg = Config()

        # Make the pager widget a rich one (i.e a QTextEdit)
        spy_cfg.IPythonWidget.kind = 'rich'

        # Gui completion widget
        gui_comp_o = self.get_option('use_gui_completion')
        completions = {True: 'droplist', False: 'ncurses'}
        spy_cfg.IPythonWidget.gui_completion = completions[gui_comp_o]

        # Pager
        pager_o = self.get_option('use_pager')
        if pager_o:
            spy_cfg.IPythonWidget.paging = 'inside'
        else:
            spy_cfg.IPythonWidget.paging = 'none'

        # Calltips
        calltips_o = self.get_option('show_calltips')
        spy_cfg.IPythonWidget.enable_calltips = calltips_o

        # Buffer size
        buffer_size_o = self.get_option('buffer_size')
        spy_cfg.IPythonWidget.buffer_size = buffer_size_o

        # Prompts
        in_prompt_o = self.get_option('in_prompt')
        out_prompt_o = self.get_option('out_prompt')
        if in_prompt_o:
            spy_cfg.IPythonWidget.in_prompt = in_prompt_o
        if out_prompt_o:
            spy_cfg.IPythonWidget.out_prompt = out_prompt_o

        # Merge IPython and Spyder configs. Spyder prefs will have prevalence
        # over IPython ones
        ip_cfg._merge(spy_cfg)
        return ip_cfg

    #------ Private API -------------------------------------------------------
    def _create_loading_page(self):
        """Build the HTML page shown while connecting to the kernel."""
        loading_template = Template(LOADING)
        loading_img = get_image_path('loading_sprites.png')
        # QtWebKit needs forward slashes even on Windows.
        if os.name == 'nt':
            loading_img = loading_img.replace('\\', '/')
        message = _("Connecting to kernel...")
        page = loading_template.substitute(css_path=CSS_PATH,
                                           loading_img=loading_img,
                                           message=message)
        return page

    #---- Qt methods ----------------------------------------------------------
    def closeEvent(self, event):
        """
        Reimplement Qt method to stop sending the custom_restart_kernel_died
        signal
        """
        kc = self.shellwidget.kernel_client
        if kc is not None:
            # Stop the heartbeat channel so no "kernel died" signal fires
            # for a client we are deliberately closing.
            kc.hb_channel.pause()
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/spyderlib/widgets/ipython.py
|
Python
|
gpl-3.0
| 29,448
|
[
"Mayavi"
] |
b438106f6770393d7bfa191880d4d71584508110f01f980bbf1f48cff72eab85
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import warnings
from ..utils.exceptions import AstropyUserWarning
from ..extern.six.moves import range
__all__ = ['SigmaClip', 'sigma_clip', 'sigma_clipped_stats']
class SigmaClip(object):
    """
    Class to perform sigma clipping.

    The data will be iterated over, each time rejecting points that are
    discrepant by more than a specified number of standard deviations
    from a center value.  If the data contains invalid values (NaNs or
    infs), they are automatically masked before performing the sigma
    clipping.

    For a functional interface to sigma clipping, see
    :func:`sigma_clip`.

    .. note::
        `scipy.stats.sigmaclip
        <http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.sigmaclip.html>`_
        provides a subset of the functionality in this class.

    Parameters
    ----------
    sigma : float, optional
        The number of standard deviations to use for both the lower and
        upper clipping limit.  These limits are overridden by
        ``sigma_lower`` and ``sigma_upper``, if input.  Defaults to 3.
    sigma_lower : float or `None`, optional
        The number of standard deviations to use as the lower bound for
        the clipping limit.  If `None` then the value of ``sigma`` is
        used.  Defaults to `None`.
    sigma_upper : float or `None`, optional
        The number of standard deviations to use as the upper bound for
        the clipping limit.  If `None` then the value of ``sigma`` is
        used.  Defaults to `None`.
    iters : int or `None`, optional
        The number of iterations to perform sigma clipping, or `None` to
        clip until convergence is achieved (i.e., continue until the
        last iteration clips nothing).  Defaults to 5.
    cenfunc : callable, optional
        The function used to compute the center for the clipping.  Must
        be a callable that takes in a masked array and outputs the
        central value.  Defaults to the median (`numpy.ma.median`).
    stdfunc : callable, optional
        The function used to compute the standard deviation about the
        center.  Must be a callable that takes in a masked array and
        outputs a width estimator.  Masked (rejected) pixels are those
        where::

            deviation < (-sigma_lower * stdfunc(deviation))
            deviation > (sigma_upper * stdfunc(deviation))

        where::

            deviation = data - cenfunc(data [,axis=int])

        Defaults to the standard deviation (`numpy.std`).

    See Also
    --------
    sigma_clip

    Examples
    --------
    This example generates random variates from a Gaussian distribution
    and returns a masked array in which all points that are more than 2
    sample standard deviations from the median are masked::

        >>> from astropy.stats import SigmaClip
        >>> from numpy.random import randn
        >>> randvar = randn(10000)
        >>> sigclip = SigmaClip(sigma=2, iters=5)
        >>> filtered_data = sigclip(randvar)

    This example sigma clips on a similar distribution, but uses 3 sigma
    relative to the sample *mean*, clips until convergence, and does not
    copy the data::

        >>> from astropy.stats import SigmaClip
        >>> from numpy.random import randn
        >>> from numpy import mean
        >>> randvar = randn(10000)
        >>> sigclip = SigmaClip(sigma=3, iters=None, cenfunc=mean)
        >>> filtered_data = sigclip(randvar, copy=False)

    This example sigma clips along one axis on a similar distribution
    (with bad points inserted)::

        >>> from astropy.stats import SigmaClip
        >>> from numpy.random import normal
        >>> from numpy import arange, diag, ones
        >>> data = arange(5) + normal(0., 0.05, (5, 5)) + diag(ones(5))
        >>> sigclip = SigmaClip(sigma=2.3)
        >>> filtered_data = sigclip(data, axis=0)

    Note that along the other axis, no points would be masked, as the
    variance is higher.
    """

    def __init__(self, sigma=3., sigma_lower=None, sigma_upper=None, iters=5,
                 cenfunc=np.ma.median, stdfunc=np.std):
        self.sigma = sigma
        self.sigma_lower = sigma_lower
        self.sigma_upper = sigma_upper
        self.iters = iters
        self.cenfunc = cenfunc
        self.stdfunc = stdfunc

    def __repr__(self):
        return ('SigmaClip(sigma={0}, sigma_lower={1}, sigma_upper={2}, '
                'iters={3}, cenfunc={4}, stdfunc={5})'
                .format(self.sigma, self.sigma_lower, self.sigma_upper,
                        self.iters, self.cenfunc, self.stdfunc))

    def __str__(self):
        lines = ['<' + self.__class__.__name__ + '>']
        attrs = ['sigma', 'sigma_lower', 'sigma_upper', 'iters', 'cenfunc',
                 'stdfunc']
        for attr in attrs:
            lines.append(' {0}: {1}'.format(attr, getattr(self, attr)))
        return '\n'.join(lines)

    def _perform_clip(self, _filtered_data, sigma_lower, sigma_upper,
                      axis=None):
        """
        Perform one sigma-clipping iteration in place.

        The data are compared to ``center - sigma_lower * std`` and
        ``center + sigma_upper * std``; values outside that interval get
        `True` set in the mask array.  The effective sigma bounds are now
        passed in explicitly instead of being read from (and previously
        written to) instance attributes.
        """
        if _filtered_data.size == 0:
            return _filtered_data

        max_value = self.cenfunc(_filtered_data, axis=axis)
        std = self.stdfunc(_filtered_data, axis=axis)
        min_value = max_value - std * sigma_lower
        max_value += std * sigma_upper

        if axis is not None:
            if axis != 0:
                # Re-insert the reduced axis so the limits broadcast
                # against the original data shape.
                min_value = np.expand_dims(min_value, axis=axis)
                max_value = np.expand_dims(max_value, axis=axis)
        if max_value is np.ma.masked:
            # Everything already masked: use NaN limits so the
            # comparisons below stay masked instead of erroring.
            max_value = np.ma.MaskedArray(np.nan, mask=True)
            min_value = np.ma.MaskedArray(np.nan, mask=True)

        _filtered_data.mask |= _filtered_data > max_value
        _filtered_data.mask |= _filtered_data < min_value

        return _filtered_data

    def __call__(self, data, axis=None, copy=True):
        """
        Perform sigma clipping on the provided data.

        Parameters
        ----------
        data : array-like
            The data to be sigma clipped.
        axis : int or `None`, optional
            If not `None`, clip along the given axis.  For this case,
            ``axis`` will be passed on to ``cenfunc`` and ``stdfunc``,
            which are expected to return an array with the axis
            dimension removed (like the numpy functions).  If `None`,
            clip over all axes.  Defaults to `None`.
        copy : bool, optional
            If `True`, the ``data`` array will be copied.  If `False`,
            the returned masked array data will contain the same array
            as ``data``.  Defaults to `True`.

        Returns
        -------
        filtered_data : `numpy.ma.MaskedArray`
            A masked array with the same shape as ``data`` input, where
            the points rejected by the algorithm have been masked.
        """
        # Resolve the effective bounds locally instead of writing them back
        # to the instance: the previous code permanently overwrote
        # ``self.sigma_lower``/``self.sigma_upper`` on the first call, so
        # later changes to ``self.sigma`` were silently ignored on reuse.
        sigma_lower = self.sigma_lower
        if sigma_lower is None:
            sigma_lower = self.sigma
        sigma_upper = self.sigma_upper
        if sigma_upper is None:
            sigma_upper = self.sigma

        if np.any(~np.isfinite(data)):
            data = np.ma.masked_invalid(data)
            warnings.warn('Input data contains invalid values (NaNs or '
                          'infs), which were automatically masked.',
                          AstropyUserWarning)

        filtered_data = np.ma.array(data, copy=copy)

        if self.iters is None:
            # Clip until convergence: stop when an iteration rejects nothing.
            lastrej = filtered_data.count() + 1
            while filtered_data.count() != lastrej:
                lastrej = filtered_data.count()
                self._perform_clip(filtered_data, sigma_lower, sigma_upper,
                                   axis=axis)
        else:
            for i in range(self.iters):
                self._perform_clip(filtered_data, sigma_lower, sigma_upper,
                                   axis=axis)

        # prevent filtered_data.mask = False (scalar) if no values are clipped
        if filtered_data.mask.shape == ():
            # make .mask shape match .data shape
            filtered_data.mask = False

        return filtered_data
def sigma_clip(data, sigma=3, sigma_lower=None, sigma_upper=None, iters=5,
               cenfunc=np.ma.median, stdfunc=np.std, axis=None, copy=True):
    """
    Perform sigma-clipping on the provided data.

    Functional wrapper around the object-oriented `SigmaClip` interface:
    points more discrepant than the given number of standard deviations
    from the center value are iteratively masked.  Invalid values (NaNs
    or infs) are automatically masked before clipping begins.

    .. note::
        `scipy.stats.sigmaclip
        <http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.sigmaclip.html>`_
        provides a subset of the functionality in this function.

    Parameters
    ----------
    data : array-like
        The data to be sigma clipped.
    sigma : float, optional
        The number of standard deviations to use for both the lower and
        upper clipping limit.  These limits are overridden by
        ``sigma_lower`` and ``sigma_upper``, if input.  Defaults to 3.
    sigma_lower : float or `None`, optional
        The number of standard deviations for the lower clipping bound.
        If `None` the value of ``sigma`` is used.  Defaults to `None`.
    sigma_upper : float or `None`, optional
        The number of standard deviations for the upper clipping bound.
        If `None` the value of ``sigma`` is used.  Defaults to `None`.
    iters : int or `None`, optional
        The number of iterations to perform sigma clipping, or `None` to
        clip until convergence (i.e., continue until the last iteration
        clips nothing).  Defaults to 5.
    cenfunc : callable, optional
        Computes the center value of a masked array.  Defaults to the
        median (`numpy.ma.median`).
    stdfunc : callable, optional
        Computes the width estimator about the center.  Masked
        (rejected) pixels are those where::

            deviation < (-sigma_lower * stdfunc(deviation))
            deviation > (sigma_upper * stdfunc(deviation))

        with ``deviation = data - cenfunc(data [,axis=int])``.
        Defaults to the standard deviation (`numpy.std`).
    axis : int or `None`, optional
        If not `None`, clip along the given axis; ``axis`` is passed on
        to ``cenfunc`` and ``stdfunc``, which are expected to return an
        array with the axis dimension removed.  If `None`, clip over all
        axes.  Defaults to `None`.
    copy : bool, optional
        If `True`, the ``data`` array will be copied.  If `False`, the
        returned masked array data will contain the same array as
        ``data``.  Defaults to `True`.

    Returns
    -------
    filtered_data : `numpy.ma.MaskedArray`
        A masked array with the same shape as the ``data`` input, where
        the points rejected by the algorithm have been masked.

    See Also
    --------
    SigmaClip

    Examples
    --------
    Mask everything more than 2 sample standard deviations from the
    median::

        >>> from astropy.stats import sigma_clip
        >>> from numpy.random import randn
        >>> randvar = randn(10000)
        >>> filtered_data = sigma_clip(randvar, sigma=2, iters=5)

    Clip until convergence relative to the sample *mean*, without
    copying the data::

        >>> from numpy import mean
        >>> filtered_data = sigma_clip(randvar, sigma=3, iters=None,
        ...                            cenfunc=mean, copy=False)

    Clip along a single axis::

        >>> from numpy import arange, diag, ones
        >>> from numpy.random import normal
        >>> data = arange(5) + normal(0., 0.05, (5, 5)) + diag(ones(5))
        >>> filtered_data = sigma_clip(data, sigma=2.3, axis=0)
    """
    # delegate all of the actual work to the OO interface
    clipper = SigmaClip(sigma=sigma, sigma_lower=sigma_lower,
                        sigma_upper=sigma_upper, iters=iters,
                        cenfunc=cenfunc, stdfunc=stdfunc)
    return clipper(data, axis=axis, copy=copy)
def sigma_clipped_stats(data, mask=None, mask_value=None, sigma=3.0,
                        sigma_lower=None, sigma_upper=None, iters=5,
                        cenfunc=np.ma.median, stdfunc=np.std, std_ddof=0,
                        axis=None):
    """
    Calculate sigma-clipped statistics on the provided data.

    Parameters
    ----------
    data : array-like
        Data array or object that can be converted to an array.
    mask : `numpy.ndarray` (bool), optional
        A boolean mask with the same shape as ``data``, where `True`
        marks a masked element.  Masked pixels are excluded when
        computing the statistics.
    mask_value : float, optional
        A data value (e.g., ``0.0``) to ignore when computing the
        statistics; masked in addition to any input ``mask``.
    sigma : float, optional
        The number of standard deviations to use as the lower and upper
        clipping limit; overridden by ``sigma_lower`` and
        ``sigma_upper``, if input.  Defaults to 3.
    sigma_lower : float, optional
        Lower clipping bound in standard deviations.  If `None` the
        value of ``sigma`` is used.  Defaults to `None`.
    sigma_upper : float, optional
        Upper clipping bound in standard deviations.  If `None` the
        value of ``sigma`` is used.  Defaults to `None`.
    iters : int, optional
        Number of sigma-clipping iterations, or `None` to clip until
        convergence.  Defaults to 5.
    cenfunc : callable, optional
        Computes the center value of a masked array.  Defaults to the
        median (`numpy.ma.median`).
    stdfunc : callable, optional
        Computes the width estimator about the center.  Defaults to the
        standard deviation (`numpy.std`).
    std_ddof : int, optional
        Delta degrees of freedom for the standard deviation: the
        divisor is ``N - std_ddof``.  Default is zero.
    axis : int or `None`, optional
        If not `None`, clip along the given axis; `None` clips over all
        axes.  Defaults to `None`.

    Returns
    -------
    mean, median, stddev : float
        The mean, median, and standard deviation of the sigma-clipped
        data.
    """
    # apply the explicit mask first, then mask out the sentinel value
    if mask is not None:
        data = np.ma.MaskedArray(data, mask)
    if mask_value is not None:
        data = np.ma.masked_values(data, mask_value)
    clipped = sigma_clip(data, sigma=sigma, sigma_lower=sigma_lower,
                         sigma_upper=sigma_upper, iters=iters,
                         cenfunc=cenfunc, stdfunc=stdfunc, axis=axis)
    mean_val = np.ma.mean(clipped, axis=axis)
    median_val = np.ma.median(clipped, axis=axis)
    std_val = np.ma.std(clipped, ddof=std_ddof, axis=axis)
    # With Numpy 1.10 np.ma.median always returns a MaskedArray, even for a
    # single element; unwrap to a scalar for backwards compatibility.
    if axis is None and np.ma.isMaskedArray(median_val):
        median_val = median_val.item()
    return mean_val, median_val, std_val
|
kelle/astropy
|
astropy/stats/sigma_clipping.py
|
Python
|
bsd-3-clause
| 18,281
|
[
"Gaussian"
] |
b095710938489da3723ef449d8d720abdcd5caeddcf376c363290dd90e4839d3
|
# $Id$
from module_base import ModuleBase
from module_mixins import ScriptedConfigModuleMixin
import module_utils
import vtk
import wx
class metaImageRDR(ScriptedConfigModuleMixin, ModuleBase):
    """DeVIDE reader module wrapping vtkMetaImageReader: loads MetaImage
    (*.mha / *.mhd) volumes and exposes them as a single vtkImageData
    output.  It has no inputs and one config option (the filename).
    """
    def __init__(self, module_manager):
        # initialise the module base before any mixin setup
        ModuleBase.__init__(self, module_manager)
        self._reader = vtk.vtkMetaImageReader()
        # route VTK progress events into the framework's progress reporting
        module_utils.setup_vtk_object_progress(self, self._reader,
                                               'Reading MetaImage data.')
        self._config.filename = ''
        # single config entry: file browser bound to self._config.filename
        configList = [
            ('File name:', 'filename', 'base:str', 'filebrowser',
             'The name of the MetaImage file you want to load.',
             {'fileMode' : wx.OPEN,
              'fileMask' :
              'MetaImage single file (*.mha)|*.mha|MetaImage separate header '
              '(*.mhd)|*.mhd|All files (*.*)|*.*'})]
        ScriptedConfigModuleMixin.__init__(
            self, configList,
            {'Module (self)' : self,
             'vtkMetaImageReader' : self._reader})
        # push current logic state into config (and hence the GUI)
        self.sync_module_logic_with_config()
    def close(self):
        """Disconnect, tear down the GUI bits, and release the reader."""
        # we play it safe... (the graph_editor/module_manager should have
        # disconnected us by now)
        for input_idx in range(len(self.get_input_descriptions())):
            self.set_input(input_idx, None)
        # this will take care of all display thingies
        ScriptedConfigModuleMixin.close(self)
        ModuleBase.close(self)
        # get rid of our reference
        del self._reader
    def get_input_descriptions(self):
        # reader module: no inputs
        return ()
    def set_input(self, idx, inputStream):
        # no inputs exist, so any attempt to connect one is an error
        raise Exception
    def get_output_descriptions(self):
        return ('vtkImageData',)
    def get_output(self, idx):
        return self._reader.GetOutput()
    def logic_to_config(self):
        # reflect the reader's current filename into the config object
        self._config.filename = self._reader.GetFileName()
    def config_to_logic(self):
        # push the configured filename down into the VTK reader
        self._reader.SetFileName(self._config.filename)
    def execute_module(self):
        # force the reader to actually read the file now
        self._reader.Update()
|
nagyistoce/devide
|
modules/readers/metaImageRDR.py
|
Python
|
bsd-3-clause
| 2,082
|
[
"VTK"
] |
694758cc5f414af50b21250c196396504a82243e7bd51dae7757dded1527b870
|
import unittest
from pyml.linear_models import LinearRegression, LogisticRegression
from pyml.linear_models.base import LinearBase
from pyml.datasets import regression, gaussian
from pyml.preprocessing import train_test_split
class LinearRegressionGradientDescentTest(unittest.TestCase):
    """Regression tests for LinearRegression with the gradient descent solver.

    All expected values are deterministic because the dataset, the split,
    and the model all share seed 1970.
    """
    @classmethod
    def setUpClass(cls):
        cls.X, cls.y = regression(100, seed=1970)
        (cls.X_train, cls.y_train,
         cls.X_test, cls.y_test) = train_test_split(cls.X, cls.y,
                                                    train_split=0.8,
                                                    seed=1970)
        cls.regressor = LinearRegression(seed=1970, solver='gradient_descent')
        cls.regressor.train(X=cls.X_train, y=cls.y_train)
    def test_LinR_iterations(self):
        self.assertEqual(self.regressor.iterations, 7)
    def test_LinR_coefficients(self):
        coefs = self.regressor.coefficients
        self.assertAlmostEqual(coefs[0], 0.4907136205265401, delta=0.001)
        self.assertAlmostEqual(coefs[1], 0.9034467828351432, delta=0.001)
    def test_LinR_cost(self):
        cost = self.regressor.cost
        self.assertAlmostEqual(cost[0], 3.5181936893597365, delta=0.001)
        self.assertAlmostEqual(cost[-1], 0.4868770157376261, delta=0.001)
    def test_LinR_predict(self):
        first_prediction = self.regressor.predict(self.X_test)[0]
        self.assertAlmostEqual(first_prediction, 3.8176098320897065,
                               delta=0.001)
    def test_LinR_train_predict(self):
        first = self.regressor.train_predict(self.X_train, self.y_train)[0]
        self.assertAlmostEqual(first, 9.770956237446251, delta=0.001)
    def test_LinR_mse(self):
        mse = self.regressor.score(self.X_test, self.y_test)
        self.assertAlmostEqual(mse, 1.3280324597827904, delta=0.001)
    def test_LinR_mae(self):
        mae = self.regressor.score(self.X_test, self.y_test, scorer='mae')
        self.assertAlmostEqual(mae, 0.9126392424298799, delta=0.001)
    def test_LinR_seed(self):
        self.assertEqual(self.regressor.seed, 1970)
    def test_LinR_solver_error(self):
        # unknown solver names must be rejected at construction time
        with self.assertRaises(ValueError):
            LinearRegression(1970, True, 'unknown_solver')
class GradientDescentTest(unittest.TestCase):
    """Construction-time validation of the optimiser name in LinearBase."""
    def test_GD_opt_InitError(self):
        # an unknown optimiser algorithm must raise ValueError
        with self.assertRaises(ValueError):
            LinearBase(0.01, 0.01, 10, 0.9, 64, 0.1,
                       'amazing_optimiser_algo', None, 'regressor')
class LinearRegressionOLSTest(unittest.TestCase):
    """Regression tests for LinearRegression with the closed-form OLS solver."""
    @classmethod
    def setUpClass(cls):
        cls.X, cls.y = regression(100, seed=1970)
        (cls.X_train, cls.y_train,
         cls.X_test, cls.y_test) = train_test_split(cls.X, cls.y,
                                                    train_split=0.8,
                                                    seed=1970)
        cls.regressor = LinearRegression(seed=1970, solver='OLS')
        cls.regressor.train(X=cls.X_train, y=cls.y_train)
    def test_OLS_coefficients(self):
        coefs = self.regressor.coefficients
        self.assertAlmostEqual(coefs[0], 0.518888884839874, delta=0.001)
        self.assertAlmostEqual(coefs[1], 0.9128356664164721, delta=0.001)
    def test_OLS_predict(self):
        first_prediction = self.regressor.predict(self.X_test)[0]
        self.assertAlmostEqual(first_prediction, 3.880359176261411,
                               delta=0.001)
    def test_OLS_mse(self):
        mse = self.regressor.score(self.X_test, self.y_test)
        self.assertAlmostEqual(mse, 1.34151578011058, delta=0.001)
class LogisticRegressionTest(unittest.TestCase):
    """Binary-classification tests for LogisticRegression on Gaussian blobs."""
    @classmethod
    def setUpClass(cls):
        # fix: the original chained assignment (cls.X, cls.y = X, y = ...)
        # also bound throwaway locals X and y that were never used
        cls.X, cls.y = gaussian(labels=2, sigma=0.2, seed=1970)
        cls.X_train, cls.y_train, cls.X_test, cls.y_test = train_test_split(
            cls.X, cls.y, train_split=0.8, seed=1970)
        cls.classifier = LogisticRegression(seed=1970)
        cls.classifier.train(X=cls.X_train, y=cls.y_train)
    def test_LogR_iterations(self):
        self.assertEqual(self.classifier.iterations, 1623)
    def test_LogR_coefficients(self):
        self.assertAlmostEqual(self.classifier.coefficients[0], -1.1576475345638408, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[1], 0.1437129269620468, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[2], 2.4464052394504856, delta=0.001)
    def test_LogR_cost(self):
        self.assertAlmostEqual(self.classifier.cost[0], -106.11158912690777, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[-1], -61.15035744417768, delta=0.001)
    def test_LogR_predict(self):
        self.assertEqual(self.classifier.predict(self.X_test)[0], 1)
    def test_LogR_predict_proba(self):
        self.assertAlmostEqual(self.classifier.predict_proba(self.X_test)[0], 0.807766417948826, delta=0.001)
    def test_LogR_accuracy(self):
        self.assertAlmostEqual(self.classifier.score(self.X_test, self.y_test), 0.975, delta=0.001)
    def test_LogR_seed(self):
        self.assertEqual(self.classifier.seed, 1970)
class MultiClassLogisticRegressionTest(unittest.TestCase):
    """One-vs-rest multiclass LogisticRegression on three Gaussian blobs."""
    @classmethod
    def setUpClass(cls):
        # fix: dropped the redundant chained assignment that also bound
        # unused locals X and y
        cls.X, cls.y = gaussian(labels=3, sigma=0.2, seed=1970)
        cls.X_train, cls.y_train, cls.X_test, cls.y_test = train_test_split(
            cls.X, cls.y, train_split=0.8, seed=1970)
        cls.classifier = LogisticRegression(seed=1970)
        cls.classifier.train(X=cls.X_train, y=cls.y_train)
    def test_MLogR_iterations(self):
        # one iteration count per one-vs-rest classifier
        self.assertEqual(self.classifier.iterations[0], 3829)
        self.assertEqual(self.classifier.iterations[1], 4778)
        self.assertEqual(self.classifier.iterations[2], 3400)
    def test_MLogR_coefficients(self):
        self.assertAlmostEqual(self.classifier.coefficients[0][-1], -2.504659172303325, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[1][-1], 0.9999686753579901, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[2][-1], 0.5430990877594853, delta=0.001)
    def test_MLogR_cost(self):
        self.assertAlmostEqual(self.classifier.cost[0][-1], -79.28190020206335, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[1][-1], -110.82234100438215, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[2][-1], -77.51659078552537, delta=0.001)
    def test_MLogR_predict(self):
        self.assertEqual(self.classifier.predict(self.X_test)[0], 1)
    def test_MLogR_predict_proba(self):
        self.assertAlmostEqual(self.classifier.predict_proba(self.X_test)[0][0], 0.18176321188156466, delta=0.001)
    def test_MLogR_accuracy(self):
        self.assertAlmostEqual(self.classifier.score(self.X_test, self.y_test), 0.9666666666666667, delta=0.001)
    def test_MLogR_seed(self):
        self.assertEqual(self.classifier.seed, 1970)
class MultiClassLogisticRegressionwithMomentumTest(unittest.TestCase):
    """Multiclass LogisticRegression trained with momentum (alpha=0.9)."""
    @classmethod
    def setUpClass(cls):
        # fix: dropped the redundant chained assignment that also bound
        # unused locals X and y
        cls.X, cls.y = gaussian(labels=3, sigma=0.2, seed=1970)
        cls.X_train, cls.y_train, cls.X_test, cls.y_test = train_test_split(
            cls.X, cls.y, train_split=0.8, seed=1970)
        cls.classifier = LogisticRegression(seed=1970, alpha=0.9)
        cls.classifier.train(X=cls.X_train, y=cls.y_train)
    def test_MLogRMom_iterations(self):
        self.assertEqual(self.classifier.iterations[0], 1671)
        self.assertEqual(self.classifier.iterations[1], 1691)
        self.assertEqual(self.classifier.iterations[2], 1546)
    def test_MLogRMom_coefficients(self):
        self.assertAlmostEqual(self.classifier.coefficients[0][-1], -6.179813361986948, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[1][-1], 3.915365241814121, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[2][-1], 1.4391187417309603, delta=0.001)
    def test_MLogRMom_cost(self):
        self.assertAlmostEqual(self.classifier.cost[0][-1], -38.80552082812185, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[1][-1], -71.11678230563942, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[2][-1], -39.44585417456268, delta=0.001)
    def test_MLogRMom_predict(self):
        self.assertEqual(self.classifier.predict(self.X_test)[0], 1)
    def test_MLogRMom_predict_proba(self):
        self.assertAlmostEqual(self.classifier.predict_proba(self.X_test)[0][0], 0.04680865754859053, delta=0.001)
    def test_MLogRMom_accuracy(self):
        self.assertAlmostEqual(self.classifier.score(self.X_test, self.y_test), 0.9833333333333333, delta=0.001)
class MultiClassLogisticRegressionMiniBatch(unittest.TestCase):
    """Multiclass LogisticRegression trained with mini-batches of 64."""
    @classmethod
    def setUpClass(cls):
        cls.X, cls.y = gaussian(labels=3, sigma=0.2, seed=1970)
        # fix: the original read "cls .X" (stray space in attribute access)
        cls.X_train, cls.y_train, cls.X_test, cls.y_test = train_test_split(
            cls.X, cls.y, train_split=0.8, seed=1970)
        cls.classifier = LogisticRegression(seed=1970, alpha=0.9, batch_size=64)
        cls.classifier.train(X=cls.X_train, y=cls.y_train)
    def test_MLogRMin_iterations(self):
        self.assertEqual(self.classifier.iterations[0], 905)
        self.assertEqual(self.classifier.iterations[1], 789)
        self.assertEqual(self.classifier.iterations[2], 841)
    def test_MLogRMin_coefficients(self):
        self.assertAlmostEqual(self.classifier.coefficients[0][-1], -8.55590366737834, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[1][-1], 5.300981091494979, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[2][-1], 1.9547910473910273, delta=0.001)
    def test_MLogRMin_cost(self):
        self.assertAlmostEqual(self.classifier.cost[0][-1], -28.947086360092676, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[1][-1], -63.442959967464574, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[2][-1], -30.33741258448764, delta=0.001)
    def test_MLogRMin_predict(self):
        self.assertEqual(self.classifier.predict(self.X_test)[0], 1)
    def test_MLogRMin_predict_proba(self):
        self.assertAlmostEqual(self.classifier.predict_proba(self.X_test)[0][0], 0.017049079187284634, delta=0.001)
    def test_MLogRMin_accuracy(self):
        self.assertAlmostEqual(self.classifier.score(self.X_test, self.y_test), 0.9833333333333333, delta=0.001)
class MultiClassLogisticRegressionNesterovOpt(unittest.TestCase):
    """Multiclass LogisticRegression with the Nesterov momentum optimiser."""
    @classmethod
    def setUpClass(cls):
        cls.X, cls.y = gaussian(labels=3, sigma=0.2, seed=1970)
        # fix: the original read "cls .X" (stray space in attribute access)
        cls.X_train, cls.y_train, cls.X_test, cls.y_test = train_test_split(
            cls.X, cls.y, train_split=0.8, seed=1970)
        cls.classifier = LogisticRegression(seed=1970, alpha=0.9, method='nesterov')
        cls.classifier.train(X=cls.X_train, y=cls.y_train)
    def test_MLogRNesOpt_iterations(self):
        self.assertEqual(self.classifier.iterations[0], 1673)
        self.assertEqual(self.classifier.iterations[1], 1692)
        self.assertEqual(self.classifier.iterations[2], 1548)
    def test_MLogRNesOpt_coefficients(self):
        self.assertAlmostEqual(self.classifier.coefficients[0][-1], -6.180431021808369, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[1][-1], 3.914247513063426, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[2][-1], 1.4391579779616654, delta=0.001)
    def test_MLogRNesOpt_cost(self):
        self.assertAlmostEqual(self.classifier.cost[0][-1], -38.80233474838201, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[1][-1], -71.1246527942097, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[2][-1], -39.44375056521835, delta=0.001)
    def test_MLogRNesOpt_predict(self):
        self.assertEqual(self.classifier.predict(self.X_test)[0], 1)
    def test_MLogRNesOpt_predict_proba(self):
        self.assertAlmostEqual(self.classifier.predict_proba(self.X_test)[0][0], 0.046812477405301665, delta=0.001)
    def test_MLogRNesOpt_accuracy(self):
        self.assertAlmostEqual(self.classifier.score(self.X_test, self.y_test), 0.9833333333333333, delta=0.001)
class MultiClassLogisticRegressionAdagradOpt(unittest.TestCase):
    """Multiclass LogisticRegression with the adagrad optimiser."""
    @classmethod
    def setUpClass(cls):
        cls.X, cls.y = gaussian(labels=3, sigma=0.2, seed=1970)
        # fix: the original read "cls .X" (stray space in attribute access)
        cls.X_train, cls.y_train, cls.X_test, cls.y_test = train_test_split(
            cls.X, cls.y, train_split=0.8, seed=1970)
        cls.classifier = LogisticRegression(seed=1970, alpha=0.98, method='adagrad')
        cls.classifier.train(X=cls.X_train, y=cls.y_train)
    def test_MLogRAdagradOpt_iterations(self):
        self.assertEqual(self.classifier.iterations[0], 1187)
        self.assertEqual(self.classifier.iterations[1], 317)
        self.assertEqual(self.classifier.iterations[2], 436)
    def test_MLogRAdagradOpt_coefficients(self):
        self.assertAlmostEqual(self.classifier.coefficients[0][-1], -7.3539416411807474, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[1][-1], 6.203333163198617, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[2][-1], 3.2839577904188673, delta=0.001)
    def test_MLogRAdagradOpt_cost(self):
        self.assertAlmostEqual(self.classifier.cost[0][-1], -33.083261965268775, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[1][-1], -60.5014103879351, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[2][-1], -26.87955154897393, delta=0.001)
    def test_MLogRAdagradOpt_predict(self):
        self.assertEqual(self.classifier.predict(self.X_test)[0], 1)
    def test_MLogRAdagradOpt_predict_proba(self):
        self.assertAlmostEqual(self.classifier.predict_proba(self.X_test)[0][0], 0.01683721954111243, delta=0.001)
    def test_MLogRAdagradOpt_accuracy(self):
        self.assertAlmostEqual(self.classifier.score(self.X_test, self.y_test), 0.9833333333333333, delta=0.001)
class MultiClassLogisticRegressionAdadeltaOpt(unittest.TestCase):
    """Multiclass LogisticRegression with the adadelta optimiser."""
    @classmethod
    def setUpClass(cls):
        cls.X, cls.y = gaussian(labels=3, sigma=0.2, seed=1970)
        # fix: the original read "cls .X" (stray space in attribute access)
        cls.X_train, cls.y_train, cls.X_test, cls.y_test = train_test_split(
            cls.X, cls.y, train_split=0.8, seed=1970)
        cls.classifier = LogisticRegression(seed=1970, learning_rate=1, alpha=0.93, method='adadelta')
        cls.classifier.train(X=cls.X_train, y=cls.y_train)
    # fix: method names previously said "Adagrad" (copy/paste slip); renamed
    # to match this class's optimiser -- unittest discovery is unaffected
    def test_MLogRAdadeltaOpt_iterations(self):
        self.assertEqual(self.classifier.iterations[0], 50)
        self.assertEqual(self.classifier.iterations[1], 31)
        self.assertEqual(self.classifier.iterations[2], 59)
    def test_MLogRAdadeltaOpt_coefficients(self):
        self.assertAlmostEqual(self.classifier.coefficients[0][-1], -17.69287148773692, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[1][-1], 8.833657315503745, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[2][-1], 3.612736014955706, delta=0.001)
    def test_MLogRAdadeltaOpt_cost(self):
        self.assertAlmostEqual(self.classifier.cost[0][-1], -19.461391357431822, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[1][-1], -58.89195462566389, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[2][-1], -22.090057493027633, delta=0.001)
    def test_MLogRAdadeltaOpt_predict(self):
        self.assertEqual(self.classifier.predict(self.X_test)[0], 1)
    def test_MLogRAdadeltaOpt_predict_proba(self):
        self.assertAlmostEqual(self.classifier.predict_proba(self.X_test)[0][0], 0.0008593472329257797, delta=0.001)
    def test_MLogRAdadeltaOpt_accuracy(self):
        self.assertAlmostEqual(self.classifier.score(self.X_test, self.y_test), 0.9833333333333333, delta=0.001)
class MultiClassLogisticRegressionRMSpropOpt(unittest.TestCase):
    """Multiclass LogisticRegression with the rmsprop optimiser."""
    @classmethod
    def setUpClass(cls):
        cls.X, cls.y = gaussian(labels=3, sigma=0.2, seed=1970)
        # fix: the original read "cls .X" (stray space in attribute access)
        cls.X_train, cls.y_train, cls.X_test, cls.y_test = train_test_split(
            cls.X, cls.y, train_split=0.8, seed=1970)
        cls.classifier = LogisticRegression(seed=1970, learning_rate=1, alpha=0.99, method='rmsprop')
        cls.classifier.train(X=cls.X_train, y=cls.y_train)
    # fix: method names previously said "Adagrad" (copy/paste slip); renamed
    # to match this class's optimiser -- unittest discovery is unaffected
    def test_MLogRRMSpropOpt_iterations(self):
        self.assertEqual(self.classifier.iterations[0], 212)
        self.assertEqual(self.classifier.iterations[1], 32)
        self.assertEqual(self.classifier.iterations[2], 71)
    def test_MLogRRMSpropOpt_coefficients(self):
        self.assertAlmostEqual(self.classifier.coefficients[0][-1], -15.450318804509942, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[1][-1], 9.072690798494117, delta=0.001)
        self.assertAlmostEqual(self.classifier.coefficients[2][-1], 4.915283731375461, delta=0.001)
    def test_MLogRRMSpropOpt_cost(self):
        self.assertAlmostEqual(self.classifier.cost[0][-1], -19.841625448780448, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[1][-1], -58.50817514694194, delta=0.001)
        self.assertAlmostEqual(self.classifier.cost[2][-1], -21.77574545155869, delta=0.001)
    def test_MLogRRMSpropOpt_predict(self):
        self.assertEqual(self.classifier.predict(self.X_test)[0], 1)
    def test_MLogRRMSpropOpt_predict_proba(self):
        self.assertAlmostEqual(self.classifier.predict_proba(self.X_test)[0][0], 0.0009482861157384186, delta=0.001)
    def test_MLogRRMSpropOpt_accuracy(self):
        self.assertAlmostEqual(self.classifier.score(self.X_test, self.y_test), 0.9833333333333333, delta=0.001)
|
gf712/PyML
|
tests/regression_tests.py
|
Python
|
mit
| 17,822
|
[
"Gaussian"
] |
c3cf9a4eebd4cd4f0f1e39baa8e334005eed076a27dd83a5dc65a2fa342d32ed
|
"""Support library for the IEM Reanalysis code
.. data:: SOUTH
Latitude of the southern edge of the IEM Reanalysis.
"""
import string
import random
from datetime import timezone, datetime
from affine import Affine
import numpy as np
import xarray as xr
from pyiem.util import get_dbconn
# 1/8 degree grid, grid cell is the lower left corner
SOUTH = 23.0
WEST = -126.0
NORTH = 50.0
EAST = -65.0
DX = 0.125
DY = 0.125
# hard coding these to prevent flakey behaviour with dynamic computation
NX = 488  # int((EAST - WEST) / DX)
NY = 216  # int((NORTH - SOUTH) / DY)
# coordinate axes of cell lower-left corners, west->east and south->north
XAXIS = np.arange(WEST, EAST, DX)
YAXIS = np.arange(SOUTH, NORTH, DY)
# north-up affine transforms (negative y step, origin at the NW corner)
AFFINE = Affine(DX, 0.0, WEST, 0.0, 0 - DY, NORTH)
MRMS_AFFINE = Affine(0.01, 0.0, WEST, 0.0, -0.01, NORTH)
def get_table(valid):
    """Figure out which database table stores data for the given time.

    Args:
      valid (datetime or date): the time of interest; a ``datetime`` maps
        to the hourly table for its UTC year+month, a plain ``date`` to
        the daily table for its year.

    Returns:
      str: the table name
    """
    # careful here: a datetime IS an instance of date, so test datetime first
    if isinstance(valid, datetime):
        yyyymm = valid.astimezone(timezone.utc).strftime("%Y%m")
        return f"iemre_hourly_{yyyymm}"
    return f"iemre_daily_{valid.year}"
def set_grids(valid, ds, cursor=None, table=None):
    """Update the database with a given ``xarray.Dataset``.

    Args:
      valid (datetime or date): If datetime, save hourly, if date, save daily.
      ds (xarray.Dataset): The xarray dataset to save; each data variable is
        written to the like-named database column.
      cursor (database cursor, optional): cursor to use for queries.  When
        omitted, a connection is created here and committed at the end.
      table (str, optional): hard coded database table to use to set the data
        on.  Usually dynamically computed from ``valid``.
    """
    table = table if table is not None else get_table(valid)
    # only commit if we own the connection (no cursor was passed in)
    commit = cursor is None
    if cursor is None:
        pgconn = get_dbconn("iemre")
        cursor = pgconn.cursor()
    # see that we have database entries, otherwise create them
    cursor.execute(
        f"SELECT valid from {table} WHERE valid = %s LIMIT 1", (valid,)
    )
    insertmode = True
    if cursor.rowcount == 1:
        # Update mode, we do some massive tricks :/  In-place updates are
        # slow, so copy this valid's rows to an unlogged scratch table,
        # update that, then swap the rows back in below.
        temptable = "".join(random.choices(string.ascii_uppercase, k=24))
        insertmode = False
        # "col = $1, col2 = $2, ..." placeholders for the prepared statement
        update_cols = ", ".join(
            ["%s = $%i" % (v, i + 1) for i, v in enumerate(ds)]
        )
        # the gid placeholder comes after all the column placeholders
        arg = "$%i" % (len(ds) + 1,)
        # approximate table size is 10 MB
        cursor.execute("SET temp_buffers = '100MB'")
        cursor.execute(
            f"CREATE UNLOGGED TABLE {temptable} AS SELECT * from {table} "
            f"WHERE valid = '{valid}'"
        )
        cursor.execute(f"CREATE INDEX on {temptable}(gid)")
        cursor.execute(
            (
                f"PREPARE pyiem_iemre_plan as UPDATE {temptable} "
                f"SET {update_cols} WHERE gid = {arg}"
            )
        )
    else:
        # Insert mode: gid is the first parameter, then one per variable
        insert_cols = ", ".join(["%s" % (v,) for v in ds])
        percents = ", ".join(["$%i" % (i + 2,) for i in range(len(ds))])
        cursor.execute(
            f"PREPARE pyiem_iemre_plan as INSERT into {table} "
            f"(gid, valid, {insert_cols}) VALUES($1, '{valid}', {percents})"
        )
    sql = "execute pyiem_iemre_plan (%s)" % (",".join(["%s"] * (len(ds) + 1)),)
    def _n(val):
        """Convert NaN to None (SQL NULL), anything else to a plain float."""
        return None if np.isnan(val) else float(val)
    # Implementation notes: xarray iteration was ~25 secs, loading into memory
    # instead is a few seconds :/
    pig = {v: ds[v].values for v in ds}
    for y in range(ds.dims["y"]):
        for x in range(ds.dims["x"]):
            arr = [_n(pig[v][y, x]) for v in ds]
            # gid goes first for the INSERT plan, last for the UPDATE plan
            if insertmode:
                arr.insert(0, y * NX + x)
            else:
                arr.append(y * NX + x)
            cursor.execute(sql, arr)
    if not insertmode:
        # Undo our hackery above: replace this valid's rows with the
        # updated copies from the scratch table
        cursor.execute(f"DELETE from {table} WHERE valid = '{valid}'")
        cursor.execute(f"INSERT into {table} SELECT * from {temptable}")
        cursor.execute(f"DROP TABLE {temptable}")
    # If we generated the connection, we should save (commit) the work
    if commit:
        cursor.close()
        pgconn.commit()
    else:
        # the caller owns the cursor; just release our prepared statement
        cursor.execute("""DEALLOCATE pyiem_iemre_plan""")
def get_grids(valid, varnames=None, cursor=None, table=None):
    """Fetch grid(s) from the database, returning xarray.

    Args:
      valid (datetime or date): If datetime, load hourly, if date, load daily.
      varnames (str or list, optional): Which variables to fetch from the
        database, defaults to all available.
      cursor (database cursor, optional): cursor to use for the query.
      table (str, optional): Hard coded table to fetch data from, useful in
        the case of forecast data.

    Returns:
      ``xarray.Dataset``
    """
    table = table if table is not None else get_table(valid)
    if cursor is None:
        pgconn = get_dbconn("iemre")
        cursor = pgconn.cursor()
    # rectify varnames to a list so the membership test below works
    if isinstance(varnames, str):
        varnames = [varnames]
    # Compute variable names available in this table (all non-key columns)
    cursor.execute(
        "SELECT column_name FROM information_schema.columns "
        "WHERE table_schema = 'public' AND table_name = %s and "
        "column_name not in ('gid', 'valid')",
        (table,),
    )
    use_columns = []
    for row in cursor:
        if not varnames or row[0] in varnames:
            use_columns.append(row[0])
    colsql = ",".join(use_columns)
    # decode gid back into (y, x) grid indices; %% escapes the modulo operator
    cursor.execute(
        f"SELECT (gid / %s)::int as y, gid %% %s as x, {colsql} "
        f"from {table} WHERE valid = %s",
        (NX, NX, valid),
    )
    # start from all-NaN grids so any missing database rows stay missing
    data = dict((key, np.full((NY, NX), np.nan)) for key in use_columns)
    for row in cursor:
        for i, col in enumerate(use_columns):
            data[col][row[0], row[1]] = row[2 + i]
    ds = xr.Dataset(
        dict((key, (["y", "x"], data[key])) for key in data),
        coords={"lon": (["x"], XAXIS), "lat": (["y"], YAXIS)},
    )
    return ds
def get_dailyc_ncname():
    """Return the path of the IEMRE daily climatology netCDF file."""
    return "/mesonet/data/iemre/iemre_dailyc.nc"
def get_daily_ncname(year):
    """Return the path of the IEMRE daily netCDF file for the given year."""
    return f"/mesonet/data/iemre/{year}_iemre_daily.nc"
def get_dailyc_mrms_ncname():
    """Return the path of the MRMS daily climatology netCDF file."""
    return "/mesonet/data/mrms/mrms_dailyc.nc"
def get_daily_mrms_ncname(year):
    """Return the path of the MRMS daily netCDF file for the given year."""
    return f"/mesonet/data/mrms/{year}_mrms_daily.nc"
def get_hourly_ncname(year):
    """Get the hourly netcdf filename for the given year.

    Fix: the docstring previously said "daily", but this function returns
    the *hourly* file path.
    """
    return f"/mesonet/data/iemre/{year}_iemre_hourly.nc"
def daily_offset(ts):
    """Compute the 0-based day-of-year index used in the netcdf files.

    Args:
      ts (datetime or date): the timestamp of interest.

    Returns:
      int: days elapsed since 1 January of ``ts``'s year.
    """
    # normalize to a naive datetime at midnight; this also converts a
    # plain datetime.date object into a datetime
    midnight = datetime(ts.year, ts.month, ts.day)
    jan1 = datetime(ts.year, 1, 1)
    return (midnight - jan1).days
def hourly_offset(dtobj):
    """Return the hourly time index for the given timestamp.

    Args:
      dtobj (datetime): datetime of interest; a naive value is assumed to
        already be in UTC.

    Returns:
      int: hour index within the year's netcdf file.
    """
    # convert aware timestamps to UTC; naive ones are trusted as-is
    if dtobj.tzinfo and dtobj.tzinfo != timezone.utc:
        dtobj = dtobj.astimezone(timezone.utc)
    year_start = dtobj.replace(
        month=1, day=1, hour=0, minute=0, second=0, microsecond=0
    )
    elapsed = (dtobj - year_start).total_seconds()
    return int(elapsed / 3600.0)
def find_ij(lon, lat):
    """Return the (i, j) grid cell containing lon/lat, or (None, None).

    A point outside the [WEST, EAST) x [SOUTH, NORTH) domain yields
    (None, None).
    """
    inside = WEST <= lon < EAST and SOUTH <= lat < NORTH
    if not inside:
        return None, None
    # digitize returns the 1-based bin index; shift to 0-based cell index.
    i = np.digitize([lon], XAXIS)[0] - 1
    j = np.digitize([lat], YAXIS)[0] - 1
    return i, j
def get_gid(lon, lat):
    """Return the row-major grid id for a location, or None if outside."""
    i, j = find_ij(lon, lat)
    # find_ij returns (None, None) when the point is outside the domain.
    return None if i is None else j * NX + i
|
akrherz/pyIEM
|
src/pyiem/iemre.py
|
Python
|
mit
| 7,932
|
[
"NetCDF"
] |
1ca415e6604a7d9b40494be44384655a7ea67a7a06442e0db4d081c5c5f726e3
|
import ast
import itertools
import multiprocessing
import logging
from enum import Enum
log = logging.getLogger(__name__)
class SymbolKind(Enum):
    """SymbolKind corresponds to the SymbolKind enum type found in the LSP
    spec."""

    # Numeric values mirror the Language Server Protocol's SymbolKind
    # codes and must not be changed: they are serialized to clients
    # via Symbol.json_object().
    File = 1
    Module = 2
    Namespace = 3
    Package = 4
    Class = 5
    Method = 6
    Property = 7
    Field = 8
    Constructor = 9
    Enum = 10
    Interface = 11
    Function = 12
    Variable = 13
    Constant = 14
    String = 15
    Number = 16
    Boolean = 17
    Array = 18
class Symbol:
    """A single code symbol (class, function, variable, ...) in a file."""

    def __init__(self, name, kind, line, col, container=None, file=None):
        # Identity and 1-based source location of the symbol.
        self.name = name
        self.kind = kind
        self.line = line
        self.col = col
        # Optional enclosing scope (e.g. class name) and source path;
        # `file` is typically stamped on after construction.
        self.container = container
        self.file = file

    def score(self, query: str) -> int:
        """Score a symbol based on how well it matches a query.

        Useful for sorting.
        """
        # Query-independent baseline: favor classes, non-variables,
        # top-level symbols, and non-test files.
        base = 0
        if self.kind == SymbolKind.Class:
            base += 1
        if self.kind != SymbolKind.Variable:
            base += 1
        if self.container is None:
            base += 1
        if self.file and 'test' not in self.file:
            base += 5
        if not query:
            return base

        total = base
        name_lc = self.name.lower()
        query_lc = query.lower()
        # Exact match beats case-insensitive exact match.
        if query == self.name:
            total += 10
        elif name_lc == query_lc:
            total += 8
        # Prefix match, again preferring the case-sensitive form.
        if self.name.startswith(query):
            total += 5
        elif name_lc.startswith(query_lc):
            total += 4
        # Plain substring match.
        if query_lc in name_lc:
            total += 2
        if self.container:
            container_lc = self.container.lower()
            if container_lc.startswith(query_lc):
                total += 2
            if query_lc == container_lc + "." + name_lc:
                total += 10
        if self.file and self.file.lower().startswith(query_lc):
            total += 1
        # A score no better than the baseline means the query did not
        # actually match anything -- signal "no match" with -1.
        return total if total > base else -1

    def json_object(self):
        """Return the LSP SymbolInformation representation of this symbol."""
        start = {"line": self.line - 1, "character": self.col}
        end = {"line": self.line - 1, "character": self.col + len(self.name)}
        obj = {
            "name": self.name,
            "kind": self.kind.value,
            "location": {
                "uri": "file://" + self.file,
                "range": {"start": start, "end": end},
            },
        }
        if self.container is not None:
            obj["containerName"] = self.container
        return obj
def extract_symbols(source, path):
    """extract_symbols is a generator yielding symbols for source."""
    try:
        tree = ast.parse(source)
    except SyntaxError as err:
        # Unparseable files contribute no symbols; record where it broke.
        log.error("Error parsing Python file %s:%s -- %s: %s",
                  path, err.lineno, err.msg, err.text)
        return
    visitor = SymbolVisitor()
    for sym in visitor.visit(tree):
        # Stamp each symbol with the file it came from.
        sym.file = path
        yield sym
def extract_exported_symbols(source, path):
    """Yield only public symbols of `source` (no leading underscore)."""
    def is_exported(sym):
        # Private if the symbol itself, or its container, is underscored.
        if sym.name.startswith('_'):
            return False
        if sym.container is not None and sym.container.startswith('_'):
            return False
        return True
    return filter(is_exported, extract_symbols(source, path))
def workspace_symbols(fs, root_path, parent_span):
    """returns a list of all exported symbols under root_path in fs."""
    paths = (p for p in fs.walk(root_path) if p.endswith(".py"))
    sources = fs.batch_open(paths, parent_span)
    with multiprocessing.Pool() as pool:
        # Parse files in worker processes; result order is irrelevant,
        # so imap_unordered keeps the workers busy.
        per_file = pool.imap_unordered(
            _imap_extract_exported_symbols, sources, chunksize=10)
        return list(itertools.chain.from_iterable(per_file))
# This exists purely for passing into imap
def _imap_extract_exported_symbols(args):
    """Unpack a (path, source) pair and return its exported symbols."""
    path, source = args
    return list(extract_exported_symbols(source, path))
class SymbolVisitor:
    """Walks an ast and yields Symbol objects for top-level definitions.

    Unlike ast.NodeVisitor, traversal only descends where a visit_*
    method explicitly recurses, and a `container` (the enclosing class
    name, if any) is threaded through the walk.
    """

    def visit_Module(self, node, container):
        # The module is the global scope: simply visit every child.
        yield from self.generic_visit(node)

    def visit_ClassDef(self, node, container):
        yield Symbol(node.name, SymbolKind.Class, node.lineno, node.col_offset)
        # Children are visited with this class recorded as their container.
        yield from self.generic_visit(node, container=node.name)

    def visit_FunctionDef(self, node, container):
        kind = SymbolKind.Function if container is None else SymbolKind.Method
        yield Symbol(node.name, kind, node.lineno, node.col_offset,
                     container=container)

    def visit_Assign(self, assign_node, container):
        for target in assign_node.targets:
            # Only simple name targets; skip attribute/subscript/tuple forms.
            if hasattr(target, "id"):
                yield Symbol(target.id, SymbolKind.Variable, target.lineno,
                             target.col_offset, container=container)

    def visit_If(self, node, container):
        # If is often used provide different implementations for the same
        # var. To avoid duplicate names, we only visit the true body.
        for child in node.body:
            yield from self.visit(child, container)

    # Based on ast.NodeVisitor.visit
    def visit(self, node, container=None):
        # Two changes from ast.NodeVisitor.visit:
        # * Do not fallback to generic_visit (we only care about top-level)
        # * container optional argument
        visitor = getattr(self, 'visit_' + type(node).__name__, None)
        if visitor is None:
            return
        yield from visitor(node, container)

    # Based on ast.NodeVisitor.generic_visit
    def generic_visit(self, node, container=None):
        for _field, value in ast.iter_fields(node):
            # Treat a lone AST child the same as a one-element list.
            children = value if isinstance(value, list) else [value]
            for item in children:
                if isinstance(item, ast.AST):
                    yield from self.visit(item, container)
|
sourcegraph/python-langserver
|
langserver/symbols.py
|
Python
|
mit
| 6,135
|
[
"VisIt"
] |
558218d73035f38f8f04a2f5fec9cb28b35419143229ac4fd10ca6a903e41e36
|
# Copyright 2012, 2013 The GalSim developers:
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
#
# GalSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GalSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GalSim. If not, see <http://www.gnu.org/licenses/>
#
"""@file real.py
Functions for dealing with galsim.RealGalaxy objects and the catalogs that store their data.
The galsim.RealGalaxy uses images of galaxies from real astrophysical data (e.g. the Hubble Space
Telescope), along with a PSF model of the optical properties of the telescope that took these
images, to simulate new galaxy images with a different (must be larger) telescope PSF. A
description of the simulation method can be found in Section 5 of Mandelbaum et al. (2012; MNRAS,
540, 1518), although note that the details of the implementation in Section 7 of that work are not
relevant to the more recent software used here.
This module defines the RealGalaxyCatalog class, used to store all required information about a
real galaxy simulation training sample and accompanying PSF model. For information about
downloading GalSim-readable RealGalaxyCatalog data in FITS format, see the RealGalaxy Data Download
page on the GalSim Wiki:
https://github.com/GalSim-developers/GalSim/wiki/RealGalaxy%20Data%20Download%20Page
The function simReal takes this information and uses it to simulate a (no-noise-added) image from
some lower-resolution telescope.
"""
import galsim
import utilities
from galsim import GSObject
class RealGalaxy(GSObject):
    """A class describing real galaxies from some training dataset.  Its underlying implementation
    uses a Convolve instance of an InterpolatedImage (for the observed galaxy) with a Deconvolve
    of another InterpolatedImage (for the PSF).

    This class uses a catalog describing galaxies in some training data (for more details, see the
    RealGalaxyCatalog documentation) to read in data about realistic galaxies that can be used for
    simulations based on those galaxies.  Also included in the class is additional information that
    might be needed to make or interpret the simulations, e.g., the noise properties of the training
    data.

    The GSObject drawShoot method is unavailable for RealGalaxy instances.

    Initialization
    --------------

        real_galaxy = galsim.RealGalaxy(real_galaxy_catalog, index=None, id=None, random=False,
                                        rng=None, x_interpolant=None, k_interpolant=None,
                                        flux=None, pad_factor = 0, noise_pad=False, pad_image=None,
                                        use_cache = True)

    This initializes real_galaxy with three InterpolatedImage objects (one for the deconvolved
    galaxy, and saved versions of the original HST image and PSF).  Note that there are multiple
    keywords for choosing a galaxy; exactly one must be set.  In future we may add more such
    options, e.g., to choose at random but accounting for the non-constant weight factors
    (probabilities for objects to make it into the training sample).

    Note that preliminary tests suggest that for optimal balance between accuracy and speed,
    `k_interpolant` and `pad_factor` should be kept at their default values.  The user should be
    aware that significant inaccuracy can result from using other combinations of these parameters;
    see devel/modules/finterp.pdf, especially table 1, in the GalSim repository.

    @param real_galaxy_catalog  A RealGalaxyCatalog object with basic information about where to
                                find the data, etc.
    @param index                Index of the desired galaxy in the catalog.
    @param id                   Object ID for the desired galaxy in the catalog.
    @param random               If true, then just select a completely random galaxy from the
                                catalog.
    @param rng                  A random number generator to use for selecting a random galaxy
                                (may be any kind of BaseDeviate or None) and to use in generating
                                any noise field when padding.  This user-input random number
                                generator takes precedence over any stored within a user-input
                                CorrelatedNoise instance (see `noise_pad` param below).
    @param x_interpolant        Either an Interpolant2d (or Interpolant) instance or a string
                                indicating which real-space interpolant should be used.  Options
                                are 'nearest', 'sinc', 'linear', 'cubic', 'quintic', or 'lanczosN'
                                where N should be the integer order to use.  [default
                                `x_interpolant = galsim.Quintic()'].
    @param k_interpolant        Either an Interpolant2d (or Interpolant) instance or a string
                                indicating which k-space interpolant should be used.  Options are
                                'nearest', 'sinc', 'linear', 'cubic', 'quintic', or 'lanczosN'
                                where N should be the integer order to use.  We strongly recommend
                                leaving this parameter at its default value; see text above for
                                details.  [default `k_interpolant = galsim.Quintic()'].
    @param flux                 Total flux, if None then original flux in galaxy is adopted without
                                change [default `flux = None`].
    @param pad_factor           Factor by which to pad the Image when creating the
                                InterpolatedImage; `pad_factor <= 0` results in the use of the
                                default value, 4.  We strongly recommend leaving this parameter at
                                its default value; see text above for details.
                                [Default `pad_factor = 0`.]
    @param noise_pad            Pad the Interpolated image with zeros, or with noise of a level
                                specified in the training dataset?  There are several options here:
                                    Use `noise_pad = False` if you wish to pad with zeros.
                                    Use `noise_pad = True` if you wish to pad with uncorrelated
                                        noise of the proper variance.
                                    Set `noise_pad` equal to a galsim.CorrelatedNoise, an Image, or
                                        a filename containing an Image of an example noise field
                                        that will be used to calculate the noise power spectrum and
                                        generate noise in the padding region.  Any random number
                                        generator passed to the `rng` keyword will take precedence
                                        over that carried in an input galsim.CorrelatedNoise.
                                In the last case, if the same file is used repeatedly, then use of
                                the `use_cache` keyword (see below) can be used to prevent the need
                                for repeated galsim.CorrelatedNoise initializations.
                                [default `noise_pad = False`]
    @param pad_image            Image to be used for deterministically padding the original image.
                                This can be specified in two ways:
                                   (a) as a galsim.Image; or
                                   (b) as a string which is interpreted as a filename containing an
                                       image to use.
                                The size of the image that is passed in is taken to specify the
                                amount of padding, and so the `pad_factor` keyword should be equal
                                to 1, i.e., no padding.  The `pad_image` scale is ignored, and taken
                                to be equal to that of the `image`.  Note that `pad_image` can be
                                used together with `noise_pad`.  However, the user should be careful
                                to ensure that the image used for padding has roughly zero mean.
                                The purpose of this keyword is to allow for a more flexible
                                representation of some noise field around an object; if the user
                                wishes to represent the sky level around an object, they should do
                                that when they have drawn the final image instead.  [Default
                                `pad_image = None`.]
    @param use_cache            Specify whether to cache noise_pad read in from a file to save
                                having to build an CorrelatedNoise repeatedly from the same image.
                                [Default `use_cache = True`]
    @param gsparams             You may also specify a gsparams argument.  See the docstring for
                                galsim.GSParams using help(galsim.GSParams) for more information
                                about this option.

    Methods
    -------
    The RealGalaxy is a GSObject, and inherits all of the GSObject methods (draw(), applyShear(),
    etc. except drawShoot() which is unavailable), and operator bindings.
    """

    # Initialization parameters of the object, with type information
    _req_params = {}
    _opt_params = { "x_interpolant" : str ,
                    "k_interpolant" : str,
                    "flux" : float ,
                    "pad_factor" : float,
                    "noise_pad" : str,
                    "pad_image" : str}
    _single_params = [ { "index" : int , "id" : str } ]
    _takes_rng = True

    # Class-level caches shared by all instances (used by noise padding).
    _cache_noise_pad = {}
    _cache_variance = {}

    # --- Public Class methods ---
    def __init__(self, real_galaxy_catalog, index=None, id=None, random=False,
                 rng=None, x_interpolant=None, k_interpolant=None, flux=None, pad_factor=0,
                 noise_pad=False, pad_image=None, use_cache=True, gsparams=None):

        import pyfits
        import numpy as np

        if rng is None:
            rng = galsim.BaseDeviate()
        elif not isinstance(rng, galsim.BaseDeviate):
            raise TypeError("The rng provided to RealGalaxy constructor is not a BaseDeviate")

        # Code block below will be for galaxy selection; not all are currently implemented.  Each
        # option must return an index within the real_galaxy_catalog.
        if index is not None:
            if id is not None or random is True:
                raise AttributeError('Too many methods for selecting a galaxy!')
            use_index = index
        elif id is not None:
            if random is True:
                raise AttributeError('Too many methods for selecting a galaxy!')
            use_index = real_galaxy_catalog._get_index_for_id(id)
        elif random is True:
            uniform_deviate = galsim.UniformDeviate(rng)
            use_index = int(real_galaxy_catalog.nobjects * uniform_deviate())
        else:
            raise AttributeError('No method specified for selecting a galaxy!')

        # read in the galaxy, PSF images; for now, rely on pyfits to make I/O errors.
        gal_image = real_galaxy_catalog.getGal(use_index)
        PSF_image = real_galaxy_catalog.getPSF(use_index)
        noise = real_galaxy_catalog.getNoise(use_index, rng, gsparams)

        # save any other relevant information as instance attributes
        self.catalog_file = real_galaxy_catalog.file_name
        self.index = use_index
        self.pixel_scale = float(real_galaxy_catalog.pixel_scale[use_index])

        # handle noise-padding options
        try:
            noise_pad = galsim.config.value._GetBoolValue(noise_pad, '')
            # If it's a bool and True, use the correlated noise specified in the catalog.
            if noise_pad:
                noise_pad = noise
            else:
                noise_pad = 0.
        except Exception:
            # Deliberately best-effort: _GetBoolValue is only probing whether
            # noise_pad is bool-convertible.  Any failure means it is one of
            # the other supported types (CorrelatedNoise, Image, filename),
            # so we pass it through unchanged.  Was a bare `except:`, which
            # also swallowed KeyboardInterrupt/SystemExit.
            pass

        self.original_image = galsim.InterpolatedImage(
                gal_image, x_interpolant=x_interpolant, k_interpolant=k_interpolant,
                dx=self.pixel_scale, pad_factor=pad_factor, noise_pad=noise_pad, rng=rng,
                pad_image=pad_image, use_cache=use_cache, gsparams=gsparams)
        # If flux is None, leave flux as given by original image
        if flux is not None:
            self.original_image.setFlux(flux)

        # also make the original PSF image, with far less fanfare: we don't need to pad with
        # anything interesting.  flux=1 keeps the PSF normalized.
        self.original_PSF = galsim.InterpolatedImage(
            PSF_image, x_interpolant=x_interpolant, k_interpolant=k_interpolant,
            flux=1.0, dx=self.pixel_scale, gsparams=gsparams)

        # Calculate the PSF "deconvolution" kernel
        psf_inv = galsim.Deconvolve(self.original_PSF, gsparams=gsparams)

        # Initialize the SBProfile attribute
        GSObject.__init__(
            self, galsim.Convolve([self.original_image, psf_inv], gsparams=gsparams))

        # Save the noise in the image as an accessible attribute
        noise.convolveWith(psf_inv, gsparams)
        self.noise = noise

    def getHalfLightRadius(self):
        raise NotImplementedError("Half light radius calculation not implemented for RealGalaxy "
                                   +"objects.")
class RealGalaxyCatalog(object):
    """Class containing a catalog with information about real galaxy training data.

    The RealGalaxyCatalog class reads in and stores information about a specific training sample of
    realistic galaxies.  We assume that all files containing the images (galaxies and PSFs) live in
    one directory; they could be individual files, or multiple HDUs of the same file.  Currently
    there is no functionality that lets this be a FITS data cube, because we assume that the object
    postage stamps will in general need to be different sizes depending on the galaxy size.

    If only the catalog name (`'real_galaxy_catalog.fits'`) is specified, then the set of galaxy/PSF
    image files (e.g., `'real_galaxy_images_1.fits'`, `'real_galaxy_PSF_images_1.fits'`, etc.) are
    assumed to be in the directory as the catalog file (in the following example, in the current
    working directory `./`):

        >>> my_rgc = galsim.RealGalaxyCatalog('real_galaxy_catalog.fits')

    If `image_dir` is specified, the set of galaxy/PSF image files is assumed to be in the
    subdirectory of where the catalog is (in the following example, `./images`):

        >>> my_rgc = galsim.RealGalaxyCatalog('real_galaxy_catalog.fits', image_dir='images')

    If the real galaxy catalog is in some far-flung directory, and the galaxy/PSF image files are in
    its subdirectory, one only needs to specify the long directory name once:

        >>> file_name = '/data3/scratch/user_name/galsim/real_galaxy_data/real_galaxy_catalog.fits'
        >>> image_dir = 'images'
        >>> my_rgc = galsim.RealGalaxyCatalog(file_name, image_dir=image_dir)

    In the above case, the galaxy/PSF image files are in the directory
    `/data3/scratch/user_name/galsim/real_galaxy_data/images/`.

    The above behavior is changed if the `image_dir` specifies a directory.  In this case,
    `image_dir` is interpreted as the full path:

        >>> file_name = '/data3/scratch/user_name/galsim/real_galaxy_data/real_galaxy_catalog.fits'
        >>> image_dir = '/data3/scratch/user_name/galsim/real_galaxy_data/images'
        >>> my_rgc = galsim.RealGalaxyCatalog(file_name, image_dir=image_dir)

    When `dir` is specified without `image_dir` being specified, both the catalog and
    the set of galaxy/PSF images will be searched for under the directory `dir`:

        >>> catalog_dir = '/data3/scratch/user_name/galsim/real_galaxy_data'
        >>> file_name = 'real_galaxy_catalog.fits'
        >>> my_rgc = galsim.RealGalaxyCatalog(file_name, dir=catalog_dir)

    If the `image_dir` is specified in addition to `dir`, the catalog name is specified as
    `dir/file_name`, while the galaxy/PSF image files will be searched for under `dir/image_dir`:

        >>> catalog_dir = '/data3/scratch/user_name/galsim/real_galaxy_data'
        >>> file_name = 'real_galaxy_catalog.fits'
        >>> image_dir = 'images'
        >>> my_rgc = galsim.RealGalaxyCatalog(file_name, image_dir=image_dir, dir=catalog_dir)

    To explore for the future: scaling with number of galaxies, adding more information as needed,
    and other i/o related issues.

    The GalSim repository currently contains an example catalog, in
    `GalSim/examples/data/real_galaxy_catalog_example.fits` (100 galaxies), along with the
    corresponding image data in other files (`real_galaxy_images.fits` and
    `real_galaxy_PSF_images.fits`) in that directory.  For information on how to download a larger
    sample of 26k training galaxies, see the RealGalaxy Data Download Page on the GalSim Wiki:
    https://github.com/GalSim-developers/GalSim/wiki/RealGalaxy%20Data%20Download%20Page

    @param file_name  The file containing the catalog.
    @param image_dir  If a string containing no `/`, it is the relative path from the location of
                      the catalog file to the directory containing the galaxy/PDF images.
                      If a path (a string containing `/`), it is the full path to the directory
                      containing the galaxy/PDF images.
    @param dir        The directory of catalog file (optional).
    @param preload    Whether to preload the header information. [Default `preload = False`]
    @param noise_dir  The directory of the noise files if different from the directory of the
                      image files. [Default `noise_dir = image_dir`]
    """
    _req_params = { 'file_name' : str }
    _opt_params = { 'image_dir' : str , 'dir' : str, 'preload' : bool, 'noise_dir' : str }
    _single_params = []
    _takes_rng = False

    # nobject_only is an intentionally undocumented kwarg that should be used only by
    # the config structure.  It indicates that all we care about is the nobjects parameter.
    # So skip any other calculations that might normally be necessary on construction.
    def __init__(self, file_name, image_dir=None, dir=None, preload=False, nobjects_only=False,
                 noise_dir=None):
        import os
        # First build full file_name
        if dir is None:
            self.file_name = file_name
            if image_dir is None:
                self.image_dir = os.path.dirname(file_name)
            elif os.path.dirname(image_dir) == '':
                # image_dir has no '/', so it is relative to the catalog's directory.
                self.image_dir = os.path.join(os.path.dirname(self.file_name),image_dir)
            else:
                self.image_dir = image_dir
        else:
            self.file_name = os.path.join(dir,file_name)
            if image_dir is None:
                self.image_dir = dir
            else:
                self.image_dir = os.path.join(dir,image_dir)
        if not os.path.isdir(self.image_dir):
            raise RuntimeError(self.image_dir+' directory does not exist!')
        if noise_dir is None:
            self.noise_dir = self.image_dir
        else:
            if not os.path.isdir(noise_dir):
                raise RuntimeError(noise_dir+' directory does not exist!')
            self.noise_dir = noise_dir

        import pyfits
        cat = pyfits.getdata(self.file_name)
        self.nobjects = len(cat) # number of objects in the catalog
        if nobjects_only: return  # Exit early if that's all we needed.
        ident = cat.field('ident') # ID for object in the training sample
        # We want to make sure that the ident array contains all strings.
        # Strangely, ident.astype(str) produces a string with each element == '1'.
        # Hence this way of doing the conversion:
        self.ident = [ "%s"%val for val in ident ]
        self.gal_file_name = cat.field('gal_filename') # file containing the galaxy image
        self.PSF_file_name = cat.field('PSF_filename') # file containing the PSF image
        # We don't require the noise_filename column.  If it is not present, we will use
        # Uncorrelated noise based on the variance column.
        try:
            self.noise_file_name = cat.field('noise_filename') # file containing the noise cf
        except Exception:
            # Optional column is missing; getNoise() falls back to uncorrelated
            # noise.  Was a bare `except:`, which also caught KeyboardInterrupt.
            self.noise_file_name = None
        self.gal_hdu = cat.field('gal_hdu') # HDU containing the galaxy image
        self.PSF_hdu = cat.field('PSF_hdu') # HDU containing the PSF image
        self.pixel_scale = cat.field('pixel_scale') # pixel scale for image (could be different
        # if we have training data from other datasets... let's be general here and make it a
        # vector in case of mixed training set)
        self.variance = cat.field('noise_variance') # noise variance for image
        self.mag = cat.field('mag')   # apparent magnitude
        self.band = cat.field('band') # bandpass in which apparent mag is measured, e.g., F814W
        self.weight = cat.field('weight') # weight factor to account for size-dependent
                                          # probability
        self.preloaded = False
        self.do_preload = preload
        self.saved_noise_im = {}

        # eventually I think we'll want information about the training dataset,
        # i.e. (dataset, ID within dataset)
        # also note: will be adding bits of information, like noise properties and galaxy fit params

    def _get_index_for_id(self, id):
        """Internal function to find which index number corresponds to the value ID in the ident
        field.
        """
        # Just to be completely consistent, convert id to a string in the same way we
        # did above for the ident array:
        id = "%s"%id
        if id in self.ident:
            return self.ident.index(id)
        else:
            raise ValueError('ID %s not found in list of IDs'%id)

    def preload(self):
        """Preload the files into memory.

        There are memory implications to this, so we don't do this by default.  However, it can be
        a big speedup if memory isn't an issue.  Especially if many (or all) of the images are
        stored in the same file as different HDUs.
        """
        import pyfits
        import os
        import numpy
        self.preloaded = True
        self.loaded_files = {}
        for file_name in numpy.concatenate((self.gal_file_name , self.PSF_file_name)):
            if file_name not in self.loaded_files:
                full_file_name = os.path.join(self.image_dir,file_name)
                self.loaded_files[file_name] = pyfits.open(full_file_name)

    def getGal(self, i):
        """Returns the galaxy at index `i` as an ImageViewD object.
        """
        if i >= len(self.gal_file_name):
            raise IndexError(
                'index %d given to getGal is out of range (0..%d)'%(i,len(self.gal_file_name)-1))
        import pyfits
        import os
        import numpy
        if self.do_preload and not self.preloaded:
            self.preload()
        if self.preloaded:
            array = self.loaded_files[self.gal_file_name[i]][self.gal_hdu[i]].data
        else:
            file_name = os.path.join(self.image_dir,self.gal_file_name[i])
            array = pyfits.getdata(file_name,self.gal_hdu[i])
        return galsim.ImageViewD(numpy.ascontiguousarray(array.astype(numpy.float64)))

    def getPSF(self, i):
        """Returns the PSF at index `i` as an ImageViewD object.
        """
        if i >= len(self.PSF_file_name):
            raise IndexError(
                'index %d given to getPSF is out of range (0..%d)'%(i,len(self.PSF_file_name)-1))
        import pyfits
        import os
        import numpy
        if self.do_preload and not self.preloaded:
            self.preload()
        if self.preloaded:
            array = self.loaded_files[self.PSF_file_name[i]][self.PSF_hdu[i]].data
        else:
            file_name = os.path.join(self.image_dir,self.PSF_file_name[i])
            array = pyfits.getdata(file_name,self.PSF_hdu[i])
        return galsim.ImageViewD(numpy.ascontiguousarray(array.astype(numpy.float64)))

    def getNoise(self, i, rng=None, gsparams=None):
        """Returns the noise cf at index `i` as a CorrelatedNoise object.
        """
        if self.noise_file_name is None:
            cf = galsim.UncorrelatedNoise(rng, self.pixel_scale[i], self.variance[i], gsparams)
        else:
            if i >= len(self.noise_file_name):
                raise IndexError(
                    'index %d given to getNoise is out of range (0..%d)'%(
                        i,len(self.noise_file_name)-1))
            # Cache the noise image per file name to avoid re-reading from disk.
            if self.noise_file_name[i] in self.saved_noise_im:
                im = self.saved_noise_im[self.noise_file_name[i]]
            else:
                import pyfits
                import os
                import numpy
                file_name = os.path.join(self.noise_dir,self.noise_file_name[i])
                array = pyfits.getdata(file_name)
                im = galsim.ImageViewD(numpy.ascontiguousarray(array.astype(numpy.float64)))
                self.saved_noise_im[self.noise_file_name[i]] = im
            cf = galsim.correlatednoise._BaseCorrelatedNoise(
                rng, galsim.InterpolatedImage(im, dx=self.pixel_scale[i], normalization="sb",
                                              calculate_stepk=False, calculate_maxk=False,
                                              x_interpolant='linear', gsparams=gsparams))
            cf.setVariance(self.variance[i])
        return cf
def simReal(real_galaxy, target_PSF, target_pixel_scale, g1=0.0, g2=0.0, rotation_angle=None,
            rand_rotate=True, rng=None, target_flux=1000.0, image=None):
    """Function to simulate images (no added noise) from real galaxy training data.

    This function takes a RealGalaxy from some training set, and manipulates it as needed to
    simulate a (no-noise-added) image from some lower-resolution telescope.  It thus requires a
    target PSF (which could be an image, or one of our base classes) that represents all PSF
    components including the pixel response, and a target pixel scale.

    The default rotation option is to impose a random rotation to make irrelevant any real shears
    in the galaxy training data (optionally, the RNG can be supplied).  This default can be turned
    off by setting `rand_rotate = False` or by requesting a specific rotation angle using the
    `rotation_angle` keyword, in which case `rand_rotate` is ignored.

    Optionally, the user can specify a shear (default 0).  Finally, they can specify a flux
    normalization for the final image, default 1000.

    @param real_galaxy         The RealGalaxy object to use, not modified in generating the
                               simulated image.
    @param target_PSF          The target PSF, either one of our base classes or an ImageView/Image.
    @param target_pixel_scale  The pixel scale for the final image, in arcsec.
    @param g1                  First component of shear to impose (components defined with respect
                               to pixel coordinates), [Default `g1 = 0.`]
    @param g2                  Second component of shear to impose, [Default `g2 = 0.`]
    @param rotation_angle      Angle by which to rotate the galaxy (must be a galsim.Angle
                               instance).
    @param rand_rotate         If `rand_rotate = True` (default) then impose a random rotation on
                               the training galaxy; this is ignored if `rotation_angle` is set.
    @param rng                 A random number generator to use for selection of the random
                               rotation angle. (optional, may be any kind of galsim.BaseDeviate
                               or None)
    @param target_flux         The target flux in the output galaxy image, [Default
                               `target_flux = 1000.`]
    @param image               As with the GSObject.draw() function, if an image is provided,
                               then it will be used and returned.
                               If `image=None`, then an appropriately sized image will be created.
    @return A simulated galaxy image.  The input RealGalaxy is unmodified.
    """
    # do some checking of arguments
    if not isinstance(real_galaxy, galsim.RealGalaxy):
        raise RuntimeError("Error: simReal requires a RealGalaxy!")
    # NOTE: list concatenation of dict.values() is Python 2 only; this file
    # predates the py3 port and the surrounding code base is py2.
    for Class in galsim.Image.values() + galsim.ImageView.values():
        if isinstance(target_PSF, Class):
            target_PSF = galsim.InterpolatedImage(target_PSF.view(), dx=target_pixel_scale)
            break
    if not isinstance(target_PSF, galsim.GSObject):
        raise RuntimeError("Error: target PSF is not an Image, ImageView, or GSObject!")
    if rotation_angle is not None and not isinstance(rotation_angle, galsim.Angle):
        raise RuntimeError("Error: specified rotation angle is not an Angle instance!")
    if (target_pixel_scale < real_galaxy.pixel_scale):
        import warnings
        message = "Warning: requested pixel scale is higher resolution than original!"
        warnings.warn(message)
    import math # needed for pi, sqrt below
    g = math.sqrt(g1**2 + g2**2)
    if g > 1:
        raise RuntimeError("Error: requested shear is >1!")

    # make sure target PSF is normalized
    target_PSF.setFlux(1.0)

    # Work on a copy so the caller's RealGalaxy is never modified.
    real_galaxy_copy = real_galaxy.copy()

    # rotate:  an explicit rotation_angle wins; otherwise a random rotation
    # (when rand_rotate is on) washes out real shears in the training data.
    if rotation_angle is not None:
        real_galaxy_copy.applyRotation(rotation_angle)
    elif rotation_angle is None and rand_rotate == True:
        if rng is None:
            uniform_deviate = galsim.UniformDeviate()
        elif isinstance(rng,galsim.BaseDeviate):
            uniform_deviate = galsim.UniformDeviate(rng)
        else:
            raise TypeError("The rng provided to drawShoot is not a BaseDeviate")
        rand_angle = galsim.Angle(math.pi*uniform_deviate(), galsim.radians)
        real_galaxy_copy.applyRotation(rand_angle)

    # set fluxes
    real_galaxy_copy.setFlux(target_flux)

    # shear
    if (g1 != 0.0 or g2 != 0.0):
        real_galaxy_copy.applyShear(g1=g1, g2=g2)

    # convolve, resample
    out_gal = galsim.Convolve([real_galaxy_copy, target_PSF])
    image = out_gal.draw(image=image, dx = target_pixel_scale)

    # return simulated image
    return image
|
mardom/GalSim
|
galsim/real.py
|
Python
|
gpl-3.0
| 31,286
|
[
"Galaxy"
] |
d115c5f5dbac00df43468cf35acebe96f18d199394ee8985a9bfeb307640c96b
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import unittest_decorators as utx
import numpy as np
import espressomd
from espressomd import lb
from espressomd import utils
class ArrayLockedTest(ut.TestCase):
    """Unit tests for the read-only ``utils.array_locked`` numpy subclass."""

    def test_locked_operators(self):
        # Every in-place mutation (item assignment and all augmented
        # assignment operators) of a locked array must raise ValueError.
        v = utils.array_locked([1., 2., 3.])

        with self.assertRaises(ValueError):
            v[0] = 0
        with self.assertRaises(ValueError):
            v += [1, 1, 1]
        with self.assertRaises(ValueError):
            v -= [1, 1, 1]
        with self.assertRaises(ValueError):
            v *= [1, 1, 1]
        with self.assertRaises(ValueError):
            v /= [1, 1, 1]
        with self.assertRaises(ValueError):
            v //= [1, 1, 1]
        with self.assertRaises(ValueError):
            v %= [1, 1, 1]
        with self.assertRaises(ValueError):
            v **= [1, 1, 1]
        with self.assertRaises(ValueError):
            v <<= [1, 1, 1]
        with self.assertRaises(ValueError):
            v >>= [1, 1, 1]
        with self.assertRaises(ValueError):
            v &= [1, 1, 1]
        with self.assertRaises(ValueError):
            v |= [1, 1, 1]
        with self.assertRaises(ValueError):
            v ^= [1, 1, 1]

    def test_unlocked_operators(self):
        # Binary (non-in-place) operators must produce ordinary, writeable
        # ndarrays rather than locked arrays.
        v = utils.array_locked([1, 2, 3])
        w = utils.array_locked([4, 5, 6])
        add = v + w
        sub = v - w
        self.assertTrue(isinstance(add, np.ndarray))
        self.assertTrue(isinstance(sub, np.ndarray))
        self.assertTrue(add.flags.writeable)
        self.assertTrue(sub.flags.writeable)

        np.testing.assert_array_equal(add, np.add(np.copy(v), np.copy(w)))
        np.testing.assert_array_equal(sub, np.subtract(np.copy(v), np.copy(w)))
        np.testing.assert_array_equal(sub, -(w - v))

    def test_copy_is_writeable(self):
        # np.copy of a locked array must drop the write lock.
        v = np.copy(utils.array_locked([1, 2, 3]))
        self.assertTrue(v.flags.writeable)

    def test_setter(self):
        # NOTE(review): this rebinds the local name ``v`` to a plain list
        # rather than mutating the locked array, so it only demonstrates
        # that wholesale replacement works -- confirm this is the intended
        # coverage.
        v = utils.array_locked([1, 2, 3])
        v = [4, 5, 6]
        np.testing.assert_array_equal(v, [4, 5, 6])
class ArrayPropertyTest(ut.TestCase):
    """Checks that array-valued properties of the system, of particles and
    of the LB fluid reject in-place modification, that their setters still
    work, and that np.copy() of such a property yields a writeable array."""
    # Shared class-level fixture: one particle plus an LB fluid in a cubic box.
    system = espressomd.System(box_l=[1.0, 1.0, 1.0])
    system.box_l = [12.0, 12.0, 12.0]
    system.time_step = 0.01
    system.cell_system.skin = 0.01
    system.part.add(pos=[0, 0, 0])
    lbf = lb.LBFluid(agrid=0.5, dens=1, visc=1, tau=0.01)
    system.actors.add(lbf)
    def locked_operators(self, v):
        """Assert that item assignment and every augmented-assignment
        operator raise ValueError on the locked array *v*."""
        with self.assertRaises(ValueError):
            v[0] = 0
        with self.assertRaises(ValueError):
            v += [1, 1, 1]
        with self.assertRaises(ValueError):
            v -= [1, 1, 1]
        with self.assertRaises(ValueError):
            v *= [1, 1, 1]
        with self.assertRaises(ValueError):
            v /= [1, 1, 1]
        with self.assertRaises(ValueError):
            v //= [1, 1, 1]
        with self.assertRaises(ValueError):
            v %= [1, 1, 1]
        with self.assertRaises(ValueError):
            v **= [1, 1, 1]
        with self.assertRaises(ValueError):
            v <<= [1, 1, 1]
        with self.assertRaises(ValueError):
            v >>= [1, 1, 1]
        with self.assertRaises(ValueError):
            v &= [1, 1, 1]
        with self.assertRaises(ValueError):
            v |= [1, 1, 1]
        with self.assertRaises(ValueError):
            v ^= [1, 1, 1]
    def set_copy(self, v):
        """Assert that np.copy() of property value *v* is writeable."""
        cpy = np.copy(v)
        self.assertTrue(cpy.flags.writeable)
    def test_common(self):
        """Properties available regardless of compiled-in features."""
        # Check for exception for various operators
        # Particle
        self.locked_operators(self.system.part[0].pos)
        self.locked_operators(self.system.part[0].v)
        self.locked_operators(self.system.part[0].f)
        self.locked_operators(self.system.part[0].pos_folded)
        # System
        self.locked_operators(self.system.box_l)
        # Check (allowed) setter
        # Particle
        self.system.part[0].pos = [2, 2, 2]
        self.assertTrue((self.system.part[0].pos == [2, 2, 2]).all())
        self.system.part[0].v = [2, 2, 2]
        self.assertTrue((self.system.part[0].v == [2, 2, 2]).all())
        self.system.part[0].f = [2, 2, 2]
        self.assertTrue((self.system.part[0].f == [2, 2, 2]).all())
        # System
        self.system.box_l = [2, 2, 2]
        self.assertTrue((self.system.box_l == [2, 2, 2]).all())
        # Check if copy is settable
        # Particle
        # NOTE(review): the next line is duplicated (pos is checked twice);
        # harmless, but likely a copy-paste slip.
        self.set_copy(self.system.part[0].pos)
        self.set_copy(self.system.part[0].pos)
        self.set_copy(self.system.part[0].v)
        self.set_copy(self.system.part[0].f)
        self.set_copy(self.system.part[0].pos_folded)
        # System
        self.set_copy(self.system.box_l)
    @utx.skipIfMissingFeatures(["ROTATION"])
    def test_rotation(self):
        """Rotation-related particle properties (feature ROTATION)."""
        # Check for exception for various operators
        # Particle
        self.locked_operators(self.system.part[0].omega_lab)
        self.locked_operators(self.system.part[0].quat)
        self.locked_operators(self.system.part[0].rotation)
        self.locked_operators(self.system.part[0].omega_body)
        self.locked_operators(self.system.part[0].torque_lab)
        if espressomd.has_features("EXTERNAL_FORCES"):
            self.locked_operators(self.system.part[0].ext_torque)
        # Check (allowed) setter
        # Particle
        self.system.part[0].quat = [0.5, 0.5, 0.5, 0.5]
        self.assertTrue(
            (self.system.part[0].quat == [0.5, 0.5, 0.5, 0.5]).all())
        self.system.part[0].omega_lab = [2, 2, 2]
        self.assertTrue((self.system.part[0].omega_lab == [2, 2, 2]).all())
        self.system.part[0].rotation = [1, 1, 1]
        self.assertTrue((self.system.part[0].rotation == [1, 1, 1]).all())
        self.system.part[0].omega_body = [2, 2, 2]
        self.assertTrue((self.system.part[0].omega_body == [2, 2, 2]).all())
        self.system.part[0].torque_lab = [2, 2, 2]
        self.assertTrue((self.system.part[0].torque_lab == [2, 2, 2]).all())
        if espressomd.has_features("EXTERNAL_FORCES"):
            self.system.part[0].ext_torque = [2, 2, 2]
            self.assertTrue(
                (self.system.part[0].ext_torque == [2, 2, 2]).all())
        # Check if copy is settable
        # Particle
        self.set_copy(self.system.part[0].omega_lab)
        self.set_copy(self.system.part[0].quat)
        self.set_copy(self.system.part[0].rotation)
        self.set_copy(self.system.part[0].omega_body)
        self.set_copy(self.system.part[0].torque_lab)
        if espressomd.has_features("EXTERNAL_FORCES"):
            self.set_copy(self.system.part[0].ext_torque)
    @utx.skipIfMissingFeatures(["ROTATIONAL_INERTIA"])
    def test_rotational_inertia(self):
        """Rotational-inertia property (feature ROTATIONAL_INERTIA)."""
        # Check for exception for various operators
        # Particle
        self.locked_operators(self.system.part[0].rinertia)
        # Check (allowed) setter
        # Particle
        self.system.part[0].rinertia = [2, 2, 2]
        self.assertTrue((self.system.part[0].rinertia == [2, 2, 2]).all())
        # Check if copy is settable
        # Particle
        self.set_copy(self.system.part[0].rinertia)
    @utx.skipIfMissingFeatures(["EXTERNAL_FORCES"])
    def test_external_forces(self):
        """External force and fix flags (feature EXTERNAL_FORCES)."""
        # Check for exception for various operators
        # Particle
        self.locked_operators(self.system.part[0].ext_force)
        self.locked_operators(self.system.part[0].fix)
        # Check (allowed) setter
        # Particle
        self.system.part[0].ext_force = [2, 2, 2]
        self.assertTrue((self.system.part[0].ext_force == [2, 2, 2]).all())
        self.system.part[0].fix = [1, 1, 1]
        self.assertTrue((self.system.part[0].fix == [1, 1, 1]).all())
        # Check if copy is settable
        # Particle
        self.set_copy(self.system.part[0].ext_force)
        self.set_copy(self.system.part[0].fix)
    @utx.skipIfMissingFeatures(["ROTATION", "PARTICLE_ANISOTROPY"])
    def test_rot_aniso(self):
        """Anisotropic rotational friction (ROTATION + PARTICLE_ANISOTROPY)."""
        # Check for exception for various operators
        # Particle
        self.locked_operators(self.system.part[0].gamma_rot)
        # Check (allowed) setter
        # Particle
        self.system.part[0].gamma_rot = [2, 2, 2]
        self.assertTrue((self.system.part[0].gamma_rot == [2, 2, 2]).all())
        # Check if copy is settable
        # Particle
        self.set_copy(self.system.part[0].gamma_rot)
    def test_lb(self):
        """LB fluid node properties are read-only views."""
        # Check for exception for various operators
        # LB
        self.locked_operators(self.lbf[0, 0, 0].velocity)
        self.locked_operators(self.lbf[0, 0, 0].stress)
        self.locked_operators(self.lbf[0, 0, 0].stress_neq)
        self.locked_operators(self.lbf[0, 0, 0].population)
    @utx.skipIfMissingFeatures(["LANGEVIN_PER_PARTICLE",
                                "PARTICLE_ANISOTROPY"])
    def test_langevinpp_aniso(self):
        """Per-particle anisotropic Langevin friction."""
        # Check for exception for various operators
        # Particle
        self.locked_operators(self.system.part[0].gamma)
        # Check (allowed) setter
        # Particle
        self.system.part[0].gamma = [2, 2, 2]
        self.assertTrue((self.system.part[0].gamma == [2, 2, 2]).all())
        # Check if copy is settable
        # Particle
        self.set_copy(self.system.part[0].gamma)
    @utx.skipIfMissingFeatures(["DIPOLES"])
    def test_dipoles(self):
        """Dipole moment property (feature DIPOLES)."""
        # Check for exception for various operators
        # Particle
        self.locked_operators(self.system.part[0].dip)
        # Check (allowed) setter
        # Particle
        self.system.part[0].dip = [2, 2, 2]
        np.testing.assert_allclose(
            [2, 2, 2], np.copy(self.system.part[0].dip), atol=1E-15)
        # Check if copy is settable
        # Particle
        self.set_copy(self.system.part[0].dip)
    @utx.skipIfMissingFeatures(["EXCLUSIONS"])
    def test_exclusions(self):
        """Exclusion list is read-only (feature EXCLUSIONS)."""
        # Check for exception for various operators
        # Particle
        self.locked_operators(self.system.part[0].exclusions)
    def test_partial_periodic(self):
        """System periodicity flags: locked in place, settable as a whole."""
        # Check for exception for various operators
        # System
        self.locked_operators(self.system.periodicity)
        # Check (allowed) setter
        # System
        self.system.periodicity = [1, 0, 0]
        self.assertTrue((self.system.periodicity == [1, 0, 0]).all())
        # Check if copy is settable
        # System
        self.set_copy(self.system.periodicity)
if __name__ == "__main__":
    # Run the whole suite when this file is executed as a script.
    ut.main()
|
psci2195/espresso-ffans
|
testsuite/python/array_properties.py
|
Python
|
gpl-3.0
| 11,121
|
[
"ESPResSo"
] |
1c6e3eac854945da240be89bfaf666d2c986cdcb4fc2e660097a4bbfd6d359c0
|
#!/usr/bin/python
# Copyright (c) 2009, Purdue University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# Neither the name of the Purdue University nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""pylint unit test
Checks code for style errors.
"""
__copyright__ = 'Copyright (C) 2009, Purdue University'
__license__ = 'BSD'
__version__ = '#TRUNK#'
import unittest
import subprocess
import shlex
import os
import re
PYLINT_CMD = 'pylint --rcfile pylint.rc'
CORE_DIR = '../roster-core/roster_core'
SERVER_DIR = '../roster-server/roster_server'
CONFIG_DIR = '../roster-config-manager/roster_config_manager'
USER_DIR = '../roster-user-tools/roster_user_tools'
TEST_DIR = '.'
def pylint_dir(directory, mask='.py$'):
  """Run pylint over every file in *directory* whose name matches *mask*.

  Inputs:
    directory (string): path of the directory to lint
    mask (string): regexp matched against each file name

  Output:
    (list): the non-empty pylint outputs, one entry per flagged file
  """
  outputs = []
  for entry in os.listdir(directory):
    # Skip non-matching names and the Subversion bookkeeping entry.
    if entry == '.svn' or not re.search(mask, entry):
      continue
    command = shlex.split('%s %s/%s' % (PYLINT_CMD, directory, entry))
    process = subprocess.Popen(command, stderr=subprocess.STDOUT,
                               stdout=subprocess.PIPE)
    stdout_text = process.communicate()[0]
    if stdout_text != '':
      outputs.append(stdout_text)
  return outputs
class TestPythonStyle(unittest.TestCase):

  """Run pylint over every project package (plus its sibling scripts
  directory) and fail if pylint emits any output at all.
  """

  def setUp(self):
    """Fail early, with installation instructions, if pylint is missing."""
    which_command = ['which', 'pylint']
    which_process = subprocess.Popen(which_command, stderr=subprocess.STDOUT,
                                     stdout=subprocess.PIPE)
    which_process.communicate()
    if which_process.returncode != 0:
      # Single-argument print() is valid in both Python 2 and 3.
      print('pylint is not installed')
      print('Please visit: http://pypi.python.org/pypi/pylint#downloads')
      self.fail()

  def _assert_dir_clean(self, directory):
    """Lint *directory* and its ../scripts sibling; any pylint output
    (echoed for diagnosis) fails the test.

    The four public test methods below were duplicates of this body and
    now simply delegate to it.
    """
    lint_output = pylint_dir(directory)
    for output in lint_output:
      print(output)
    lint_output2 = pylint_dir('%s/../scripts' % directory, '')
    for output in lint_output2:
      print(output)
    self.assertEqual(0, len(lint_output))
    self.assertEqual(0, len(lint_output2))

  def test_core(self):
    self._assert_dir_clean(CORE_DIR)

  def test_server(self):
    self._assert_dir_clean(SERVER_DIR)

  def test_config_manager(self):
    self._assert_dir_clean(CONFIG_DIR)

  def test_user_tools(self):
    self._assert_dir_clean(USER_DIR)
## this one currently blocks execution
## Presumably when it is checking the file running
# def test_tests(self):
# lint_output = pylint_dir(TEST_DIR)
# for output in lint_output:
# print(output)
if __name__ == '__main__':
  unittest.main()
|
stephenlienharrell/roster-dns-management
|
test/pylint_test.py
|
Python
|
bsd-3-clause
| 4,788
|
[
"VisIt"
] |
333c27468e0ee428febc44faa7dacad18471b75513d551dbafbc47295a3b9017
|
# -*- coding: utf-8 -*-
#
# BrodyHopfield.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
Spike synchronization through subthreshold oscillation
------------------------------------------------------
This script reproduces the spike synchronization behavior
of integrate-and-fire neurons in response to a subthreshold
oscillation. This phenomenon is shown in Fig. 1 of
C.D. Brody and J.J. Hopfield
Simple Networks for Spike-Timing-Based Computation,
with Application to Olfactory Processing
Neuron 37, 843-852 (2003)
Neurons receive a weak 35 Hz oscillation, a gaussian noise current
and an increasing DC. The time-locking capability is shown to
depend on the input current given. The result is then plotted using
pylab. All parameters are taken from the above paper.
'''
'''
First, we import all necessary modules for simulation, analysis and
plotting.
'''
import nest
import nest.raster_plot
'''
Second, the simulation parameters are assigned to variables.
'''
N = 1000 # number of neurons
bias_begin = 140. # minimal value for the bias current injection [pA]
bias_end = 200. # maximal value for the bias current injection [pA]
T = 600 # simulation time (ms)
driveparams = {'amplitude':50., 'frequency':35.} #parameters for the alternative-current generator
noiseparams = {'mean':0.0, 'std':200.} #parameters for the noise generator
neuronparams = { 'tau_m':20., #membrane time constant
                 'V_th':20., #threshold potential
                 'E_L':10., #membrane resting potential
                 't_ref':2., #refractory period
                 'V_reset':0., #reset potential
                 'C_m':200., #membrane capacitance
                 'V_m':0.} #initial membrane potential
'''
Third, the nodes are created using `Create`. We store the returned
handles in variables for later reference.
'''
# NOTE(review): 'spike_detector' and the SetStatus/Connect calls below use
# NEST 2.x-era model names and APIs — confirm the targeted NEST version.
neurons = nest.Create('iaf_psc_alpha',N)
sd = nest.Create('spike_detector')
noise = nest.Create('noise_generator')
drive = nest.Create('ac_generator')
'''
Set the parameters specified above for the generators using `SetStatus`.
'''
nest.SetStatus(drive, driveparams )
nest.SetStatus(noise, noiseparams )
'''
Set the parameters specified above for the neurons. Neurons get an internal
current. The first neuron additionally receives the current with amplitude
``bias_begin``, the last neuron with amplitude ``bias_end``.
'''
nest.SetStatus(neurons, neuronparams)
# Linear bias-current ramp across the population; `n` iterates over the
# neuron handles returned by Create (presumably GIDs 1..N — confirm).
nest.SetStatus(neurons, [{'I_e': (n * (bias_end - bias_begin) / N + bias_begin)} for n in neurons])
'''
Set the parameters for the `spike_detector`: recorded data should include
the information about global IDs of spiking neurons and the time of
individual spikes.
'''
nest.SetStatus(sd, {"withgid": True, "withtime": True})
'''
Connect alternative current and noise generators as well as `spike_detector`s.
to neurons
'''
# NOTE(review): DivergentConnect/ConvergentConnect are legacy NEST 2.x
# routines (replaced by nest.Connect with connection rules in NEST 2.6+).
nest.DivergentConnect(drive, neurons)
nest.DivergentConnect(noise, neurons)
nest.ConvergentConnect(neurons, sd)
'''
Simulate the network for time T.
'''
nest.Simulate(T)
'''
Plot the raster plot of the neuronal spiking activity.
'''
nest.raster_plot.from_device(sd, hist=True)
|
kristoforcarlson/nest-simulator-fork
|
pynest/examples/BrodyHopfield.py
|
Python
|
gpl-2.0
| 3,825
|
[
"Gaussian",
"NEURON"
] |
b32e96353feae6df498b1c7ce4b7f965eba640bc2bc41a919e9bcee72483ae89
|
import py, re, os, signal, time, commands, sys
from subprocess import Popen, PIPE
mod_re = (r"\bmodule\s+(", r")\s*\(\s*")
func_re = (r"\bfunction\s+(", r")\s*\(")
def extract_definitions(fpath, name_re=r"\w+", def_re=""):
    """Yield the first capture group of every match found in *fpath*'s text.

    The search pattern is built by splicing *name_re* between the two halves
    of *def_re* (e.g. mod_re / func_re), so the capture group is the
    definition's name.  The file is read eagerly; matching is lazy.
    """
    pattern = re.compile(name_re.join(def_re))
    source_text = fpath.read()
    return (match.group(1) for match in pattern.finditer(source_text))
def extract_mod_names(fpath, name_re=r"\w+"):
    """Yield the names of OpenSCAD ``module`` definitions in *fpath*."""
    return extract_definitions(fpath, name_re=name_re, def_re=mod_re)
def extract_func_names(fpath, name_re=r"\w+"):
    """Yield the names of OpenSCAD ``function`` definitions in *fpath*."""
    return extract_definitions(fpath, name_re=name_re, def_re=func_re)
def collect_test_modules(dirpath=None):
    """Map every *.scad file under *dirpath* (default: cwd) to a lazy
    generator of its test module names (modules named ``test...``).

    Note: the values are generators, not lists — they are consumed on first
    iteration only.
    """
    dirpath = dirpath or py.path.local("./")
    print "Collecting openscad test module names"
    test_files = {}
    for fpath in dirpath.visit('*.scad'):
        #print fpath
        modules = extract_mod_names(fpath, r"test\w*")
        #functions = extract_func_names(fpath, r"test\w*")
        test_files[fpath] = modules
    return test_files
class Timeout(Exception): pass
def call_openscad(path, stlpath, timeout=5):
    """Render *path* with OpenSCAD into *stlpath*.

    With a truthy *timeout*, polls the subprocess and raises Timeout after
    roughly *timeout* seconds; returns (returncode, stdout, stderr).
    With timeout=0/None, falls back to commands.getstatusoutput and returns
    (status, output, '', '') — note the different tuple length.
    """
    if sys.platform == 'darwin': exe = 'OpenSCAD.app/Contents/MacOS/OpenSCAD'
    else: exe = 'openscad'
    command = [exe, '-s', str(stlpath), str(path)]
    print command
    if timeout:
        try:
            proc = Popen(command,
                   stdout=PIPE, stderr=PIPE, close_fds=True)
            calltime = time.time()
            time.sleep(0.05)
            #print calltime
            while True:
                if proc.poll() is not None:
                    break
                time.sleep(0.5)
                #print time.time()
                if time.time() > calltime + timeout:
                    raise Timeout()
        finally:
            # Best-effort cleanup: terminate/kill an already-finished process
            # raises OSError, which is deliberately swallowed here.
            # NOTE(review): if Popen itself raises, `proc` is unbound and this
            # finally block would raise NameError — confirm acceptable.
            try:
                proc.terminate()
                proc.kill()
            except OSError:
                pass
        return (proc.returncode,) + proc.communicate()
    else:
        # Python 2-only path: the `commands` module was removed in Python 3.
        output = commands.getstatusoutput(" ".join(command))
        return output + ('', '')
def parse_output(text):
    """Parse OpenSCAD output. Stub — not implemented yet (returns None)."""
    pass
|
Obijuan/tutorial-openscad
|
temporada-2/T16-estudiando-codigo-de-otros/04-cyclone/smooth_rod_fix/MCAD/openscad_utils.py
|
Python
|
gpl-2.0
| 2,036
|
[
"VisIt"
] |
9e5019e1d52bc14d7178cd796aabe5addc6e948a42f9d2ca960798191b71f54d
|
# BEGIN_COPYRIGHT
# END_COPYRIGHT
"""
Parse a `SAM <http://samtools.sourceforge.net/>`_ file and output
alignment info in one of the following formats:
* tabular, suitable for processing by the marker alignment importer
* tabular, suitable for processing by the `Galaxy
<http://galaxyproject.org/>`_ DNA extractor
Expects single-end BWA alignment data produced by the previous steps
in the workflow (see markers_to_fastq).
"""
import os
from contextlib import nested
from bl.core.seq.align.mapping import SAMMapping
from bl.core.utils import NullLogger
import bl.vl.utils.snp as vlu_snp
from common import MARKER_AL_FIELDS, CHR_CODES, DUMMY_AL_VALUES, \
SeqNameSerializer
HELP_DOC = __doc__
OUTPUT_FORMATS = ["marker_alignment", "segment_extractor"]
DEFAULT_OUTPUT_FORMAT = OUTPUT_FORMATS[0]
DEFAULT_FLANK_SIZE = vlu_snp.SNP_FLANK_SIZE
def SamReader(f):
  """Yield a SAMMapping for each alignment record in *f*, skipping blank
  lines and @-prefixed header lines."""
  for raw_line in f:
    record = raw_line.strip()
    if not record or record.startswith("@"):
      continue
    yield SAMMapping(record.split())
class SnpHitProcessor(object):
  """Accumulates SAM hits per marker id and writes one output row per marker
  in either 'marker_alignment' or 'segment_extractor' format.

  Hits are expected grouped by marker id (as produced by the upstream
  pipeline); a change of id flushes the previous marker's hits.
  """
  HEADER = MARKER_AL_FIELDS
  def __init__(self, ref_tag, outf, outfmt=DEFAULT_OUTPUT_FORMAT,
               flank_size=DEFAULT_FLANK_SIZE, logger=None):
    # ref_tag: reference genome tag written into marker_alignment rows.
    # outf: open writable file object; closed by close_open_handles().
    self.logger = logger or NullLogger()
    self.ref_tag = ref_tag
    self.outf = outf
    self.outfmt = outfmt
    self.flank_size = flank_size
    self.current_id = None
    self.current_hits = []
    self.serializer = SeqNameSerializer()
  def process(self, hit):
    """
    Process a hit in the SAMMapping format, looking for a perfect
    (edit distance, i.e., NM tag value == 0) and unambiguous (mapping
    quality > 0) hit.
    """
    name = hit.get_name()
    id_, allele, snp_offset, _ = self.serializer.deserialize(name)
    if id_ != self.current_id:
      # New marker: flush the accumulated hits of the previous one.
      if self.current_id is not None:
        self.dump_current_hits()
      self.current_id = id_
      self.current_hits = []
    nm = hit.tag_value('NM')
    mapped = hit.is_mapped()
    # NM is an edit distance, so nm <= 0 effectively means nm == 0.
    if mapped and nm <= 0 and hit.qual > 0:
      snp_pos = hit.get_untrimmed_pos() + snp_offset
      # Falls back to the *string* 'None' when tid is not in CHR_CODES —
      # presumably a deliberate sentinel for the downstream importer; confirm.
      chr_code = CHR_CODES.get(hit.tid, 'None')
      strand = '-' if hit.is_on_reverse() else '+'
      if self.outfmt == DEFAULT_OUTPUT_FORMAT:
        r = [id_, self.ref_tag, str(chr_code), str(snp_pos), strand, allele]
      else:
        if hit.tid is None:
          self.logger.error("%r: can't use null chr for %r output" %
                            (name, self.outfmt))
          return
        # Segment-extractor rows describe a flank window centred on the SNP.
        start = snp_pos - self.flank_size - 1
        end = snp_pos + self.flank_size
        r = [hit.tid, str(start), str(end), name, '0', strand]
      self.current_hits.append(r)
    else:
      self.logger.info("%r: mapped:%r; NM:%r; qual:%r" %
                       (name, mapped, nm, hit.qual))
  def dump_current_hits(self):
    """Write the rows accumulated for the current marker.

    In marker_alignment format a marker with zero hits still produces one
    dummy row; in segment_extractor format only unambiguous (single-hit)
    markers are written.
    """
    nh = len(self.current_hits)
    if nh != 1:
      self.logger.warn("hit count for %s: %d != 1" % (self.current_id, nh))
    if nh == 0 and self.outfmt == DEFAULT_OUTPUT_FORMAT:
      self.current_hits.append([
        self.current_id,
        self.ref_tag,
        DUMMY_AL_VALUES["chromosome"],
        DUMMY_AL_VALUES["pos"],
        DUMMY_AL_VALUES["strand"],
        DUMMY_AL_VALUES["allele"],
        ])
    if self.outfmt == DEFAULT_OUTPUT_FORMAT:
      for hit in self.current_hits:
        # The trailing column is the total hit count for this marker.
        hit.append(str(nh))
        assert hit[0] == self.current_id
        self.write_row(hit)
    else:
      if nh == 1:
        self.write_row(self.current_hits[0])
  def write_row(self, data):
    """Write one tab-separated output row."""
    self.outf.write("\t".join(data)+"\n")
  def write_header(self):
    """Write the column header (marker_alignment format only)."""
    if self.outfmt == DEFAULT_OUTPUT_FORMAT:
      self.write_row(self.HEADER)
  def close_open_handles(self):
    """Close the output file handle."""
    self.outf.close()
def write_output(sam_reader, outf, reftag, outfmt, flank_size, logger=None):
  """Feed every mapping from *sam_reader* through a SnpHitProcessor and
  write the results to *outf*.

  Returns the number of SAM records processed (0 for an empty reader).
  """
  logger = logger or NullLogger()
  hit_processor = SnpHitProcessor(reftag, outf, outfmt, flank_size, logger)
  hit_processor.write_header()
  # BUG FIX: the original returned i+1 from the loop variable, which is
  # unbound (NameError) when sam_reader yields nothing; count explicitly.
  count = 0
  for m in sam_reader:
    hit_processor.process(m)
    count += 1
  hit_processor.dump_current_hits()  # flush hits of the last marker
  return count
def make_parser(parser):
  """Attach the convert_sam command-line options to *parser*."""
  add = parser.add_argument
  add('-i', '--input-file', metavar='FILE', required=True,
      help='input SAM file')
  add('-o', '--output-file', metavar='FILE', required=True,
      help='output file')
  add('--reftag', metavar='STRING', required=True,
      help='reference genome tag')
  add('--output-format', metavar='STRING',
      choices=OUTPUT_FORMATS, default=DEFAULT_OUTPUT_FORMAT,
      help='possible values: %r' % (OUTPUT_FORMATS,))
  add('--flank-size', metavar='INT', type=int,
      default=DEFAULT_FLANK_SIZE,
      help='size of the flanks to extract around the SNP.' +
      ' Has no effect with marker alignment output')
def main(logger, args):
  """Entry point: convert the SAM input file and log progress."""
  # contextlib.nested is deprecated since Python 2.7 and removed in Python 3;
  # the multi-item with statement is the equivalent, exception-safe form.
  with open(args.input_file) as f, open(args.output_file, 'w') as outf:
    bn = os.path.basename(args.input_file)
    logger.info("processing %r" % bn)
    reader = SamReader(f)
    count = write_output(reader, outf, args.reftag, args.output_format,
                         args.flank_size, logger=logger)
    logger.info("SAM records processed from %r: %d" % (bn, count))
def do_register(registration_list):
  """Register the convert_sam sub-command (name, help text, parser factory,
  entry point) with the application's dispatch table."""
  registration_list.append(('convert_sam', HELP_DOC, make_parser, main))
|
crs4/omero.biobank
|
bl/vl/app/snp_manager/convert_sam.py
|
Python
|
gpl-2.0
| 5,388
|
[
"BWA",
"Galaxy"
] |
be5a4d0fedd61a686bf82ff6b5aa322abbc11c59e35d0177b988627dff0d79d0
|
import numpy as np
import numpy.random as rng
import pylab as pl
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.mlab as mp
import copy, sys
import math
import optparse
import scipy.signal
import scipy.special.basic as sp
def make_dirichlet_bins(data,num_bins,strategy,num_dirs=50,alpha=10.,stretch_factor=None,total_alpha=None,safety_gap=np.inf):
    """Draw *num_dirs* randomized bin-border vectors for *data* by sampling
    bin proportions from a symmetric Dirichlet(alpha) distribution.

    strategy: 'eqocc' (roughly equal occupancy) or 'width' (roughly equal
    width).  Returns a (num_dirs, num_bins+1) array of borders whose outer
    edges are pushed out by *safety_gap*.
    NOTE(review): stretch_factor and total_alpha are accepted but unused.
    """
    z = copy.copy(data)
    z.sort()
    top, bottom = z[-1], z[0]
    alphas = [alpha]*num_bins #can only do eqocc and width for now
    dirs = rng.dirichlet(alphas,num_dirs)
    mybins = np.zeros((num_dirs,num_bins+1))
    mybins[:,0] = bottom
    mybins[:,-1] = top
    if strategy == 'eqocc': #(roughly) equal occupancies
        num_datapts = z.size
        for d in range(dirs.shape[0]):
            props = (np.cumsum(dirs[d])*num_datapts)[:-1]
            for p in range(len(props)):
                # NOTE(review): props holds floats; float indexing of z only
                # works on old NumPy/Python 2 — needs int() on modern stacks.
                mybins[d,p+1] = (z[props[p]] + z[props[p]+1])/2
    elif strategy == 'width': #(roughly) equal width
        datarange = top - bottom
        for d in range(dirs.shape[0]):
            props = np.cumsum(dirs[d])[:-1]
            for p in range(len(props)):
                # NOTE(review): looks like this should be
                # bottom + props[p]*datarange — as written the borders are
                # wrong whenever bottom != 0; confirm intent.
                mybins[d,p+1] = props[p] * datarange
    else:
        sys.exit('Not a valid binning strategy')
    #safety gap
    mybins[:,0] -= safety_gap
    mybins[:,-1] += safety_gap
    #return bin borders
    return mybins
def make_bin_borders(data,num_bins,strategy='eqocc',safety_gap=np.inf,fname=None,prop=0.5):
    """Compute a single list of num_bins+1 bin borders for *data*.

    strategy: 'eqocc' (equal occupancy) or 'width' (equal width).  The outer
    borders are pushed out by *safety_gap* to catch out-of-range new data.
    NOTE(review): fname and prop are accepted but unused here, although the
    caller passes them; confirm whether a 'fromfile' branch is missing.
    """
    z = copy.copy(data)
    z.sort()
    top, bottom = z[-1], z[0]
    mybins = []
    if strategy == 'eqocc': #Equal occupancies
        # NOTE(review): relies on Python 2 integer division; under Python 3
        # len(z)/num_bins is a float and range() below would raise.
        step = len(z)/num_bins
        for i in range(0,len(z)-step+1,step):
            mybins.append(z[i])
        mybins.append(z[-1]) # ie. these are really bin BORDERS.
    elif strategy == 'width': #Equal width
        step = (top-bottom)/(num_bins+0.1)
        mybins = [bottom + x*step for x in range(0, num_bins)]
        mybins.append(z[-1]) # the last one.
    else:
        sys.exit('Not a valid binning strategy')
    # Now ensure the borders are big enough to catch new data that's out-of-range.
    mybins[-1] += safety_gap
    mybins[0] -= safety_gap
    return mybins
def calc_logP(n, alphas):
    """Approximate log likelihood of count vector *n* under a
    Dirichlet-multinomial with parameters *alphas*, using Stirling's
    approximation to the log-gamma terms."""
    a = alphas + 0.0001  # guard against zeros, which log rejects
    total_n = np.sum(n)
    total_a = np.sum(a)
    approx = total_a * np.log(total_a) \
        - (total_n + total_a) * np.log(total_n + total_a) + total_n
    approx += np.sum((n + a) * np.log(n + a) - n - a * np.log(a))
    return approx
def calc_lgamma_vect(vect):
    """Return an array of log-gamma values for *vect* + 0.0001 elementwise
    (the small offset guards against zeros, which lgamma rejects)."""
    shifted = vect + 0.0001
    return np.array([math.lgamma(value) for value in shifted])
def calc_full(n, alphas, sum_alphas, lg_sum_alphas, sum_lg_alphas):
    """Exact Dirichlet-multinomial log likelihood of count vector *n*.

    The three alpha-only terms (sum, lgamma of sum, sum of lgammas) are
    precomputed by the caller so they are not recomputed per window.
    """
    lg_sum_posterior = math.lgamma(sum_alphas + np.sum(n))
    sum_lg_posterior = np.sum(calc_lgamma_vect(n + alphas))
    return lg_sum_alphas - sum_lg_alphas - lg_sum_posterior + sum_lg_posterior
def calc_gradients(x, sigma, m):
    """Gaussian window weights' gradients w.r.t. centre *m* and width *sigma*
    over positions *x*.  Returns (grad_m, grad_sigma), each shaped like x.

    BUG FIX: the original exponent read ``(x-m)**2 / 2. * sigma**2`` which,
    by left-to-right precedence, *multiplies* by sigma**2.  A Gaussian window
    — and the closed-form gradients below, which are derived from one —
    requires division by 2*sigma**2.
    """
    wx = np.exp(-np.power(x - m, 2.) / (2. * np.power(sigma, 2.)))
    grad_m = wx * (x - m) / np.power(sigma, 2.)
    grad_sigma = wx * np.power(x - m, 2.) / np.power(sigma, 3.)
    return grad_m, grad_sigma
def calc_grad_weight(nks, alphaS, alphaB, N, AB, AS):
    """ Calculate the weights for each bin k. Returns k-length vector."""
    # sp.psi is the digamma function; NOTE(review): it is imported above via
    # `scipy.special.basic`, a legacy/private module path removed in newer
    # SciPy — confirm the supported SciPy version (scipy.special.psi is the
    # public location).
    w = sp.psi(nks + alphaS) - sp.psi(nks+alphaB) + sp.psi(N+AB) - sp.psi(N+AS)
    return w
def calc_fullgrad(wgt, data, gradient):
    """Project *gradient* through *data* and weight the result:
    wgt . (data . gradient)."""
    projected = np.dot(data, gradient)
    return np.dot(wgt, projected)
if __name__ == "__main__":
parser = optparse.OptionParser(usage="usage %prog [options]")
parser.add_option("-n","--numbins",type = "int",dest = "K",default=0,
help="number of bins (ignored if strategy is dexpocc or fromfile)")
parser.add_option("-s","--binning_strategy",dest = "strategy",
help="eqocc, width or fromfile. "
"MANDATORY OPTION.")
parser.add_option("-d","--datafile",dest = "infile",
help="a list of numbers: 1D data to be read in (can't be "
"used with --rngseed)")
parser.add_option("-r","--rngseed",type = "int",dest = "seed",
help="an int to make random data up (can't be used with "
"--datafile)")
parser.add_option("-t","--dirichlet",action="store_true",dest="dirichlet",default=False,
help="make dirichlet bin borders (incompatible with \"from file\" binning stratgegy)")
parser.add_option("-o","--nohisto",action="store_true",dest="nohisto",default=False,
help="no histo in fig")
opts, args = parser.parse_args()
EXIT = False
if opts.strategy is None:
print "ERROR: you must supply a binning strategy\n"
EXIT = True
if opts.infile and opts.seed:
print "ERROR: supply EITHER a datafile OR a random seed to make up data\n"
EXIT = True
if opts.dirichlet and opts.strategy=="fromfile":
print "ERROR: dirichlet bin borders are incompatible with using bin borders from file\n"
EXIT = True
if EXIT:
parser.print_help()
sys.exit(-1)
strategy = opts.strategy
outfile = 'DirModel_%s' % strategy
K = opts.K
if opts.seed:
seed = opts.seed
# make an "image"
rng.seed(seed) # seed the random number generator here
N = 500 #number of pixels in a fake test image
noise_size=1.0
x = np.arange(N)
# make up the 'shapes' of the sources
mid1, mid2, mid3 = rng.random() * N,rng.random() * N,rng.random() * N
print 'Random sources placed at ',mid1, mid2, mid3
spread1 = int(N/80 + rng.random()*N/50) # length scale
spread2 = int(2+2*rng.random()) # length scale
spread3 = int(2+2*rng.random()) # length scale
shape1 = 0.8*np.exp(-0.5*np.power((x-mid1)*1.0/spread1,2.0))
shape2 = 5.0*np.exp(-0.5*np.power((x-mid2)*1.0/spread2,2.0))
shape3 = 3.0*np.exp(-0.5*np.power((x-mid3)*1.0/spread3,2.0))
# noise character of sources
variance = noise_size*(1.0 - shape1 + shape2) # source 3 has no variance effect
#variance = variance + x/float(len(x)) # to mimic steady change over large scales
noise = rng.normal(0,variance,x.shape)
# mean_intensity character of sources
mean = shape1 + shape2 + shape3
y = mean + noise
outfile += '_%d' % seed
#shapex left and right is +/- 1 sigma from the mean
#gives three true [leftx,rightx] for three shapes
left1=round(mid1)-spread1; right1=round(mid1)+spread1+1
left2=round(mid2)-spread2; right2=round(mid2)+spread2+1
left3=round(mid3)-spread3; right3=round(mid3)+spread3+1
true_sources = [(left1,right1),(left2,right2),(left3,right3)]
true_sources.sort()
else: # it's not a digit, so it's a filename. File should be just list of numbers.
infile = opts.infile
y = np.genfromtxt(infile)
x = np.arange(len(y))
N = len(y)
outfile += '_%s' % infile
#make bins (here, from the naked image)
if opts.dirichlet:
outfile += '_dirichletborders'
BINS = make_dirichlet_bins(y,K,strategy)
if K == 0:
K = BINS.shape[1] - 1
print 'Note: an example overall histogram: (using the first of the dirichlet histograms)'
print np.histogram(y,bins=BINS[0])[0]
else:
BINS = make_bin_borders(y,K,strategy,safety_gap=np.inf,fname=opts.bfname,prop=opts.prop)
if K == 0:
K = len(BINS) - 1
print 'Note: this makes the overall histogram this: (reality-check the final one especially)'
print np.histogram(y,bins=BINS)[0]
outfile += '_K%d' % K
# bogus, but we're setting the background alphas as if there were
# no sources in the image at the moment....
if opts.dirichlet:
alpha_BGs = np.zeros((BINS.shape[0],BINS.shape[1]-1))
Cxk = np.zeros((BINS.shape[1]-1,N))
for b in range(BINS.shape[0]):
alpha_BGs[b] = np.histogram(y,bins=BINS[b])[0]
for i in range(N):
Cxk[:,i] += np.histogram(y[i],bins=BINS[b])[0]
alpha_BG = np.mean(alpha_BGs,axis=0) + 0.5
Cxk /= float(BINS.shape[0])
else:
alpha_BG = np.histogram(y,bins=BINS)[0] + 0.5
Cxk = np.zeros((len(BINS)-1,N))
for i in range(N):
Cxk[:,i]=np.histogram(y[i],bins=BINS)[0]
alpha_SRC = 0.5 * np.ones(alpha_BG.shape) # 0.5 if the Jeffries prior
max_spread = N
score = np.zeros((max_spread,N))
max_wd = max_spread/2
gradients = np.zeros((max_wd,max_spread,2))
outfile += '_fullscore'
#1st two terms for full calculation
sum_BG = np.sum(alpha_BG)
lg_sum_BG = math.lgamma(sum_BG)
sum_lg_BG = np.sum(calc_lgamma_vect(alpha_BG))
sum_SRC = np.sum(alpha_SRC)
lg_sum_SRC = math.lgamma(sum_SRC)
sum_lg_SRC = np.sum(calc_lgamma_vect(alpha_SRC))
score1d = np.zeros((N))
sig_wd = 3.
print max_wd
for half_wd in range(1,max_wd,1): # wd is width of the window
for col in range(max_spread):
# evaluate the score of a model that has middle=row, spread=col.
md = col
row = half_wd
lo = max(0, md - sig_wd*half_wd)
hi = min(N-1, md + sig_wd*half_wd)
if ((hi - lo)>1) and (md >= lo) and (md <= hi):
# otherwise it's a fairly silly model!
bound = y[lo:hi+1]
Cxk_slice = Cxk[:,lo:hi+1]
win_size = half_wd*(2*sig_wd) + 1
wgts = scipy.signal.gaussian(win_size,half_wd)
l = math.floor((len(wgts)/2.) - (md-lo))
r = math.ceil((len(wgts)/2.) + (hi-md))
wgts = wgts[l:r]
nk = np.sum(wgts*Cxk_slice,axis=1) + 0.001
#SCORE
SRC_term = calc_full(nk, alpha_SRC, sum_SRC, lg_sum_SRC, sum_lg_SRC)
BG_term = calc_full(nk, alpha_BG, sum_BG, lg_sum_BG, sum_lg_BG)
score[row,col] = SRC_term - BG_term
#score[row,col] where col = x and row = half_wd
#so if score>0 (found source), source extends from [col-row:col+row+1]
#compress to 1d binary array with 0s where score<=0; 1s where score>0
#TODO adapt for more thresholds
if score[row,col]>0:
score1d[col-row:col+row+1] =1
#GRADIENT
grad_m,grad_sigma = calc_gradients(np.arange(lo,hi+1),half_wd,md)
w = calc_grad_weight(nk,alpha_SRC,alpha_BG,np.sum(nk),sum_BG,sum_SRC)
gm = calc_fullgrad(w,Cxk_slice,grad_m)
gs = calc_fullgrad(w,Cxk_slice,grad_sigma)
gradients[row,col,1] = gm
gradients[row,col,0] = gs
print row
#found_sources = mp.contiguous_regions(score1d==1)
#TP=0;FP=0;FN=0
scoresfile = outfile + '_scores.txt'
np.savetxt(scoresfile,score)
binsfile = outfile + '_mybins.txt'
np.savetxt(binsfile,BINS)
gradientfile = outfile + '_gradients.txt'
with file(gradientfile,'w') as out:
out.write('# Array shape: {0}\n'.format(gradients.shape))
for dslice in gradients:
np.savetxt(out,dslice)
out.write('# New slice\n')
Y,X = np.mgrid[0:max_wd,0:max_spread]
U = gradients[:,:,0]
V = gradients[:,:,1]
plt.streamplot(X, Y, U, V,color='k')
gradfig = gradientfile.split('.')[0]
plt.savefig(gradfig)
plt.clf()
score1dfile = outfile + '_score1d'
plt.plot(y,'k.')
if opts.seed:
plt.plot(shape1,'b-')
sigma1=shape1.copy()
sigma1[0:left1]=0;sigma1[right1:sigma1.size]=0
plt.plot(sigma1,'g.')
plt.plot(shape2,'b-')
sigma2=shape2.copy()
sigma2[0:left2]=0;sigma2[right2:sigma2.size]=0
plt.plot(sigma2,'g.')
plt.plot(shape3,'b-')
sigma3=shape3.copy()
sigma3[0:left3]=0;sigma3[right3:sigma3.size]=0
plt.plot(sigma3,'g.')
plt.plot(score1d,'r-')
plt.savefig(score1dfile)
plt.clf()
#make average bins for the figure (TODO: plot all bins)
if opts.dirichlet:
BINS = np.mean(BINS,axis=0)
#make the output figures
if opts.nohisto:
make_figs(x,y,BINS,outfile,score,gradients,histo=False)
else:
make_figs(x,y,BINS,outfile,score,gradients)
|
garibaldu/radioblobs
|
code/code_1d/old_and_extra/score_grad_ascent.py
|
Python
|
gpl-2.0
| 13,040
|
[
"Gaussian"
] |
b0f25c22578ebbe827620f6b69b21a5a2d4a2129a96f5dbcd681d1fe3237824d
|
# -*- coding: utf-8 -*-
"""
Methods that generate or adjusted energy related timeseries based on given assumptions/input
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
import scipy.interpolate
import scipy.linalg
import scipy.stats
from .utils import make_timeseries, clean_convert
from .analysis import countweekend_days_per_month
__all__ = ['disag_upsample', 'gen_daily_stoch_el', 'gen_load_from_daily_monthly', 'gen_load_sinus', 'gen_load_from_LDC',
'gen_load_from_PSD', 'gen_gauss_markov', 'remove_outliers', 'gen_demand_response', 'add_noise',
'gen_corr_arrays', 'gen_analytical_LDC']
_EPS = np.finfo(np.float64).eps
def disag_upsample(Load, disag_profile, to_offset='h'):
    """ Upsample given timeseries, disaggregating based on given load profiles.
    e.g. From daily to hourly. The load of each day is distributed according to the disaggregation profile. The sum of each day remains the same.
    Arguments:
        Load (pd.Series): Load profile to disaggregate. Must have a DatetimeIndex with an inferable frequency.
        disag_profile (pd.Series, np.ndarray): disaggregation profile to be used on each timestep of the load. Has to be compatible with selected offset (one value per fine-resolution step inside one coarse step).
        to_offset (str): Resolution of upsampling. has to be a valid pandas offset alias. (check `here <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__ for all available offsets)
    Returns:
        pd.Series: the upsampled timeseries
    """
    # First reindex to the finer resolution: new timestamps are NaN, the
    # original (coarse) timestamps keep their values.
    orig_freq = Load.index.freqstr
    start = Load.index[0]
    end = Load.index[-1] + 1 * Load.index.freq #An extra period is needed at the end to match the sum FIXME
    df1 = Load.reindex(pd.date_range(start, end, freq=to_offset, closed='left'))
    def mult_profile(x, profile):
        # Normalize the profile so each coarse period's total is preserved.
        profile = profile / np.sum(profile)
        return x.mean() * profile #using mean() assuming that there is one value and the rest is nan
    # Then transform each original-resolution group; the group length must
    # equal len(disag_profile) for the broadcast to line up.
    return df1.resample(orig_freq).transform(mult_profile, disag_profile).dropna()
def gen_daily_stoch_el(total_energy=1.0):
    """Generate a stochastic dummy daily electricity load profile.

    The hardcoded per-hour means and standard deviations are the result of a
    statistical analysis of normalized electric load profiles of more than 100
    households. They are passed to :meth:`gen_gauss_markov` to draw one random
    realization, which is then rescaled to the requested daily energy.

    Arguments:
        total_energy: Sum of produced timeseries (daily load)
    Returns:
        nd.array: random realization of timeseries (24 values)
    """
    # Empirical hourly statistics of the normalized household profiles.
    hourly_means = np.array([0.02603978, 0.02266633, 0.02121337, 0.02060187, 0.02198724,
                             0.02731497, 0.03540281, 0.0379463, 0.03646055, 0.03667756,
                             0.03822946, 0.03983243, 0.04150124, 0.0435474, 0.0463219,
                             0.05051979, 0.05745442, 0.06379564, 0.06646279, 0.06721004,
                             0.06510399, 0.05581182, 0.04449689, 0.03340142])
    hourly_stds = np.array([0.00311355, 0.00320474, 0.00338432, 0.00345542, 0.00380437,
                            0.00477251, 0.00512785, 0.00527501, 0.00417598, 0.00375874,
                            0.00378784, 0.00452212, 0.00558736, 0.0067245, 0.00779101,
                            0.00803175, 0.00749863, 0.00365208, 0.00406937, 0.00482636,
                            0.00526445, 0.00480919, 0.00397309, 0.00387489])
    realization = gen_gauss_markov(hourly_means, hourly_stds, .8)
    # Normalize so the day sums to total_energy.
    return realization / realization.sum() * total_energy
def gen_load_from_daily_monthly(ML, DWL, DNWL, weight=0.5, year=2015):
    """Generate annual timeseries using monthly demand and daily profiles.
    Working days and weekends are built from different profiles having different weighting factors.
    Arguments:
        ML: monthly load (size = 12)
        DWL: daily load (working day) (size = 24). Have to be normalized (sum=1)
        DNWL: daily load (non working day) (size = 24) Have to be normalized (sum=1)
        weight: weighting factor between working and non working day (0 - 1)
        year (int): year used to build the calendar (February gets 29 days on leap years)
    Returns:
        pd.Series: Generated timeseries
    """
    #TODO: refactor. Can i use disag_upsample() ?
    if not(np.isclose(DWL.sum(), 1) and np.isclose(DNWL.sum(), 1)):
        raise ValueError('Daily profiles should be normalized')
    #TODO: Normalize here?
    out = make_timeseries(year=year, length=8760, freq='H')  # Create empty pandas with datetime index
    import calendar
    febdays = 29 if calendar.isleap(year) else 28
    Days = np.array([31, febdays, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31])
    # Assumptions for non working days per month. Only weekends
    # TODO: Custom Calendars with holidays BDays
    DaysNW = countweekend_days_per_month(out.resample('d').mean())
    DaysW = Days - DaysNW
    for month in range(12):
        # Estimate total load for working and non working day: split the
        # monthly energy between the two day types according to `weight`,
        # then divide by the number of days of each type.
        TempW = (ML[month] * weight * DaysW[month] /
                 (weight * DaysW[month] + (1 - weight) * DaysNW[month]) / DaysW[month] )
        TempNW = (ML[month] * (1 - weight) * DaysNW[month] /
                  (weight * DaysW[month] + (1 - weight) * DaysNW[month]) / DaysNW[month])
        for hour in range(24):
            # weekday < 5 -> Mon..Fri (working days)
            out.loc[(out.index.month == month + 1) &  #months dont start from 0
                    (out.index.weekday < 5) &
                    (out.index.hour == hour)] = (TempW * DWL[hour])
            out.loc[(out.index.month == month + 1) &
                    (out.index.weekday >= 5) &
                    (out.index.hour == hour)] = (TempNW * DNWL[hour])
    return out
def gen_load_sinus(daily_1, daily_2, monthly_1, monthly_2, annually_1, annually_2):
    """ Generate sinusoidal load with daily, weekly and yearly seasonality. Each term is estimated based
    on the following expression: :math:`f(x;A1,A2,w) = A1 \\cos(2 \\pi/w \\cdot x) + A2 \\sin(2 \\pi/w \\cdot x)`

    Note: despite their names the ``monthly_*`` coefficients drive the 168-hour
    (weekly) harmonic, as in the original implementation.

    Arguments:
        daily_1 (float): cosine coefficient for daily component (period 24)
        daily_2 (float): sinus coefficient for daily component (period 24)
        monthly_1 (float): cosine coefficient for the 168h component
        monthly_2 (float): sinus coefficient for the 168h component
        annually_1 (float): cosine coefficient for annual component (period 8760)
        annually_2 (float): sinus coefficient for annual component (period 8760)
    Returns:
        pd.Series: Generated timeseries
    """
    def _harmonic(t, period, a_cos, a_sin):
        # Single Fourier term with the given period (in hours).
        return a_cos * np.cos(2 * np.pi / period * t) + a_sin * np.sin(2 * np.pi / period * t)

    t = np.arange(0, 8760)
    # daily, weekly, annual periodicity #TODO: custom periodicity
    components = {24: (daily_1, daily_2),
                  168: (monthly_1, monthly_2),
                  8760: (annually_1, annually_2)}
    total = 0
    for period, coeffs in components.items():
        total += _harmonic(t, period, *coeffs)
    return make_timeseries(total)
def gen_corr_arrays(Na, length, M, to_uniform=True):
""" Generating correlated normal variates.
Assume one wants to create a vector of random variates Z which is
distributed according to Z~N(μ,Σ) where μ is the vector of means,
and Σ is the variance-covariance matrix.
http://comisef.wikidot.com/tutorial:correlateduniformvariates
Arguments:
Na (int): number of vectors e.g (3)
length (int): generated vector size (e.g 8760)
M (np.ndarray): correlation matrix. Should be of size Na x Na
to_uniform (bool): True if the correlation matrix needs to be adjusted for uniforms
Returns:
np.ndarray: Realization of randomly generated correlated variables. Size : (Na, length) e.g. (3, 8760)
"""
if Na != np.size(M, 0): # rows of pars have to be the same size as rows and cols of M
print('Parameters and corr matrix dimensions do not agree.')
return False
newM = M.copy() # changing an array element without copying it changes it globally!
u = np.random.randn(length, Na)
if min(np.linalg.eig(M)[0]) < 0: # is M positive definite?
print ('Error: Eigenvector is not positive. Trying to make positive, but it may differ from the initial.')
# Make M positive definite:
la, v = np.linalg.eig(newM)
la[la < 0] = np.spacing(1) # Make all negative eigenvalues zero
ladiag = np.diag(la) # Diagonal of eigenvalues
newM = np.dot(np.dot(v, ladiag), v.T) # Estimate new M = v * L * v'
# Transformation is needed to change normal to uniform (Spearman - Pearson)
if to_uniform:
for i in np.arange(0, Na): # 1:Na
for j in np.arange(max(Na-1, i), Na): # max(Na-1,i):Na
if i != j:
newM[i, j] = 2 * np.sin(np.pi * newM[i, j] / 6)
newM[j, i] = 2 * np.sin(np.pi * newM[j, i] / 6)
if min(np.linalg.eig(newM)[0]) <= 0:
print ('Error: Eigenvector is still not positive. Aborting')
return False
cF = scipy.linalg.cholesky(newM)
Y = np.dot(u, cF).T
Y = scipy.stats.norm.cdf(Y) # remove if you produce random.rand?
return Y
def gen_load_from_LDC(LDC, Y=None, N=8760):
    """ Generate loads based on a Inverse CDF, such as a Load Duration Curve (LDC)
    Inverse transform sampling: Compute the value x such that F(x) = u.
    Take x to be the random number drawn from the distribution described by F.

    .. note::
        Due to the sampling process this function produces load profiles with unrealistic temporal sequence, which means that they cannot be treated as
        timeseries. It is recommended that :meth:`gen_load_from_PSD` is used afterwards.

    Arguments:
        LDC (np.ndarray): Load duration curve (2 x N) vector of the x, y coordinates of an LDC function (results of (get_LDC).
                          x coordinates have to be normalized (max: 1 => 8760hrs)
        Y (nd.array): a vector of random numbers. To be used for correlated loads.
                      If None is supplied a random vector (N) will be created.
        N (int): Length of produced timeseries (if Y is not provided)
    Returns:
        np.ndarray: vector with the same size as Y that respects the statistical distribution of the LDC
    """
    if Y is None:
        # No sampling points supplied: draw uniform random numbers ourselves.
        Y = np.random.rand(N)
    # Linear interpolation of the inverse CDF; values outside [0, 1] map to 0.
    inverse_cdf = scipy.interpolate.interp1d(LDC[0], LDC[1], bounds_error=False, fill_value=0)
    # No timed index is attached since the sample order carries no spectral information.
    return inverse_cdf(Y)
def gen_load_from_PSD(Sxx, x, dt=1):
    """
    Algorithm for generating samples of a random process conforming to spectral
    density Sxx(w) and probability density function p(x).

    .. note::
        This is done by an iterative process which 'shuffles' the timeseries till convergence of both
        power spectrum and marginal distribution is reached.
        Also known as "Iterated Amplitude Adjusted Fourier Transform (IAAFT). Adopted from `J.M. Nichols, C.C. Olson, J.V. Michalowicz, F. Bucholtz, (2010), "A simple algorithm for generating spectrally colored, non-Gaussian signals" Probabilistic Engineering Mechanics, Vol 25, 315-322`
        and `Schreiber, T. and Schmitz, A. (1996) "Improved Surrogate Data for Nonlinearity Tests", Physical Review Letters, Vol 77, 635-638.`

    .. warning::
        `Sxx` is modified in place (its DC bin is zeroed).

    Arguments:
        Sxx: Spectral density (two sided)
        x: Sequence of observations created by the desirable PDF. You can use :meth:`gen_load_from_LDC` for that.
        dt: Desired temporal sampling interval. [Dt = 2pi / (N * Dw)]
    Returns:
        pd.Series: The spectrally corrected timeseries
    """
    N = len(x)
    Sxx[int(N/2)+1] = 0 # zero out the DC component (remove mean)
    Xf = np.sqrt(2 * np.pi * N * Sxx / dt) # Convert PSD to Fourier amplitudes
    Xf = np.fft.ifftshift(Xf) # Put in Matlab FT format
    # The following lines were commented out because they outscale the data
    # modifying thus its PDF. However, according to Nichols et al. they
    # guarantee that the new data match the signal variance
    #vs = (2 * np.pi / N / dt) * sum(Sxx) * (N / (N-1)) # Get signal variance (as determined by PSD)
    #out = x * np.sqrt(vs / np.var(x))
    out = x
    mx = np.mean(out)
    out = out - mx # subtract the mean
    indx = np.argsort(out)
    xo = out[indx].copy() # store sorted signal xo with correct p(x)
    k = 1
    indxp = np.zeros(N) # initialize counter
    while(k):
        # NOTE(review): the FT is taken of the original `x` every iteration
        # rather than of the reordered `out`; the phases therefore never
        # change and the loop exits on the second pass. The IAAFT references
        # above recompute the FT of the current surrogate — confirm whether
        # this should be np.fft.fft(out).
        Rk = np.fft.fft(x) # Compute FT
        Rp = np.angle(Rk) # ==> np.arctan2(np.imag(Rk), np.real(Rk)) # Get phases
        out = np.real(np.fft.ifft(np.exp(1j * Rp) * np.abs(Xf))) # Give signal correct PSD
        indx = np.argsort(out) # Get rank of signal with correct PSD
        out[indx] = xo # rank reorder (simulate nonlinear transform)
        k = k + 1 # increment counter
        if np.array_equal(indx, indxp):
            print('Converged after {} iterations'.format(k))
            k = 0 # if we converged, stop
        indxp = indx # re-set ordering for next iter
    out = out + mx # Put back in the mean
    return out
def gen_gauss_markov(mu, st, r):
    """ Generate timeseries based on means, standard deviation and autocorrelation per timestep

    .. note::
        Based on `A.M. Breipohl, F.N. Lee, D. Zhai, R. Adapa, A Gauss-Markov load model for the application in risk evaluation
        and production simulation, Transactions on Power Systems, 7 (4) (1992), pp. 1493-1499`

    Arguments:
        mu: array of means. Can be either 1d or 2d
        st: array of standard deviations. Can be either scalar (same for entire timeseries) or array with the same length as the timeseries
        r: Autoregressive coefficient AR(1). Has to be between [-1,1]. Can be either scalar (same for entire timeseries) or array with the same length as the timeseries
    Returns:
        pd.Series, pd.DataFrame: a realization of the timeseries
    """
    mu = np.atleast_2d(mu)
    loadlength = mu.shape
    rndN = np.random.randn(*loadlength)
    # Broadcast scalar st / per-timestep st to the full (rows, steps) shape.
    # astype() always copies, so the caller's array is never mutated by the
    # zero-replacement below (the original mutated a view of `st`).
    if np.atleast_2d(st).shape[1] == 1:
        noisevector = st * np.ones(loadlength)
    elif len(st) == loadlength[1]:
        noisevector = np.atleast_2d(st).astype(np.float64)
    else:
        raise ValueError('Length of standard deviations must be the same as the length of means. You can also use one value for the entire series')
    if np.atleast_2d(r).shape[1] == 1:
        rvector = r * np.ones(loadlength)
    elif len(r) == loadlength[1]:
        rvector = np.atleast_2d(r)
    else:
        raise ValueError('Length of autocorrelations must be the same as the length of means. You can also use one value for the entire series')
    y = np.zeros(loadlength)
    # Avoid division by zero in the AR recursion below (same value as the
    # module-level _EPS constant).
    noisevector[noisevector == 0] = np.finfo(np.float64).eps
    # Stationary start: first sample drawn around the first mean.
    y[:, 0] = mu[:, 0] + noisevector[:, 0] * rndN[:, 0]
    # BUG FIX: the recursion must start at i=1. Starting at i=0 overwrote the
    # initialization above using the wrap-around index -1 (y[:, -1] is still
    # zero), corrupting the first sample with the *last* mean.
    # BUG FIX: use rvector[:, i] (per-timestep AR coefficient) instead of the
    # raw `r`, consistently with the sqrt(1 - rvector**2) term — otherwise an
    # array-valued `r` broadcasts incorrectly.
    for i in range(1, mu.shape[1]):
        y[:, i] = (mu[:, i] +
                   rvector[:, i] * noisevector[:, i] /
                   noisevector[:, i - 1] * (y[:, i - 1] - mu[:, i - 1]) +
                   noisevector[:, i] * np.sqrt(1 - rvector[:, i] ** 2) * rndN[:, i])
    return y.squeeze()
def add_noise(Load, mode, st, r=0.9, Lmin=0):
    """ Add noise with given characteristics.
    Arguments:
        Load (pd.Series,pd.DataFrame): 1d or 2d timeseries
        mode (int): 1: Normal Distribution, 2: Uniform Distribution, 3: Gauss Markov (autoregressive gaussian)
        st (float): Noise parameter. Scaling of random values
        r (float): Applies only for mode 3. Autoregressive coefficient AR(1). Has to be between [-1,1]
        Lmin (float): minimum load values. This is used to trunc values below zero if they are generated with a lot of noise
    Returns:
        pd.Series: Load with noise
    """
    L = np.atleast_2d(Load)
    if st == 0:
        # Nothing to do: return the input unchanged (and untouched).
        print('No noise to add')
        return Load
    loadlength = L.shape # 8760
    if mode == 1: # Normal: multiplicative gaussian noise around 1
        noisevector = st * np.random.randn(*loadlength) # gauss.. should it have a zero sum?
        out = L * (1 + noisevector)
    elif mode == 2: # Uniform
        # NOTE(review): st is applied twice here — noisevector already carries
        # the st scaling, then it is scaled by st again below. Confirm whether
        # the intended model was L * ((1 - st) + st * rand).
        noisevector = st * np.random.rand(*loadlength)
        out = L * ((1 - st) + st * noisevector)
    elif mode == 3: # Gauss-Markov, same as
        out = gen_gauss_markov(L, st, r)
    else:
        raise ValueError('Not available mode')
    out[out < Lmin] = Lmin # remove negative elements
    return clean_convert(np.squeeze(out), force_timed_index=True, freq='h') # assume hourly timeseries if no timeindex is passed
def gen_analytical_LDC(U, duration=8760, bins=1000):
    r"""Generate a Load Duration Curve from empirical parameters.

    The curve follows
    :math:`f(x;P,CF,BF) = \left(\frac{P-x}{P-BF \cdot P}\right)^{\frac{CF-1}{BF-CF}}`
    clipped to ``h`` hours below the base load and to zero above the peak.

    Arguments:
        U (tuple, dict): parameter vector [Peak load, capacity factor%, base load%, hours]
            or a dict with keys 'peak', 'LF', 'base', 'hourson'
    Returns:
        np.ndarray: a 2D array [x, y] ready for plotting (e.g. plt(*gen_analytical_LDC(U)))
    """
    if isinstance(U, dict):
        P, CF, BF, h = U['peak'], U['LF'], U['base'], U['hourson']
    else:  # unpack positional parameters
        P, CF, BF, h = U
    x = np.linspace(0, P, bins)
    # Analytical LDC shape between base load and peak.
    ldc = h * ((P - x) / (P - BF * P)) ** ((CF - 1) / (BF - CF))
    ldc[x < (BF * P)] = h   # loads below the base level are always on
    ldc[x > P] = 0          # nothing exceeds the peak
    return ldc / duration, x
def gen_demand_response(Load, percent_peak_hrs_month=0.03, percent_shifted=0.05, shave=False):
    """Simulate a demand response mechanism that makes the load profile less peaky.

    The load profile is analyzed per month and the peak hours have their load shifted
    to low load hours or shaved. When not shaved the total load is the same as that one from the
    initial timeseries, otherwise it is smaller due to the shaved peaks. The peak load is reduced
    by a predefined percentage.

    Arguments:
        Load (pd.Series): Load
        percent_peak_hrs_month (float): fraction of hours to be shifted
        percent_shifted (float): fraction of energy to be shifted if the day is tagged for shifting/shaving
        shave (bool): If False peak load will be transfered to low load hours, otherwise it will be shaved.
    Return:
        pd.Series: New load profile with reduced peaks. The peak can be shifted to low load hours or shaved
    Raises:
        ValueError: if shifting (shave=False) fails to conserve the total energy.
    """
    # FIX: Index.is_all_dates was removed in pandas 2.0; test for a
    # DatetimeIndex explicitly instead (equivalent check on older pandas).
    if not isinstance(Load.index, pd.DatetimeIndex):
        print ('Need date Time indexed series. Trying to force one.')
        Load = clean_convert(Load, force_timed_index=True)
    demand = Load

    def hours_per_month(demand):
        """Assign to each row the total number of hours of its month."""
        dic_hours_per_month = demand.groupby(demand.index.month).count().to_dict()
        return demand.resample('m').transform(lambda x: list(map(dic_hours_per_month.get, x.index.month)))

    # Monthly demand rank (1 = highest load of the month)
    # TODO: parametrize: we can check peaks on a weekly or daily basis
    demand_m_rank = demand.resample('m').transform(lambda x: x.rank(method='min', ascending=False))
    # Hours tagged for shifting away: the top percent_peak_hrs_month of each month
    bool_shift_from = demand_m_rank <= np.round(hours_per_month(demand) * percent_peak_hrs_month)
    DR_shift_from = percent_shifted * demand  # energy removed from each peak hour
    DR_shift_from[~bool_shift_from] = 0
    if shave:
        # If (peak) shaving we do not shift the loads anywhere
        DR_shift_to = 0
    else:
        # Spread the total shifted energy of each month evenly over that
        # month's low-load ("shift to") hours.
        sum_shifted = DR_shift_from.groupby(DR_shift_from.index.month).sum()
        count_shifted = DR_shift_from[DR_shift_from > 0].groupby(DR_shift_from[DR_shift_from > 0].index.month).count()
        shift_to_month = sum_shifted / count_shifted
        # Low-load hours: the bottom percent_peak_hrs_month of each month
        bool_shift_to = demand_m_rank > np.round(hours_per_month(demand) * (1 - percent_peak_hrs_month))
        df_month = pd.Series(demand.index.month, index=demand.index)
        DR_shift_to = df_month.map(shift_to_month)
        DR_shift_to[~bool_shift_to] = 0
    # Adjusted hourly demand
    dem_adj = demand.copy()
    dem_adj[bool_shift_from] = dem_adj[bool_shift_from] * (1 - percent_shifted)
    dem_adj[~bool_shift_from] = dem_adj[~bool_shift_from] + DR_shift_to
    # FIX: the energy-conservation check only applies when shifting; with
    # shave=True the total is reduced on purpose, so the old unconditional
    # check always raised for shaving.
    if not shave and not np.isclose(dem_adj.sum(), Load.sum()):
        raise ValueError('Sum is not the same. Probably you overdid it with the shifting parameters.'
                         'Please try with more conservative ones.')
    return dem_adj
def remove_outliers(Load, **kwargs):
    """ Remove the outliers identified by :meth:`detect_outliers` and replace
    them by time-interpolated values.

    Arguments:
        Load: input timeseries
        **kwargs: Exposes keyword arguments of :meth:`detect_outliers`
    Returns:
        Timeseries cleaned from outliers
    """
    from .analysis import detect_outliers
    cleaned = Load.copy()
    # Blank out the flagged samples, then fill the gaps by interpolation
    # along the time axis.
    cleaned[detect_outliers(Load, **kwargs)] = np.nan
    return cleaned.interpolate(method='time')
|
kavvkon/enlopy
|
enlopy/generate.py
|
Python
|
bsd-3-clause
| 21,354
|
[
"Gaussian"
] |
0530705b37449ff1ab6102798bb42f8e6fa64859cdf2c922e4f7f9fd31635232
|
#
# Copyright 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
# Author(s): Brian C. Lane <bcl@redhat.com>
import os
import unittest
import tempfile
import shutil
import subprocess
class BaseTestCase(unittest.TestCase):
    """Common fixture: gives every test a private scratch directory."""
    def setUp(self):
        # create the directory used for file/folder tests
        self.tmpdir = tempfile.mkdtemp()
    def tearDown(self):
        # remove the testing directory
        if self.tmpdir and os.path.isdir(self.tmpdir):
            try:
                # parse-kickstart records the path of the kickstart it parsed
                # in ks.info; remove that file too so nothing leaks outside
                # the temporary directory.
                with open(self.tmpdir + "/ks.info") as f:
                    for line in f:
                        if line.startswith("parsed_kickstart="):
                            filename = line.partition("=")[2].strip().replace('"', "")
                            os.remove(filename)
                            break
            except OSError:
                # ks.info may legitimately be missing (not every test runs
                # parse-kickstart) -- nothing to clean up then.
                pass
            shutil.rmtree(self.tmpdir)
class ParseKickstartTestCase(BaseTestCase):
    """Run the dracut parse-kickstart script on small kickstart snippets and
    check the boot arguments / ifcfg files it generates."""
    @classmethod
    def setUpClass(cls):
        # Path of the script under test; the test runner must export
        # top_srcdir (normally set by the build system).
        cls.command = os.path.abspath(os.path.join(os.environ["top_srcdir"], "dracut/parse-kickstart"))
    def execParseKickstart(self, ks_file):
        """Run parse-kickstart on ks_file and return its stdout as lines.

        On a non-zero exit the stringified exception is returned instead so
        the failing assertion message shows what went wrong.
        """
        try:
            output = subprocess.check_output([self.command, "--tmpdir", self.tmpdir, ks_file], universal_newlines=True)
        except subprocess.CalledProcessError as e:
            return str(e).splitlines()
        return str(output).splitlines()
    def cdrom_test(self):
        with tempfile.NamedTemporaryFile(mode="w+t") as ks_file:
            ks_file.write("""cdrom """)
            ks_file.flush()
            lines = self.execParseKickstart(ks_file.name)
            self.assertEqual(lines[0], "inst.repo=cdrom", lines)
    def harddrive_test(self):
        with tempfile.NamedTemporaryFile(mode="w+t") as ks_file:
            ks_file.write("""harddrive --partition=sda4 --dir=/path/to/tree""")
            ks_file.flush()
            lines = self.execParseKickstart(ks_file.name)
            self.assertEqual(lines[0], "inst.repo=hd:sda4:/path/to/tree", lines)
    def nfs_test(self):
        with tempfile.NamedTemporaryFile(mode="w+t") as ks_file:
            ks_file.write("""nfs --server=host.at.foo.com --dir=/path/to/tree --opts=nolock,timeo=50""")
            ks_file.flush()
            lines = self.execParseKickstart(ks_file.name)
            self.assertEqual(lines[0], "inst.repo=nfs:nolock,timeo=50:host.at.foo.com:/path/to/tree", lines)
    def nfs_test_2(self):
        # Same as nfs_test but without --opts.
        with tempfile.NamedTemporaryFile(mode="w+t") as ks_file:
            ks_file.write("""nfs --server=host.at.foo.com --dir=/path/to/tree""")
            ks_file.flush()
            lines = self.execParseKickstart(ks_file.name)
            self.assertEqual(lines[0], "inst.repo=nfs:host.at.foo.com:/path/to/tree", lines)
    def url_test(self):
        with tempfile.NamedTemporaryFile(mode="w+t") as ks_file:
            ks_file.write("""url --url=https://host.at.foo.com/path/to/tree --noverifyssl --proxy=http://localhost:8123""")
            ks_file.flush()
            lines = self.execParseKickstart(ks_file.name)
            self.assertEqual(len(lines), 3, lines)
            self.assertEqual(lines[0], "inst.repo=https://host.at.foo.com/path/to/tree", lines)
            self.assertEqual(lines[1], "rd.noverifyssl", lines)
            self.assertEqual(lines[2], "proxy=http://localhost:8123", lines)
    def updates_test(self):
        with tempfile.NamedTemporaryFile(mode="w+t") as ks_file:
            ks_file.write("""updates http://host.at.foo.com/path/to/updates.img""")
            ks_file.flush()
            lines = self.execParseKickstart(ks_file.name)
            self.assertEqual(lines[0], "live.updates=http://host.at.foo.com/path/to/updates.img", lines)
    def mediacheck_test(self):
        with tempfile.NamedTemporaryFile(mode="w+t") as ks_file:
            ks_file.write("""mediacheck""")
            ks_file.flush()
            lines = self.execParseKickstart(ks_file.name)
            self.assertEqual(lines[0], "rd.live.check", lines)
    def driverdisk_test(self):
        with tempfile.NamedTemporaryFile(mode="w+t") as ks_file:
            ks_file.write("""driverdisk sda5""")
            ks_file.flush()
            lines = self.execParseKickstart(ks_file.name)
            self.assertEqual(lines[0], "inst.dd=hd:sda5")
    def driverdisk_test_2(self):
        # Driver disk fetched from a URL instead of a partition.
        with tempfile.NamedTemporaryFile(mode="w+t") as ks_file:
            ks_file.write("""driverdisk --source=http://host.att.foo.com/path/to/dd""")
            ks_file.flush()
            lines = self.execParseKickstart(ks_file.name)
            self.assertEqual(lines[0], "inst.dd=http://host.att.foo.com/path/to/dd", lines)
    def network_test(self):
        with tempfile.NamedTemporaryFile(mode="w+t") as ks_file:
            ks_file.write("""network --device=link --bootproto=dhcp --activate""")
            ks_file.flush()
            lines = self.execParseKickstart(ks_file.name)
            # The chosen device name is environment dependent, only check shape.
            self.assertRegex(lines[0], r"ip=[^\s:]+:dhcp: bootdev=[^\s:]+", lines)
    def network_test_2(self):
        # Device given as a MAC address gets a ksdev0 alias via ifname=.
        with tempfile.NamedTemporaryFile(mode="w+t") as ks_file:
            ks_file.write("""network --device=AA:BB:CC:DD:EE:FF --bootproto=dhcp --activate""")
            ks_file.flush()
            lines = self.execParseKickstart(ks_file.name)
            self.assertEqual(lines[0], "ifname=ksdev0:aa:bb:cc:dd:ee:ff ip=ksdev0:dhcp: bootdev=ksdev0", lines)
    def network_static_test(self):
        with tempfile.NamedTemporaryFile(mode="w+t") as ks_file:
            ks_file.write("""network --device=link --bootproto=dhcp --activate
network --device=lo --bootproto=static --ip=10.0.2.15 --netmask=255.255.255.0 --gateway=10.0.2.254 --nameserver=10.0.2.10
""")
            ks_file.flush()
            lines = self.execParseKickstart(ks_file.name)
            self.assertRegex(lines[0], r"ip=[^\s:]+:dhcp: bootdev=[^\s:]+", lines)
            # The static device is written out as an ifcfg file; compare its
            # (sorted) contents line by line.
            ifcfg_lines = sorted(open(self.tmpdir+"/ifcfg/ifcfg-lo").readlines())
            self.assertEqual(ifcfg_lines[0], "# Generated by parse-kickstart\n", ifcfg_lines)
            self.assertEqual(ifcfg_lines[1], 'BOOTPROTO="static"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[2], 'DEVICE="lo"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[3], 'DNS1="10.0.2.10"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[4], 'GATEWAY="10.0.2.254"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[5], 'IPADDR="10.0.2.15"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[6], 'IPV6INIT="yes"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[7], 'NETMASK="255.255.255.0"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[8], 'ONBOOT="no"\n', ifcfg_lines)
            self.assertTrue(ifcfg_lines[9].startswith("UUID="), ifcfg_lines)
    def network_team_test(self):
        with tempfile.NamedTemporaryFile(mode="w+t") as ks_file:
            ks_file.write("""network --device=link --bootproto=dhcp --activate
network --device team0 --activate --bootproto static --ip=10.34.102.222 --netmask=255.255.255.0 --gateway=10.34.102.254 --nameserver=10.34.39.2 --teamslaves="p3p1'{\"prio\": -10, \"sticky\": true}'" --teamconfig="{\"runner\": {\"name\": \"activebackup\"}}"
""")
            ks_file.flush()
            lines = self.execParseKickstart(ks_file.name)
            self.assertRegex(lines[0], r"ip=[^\s:]+:dhcp: bootdev=[^\s:]+", lines)
            # Check the generated slave ifcfg file for the team port.
            team_lines = sorted(open(self.tmpdir+"/ifcfg/ifcfg-team0_slave_0").readlines())
            self.assertEqual(team_lines[0], "# Generated by parse-kickstart\n", team_lines)
            self.assertEqual(team_lines[1], 'DEVICE="p3p1"\n', team_lines)
            self.assertEqual(team_lines[2], 'DEVICETYPE="TeamPort"\n', team_lines)
            self.assertEqual(team_lines[3], 'NAME="team0 slave 0"\n', team_lines)
            self.assertEqual(team_lines[4], 'ONBOOT="yes"\n', team_lines)
            self.assertEqual(team_lines[5], 'TEAM_MASTER="team0"\n', team_lines)
            self.assertEqual(team_lines[6], 'TEAM_PORT_CONFIG="{prio: -10, sticky: true}"\n', team_lines)
    def network_bond_test(self):
        with tempfile.NamedTemporaryFile(mode="w+t") as ks_file:
            ks_file.write("""network --device=link --bootproto=dhcp --activate
network --device=eth0 --bootproto=dhcp --bondslaves=eth0,eth1
""")
            ks_file.flush()
            lines = self.execParseKickstart(ks_file.name)
            self.assertRegex(lines[0], r"ip=[^\s:]+:dhcp: bootdev=[^\s:]+", lines)
            # Check the generated slave ifcfg file for the bond member.
            ifcfg_lines = sorted(open(self.tmpdir+"/ifcfg/ifcfg-eth0_slave_1").readlines())
            self.assertEqual(ifcfg_lines[0], "# Generated by parse-kickstart\n", ifcfg_lines)
            self.assertTrue(ifcfg_lines[2].startswith("MASTER="), ifcfg_lines)
            self.assertEqual(ifcfg_lines[3], 'NAME="eth0 slave 1"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[4], 'ONBOOT="yes"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[5], 'TYPE="Ethernet"\n', ifcfg_lines)
            self.assertTrue(ifcfg_lines[6].startswith("UUID="), ifcfg_lines)
    def network_bridge_test(self):
        with tempfile.NamedTemporaryFile(mode="w+t") as ks_file:
            ks_file.write("""network --device=link --bootproto=dhcp --activate
network --device br0 --activate --bootproto dhcp --bridgeslaves=eth0 --bridgeopts=stp=6.0,forward_delay=2
""")
            ks_file.flush()
            lines = self.execParseKickstart(ks_file.name)
            self.assertRegex(lines[0], r"ip=[^\s:]+:dhcp: bootdev=[^\s:]+", lines)
            # The bridge itself...
            ifcfg_lines = sorted(open(self.tmpdir+"/ifcfg/ifcfg-br0").readlines())
            self.assertEqual(ifcfg_lines[0], "# Generated by parse-kickstart\n", ifcfg_lines)
            self.assertEqual(ifcfg_lines[1], 'BOOTPROTO="dhcp"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[2], 'DELAY="2"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[3], 'DEVICE="br0"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[4], 'IPV6INIT="yes"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[5], 'NAME="Bridge connection br0"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[6], 'ONBOOT="yes"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[7], 'STP="6.0"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[8], 'TYPE="Bridge"\n', ifcfg_lines)
            self.assertTrue(ifcfg_lines[9].startswith("UUID="), ifcfg_lines)
            # ...and its enslaved port.
            bridge_lines = sorted(open(self.tmpdir+"/ifcfg/ifcfg-br0_slave_1").readlines())
            self.assertEqual(bridge_lines[0], "# Generated by parse-kickstart\n", bridge_lines)
            self.assertEqual(bridge_lines[1], 'BRIDGE="br0"\n', bridge_lines)
            self.assertEqual(bridge_lines[3], 'NAME="br0 slave 1"\n', bridge_lines)
            self.assertEqual(bridge_lines[4], 'ONBOOT="yes"\n', bridge_lines)
            self.assertEqual(bridge_lines[5], 'TYPE="Ethernet"\n', bridge_lines)
            self.assertTrue(bridge_lines[6].startswith("UUID="), bridge_lines)
    def network_ipv6_only_test(self):
        with tempfile.NamedTemporaryFile(mode="w+t") as ks_file:
            ks_file.write("""network --noipv4 --hostname=blah.test.com --ipv6=1:2:3:4:5:6:7:8 --device lo --nameserver=1:1:1:1::,2:2:2:2::""")
            ks_file.flush()
            lines = self.execParseKickstart(ks_file.name)
            self.assertRegex(lines[0], r"ip=\[1:2:3:4:5:6:7:8\]:.*")
            ifcfg_lines = sorted(open(self.tmpdir+"/ifcfg/ifcfg-lo").readlines())
            self.assertEqual(ifcfg_lines[1], 'DEVICE="lo"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[2], 'DNS1="1:1:1:1::"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[3], 'DNS2="2:2:2:2::"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[4], 'IPV6ADDR="1:2:3:4:5:6:7:8"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[5], 'IPV6INIT="yes"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[6], 'IPV6_AUTOCONF="no"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[7], 'ONBOOT="yes"\n', ifcfg_lines)
            self.assertTrue(ifcfg_lines[8].startswith("UUID="), ifcfg_lines)
    def network_vlanid_test(self):
        with tempfile.NamedTemporaryFile(mode="w+t") as ks_file:
            ks_file.write("""network --device=link --bootproto=dhcp --activate
network --device=lo --vlanid=171
""")
            ks_file.flush()
            lines = self.execParseKickstart(ks_file.name)
            self.assertRegex(lines[0], r"ip=[^\s:]+:dhcp: bootdev=[^\s:]+", lines)
            # VLAN interfaces are named <device>.<vlanid>.
            ifcfg_lines = sorted(open(self.tmpdir+"/ifcfg/ifcfg-lo.171").readlines())
            self.assertEqual(ifcfg_lines[1], 'BOOTPROTO="dhcp"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[2], 'DEVICE="lo.171"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[3], 'IPV6INIT="yes"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[4], 'NAME="VLAN connection lo.171"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[5], 'ONBOOT="no"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[6], 'PHYSDEV="lo"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[7], 'TYPE="Vlan"\n', ifcfg_lines)
            self.assertTrue(ifcfg_lines[8].startswith("UUID="), ifcfg_lines)
            self.assertEqual(ifcfg_lines[9], 'VLAN="yes"\n', ifcfg_lines)
            self.assertEqual(ifcfg_lines[10], 'VLAN_ID="171"\n', ifcfg_lines)
    def displaymode_test(self):
        with tempfile.NamedTemporaryFile(mode="w+t") as ks_file:
            ks_file.write("""cmdline""")
            ks_file.flush()
            lines = self.execParseKickstart(ks_file.name)
            self.assertEqual(lines[0], "inst.cmdline", lines)
    def displaymode_test_2(self):
        with tempfile.NamedTemporaryFile(mode="w+t") as ks_file:
            ks_file.write("""graphical""")
            ks_file.flush()
            lines = self.execParseKickstart(ks_file.name)
            self.assertEqual(lines[0], "inst.graphical", lines)
    def displaymode_test_3(self):
        with tempfile.NamedTemporaryFile(mode="w+t") as ks_file:
            ks_file.write("""text""")
            ks_file.flush()
            lines = self.execParseKickstart(ks_file.name)
            self.assertEqual(lines[0], "inst.text", lines)
    def bootloader_test(self):
        with tempfile.NamedTemporaryFile(mode="w+t") as ks_file:
            ks_file.write("""bootloader --extlinux """)
            ks_file.flush()
            lines = self.execParseKickstart(ks_file.name)
            self.assertEqual(lines[0], "extlinux", lines)
|
dashea/anaconda
|
tests/dracut_tests/parse-kickstart_test.py
|
Python
|
gpl-2.0
| 15,355
|
[
"Brian"
] |
916e54a94ca2419cdeb45b5ac3f45bcd7161e9e99c659fd73dc654f439e37690
|
""" @package antlr3.tree
@brief ANTLR3 runtime package, tree module
This module contains all support classes for AST construction and tree parsers.
"""
# begin[licence]
#
# [The "BSD licence"]
# Copyright (c) 2005-2012 Terence Parr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# end[licence]
# lot's of docstrings are missing, don't complain for now...
# pylint: disable-msg=C0111
import re
from antlr3.constants import UP, DOWN, EOF, INVALID_TOKEN_TYPE
from antlr3.recognizers import BaseRecognizer, RuleReturnScope
from antlr3.streams import IntStream
from antlr3.tokens import CommonToken, Token, INVALID_TOKEN
from antlr3.exceptions import MismatchedTreeNodeException, \
MissingTokenException, UnwantedTokenException, MismatchedTokenException, \
NoViableAltException
############################################################################
#
# tree related exceptions
#
############################################################################
class RewriteCardinalityException(RuntimeError):
    """
    @brief Base class for all exceptions thrown during AST rewrite construction.

    Signals that the cardinality of two or more elements in a subrule
    differ: (ID INT)+ where |ID| != |INT|
    """

    def __init__(self, elementDescription):
        super().__init__(elementDescription)
        # Kept as an attribute so getMessage() can report which element
        # had the mismatched cardinality.
        self.elementDescription = elementDescription

    def getMessage(self):
        """Return the description of the offending element."""
        return self.elementDescription
class RewriteEarlyExitException(RewriteCardinalityException):
    """@brief No elements within a (...)+ in a rewrite rule"""

    def __init__(self, elementDescription=None):
        # Unlike the base class, the description is optional here: an
        # early exit frequently has no specific element to point at.
        super().__init__(elementDescription)
class RewriteEmptyStreamException(RewriteCardinalityException):
    """
    @brief Ref to ID or expr but no tokens in ID stream or subtrees in expr stream

    Inherits all behavior from RewriteCardinalityException; this subclass
    exists only so callers can distinguish the "stream ran dry" case.
    """
    pass
############################################################################
#
# basic Tree and TreeAdaptor interfaces
#
############################################################################
class Tree(object):
    """
    @brief Abstract baseclass for tree nodes.

    What does a tree look like?  ANTLR has a number of support classes
    such as CommonTreeNodeStream that work on these kinds of trees.  You
    don't have to make your trees implement this interface, but if you do,
    you'll be able to use more support code.

    NOTE: When constructing trees, ANTLR can build any kind of tree; it can
    even use Token objects as trees if you add a child list to your tokens.

    This is a tree node without any payload; just navigation and factory
    stuff.  Every method raises NotImplementedError; see BaseTree below
    for the generic implementation.
    """
    def getChild(self, i):
        """Return child i (0..n-1)."""
        raise NotImplementedError
    def getChildCount(self):
        """Return the number of children; 0 means a leaf."""
        raise NotImplementedError
    def getParent(self):
        """Tree tracks parent and child index now > 3.0"""
        raise NotImplementedError
    def setParent(self, t):
        """Tree tracks parent and child index now > 3.0"""
        raise NotImplementedError
    def hasAncestor(self, ttype):
        """Walk upwards looking for ancestor with this token type."""
        raise NotImplementedError
    def getAncestor(self, ttype):
        """Walk upwards and get first ancestor with this token type."""
        raise NotImplementedError
    def getAncestors(self):
        """Return a list of all ancestors of this node.

        The first node of list is the root and the last is the parent of
        this node.
        """
        raise NotImplementedError
    def getChildIndex(self):
        """This node is what child index? 0..n-1"""
        raise NotImplementedError
    def setChildIndex(self, index):
        """This node is what child index? 0..n-1"""
        raise NotImplementedError
    def freshenParentAndChildIndexes(self):
        """Set the parent and child index values for all children"""
        raise NotImplementedError
    def addChild(self, t):
        """
        Add t as a child to this node.  If t is null, do nothing.  If t
        is nil, add all children of t to this' children.
        """
        raise NotImplementedError
    def setChild(self, i, t):
        """Set ith child (0..n-1) to t; t must be non-null and non-nil node"""
        raise NotImplementedError
    def deleteChild(self, i):
        """Remove child i; implementations return the removed node."""
        raise NotImplementedError
    def replaceChildren(self, startChildIndex, stopChildIndex, t):
        """
        Delete children from start to stop and replace with t even if t is
        a list (nil-root tree).  num of children can increase or decrease.
        For huge child lists, inserting children can force walking rest of
        children to set their childindex; could be slow.
        """
        raise NotImplementedError
    def isNil(self):
        """
        Indicates the node is a nil node but may still have children, meaning
        the tree is a flat list.
        """
        raise NotImplementedError
    def getTokenStartIndex(self):
        """
        What is the smallest token index (indexing from 0) for this node
        and its children?
        """
        raise NotImplementedError
    def setTokenStartIndex(self, index):
        """Record the smallest token index covered by this subtree."""
        raise NotImplementedError
    def getTokenStopIndex(self):
        """
        What is the largest token index (indexing from 0) for this node
        and its children?
        """
        raise NotImplementedError
    def setTokenStopIndex(self, index):
        """Record the largest token index covered by this subtree."""
        raise NotImplementedError
    def dupNode(self):
        """Duplicate this single node (not the whole subtree)."""
        raise NotImplementedError
    def getType(self):
        """Return a token type; needed for tree parsing."""
        raise NotImplementedError
    def getText(self):
        """Return the text payload of this node, if any."""
        raise NotImplementedError
    def getLine(self):
        """
        In case we don't have a token payload, what is the line for errors?
        """
        raise NotImplementedError
    def getCharPositionInLine(self):
        """Return the column for error reporting."""
        raise NotImplementedError
    def toStringTree(self):
        """Render this whole subtree as a LISP-style string."""
        raise NotImplementedError
    def toString(self):
        """Render just this node as a string."""
        raise NotImplementedError
class TreeAdaptor(object):
    """
    @brief Abstract baseclass for tree adaptors.

    How to create and navigate trees.  Rather than have a separate factory
    and adaptor, I've merged them.  Makes sense to encapsulate.

    This takes the place of the tree construction code generated in the
    generated code in 2.x and the ASTFactory.

    I do not need to know the type of a tree at all so they are all
    generic Objects.  This may increase the amount of typecasting needed. :(
    """
    # C o n s t r u c t i o n
    def createWithPayload(self, payload):
        """
        Create a tree node from Token object; for CommonTree type trees,
        then the token just becomes the payload.  This is the most
        common create call.

        Override if you want another kind of node to be built.
        """
        raise NotImplementedError
    def dupNode(self, treeNode):
        """Duplicate a single tree node.

        Override if you want another kind of node to be built."""
        raise NotImplementedError
    def dupTree(self, tree):
        """Duplicate tree recursively, using dupNode() for each node"""
        raise NotImplementedError
    def nil(self):
        """
        Return a nil node (an empty but non-null node) that can hold
        a list of element as the children.  If you want a flat tree (a list)
        use "t=adaptor.nil(); t.addChild(x); t.addChild(y);"
        """
        raise NotImplementedError
    def errorNode(self, input, start, stop, exc):
        """
        Return a tree node representing an error.  This node records the
        tokens consumed during error recovery.  The start token indicates the
        input symbol at which the error was detected.  The stop token indicates
        the last symbol consumed during recovery.

        You must specify the input stream so that the erroneous text can
        be packaged up in the error node.  The exception could be useful
        to some applications; default implementation stores ptr to it in
        the CommonErrorNode.

        This only makes sense during token parsing, not tree parsing.
        Tree parsing should happen only when parsing and tree construction
        succeed.
        """
        raise NotImplementedError
    def isNil(self, tree):
        """Is tree considered a nil node used to make lists of child nodes?"""
        raise NotImplementedError
    def addChild(self, t, child):
        """
        Add a child to the tree t.  If child is a flat tree (a list), make all
        in list children of t.  Warning: if t has no children, but child does
        and child isNil then you can decide it is ok to move children to t via
        t.children = child.children; i.e., without copying the array.  Just
        make sure that this is consistent with have the user will build
        ASTs.  Do nothing if t or child is null.
        """
        raise NotImplementedError
    def becomeRoot(self, newRoot, oldRoot):
        """
        If oldRoot is a nil root, just copy or move the children to newRoot.
        If not a nil root, make oldRoot a child of newRoot.

          old=^(nil a b c), new=r yields ^(r a b c)
          old=^(a b c), new=r yields ^(r ^(a b c))

        If newRoot is a nil-rooted single child tree, use the single
        child as the new root node.

          old=^(nil a b c), new=^(nil r) yields ^(r a b c)
          old=^(a b c), new=^(nil r) yields ^(r ^(a b c))

        If oldRoot was null, it's ok, just return newRoot (even if isNil).

          old=null, new=r yields r
          old=null, new=^(nil r) yields ^(nil r)

        Return newRoot.  Throw an exception if newRoot is not a
        simple node or nil root with a single child node--it must be a root
        node.  If newRoot is ^(nil x) return x as newRoot.

        Be advised that it's ok for newRoot to point at oldRoot's
        children; i.e., you don't have to copy the list.  We are
        constructing these nodes so we should have this control for
        efficiency.
        """
        raise NotImplementedError
    def rulePostProcessing(self, root):
        """
        Given the root of the subtree created for this rule, post process
        it to do any simplifications or whatever you want.  A required
        behavior is to convert ^(nil singleSubtree) to singleSubtree
        as the setting of start/stop indexes relies on a single non-nil root
        for non-flat trees.

        Flat trees such as for lists like "idlist : ID+ ;" are left alone
        unless there is only one ID.  For a list, the start/stop indexes
        are set in the nil node.

        This method is executed after all rule tree construction and right
        before setTokenBoundaries().
        """
        raise NotImplementedError
    def getUniqueID(self, node):
        """For identifying trees.

        How to identify nodes so we can say "add node to a prior node"?
        Even becomeRoot is an issue.  Use System.identityHashCode(node)
        usually.
        """
        raise NotImplementedError
    # R e w r i t e  R u l e s
    def createFromToken(self, tokenType, fromToken, text=None):
        """
        Create a new node derived from a token, with a new token type and
        (optionally) new text.

        This is invoked from an imaginary node ref on right side of a
        rewrite rule as IMAG[$tokenLabel] or IMAG[$tokenLabel "IMAG"].

        This should invoke createToken(Token).
        """
        raise NotImplementedError
    def createFromType(self, tokenType, text):
        """Create a new node derived from a token, with a new token type.

        This is invoked from an imaginary node ref on right side of a
        rewrite rule as IMAG["IMAG"].

        This should invoke createToken(int,String).
        """
        raise NotImplementedError
    # C o n t e n t
    def getType(self, t):
        """For tree parsing, I need to know the token type of a node"""
        raise NotImplementedError
    def setType(self, t, type):
        """Node constructors can set the type of a node"""
        raise NotImplementedError
    def getText(self, t):
        """Return the text payload of node t."""
        raise NotImplementedError
    def setText(self, t, text):
        """Node constructors can set the text of a node"""
        raise NotImplementedError
    def getToken(self, t):
        """Return the token object from which this node was created.

        Currently used only for printing an error message.
        The error display routine in BaseRecognizer needs to
        display where the input the error occurred.  If your
        tree of limitation does not store information that can
        lead you to the token, you can create a token filled with
        the appropriate information and pass that back.  See
        BaseRecognizer.getErrorMessage().
        """
        raise NotImplementedError
    def setTokenBoundaries(self, t, startToken, stopToken):
        """
        Where are the bounds in the input token stream for this node and
        all children?  Each rule that creates AST nodes will call this
        method right before returning.  Flat trees (i.e., lists) will
        still usually have a nil root node just to hold the children list.
        That node would contain the start/stop indexes then.
        """
        raise NotImplementedError
    def getTokenStartIndex(self, t):
        """
        Get the token start index for this subtree; return -1 if no such index
        """
        raise NotImplementedError
    def getTokenStopIndex(self, t):
        """
        Get the token stop index for this subtree; return -1 if no such index
        """
        raise NotImplementedError
    # N a v i g a t i o n  /  T r e e  P a r s i n g
    def getChild(self, t, i):
        """Get a child 0..n-1 node"""
        raise NotImplementedError
    def setChild(self, t, i, child):
        """Set ith child (0..n-1) to t; t must be non-null and non-nil node"""
        raise NotImplementedError
    def deleteChild(self, t, i):
        """Remove ith child and shift children down from right."""
        raise NotImplementedError
    def getChildCount(self, t):
        """How many children?  If 0, then this is a leaf node"""
        raise NotImplementedError
    def getParent(self, t):
        """
        Who is the parent node of this node; if null, implies node is root.
        If your node type doesn't handle this, it's ok but the tree rewrites
        in tree parsers need this functionality.
        """
        raise NotImplementedError
    def setParent(self, t, parent):
        """
        Who is the parent node of this node; if null, implies node is root.
        If your node type doesn't handle this, it's ok but the tree rewrites
        in tree parsers need this functionality.
        """
        raise NotImplementedError
    def getChildIndex(self, t):
        """
        What index is this node in the child list?  Range: 0..n-1
        If your node type doesn't handle this, it's ok but the tree rewrites
        in tree parsers need this functionality.
        """
        raise NotImplementedError
    def setChildIndex(self, t, index):
        """
        What index is this node in the child list?  Range: 0..n-1
        If your node type doesn't handle this, it's ok but the tree rewrites
        in tree parsers need this functionality.
        """
        raise NotImplementedError
    def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
        """
        Replace from start to stop child index of parent with t, which might
        be a list.  Number of children may be different
        after this call.

        If parent is null, don't do anything; must be at root of overall tree.
        Can't replace whatever points to the parent externally.  Do nothing.
        """
        raise NotImplementedError
    # Misc
    def create(self, *args):
        """
        Deprecated, use createWithPayload, createFromToken or createFromType.

        This method only exists to mimic the Java interface of TreeAdaptor.
        Dispatches on the argument count and types to one of the three
        create* variants above; raises TypeError if no overload matches.
        """
        if len(args) == 1 and isinstance(args[0], Token):
            # Object create(Token payload);
##             warnings.warn(
##                 "Using create() is deprecated, use createWithPayload()",
##                 DeprecationWarning,
##                 stacklevel=2
##                 )
            return self.createWithPayload(args[0])
        if (len(args) == 2
            and isinstance(args[0], int)
            and isinstance(args[1], Token)):
            # Object create(int tokenType, Token fromToken);
##             warnings.warn(
##                 "Using create() is deprecated, use createFromToken()",
##                 DeprecationWarning,
##                 stacklevel=2
##                 )
            return self.createFromToken(args[0], args[1])
        if (len(args) == 3
            and isinstance(args[0], int)
            and isinstance(args[1], Token)
            and isinstance(args[2], str)):
            # Object create(int tokenType, Token fromToken, String text);
##             warnings.warn(
##                 "Using create() is deprecated, use createFromToken()",
##                 DeprecationWarning,
##                 stacklevel=2
##                 )
            return self.createFromToken(args[0], args[1], args[2])
        if (len(args) == 2
            and isinstance(args[0], int)
            and isinstance(args[1], str)):
            # Object create(int tokenType, String text);
##             warnings.warn(
##                 "Using create() is deprecated, use createFromType()",
##                 DeprecationWarning,
##                 stacklevel=2
##                 )
            return self.createFromType(args[0], args[1])
        raise TypeError(
            "No create method with this signature found: {}"
            .format(', '.join(type(v).__name__ for v in args)))
############################################################################
#
# base implementation of Tree and TreeAdaptor
#
# Tree
# \- BaseTree
#
# TreeAdaptor
# \- BaseTreeAdaptor
#
############################################################################
class BaseTree(Tree):
    """
    @brief A generic tree implementation with no payload.

    You must subclass to
    actually have any user data.  ANTLR v3 uses a list of children approach
    instead of the child-sibling approach in v2.  A flat tree (a list) is
    an empty node whose children represent the list.  An empty, but
    non-null node is called "nil".
    """
    # BaseTree is abstract, no need to complain about not implemented abstract
    # methods
    # pylint: disable-msg=W0223
    def __init__(self, node=None):
        """
        Create a new node from an existing node does nothing for BaseTree
        as there are no fields other than the children list, which cannot
        be copied as the children are not considered part of this node.
        """
        super().__init__()
        # children is always a list (never None); nodes are leaves when
        # the list is empty.
        self.children = []
        self.parent = None
        self.childIndex = 0
    def getChild(self, i):
        """Return child i, or None if the index is out of range."""
        try:
            return self.children[i]
        except IndexError:
            return None
    def getChildren(self):
        """@brief Get the children internal List

        Note that if you directly mess with
        the list, do so at your own risk.
        """
        # FIXME: mark as deprecated
        return self.children
    def getFirstChildWithType(self, treeType):
        """Return the first direct child whose token type equals treeType."""
        for child in self.children:
            if child.getType() == treeType:
                return child
        return None
    def getChildCount(self):
        """Return the number of direct children."""
        return len(self.children)
    def addChild(self, childTree):
        """Add t as child of this node.

        Warning: if t has no children, but child does
        and child isNil then this routine moves children to t via
        t.children = child.children; i.e., without copying the array.
        """
        # this implementation is much simpler and probably less efficient
        # than the mumbo-jumbo that Ter did for the Java runtime.
        if childTree is None:
            return
        if childTree.isNil():
            # t is an empty node possibly with children
            if self.children is childTree.children:
                raise ValueError("attempt to add child list to itself")
            # fix parent pointer and childIndex for new children
            for idx, child in enumerate(childTree.children):
                child.parent = self
                child.childIndex = len(self.children) + idx
            self.children += childTree.children
        else:
            # child is not nil (don't care about children)
            self.children.append(childTree)
            childTree.parent = self
            childTree.childIndex = len(self.children) - 1
    def addChildren(self, children):
        """Add all elements of kids list as children of this node"""
        # NOTE: unlike addChild, this does NOT fix the children's parent
        # pointers or childIndex values.
        self.children += children
    def setChild(self, i, t):
        """Set ith child (0..n-1) to t; t must be non-None and non-nil."""
        if t is None:
            return
        if t.isNil():
            raise ValueError("Can't set single child to a list")
        self.children[i] = t
        t.parent = self
        t.childIndex = i
    def deleteChild(self, i):
        """Remove and return child i, re-indexing the children after it."""
        killed = self.children[i]
        del self.children[i]
        # walk rest and decrement their child indexes
        for idx, child in enumerate(self.children[i:]):
            child.childIndex = i + idx
        return killed
    def replaceChildren(self, startChildIndex, stopChildIndex, newTree):
        """
        Delete children from start to stop and replace with t even if t is
        a list (nil-root tree).  num of children can increase or decrease.
        For huge child lists, inserting children can force walking rest of
        children to set their childindex; could be slow.
        """
        if (startChildIndex >= len(self.children)
            or stopChildIndex >= len(self.children)):
            raise IndexError("indexes invalid")
        # number of existing slots being replaced (range is inclusive)
        replacingHowMany = stopChildIndex - startChildIndex + 1
        # normalize to a list of children to add: newChildren
        if newTree.isNil():
            newChildren = newTree.children
        else:
            newChildren = [newTree]
        replacingWithHowMany = len(newChildren)
        delta = replacingHowMany - replacingWithHowMany
        if delta == 0:
            # if same number of nodes, do direct replace
            for idx, child in enumerate(newChildren):
                self.children[idx + startChildIndex] = child
                child.parent = self
                child.childIndex = idx + startChildIndex
        else:
            # length of children changes...
            # ...delete replaced segment...
            del self.children[startChildIndex:stopChildIndex+1]
            # ...insert new segment...
            self.children[startChildIndex:startChildIndex] = newChildren
            # ...and fix indeces
            self.freshenParentAndChildIndexes(startChildIndex)
    def isNil(self):
        """BaseTree nodes are never nil; CommonTree overrides this."""
        return False
    def freshenParentAndChildIndexes(self, offset=0):
        """Reset parent pointers and childIndex values from offset onward."""
        for idx, child in enumerate(self.children[offset:]):
            child.childIndex = idx + offset
            child.parent = self
    def sanityCheckParentAndChildIndexes(self, parent=None, i=-1):
        """Recursively verify parent/childIndex bookkeeping; raises on error."""
        if parent != self.parent:
            raise ValueError(
                "parents don't match; expected {!r} found {!r}"
                .format(parent, self.parent))
        if i != self.childIndex:
            raise ValueError(
                "child indexes don't match; expected {} found {}"
                .format(i, self.childIndex))
        for idx, child in enumerate(self.children):
            child.sanityCheckParentAndChildIndexes(self, idx)
    def getChildIndex(self):
        """BaseTree doesn't track child indexes."""
        return 0
    def setChildIndex(self, index):
        """BaseTree doesn't track child indexes."""
        pass
    def getParent(self):
        """BaseTree doesn't track parent pointers."""
        return None
    def setParent(self, t):
        """BaseTree doesn't track parent pointers."""
        pass
    def hasAncestor(self, ttype):
        """Walk upwards looking for ancestor with this token type."""
        return self.getAncestor(ttype) is not None
    def getAncestor(self, ttype):
        """Walk upwards and get first ancestor with this token type."""
        t = self.getParent()
        while t is not None:
            if t.getType() == ttype:
                return t
            t = t.getParent()
        return None
    def getAncestors(self):
        """Return a list of all ancestors of this node.

        The first node of list is the root and the last is the parent of
        this node.
        """
        if self.getParent() is None:
            return None
        ancestors = []
        t = self.getParent()
        while t is not None:
            ancestors.insert(0, t)  # insert at start
            t = t.getParent()
        return ancestors
    def toStringTree(self):
        """Print out a whole tree not just a node"""
        if len(self.children) == 0:
            return self.toString()
        buf = []
        if not self.isNil():
            buf.append('(')
            buf.append(self.toString())
            buf.append(' ')
        for i, child in enumerate(self.children):
            if i > 0:
                buf.append(' ')
            buf.append(child.toStringTree())
        if not self.isNil():
            buf.append(')')
        return ''.join(buf)
    def getLine(self):
        """No token payload at this level, so no line info."""
        return 0
    def getCharPositionInLine(self):
        """No token payload at this level, so no column info."""
        return 0
    def toString(self):
        """Override to say how a node (not a tree) should look as text"""
        raise NotImplementedError
class BaseTreeAdaptor(TreeAdaptor):
    """
    @brief A TreeAdaptor that works with any Tree implementation.
    """
    # BaseTreeAdaptor is abstract, no need to complain about not implemented
    # abstract methods
    # pylint: disable-msg=W0223
    def nil(self):
        """Return a nil (payload-less) node usable as a flat-list root."""
        return self.createWithPayload(None)
    def errorNode(self, input, start, stop, exc):
        """
        create tree node that holds the start and stop tokens associated
        with an error.

        If you specify your own kind of tree nodes, you will likely have to
        override this method.  CommonTree returns Token.INVALID_TOKEN_TYPE
        if no token payload but you might have to set token type for diff
        node type.

        You don't have to subclass CommonErrorNode; you will likely need to
        subclass your own tree node class to avoid class cast exception.
        """
        return CommonErrorNode(input, start, stop, exc)
    def isNil(self, tree):
        """Delegate the nil test to the node itself."""
        return tree.isNil()
    def dupTree(self, t, parent=None):
        """
        This is generic in the sense that it will work with any kind of
        tree (not just Tree interface).  It invokes the adaptor routines
        not the tree node routines to do the construction.
        """
        if t is None:
            return None
        newTree = self.dupNode(t)
        # ensure new subtree root has parent/child index set
        # same index in new tree
        self.setChildIndex(newTree, self.getChildIndex(t))
        self.setParent(newTree, parent)
        for i in range(self.getChildCount(t)):
            child = self.getChild(t, i)
            # NOTE(review): the recursive call passes the *original* node t
            # as parent; addChild below re-parents the duplicate, so the
            # final parent pointers still end up inside the new tree.
            newSubTree = self.dupTree(child, t)
            self.addChild(newTree, newSubTree)
        return newTree
    def addChild(self, tree, child):
        """
        Add a child to the tree t.  If child is a flat tree (a list), make all
        in list children of t.  Warning: if t has no children, but child does
        and child isNil then you can decide it is ok to move children to t via
        t.children = child.children; i.e., without copying the array.  Just
        make sure that this is consistent with have the user will build
        ASTs.
        """
        #if isinstance(child, Token):
        #    child = self.createWithPayload(child)
        if tree is not None and child is not None:
            tree.addChild(child)
    def becomeRoot(self, newRoot, oldRoot):
        """
        If oldRoot is a nil root, just copy or move the children to newRoot.
        If not a nil root, make oldRoot a child of newRoot.

          old=^(nil a b c), new=r yields ^(r a b c)
          old=^(a b c), new=r yields ^(r ^(a b c))

        If newRoot is a nil-rooted single child tree, use the single
        child as the new root node.

          old=^(nil a b c), new=^(nil r) yields ^(r a b c)
          old=^(a b c), new=^(nil r) yields ^(r ^(a b c))

        If oldRoot was null, it's ok, just return newRoot (even if isNil).

          old=null, new=r yields r
          old=null, new=^(nil r) yields ^(nil r)

        Return newRoot.  Throw an exception if newRoot is not a
        simple node or nil root with a single child node--it must be a root
        node.  If newRoot is ^(nil x) return x as newRoot.

        Be advised that it's ok for newRoot to point at oldRoot's
        children; i.e., you don't have to copy the list.  We are
        constructing these nodes so we should have this control for
        efficiency.
        """
        # accept a raw Token as the new root and wrap it in a node first
        if isinstance(newRoot, Token):
            newRoot = self.create(newRoot)
        if oldRoot is None:
            return newRoot
        if not isinstance(newRoot, CommonTree):
            newRoot = self.createWithPayload(newRoot)
        # handle ^(nil real-node)
        if newRoot.isNil():
            nc = newRoot.getChildCount()
            if nc == 1:
                newRoot = newRoot.getChild(0)
            elif nc > 1:
                # TODO: make tree run time exceptions hierarchy
                raise RuntimeError("more than one node as root")
        # add oldRoot to newRoot; addChild takes care of case where oldRoot
        # is a flat list (i.e., nil-rooted tree).  All children of oldRoot
        # are added to newRoot.
        newRoot.addChild(oldRoot)
        return newRoot
    def rulePostProcessing(self, root):
        """Transform ^(nil x) to x and nil to null"""
        if root is not None and root.isNil():
            if root.getChildCount() == 0:
                root = None
            elif root.getChildCount() == 1:
                root = root.getChild(0)
                # whoever invokes rule will set parent and child index
                root.setParent(None)
                root.setChildIndex(-1)
        return root
    def createFromToken(self, tokenType, fromToken, text=None):
        """Build a node from an existing token, overriding type (and text)."""
        if fromToken is None:
            return self.createFromType(tokenType, text)
        assert isinstance(tokenType, int), type(tokenType).__name__
        assert isinstance(fromToken, Token), type(fromToken).__name__
        assert text is None or isinstance(text, str), type(text).__name__
        # copy the token first so the original stays untouched
        fromToken = self.createToken(fromToken)
        fromToken.type = tokenType
        if text is not None:
            fromToken.text = text
        t = self.createWithPayload(fromToken)
        return t
    def createFromType(self, tokenType, text):
        """Build a node for an imaginary token with the given type/text."""
        assert isinstance(tokenType, int), type(tokenType).__name__
        assert isinstance(text, str) or text is None, type(text).__name__
        fromToken = self.createToken(tokenType=tokenType, text=text)
        t = self.createWithPayload(fromToken)
        return t
    def getType(self, t):
        """Delegate to the node."""
        return t.getType()
    def setType(self, t, type):
        raise RuntimeError("don't know enough about Tree node")
    def getText(self, t):
        """Delegate to the node."""
        return t.getText()
    def setText(self, t, text):
        raise RuntimeError("don't know enough about Tree node")
    def getChild(self, t, i):
        """Delegate to the node."""
        return t.getChild(i)
    def setChild(self, t, i, child):
        """Delegate to the node."""
        t.setChild(i, child)
    def deleteChild(self, t, i):
        """Delegate to the node; returns the removed child."""
        return t.deleteChild(i)
    def getChildCount(self, t):
        """Delegate to the node."""
        return t.getChildCount()
    def getUniqueID(self, node):
        """Identify nodes by hash (stands in for Java identityHashCode)."""
        return hash(node)
    def createToken(self, fromToken=None, tokenType=None, text=None):
        """
        Tell me how to create a token for use with imaginary token nodes.
        For example, there is probably no input symbol associated with imaginary
        token DECL, but you need to create it as a payload or whatever for
        the DECL node as in ^(DECL type ID).

        If you care what the token payload objects' type is, you should
        override this method and any other createToken variant.
        """
        raise NotImplementedError
############################################################################
#
# common tree implementation
#
# Tree
# \- BaseTree
# \- CommonTree
# \- CommonErrorNode
#
# TreeAdaptor
# \- BaseTreeAdaptor
# \- CommonTreeAdaptor
#
############################################################################
class CommonTree(BaseTree):
"""@brief A tree node that is wrapper for a Token object.
After 3.0 release
while building tree rewrite stuff, it became clear that computing
parent and child index is very difficult and cumbersome. Better to
spend the space in every tree node. If you don't want these extra
fields, it's easy to cut them out in your own BaseTree subclass.
"""
def __init__(self, payload):
BaseTree.__init__(self)
# What token indexes bracket all tokens associated with this node
# and below?
self.startIndex = -1
self.stopIndex = -1
# Who is the parent node of this node; if null, implies node is root
self.parent = None
# What index is this node in the child list? Range: 0..n-1
self.childIndex = -1
# A single token is the payload
if payload is None:
self.token = None
elif isinstance(payload, CommonTree):
self.token = payload.token
self.startIndex = payload.startIndex
self.stopIndex = payload.stopIndex
elif payload is None or isinstance(payload, Token):
self.token = payload
else:
raise TypeError(type(payload).__name__)
    def getToken(self):
        """Return the Token payload (None for a nil node)."""
        return self.token
    def dupNode(self):
        """Duplicate this node only (children excluded; see __init__)."""
        return CommonTree(self)
    def isNil(self):
        """A node with no token payload is a nil (list-root) node."""
        return self.token is None
    def getType(self):
        """Return the payload token's type, or INVALID_TOKEN_TYPE if nil."""
        if self.token is None:
            return INVALID_TOKEN_TYPE
        return self.token.type
    type = property(getType)
    def getText(self):
        """Return the payload token's text, or None if nil."""
        if self.token is None:
            return None
        return self.token.text
    text = property(getText)
    def getLine(self):
        """Return the payload token's line; fall back to the first child."""
        if self.token is None or self.token.line == 0:
            if self.getChildCount():
                return self.getChild(0).getLine()
            else:
                return 0
        return self.token.line
    line = property(getLine)
    def getCharPositionInLine(self):
        """Return the payload token's column; fall back to the first child."""
        if self.token is None or self.token.charPositionInLine == -1:
            if self.getChildCount():
                return self.getChild(0).getCharPositionInLine()
            else:
                return 0
        else:
            return self.token.charPositionInLine
    charPositionInLine = property(getCharPositionInLine)
def getTokenStartIndex(self):
if self.startIndex == -1 and self.token:
return self.token.index
return self.startIndex
def setTokenStartIndex(self, index):
self.startIndex = index
tokenStartIndex = property(getTokenStartIndex, setTokenStartIndex)
def getTokenStopIndex(self):
if self.stopIndex == -1 and self.token:
return self.token.index
return self.stopIndex
def setTokenStopIndex(self, index):
self.stopIndex = index
tokenStopIndex = property(getTokenStopIndex, setTokenStopIndex)
def setUnknownTokenBoundaries(self):
"""For every node in this subtree, make sure it's start/stop token's
are set. Walk depth first, visit bottom up. Only updates nodes
with at least one token index < 0.
"""
if self.children is None:
if self.startIndex < 0 or self.stopIndex < 0:
self.startIndex = self.stopIndex = self.token.index
return
for child in self.children:
child.setUnknownTokenBoundaries()
if self.startIndex >= 0 and self.stopIndex >= 0:
# already set
return
if self.children:
firstChild = self.children[0]
lastChild = self.children[-1]
self.startIndex = firstChild.getTokenStartIndex()
self.stopIndex = lastChild.getTokenStopIndex()
def getChildIndex(self):
#FIXME: mark as deprecated
return self.childIndex
def setChildIndex(self, idx):
#FIXME: mark as deprecated
self.childIndex = idx
def getParent(self):
#FIXME: mark as deprecated
return self.parent
def setParent(self, t):
#FIXME: mark as deprecated
self.parent = t
def toString(self):
if self.isNil():
return "nil"
if self.getType() == INVALID_TOKEN_TYPE:
return "<errornode>"
return self.token.text
__str__ = toString
def toStringTree(self):
if not self.children:
return self.toString()
ret = ''
if not self.isNil():
ret += '({!s} '.format(self)
ret += ' '.join([child.toStringTree() for child in self.children])
if not self.isNil():
ret += ')'
return ret
# Shared sentinel node used where an error/invalid node is needed; wraps
# the module-level INVALID_TOKEN singleton.
INVALID_NODE = CommonTree(INVALID_TOKEN)
class CommonErrorNode(CommonTree):
    """A node representing erroneous token range in token stream"""

    def __init__(self, input, start, stop, exc):
        """Record the offending token range and the trapped exception."""
        CommonTree.__init__(self, None)

        noStop = stop is None
        stopBeforeStart = (not noStop
                           and stop.index < start.index
                           and stop.type != EOF)
        if noStop or stopBeforeStart:
            # sometimes resync does not consume a token (when LT(1) is
            # in follow set. So, stop will be 1 to left to start. adjust.
            # Also handle case where start is the first token and no token
            # is consumed during recovery; LT(-1) will return null.
            stop = start

        self.input = input
        self.start = start
        self.stop = stop
        self.trappedException = exc

    def isNil(self):
        # An error node is never a nil/list node.
        return False

    def getType(self):
        return INVALID_TOKEN_TYPE

    def getText(self):
        """Return the raw input text covered by the erroneous range."""
        if isinstance(self.start, Token):
            beginIdx = self.start.index
            endIdx = self.stop.index
            if self.stop.type == EOF:
                endIdx = self.input.size()

            return self.input.toString(beginIdx, endIdx)

        if isinstance(self.start, Tree):
            return self.input.toString(self.start, self.stop)

        # people should subclass if they alter the tree type so this
        # next one is for sure correct.
        return "<unknown>"

    def toString(self):
        """Describe the error according to the trapped exception's type."""
        e = self.trappedException
        if isinstance(e, MissingTokenException):
            return ("<missing type: "
                    + str(e.getMissingType())
                    + ">")

        if isinstance(e, UnwantedTokenException):
            return ("<extraneous: "
                    + str(e.getUnexpectedToken())
                    + ", resync=" + self.getText() + ">")

        if isinstance(e, MismatchedTokenException):
            return ("<mismatched token: "
                    + str(e.token)
                    + ", resync=" + self.getText() + ">")

        if isinstance(e, NoViableAltException):
            return ("<unexpected: "
                    + str(e.token)
                    + ", resync=" + self.getText() + ">")

        return "<error: " + self.getText() + ">"

    __str__ = toString
class CommonTreeAdaptor(BaseTreeAdaptor):
    """
    @brief A TreeAdaptor that works with any Tree implementation.

    It provides
    really just factory methods; all the work is done by BaseTreeAdaptor.
    If you would like to have different tokens created than ClassicToken
    objects, you need to override this and then set the parser tree adaptor to
    use your subclass.

    To get your parser to build nodes of a different type, override
    create(Token), errorNode(), and to be safe, YourTreeClass.dupNode().
    dupNode is called to duplicate nodes during rewrite operations.
    """

    def dupNode(self, treeNode):
        """
        Duplicate a node. This is part of the factory;
        override if you want another kind of node to be built.

        I could use reflection to prevent having to override this
        but reflection is slow.
        """
        return None if treeNode is None else treeNode.dupNode()

    def createWithPayload(self, payload):
        """Factory method: wrap a token payload in a CommonTree node."""
        return CommonTree(payload)

    def createToken(self, fromToken=None, tokenType=None, text=None):
        """
        Tell me how to create a token for use with imaginary token nodes.
        For example, there is probably no input symbol associated with imaginary
        token DECL, but you need to create it as a payload or whatever for
        the DECL node as in ^(DECL type ID).

        If you care what the token payload objects' type is, you should
        override this method and any other createToken variant.
        """
        if fromToken is None:
            return CommonToken(type=tokenType, text=text)

        return CommonToken(oldToken=fromToken)

    def setTokenBoundaries(self, t, startToken, stopToken):
        """
        Track start/stop token for subtree root created for a rule.
        Only works with Tree nodes. For rules that match nothing,
        seems like this will yield start=i and stop=i-1 in a nil node.
        Might be useful info so I'll not force to be i..i.
        """
        if t is None:
            return

        start = startToken.index if startToken is not None else 0
        stop = stopToken.index if stopToken is not None else 0
        t.setTokenStartIndex(start)
        t.setTokenStopIndex(stop)

    def getTokenStartIndex(self, t):
        return -1 if t is None else t.getTokenStartIndex()

    def getTokenStopIndex(self, t):
        return -1 if t is None else t.getTokenStopIndex()

    def getText(self, t):
        return None if t is None else t.text

    def getType(self, t):
        return INVALID_TOKEN_TYPE if t is None else t.type

    def getToken(self, t):
        """
        What is the Token associated with this node? If
        you are not using CommonTree, then you must
        override this in your own adaptor.
        """
        if isinstance(t, CommonTree):
            return t.getToken()

        return None  # no idea what to do

    def getChild(self, t, i):
        return None if t is None else t.getChild(i)

    def getChildCount(self, t):
        return 0 if t is None else t.getChildCount()

    def getParent(self, t):
        return t.getParent()

    def setParent(self, t, parent):
        t.setParent(parent)

    def getChildIndex(self, t):
        return 0 if t is None else t.getChildIndex()

    def setChildIndex(self, t, index):
        t.setChildIndex(index)

    def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
        if parent is not None:
            parent.replaceChildren(startChildIndex, stopChildIndex, t)
############################################################################
#
# streams
#
# TreeNodeStream
# \- BaseTree
# \- CommonTree
#
# TreeAdaptor
# \- BaseTreeAdaptor
# \- CommonTreeAdaptor
#
############################################################################
class TreeNodeStream(IntStream):
    """@brief A stream of tree nodes

    It accessing nodes from a tree of some kind.

    Abstract interface: every method raises NotImplementedError; concrete
    behavior is supplied by subclasses such as CommonTreeNodeStream.
    """

    # TreeNodeStream is abstract, no need to complain about not implemented
    # abstract methods
    # pylint: disable-msg=W0223

    def get(self, i):
        """Get a tree node at an absolute index i; 0..n-1.
        If you don't want to buffer up nodes, then this method makes no
        sense for you.
        """
        raise NotImplementedError

    def LT(self, k):
        """
        Get tree node at current input pointer + i ahead where i=1 is next node.
        i<0 indicates nodes in the past. So LT(-1) is previous node, but
        implementations are not required to provide results for k < -1.
        LT(0) is undefined. For i>=n, return null.
        Return null for LT(0) and any index that results in an absolute address
        that is negative.

        This is analogous to the LT() method of the TokenStream, but this
        returns a tree node instead of a token. Makes code gen identical
        for both parser and tree grammars. :)
        """
        raise NotImplementedError

    def getTreeSource(self):
        """
        Where is this stream pulling nodes from? This is not the name, but
        the object that provides node objects.
        """
        raise NotImplementedError

    def getTokenStream(self):
        """
        If the tree associated with this stream was created from a TokenStream,
        you can specify it here. Used to do rule $text attribute in tree
        parser. Optional unless you use tree parser rule text attribute
        or output=template and rewrite=true options.
        """
        raise NotImplementedError

    def getTreeAdaptor(self):
        """
        What adaptor can tell me how to interpret/navigate nodes and
        trees. E.g., get text of a node.
        """
        raise NotImplementedError

    def setUniqueNavigationNodes(self, uniqueNavigationNodes):
        """
        As we flatten the tree, we use UP, DOWN nodes to represent
        the tree structure. When debugging we need unique nodes
        so we have to instantiate new ones. When doing normal tree
        parsing, it's slow and a waste of memory to create unique
        navigation nodes. Default should be false;
        """
        raise NotImplementedError

    def reset(self):
        """
        Reset the tree node stream in such a way that it acts like
        a freshly constructed stream.
        """
        raise NotImplementedError

    def toString(self, start, stop):
        """
        Return the text of all nodes from start to stop, inclusive.
        If the stream does not buffer all the nodes then it can still
        walk recursively from start until stop. You can always return
        null or "" too, but users should not access $ruleLabel.text in
        an action of course in that case.
        """
        raise NotImplementedError

    # REWRITING TREES (used by tree parser)
    def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
        """
        Replace from start to stop child index of parent with t, which might
        be a list. Number of children may be different
        after this call. The stream is notified because it is walking the
        tree and might need to know you are monkeying with the underlying
        tree. Also, it might be able to modify the node stream to avoid
        restreaming for future phases.

        If parent is null, don't do anything; must be at root of overall tree.
        Can't replace whatever points to the parent externally. Do nothing.
        """
        raise NotImplementedError
class CommonTreeNodeStream(TreeNodeStream):
    """@brief A buffered stream of tree nodes.

    Nodes can be from a tree of ANY kind.

    This node stream sucks all nodes out of the tree specified in
    the constructor during construction and makes pointers into
    the tree using an array of Object pointers. The stream necessarily
    includes pointers to DOWN and UP and EOF nodes.

    This stream knows how to mark/release for backtracking.

    This stream is most suitable for tree interpreters that need to
    jump around a lot or for tree parsers requiring speed (at cost of memory).
    There is some duplicated functionality here with UnBufferedTreeNodeStream
    but just in bookkeeping, not tree walking etc...

    @see UnBufferedTreeNodeStream
    """

    def __init__(self, *args):
        """Overloaded constructor.

        One arg:    (tree)                - stream over tree with a fresh
                                            CommonTreeAdaptor.
        Two args:   (adaptor, tree)       - stream over tree with an explicit
                                            adaptor.
        Three args: (parent, start, stop) - slice of an existing stream's
                                            node buffer, sharing the parent's
                                            adaptor and navigation nodes.
        """
        TreeNodeStream.__init__(self)

        if len(args) == 1:
            adaptor = CommonTreeAdaptor()
            tree = args[0]

            nodes = None
            down = None
            up = None
            eof = None

        elif len(args) == 2:
            adaptor = args[0]
            tree = args[1]

            nodes = None
            down = None
            up = None
            eof = None

        elif len(args) == 3:
            parent = args[0]
            start = args[1]
            stop = args[2]

            adaptor = parent.adaptor
            tree = parent.root

            nodes = parent.nodes[start:stop]
            down = parent.down
            up = parent.up
            eof = parent.eof

        else:
            raise TypeError("Invalid arguments")

        # all these navigation nodes are shared and hence they
        # cannot contain any line/column info
        if down is not None:
            self.down = down
        else:
            self.down = adaptor.createFromType(DOWN, "DOWN")

        if up is not None:
            self.up = up
        else:
            self.up = adaptor.createFromType(UP, "UP")

        if eof is not None:
            self.eof = eof
        else:
            self.eof = adaptor.createFromType(EOF, "EOF")

        # The complete mapping from stream index to tree node.
        # This buffer includes pointers to DOWN, UP, and EOF nodes.
        # It is built upon ctor invocation. The elements are type
        # Object as we don't what the trees look like.

        # Load upon first need of the buffer so we can set token types
        # of interest for reverseIndexing. Slows us down a wee bit to
        # do all of the if p==-1 testing everywhere though.
        if nodes is not None:
            self.nodes = nodes
        else:
            self.nodes = []

        # Pull nodes from which tree?
        self.root = tree

        # IF this tree (root) was created from a token stream, track it.
        self.tokens = None

        # What tree adaptor was used to build these trees
        self.adaptor = adaptor

        # Reuse same DOWN, UP navigation nodes unless this is true
        self.uniqueNavigationNodes = False

        # The index into the nodes list of the current node (next node
        # to consume). If -1, nodes array not filled yet.
        self.p = -1

        # Track the last mark() call result value for use in rewind().
        self.lastMarker = None

        # Stack of indexes used for push/pop calls
        self.calls = []

    def fillBuffer(self):
        """Walk tree with depth-first-search and fill nodes buffer.
        Don't do DOWN, UP nodes if its a list (t is isNil).
        """
        self._fillBuffer(self.root)
        self.p = 0  # buffer of nodes intialized now

    def _fillBuffer(self, t):
        # Recursive worker for fillBuffer(); appends t and, bracketed by
        # DOWN/UP navigation nodes, all of its children.
        nil = self.adaptor.isNil(t)

        if not nil:
            self.nodes.append(t)  # add this node

        # add DOWN node if t has children
        n = self.adaptor.getChildCount(t)
        if not nil and n > 0:
            self.addNavigationNode(DOWN)

        # and now add all its children
        for c in range(n):
            self._fillBuffer(self.adaptor.getChild(t, c))

        # add UP node if t has children
        if not nil and n > 0:
            self.addNavigationNode(UP)

    def getNodeIndex(self, node):
        """What is the stream index for node? 0..n-1
        Return -1 if node not found.
        """
        if self.p == -1:
            self.fillBuffer()

        for i, t in enumerate(self.nodes):
            if t == node:
                return i

        return -1

    def addNavigationNode(self, ttype):
        """
        As we flatten the tree, we use UP, DOWN nodes to represent
        the tree structure. When debugging we need unique nodes
        so instantiate new ones when uniqueNavigationNodes is true.
        """
        navNode = None

        if ttype == DOWN:
            if self.hasUniqueNavigationNodes():
                navNode = self.adaptor.createFromType(DOWN, "DOWN")
            else:
                navNode = self.down

        else:
            if self.hasUniqueNavigationNodes():
                navNode = self.adaptor.createFromType(UP, "UP")
            else:
                navNode = self.up

        self.nodes.append(navNode)

    def get(self, i):
        """Return node at absolute buffer index i (fills buffer on demand)."""
        if self.p == -1:
            self.fillBuffer()

        return self.nodes[i]

    def LT(self, k):
        """Lookahead: node k ahead of the pointer (k<0 looks backwards)."""
        if self.p == -1:
            self.fillBuffer()

        if k == 0:
            return None

        if k < 0:
            return self.LB(-k)

        # past the end of the buffer: answer the shared EOF node
        if self.p + k - 1 >= len(self.nodes):
            return self.eof

        return self.nodes[self.p + k - 1]

    def getCurrentSymbol(self):
        return self.LT(1)

    def LB(self, k):
        """Look backwards k nodes"""
        if k == 0:
            return None

        if self.p - k < 0:
            return None

        return self.nodes[self.p - k]

    def isEOF(self, obj):
        return self.adaptor.getType(obj) == EOF

    def getTreeSource(self):
        return self.root

    def getSourceName(self):
        return self.getTokenStream().getSourceName()

    def getTokenStream(self):
        return self.tokens

    def setTokenStream(self, tokens):
        self.tokens = tokens

    def getTreeAdaptor(self):
        return self.adaptor

    def hasUniqueNavigationNodes(self):
        return self.uniqueNavigationNodes

    def setUniqueNavigationNodes(self, uniqueNavigationNodes):
        self.uniqueNavigationNodes = uniqueNavigationNodes

    def consume(self):
        """Advance the stream pointer by one node."""
        if self.p == -1:
            self.fillBuffer()

        self.p += 1

    def LA(self, i):
        """Token type of the node i ahead (see LT)."""
        return self.adaptor.getType(self.LT(i))

    def mark(self):
        """Record the current position for a later rewind(); returns it."""
        if self.p == -1:
            self.fillBuffer()

        self.lastMarker = self.index()
        return self.lastMarker

    def release(self, marker=None):
        # no resources to release
        pass

    def index(self):
        return self.p

    def rewind(self, marker=None):
        """Seek back to marker (defaults to the last mark() result)."""
        if marker is None:
            marker = self.lastMarker

        self.seek(marker)

    def seek(self, index):
        if self.p == -1:
            self.fillBuffer()

        self.p = index

    def push(self, index):
        """
        Make stream jump to a new location, saving old location.
        Switch back with pop().
        """
        self.calls.append(self.p)  # save current index
        self.seek(index)

    def pop(self):
        """
        Seek back to previous index saved during last push() call.
        Return top of stack (return index).
        """
        ret = self.calls.pop(-1)
        self.seek(ret)
        return ret

    def reset(self):
        """Act like a freshly constructed (already filled) stream."""
        self.p = 0
        self.lastMarker = 0
        self.calls = []

    def size(self):
        if self.p == -1:
            self.fillBuffer()

        return len(self.nodes)

    # TREE REWRITE INTERFACE

    def replaceChildren(self, parent, startChildIndex, stopChildIndex, t):
        # Delegates the actual tree surgery to the adaptor; no-op at root.
        if parent is not None:
            self.adaptor.replaceChildren(
                parent, startChildIndex, stopChildIndex, t
                )

    def __str__(self):
        """Used for testing, just return the token type stream"""
        if self.p == -1:
            self.fillBuffer()

        return ' '.join([str(self.adaptor.getType(node))
                         for node in self.nodes
                         ])

    def toString(self, start, stop):
        """Return the text of all nodes from start to stop, inclusive."""
        if start is None or stop is None:
            return None

        if self.p == -1:
            self.fillBuffer()

        #System.out.println("stop: "+stop);
        #if ( start instanceof CommonTree )
        # System.out.print("toString: "+((CommonTree)start).getToken()+", ");
        #else
        # System.out.println(start);

        #if ( stop instanceof CommonTree )
        # System.out.println(((CommonTree)stop).getToken());
        #else
        # System.out.println(stop);

        # if we have the token stream, use that to dump text in order
        if self.tokens is not None:
            beginTokenIndex = self.adaptor.getTokenStartIndex(start)
            endTokenIndex = self.adaptor.getTokenStopIndex(stop)

            # if it's a tree, use start/stop index from start node
            # else use token range from start/stop nodes
            if self.adaptor.getType(stop) == UP:
                endTokenIndex = self.adaptor.getTokenStopIndex(start)

            elif self.adaptor.getType(stop) == EOF:
                endTokenIndex = self.size() -2 # don't use EOF

            return self.tokens.toString(beginTokenIndex, endTokenIndex)

        # walk nodes looking for start
        i, t = 0, None
        for i, t in enumerate(self.nodes):
            if t == start:
                break

        # now walk until we see stop, filling string buffer with text
        buf = []
        t = self.nodes[i]
        while t != stop:
            # NOTE(review): getType typically returns an int here, so this
            # str + int concatenation looks suspect — confirm against callers
            # before relying on this fallback path.
            text = self.adaptor.getText(t)
            if text is None:
                text = " " + self.adaptor.getType(t)

            buf.append(text)
            i += 1
            t = self.nodes[i]

        # include stop node too
        text = self.adaptor.getText(stop)
        if text is None:
            text = " " +self.adaptor.getType(stop)

        buf.append(text)

        return ''.join(buf)

    ## iterator interface
    def __iter__(self):
        if self.p == -1:
            self.fillBuffer()

        for node in self.nodes:
            yield node
#############################################################################
#
# tree parser
#
#############################################################################
class TreeParser(BaseRecognizer):
    """@brief Baseclass for generated tree parsers.

    A parser for a stream of tree nodes. "tree grammars" result in a subclass
    of this. All the error reporting and recovery is shared with Parser via
    the BaseRecognizer superclass.
    """

    def __init__(self, input, state=None):
        BaseRecognizer.__init__(self, state)

        self.input = None
        self.setTreeNodeStream(input)

    def reset(self):
        BaseRecognizer.reset(self) # reset all recognizer state variables
        if self.input is not None:
            self.input.seek(0) # rewind the input

    def setTreeNodeStream(self, input):
        """Set the input stream"""
        self.input = input

    def getTreeNodeStream(self):
        return self.input

    def getSourceName(self):
        return self.input.getSourceName()

    def getCurrentInputSymbol(self, input):
        return input.LT(1)

    def getMissingSymbol(self, input, e, expectedTokenType, follow):
        # Conjure up a placeholder node for a token the input was missing.
        tokenText = "<missing " + self.tokenNames[expectedTokenType] + ">"
        adaptor = input.adaptor
        return adaptor.createToken(
            CommonToken(type=expectedTokenType, text=tokenText))

    # precompiled regex used by inContext
    dotdot = ".*[^.]\\.\\.[^.].*"
    doubleEtc = ".*\\.\\.\\.\\s+\\.\\.\\..*"
    dotdotPattern = re.compile(dotdot)
    doubleEtcPattern = re.compile(doubleEtc)

    def inContext(self, context, adaptor=None, tokenName=None, t=None):
        """Check if current node in input has a context.

        Context means sequence of nodes towards root of tree. For example,
        you might say context is "MULT" which means my parent must be MULT.
        "CLASS VARDEF" says current node must be child of a VARDEF and whose
        parent is a CLASS node. You can use "..." to mean zero-or-more nodes.
        "METHOD ... VARDEF" means my parent is VARDEF and somewhere above
        that is a METHOD node. The first node in the context is not
        necessarily the root. The context matcher stops matching and returns
        true when it runs out of context. There is no way to force the first
        node to be the root.

        NOTE(review): the adaptor/tokenName/t parameters are accepted but
        ignored; the call below always uses the stream's adaptor and LT(1).
        """
        return self._inContext(
            self.input.getTreeAdaptor(), self.tokenNames,
            self.input.LT(1), context)

    @classmethod
    def _inContext(cls, adaptor, tokenNames, t, context):
        """The worker for inContext.

        It's static and full of parameters for testing purposes.
        """
        if cls.dotdotPattern.match(context):
            # don't allow "..", must be "..."
            raise ValueError("invalid syntax: ..")

        if cls.doubleEtcPattern.match(context):
            # don't allow double "..."
            raise ValueError("invalid syntax: ... ...")

        # ensure spaces around ...
        context = context.replace("...", " ... ")
        context = context.strip()
        nodes = context.split()

        # match the context list right-to-left while walking up the tree
        ni = len(nodes) - 1
        t = adaptor.getParent(t)
        while ni >= 0 and t is not None:
            if nodes[ni] == "...":
                # walk upwards until we see nodes[ni-1] then continue walking
                if ni == 0:
                    # ... at start is no-op
                    return True

                goal = nodes[ni-1]
                ancestor = cls._getAncestor(adaptor, tokenNames, t, goal)
                if ancestor is None:
                    return False

                t = ancestor
                ni -= 1

            name = tokenNames[adaptor.getType(t)]
            if name != nodes[ni]:
                return False

            # advance to parent and to previous element in context node list
            ni -= 1
            t = adaptor.getParent(t)

        # at root but more nodes to match
        if t is None and ni >= 0:
            return False

        return True

    @staticmethod
    def _getAncestor(adaptor, tokenNames, t, goal):
        """Helper for static inContext.

        Walk upwards from t, returning the first ancestor whose token name
        equals goal, or None if the root is reached first.
        """
        while t is not None:
            name = tokenNames[adaptor.getType(t)]
            if name == goal:
                return t

            t = adaptor.getParent(t)

        return None

    def matchAny(self):
        """
        Match '.' in tree parser has special meaning. Skip node or
        entire tree if node has children. If children, scan until
        corresponding UP node.
        """
        self._state.errorRecovery = False

        look = self.input.LT(1)
        if self.input.getTreeAdaptor().getChildCount(look) == 0:
            self.input.consume() # not subtree, consume 1 node and return
            return

        # current node is a subtree, skip to corresponding UP.
        # must count nesting level to get right UP
        level = 0
        tokenType = self.input.getTreeAdaptor().getType(look)
        while tokenType != EOF and not (tokenType == UP and level==0):
            self.input.consume()
            look = self.input.LT(1)
            tokenType = self.input.getTreeAdaptor().getType(look)
            if tokenType == DOWN:
                level += 1

            elif tokenType == UP:
                level -= 1

        self.input.consume() # consume UP

    def mismatch(self, input, ttype, follow):
        """
        We have DOWN/UP nodes in the stream that have no line info; override.
        plus we want to alter the exception type. Don't try to recover
        from tree parser errors inline...
        """
        raise MismatchedTreeNodeException(ttype, input)

    def getErrorHeader(self, e):
        """
        Prefix error message with the grammar name because message is
        always intended for the programmer because the parser built
        the input tree not the user.
        """
        return (self.getGrammarFileName() +
                ": node from {}line {}:{}".format(
                    "after " if e.approximateLineInfo else '',
                    e.line,
                    e.charPositionInLine))

    def getErrorMessage(self, e):
        """
        Tree parsers parse nodes they usually have a token object as
        payload. Set the exception token and do the default behavior.
        """
        # isinstance check is always true inside this class; retained as-is
        # from the original runtime so subclass behavior is unchanged.
        if isinstance(self, TreeParser):
            adaptor = e.input.getTreeAdaptor()
            e.token = adaptor.getToken(e.node)
            if e.token is not None: # could be an UP/DOWN node
                e.token = CommonToken(
                    type=adaptor.getType(e.node),
                    text=adaptor.getText(e.node)
                    )

        return BaseRecognizer.getErrorMessage(self, e)

    def traceIn(self, ruleName, ruleIndex):
        BaseRecognizer.traceIn(self, ruleName, ruleIndex, self.input.LT(1))

    def traceOut(self, ruleName, ruleIndex):
        BaseRecognizer.traceOut(self, ruleName, ruleIndex, self.input.LT(1))
#############################################################################
#
# tree visitor
#
#############################################################################
class TreeVisitor(object):
    """Do a depth first walk of a tree, applying pre() and post() actions
    we go.
    """

    def __init__(self, adaptor=None):
        # Fall back to the stock adaptor when the caller supplies none.
        self.adaptor = adaptor if adaptor is not None else CommonTreeAdaptor()

    def visit(self, t, pre_action=None, post_action=None):
        """Visit every node in tree t and trigger an action for each node
        before/after having visited all of its children. Bottom up walk.
        Execute both actions even if t has no children. Ignore return
        results from transforming children since they will have altered
        the child list of this node (their parent). Return result of
        applying post action to this node.

        The Python version differs from the Java version by taking two
        callables 'pre_action' and 'post_action' instead of a class instance
        that wraps those methods. Those callables must accept a TreeNode as
        their single argument and return the (potentially transformed or
        replaced) TreeNode.
        """
        nil_node = self.adaptor.isNil(t)

        if not nil_node and pre_action is not None:
            # if rewritten, walk children of new t
            t = pre_action(t)

        child_slot = 0
        while child_slot < self.adaptor.getChildCount(t):
            self.visit(self.adaptor.getChild(t, child_slot),
                       pre_action, post_action)
            child_slot += 1

        if not nil_node and post_action is not None:
            t = post_action(t)

        return t
#############################################################################
#
# tree iterator
#
#############################################################################
class TreeIterator(object):
    """
    Return a node stream from a doubly-linked tree whose nodes
    know what child index they are.

    Emit navigation nodes (DOWN, UP, and EOF) to let show tree structure.
    """

    def __init__(self, tree, adaptor=None):
        if adaptor is None:
            adaptor = CommonTreeAdaptor()

        self.root = tree
        self.adaptor = adaptor

        self.first_time = True

        # current position in the walk
        self.tree = tree

        # If we emit UP/DOWN nodes, we need to spit out multiple nodes per
        # next() call.
        self.nodes = []

        # navigation nodes to return during walk and at end
        self.down = adaptor.createFromType(DOWN, "DOWN")
        self.up = adaptor.createFromType(UP, "UP")
        self.eof = adaptor.createFromType(EOF, "EOF")

    def reset(self):
        """Restart the iteration from the root."""
        self.first_time = True
        self.tree = self.root
        self.nodes = []

    def __iter__(self):
        return self

    def has_next(self):
        """True if __next__ would yield another node rather than raising."""
        if self.first_time:
            return self.root is not None

        if len(self.nodes) > 0:
            return True

        if self.tree is None:
            return False

        if self.adaptor.getChildCount(self.tree) > 0:
            return True

        # back at root?
        return self.adaptor.getParent(self.tree) is not None

    def __next__(self):
        """Yield the next real or navigation (DOWN/UP/EOF) node."""
        if not self.has_next():
            raise StopIteration

        if self.first_time:
            # initial condition
            self.first_time = False
            if self.adaptor.getChildCount(self.tree) == 0:
                # single node tree (special)
                self.nodes.append(self.eof)
                return self.tree

            return self.tree

        # if any queued up, use those first
        if len(self.nodes) > 0:
            return self.nodes.pop(0)

        # no nodes left?
        if self.tree is None:
            return self.eof

        # next node will be child 0 if any children
        if self.adaptor.getChildCount(self.tree) > 0:
            self.tree = self.adaptor.getChild(self.tree, 0)

            # real node is next after DOWN
            self.nodes.append(self.tree)
            return self.down

        # if no children, look for next sibling of tree or ancestor
        parent = self.adaptor.getParent(self.tree)

        # while we're out of siblings, keep popping back up towards root
        while (parent is not None
               and self.adaptor.getChildIndex(self.tree)+1 >= self.adaptor.getChildCount(parent)):
            # we're moving back up
            self.nodes.append(self.up)
            self.tree = parent
            parent = self.adaptor.getParent(self.tree)

        # no nodes left?
        if parent is None:
            self.tree = None # back at root? nothing left then
            self.nodes.append(self.eof) # add to queue, might have UP nodes in there
            return self.nodes.pop(0)

        # must have found a node with an unvisited sibling
        # move to it and return it
        nextSiblingIndex = self.adaptor.getChildIndex(self.tree) + 1
        self.tree = self.adaptor.getChild(parent, nextSiblingIndex)

        self.nodes.append(self.tree) # add to queue, might have UP nodes in there
        return self.nodes.pop(0)
#############################################################################
#
# streams for rule rewriting
#
#############################################################################
class RewriteRuleElementStream(object):
    """@brief Internal helper class.

    A generic list of elements tracked in an alternative to be used in
    a -> rewrite rule. We need to subclass to fill in the next() method,
    which returns either an AST node wrapped around a token payload or
    an existing subtree.

    Once you start next()ing, do not try to add more elements. It will
    break the cursor tracking I believe.

    @see org.antlr.runtime.tree.RewriteRuleSubtreeStream
    @see org.antlr.runtime.tree.RewriteRuleTokenStream

    TODO: add mechanism to detect/puke on modification after reading from
    stream
    """

    def __init__(self, adaptor, elementDescription, elements=None):
        # Cursor 0..n-1. If singleElement!=null, cursor is 0 until you
        # next(), which bumps it to 1 meaning no more elements.
        self.cursor = 0

        # Track single elements w/o creating a list. Upon 2nd add, alloc list
        self.singleElement = None

        # The list of tokens or subtrees we are tracking
        self.elements = None

        # Once a node / subtree has been used in a stream, it must be dup'd
        # from then on. Streams are reset after subrules so that the streams
        # can be reused in future subrules. So, reset must set a dirty bit.
        # If dirty, then next() always returns a dup.
        self.dirty = False

        # The element or stream description; usually has name of the token or
        # rule reference that this list tracks. Can include rulename too, but
        # the exception would track that info.
        self.elementDescription = elementDescription

        self.adaptor = adaptor

        if not isinstance(elements, (list, tuple)):
            # Create a stream with one element (or none, when elements is None)
            self.add(elements)
        else:
            # Create a stream, but feed off an existing list
            self.singleElement = None
            self.elements = elements

    def reset(self):
        """
        Reset the condition of this stream so that it appears we have
        not consumed any of its elements. Elements themselves are untouched.

        Once we reset the stream, any future use will need duplicates. Set
        the dirty bit.
        """
        self.cursor = 0
        self.dirty = True

    def add(self, el):
        """Track one more element; promote to a list on the second add."""
        if el is None:
            return

        if self.elements is not None:  # if in list, just add
            self.elements.append(el)
        elif self.singleElement is None:  # no elements yet, track w/o list
            self.singleElement = el
        else:
            # adding 2nd element, move to list
            first = self.singleElement
            self.singleElement = None
            self.elements = [first, el]

    def nextTree(self):
        """
        Return the next element in the stream. If out of elements, throw
        an exception unless size()==1. If size is 1, then return elements[0].

        Return a duplicate node/subtree if stream is out of elements and
        size==1. If we've already used the element, dup (dirty bit set).
        """
        exhausted_single = (self.cursor >= len(self) and len(self) == 1)
        if self.dirty or exhausted_single:
            # if out of elements and size is 1, dup
            return self.dup(self._next())

        # test size above then fetch
        return self._next()

    def _next(self):
        """
        do the work of getting the next element, making sure that it's
        a tree node or subtree. Deal with the optimization of single-
        element list versus list of size > 1. Throw an exception
        if the stream is empty or we're out of elements and size>1.
        protected so you can override in a subclass if necessary.
        """
        size = len(self)
        if size == 0:
            raise RewriteEmptyStreamException(self.elementDescription)

        if self.cursor >= size:  # out of elements?
            if size == 1:  # if size is 1, it's ok; return and we'll dup
                return self.toTree(self.singleElement)

            # out of elements and size was not 1, so we can't dup
            raise RewriteCardinalityException(self.elementDescription)

        # we have elements
        if self.singleElement is not None:
            self.cursor += 1  # move cursor even for single element list
            return self.toTree(self.singleElement)

        # must have more than one in list, pull from elements
        result = self.toTree(self.elements[self.cursor])
        self.cursor += 1
        return result

    def dup(self, el):
        """
        When constructing trees, sometimes we need to dup a token or AST
        subtree. Dup'ing a token means just creating another AST node
        around it. For trees, you must call the adaptor.dupTree() unless
        the element is for a tree root; then it must be a node dup.
        """
        raise NotImplementedError

    def toTree(self, el):
        """
        Ensure stream emits trees; tokens must be converted to AST nodes.
        AST nodes can be passed through unmolested.
        """
        return el

    def hasNext(self):
        """True while unconsumed elements remain."""
        if self.singleElement is not None and self.cursor < 1:
            return True

        return (self.elements is not None
                and self.cursor < len(self.elements))

    def size(self):
        """Number of tracked elements (0, 1, or len(list))."""
        if self.singleElement is not None:
            return 1

        if self.elements is not None:
            return len(self.elements)

        return 0

    __len__ = size

    def getDescription(self):
        """Deprecated. Directly access elementDescription attribute"""
        return self.elementDescription
class RewriteRuleTokenStream(RewriteRuleElementStream):
    """@brief Internal helper class.

    Element stream whose payloads are tokens.  Conversion to tree nodes is
    deferred until nextNode() so heterogeneous tree nodes can be created
    during rewrites.
    """

    def toTree(self, el):
        # Don't convert to a tree unless they explicitly call nextTree;
        # this way we can do hetero tree nodes in rewrite.
        return el

    def nextNode(self):
        """Wrap the next token in an AST node via the adaptor."""
        return self.adaptor.createWithPayload(self._next())

    def nextToken(self):
        """Return the next raw token unchanged."""
        return self._next()

    def dup(self, el):
        # token streams never duplicate their payloads
        raise TypeError("dup can't be called for a token stream.")
class RewriteRuleSubtreeStream(RewriteRuleElementStream):
    """@brief Internal helper class.

    Element stream whose payloads are subtrees matched on the left side of
    a rewrite rule.
    """

    def nextNode(self):
        """
        Treat the next element as a single root node even if it's a subtree.

        Used instead of next() when the result has to be a tree root node;
        also prevents duplicating recently-added children (e.g. ^(type ID)+
        adds ID to type, so the 2nd iteration must dup the type node only).
        Mirrors the dirty/singleton logic of nextTree(), but the duplication
        here is always a node dup rather than a tree dup -- which is why the
        logic cannot simply be shared with the superclass.
        """
        exhausted_single = self.cursor >= len(self) and len(self) == 1
        if self.dirty or exhausted_single:
            # out of elements with size 1 (or already consumed): dup -- at
            # most a single node since this is for making root nodes
            return self.adaptor.dupNode(self._next())

        node = self._next()
        # drill through nil nodes that merely wrap a lone child
        while self.adaptor.isNil(node) and self.adaptor.getChildCount(node) == 1:
            node = self.adaptor.getChild(node, 0)

        # dup just the root (we want a node here, not the whole tree)
        return self.adaptor.dupNode(node)

    def dup(self, el):
        """Duplicate the entire subtree."""
        return self.adaptor.dupTree(el)
class RewriteRuleNodeStream(RewriteRuleElementStream):
    """
    Queues up nodes matched on left side of -> in a tree parser. This is
    the analog of RewriteRuleTokenStream for normal parsers.
    """

    def nextNode(self):
        """Return the next node (already duplicated by toTree)."""
        return self._next()

    def toTree(self, el):
        """Every node handed out is a fresh duplicate of the payload."""
        return self.adaptor.dupNode(el)

    def dup(self, el):
        # toTree() already dups every node, so the superclass's dirty-path
        # dup must never be reached for node streams
        raise TypeError("dup can't be called for a node stream.")
class TreeRuleReturnScope(RuleReturnScope):
    """
    Identical to ParserRuleReturnScope except that the start property is a
    tree node rather than a Token object when parsing trees.  To stay
    generic the tree node type has to be Object.
    """

    def __init__(self):
        super().__init__()
        # first node matched for this rule
        self.start = None
        # AST produced for this rule
        self.tree = None

    def getStart(self):
        """Return the first node matched by this rule."""
        return self.start

    def getTree(self):
        """Return the AST built for this rule."""
        return self.tree
|
openstack/congress
|
thirdparty/antlr3-antlr-3.5/runtime/Python3/antlr3/tree.py
|
Python
|
apache-2.0
| 81,112
|
[
"VisIt"
] |
ad35412ebdd766e380aa9f8602ba3909daa759983ed5651ae8325a938bb3da24
|
########################################################################
#
# (C) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
''' This manages remote shared Ansible objects, mainly roles'''
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.compat.six import string_types
from ansible.errors import AnsibleError
from ansible.utils.display import Display
# default_readme_template
# default_meta_template
class Galaxy(object):
    ''' Keeps global galaxy info '''

    def __init__(self, options, display=None):
        """
        :param options: parsed CLI options; ``roles_path`` may be either a
            single os.pathsep-delimited string or an already-split list.
        :param display: optional Display instance; a fresh one is created
            when not supplied.
        """
        if display is None:
            self.display = Display()
        else:
            self.display = display

        self.options = options
        roles_paths = getattr(self.options, 'roles_path', [])
        if isinstance(roles_paths, string_types):
            # a single pathsep-delimited string of search paths
            self.roles_paths = [os.path.expanduser(roles_path) for roles_path in roles_paths.split(os.pathsep)]
        else:
            # BUGFIX: previously self.roles_paths was never assigned when
            # roles_path was already a list, causing an AttributeError on
            # later access; expand '~' in each entry for consistency.
            self.roles_paths = [os.path.expanduser(roles_path) for roles_path in roles_paths]

        self.roles = {}

        # load data path for resource usage
        this_dir, this_filename = os.path.split(__file__)
        self.DATA_PATH = os.path.join(this_dir, "data")

        #TODO: move to getter for lazy loading
        self.default_readme = self._str_from_data_file('readme')
        self.default_meta = self._str_from_data_file('metadata_template.j2')

    def add_role(self, role):
        """Register a role object under its name."""
        self.roles[role.name] = role

    def remove_role(self, role_name):
        """Forget a previously added role."""
        del self.roles[role_name]

    def _str_from_data_file(self, filename):
        """Return the contents of a bundled data file as a string.

        :raises AnsibleError: if the file cannot be read.
        """
        myfile = os.path.join(self.DATA_PATH, filename)
        try:
            return open(myfile).read()
        except Exception as e:
            raise AnsibleError("Could not open %s: %s" % (filename, str(e)))
|
opoplawski/ansible
|
lib/ansible/galaxy/__init__.py
|
Python
|
gpl-3.0
| 2,460
|
[
"Brian",
"Galaxy"
] |
5fffc52ca3491f5ffcb27ad583ceaec658c7ed7b54a01d983332e94b6d9615e6
|
"""
test MNW2 package
"""
import sys
sys.path.insert(0, '..')
import os
import flopy
import numpy as np
from flopy.utils.flopy_io import line_parse
# scratch directory for files generated by these tests
cpth = os.path.join('temp', 't027')
if not os.path.isdir(cpth):
    # make the directory if it does not exist
    os.makedirs(cpth)

# location of the shipped MNW2 example datasets
mf2005pth = os.path.join('..', 'examples', 'data', 'mnw2_examples')
def test_line_parse():
    """t027 test line_parse method in MNW2 Package class"""
    # everything after the ';' is a comment and must be dropped
    assert line_parse('Well-A -1 ; 2a. WELLID,NNODES') == ['Well-A', '-1']
def test_load():
    """t027 test load of MNW2 Package"""
    # load in the test problem (1 well, 3 stress periods)
    m = flopy.modflow.Modflow.load('MNW2-Fig28.nam', model_ws=mf2005pth,
                                   verbose=True, forgive=False)
    m.change_model_ws(cpth)
    packages = m.get_package_list()
    assert 'MNW2' in packages
    assert 'MNWI' in packages

    # load a real mnw2 package from a steady state model (multiple wells)
    m2 = flopy.modflow.Modflow('br', model_ws=cpth)
    path = os.path.join('..', 'examples', 'data', 'mnw2_examples')
    mnw2_2 = flopy.modflow.ModflowMnw2.load(path + '/BadRiver_cal.mnw2', m2)
    mnw2_2.write_file(os.path.join(cpth, 'brtest.mnw2'))

    # round-trip: read back what was just written
    m3 = flopy.modflow.Modflow('br', model_ws=cpth)
    mnw2_3 = flopy.modflow.ModflowMnw2.load(cpth + '/brtest.mnw2', m3)

    # node data must survive the round trip exactly
    mnw2_2.node_data.sort(order='wellid')
    mnw2_3.node_data.sort(order='wellid')
    assert np.array_equal(mnw2_2.node_data, mnw2_3.node_data)

    # pumping rates may differ only by output formatting precision
    diff = (mnw2_2.stress_period_data[0].qdes
            - mnw2_3.stress_period_data[0].qdes)
    assert diff.max() < 0.01
    assert np.abs(diff).min() < 0.01
def test_make_package():
    """t027 test make MNW2 Package"""
    m4 = flopy.modflow.Modflow('mnw2example', model_ws=cpth)
    dis = flopy.modflow.ModflowDis(nrow=5, ncol=5, nlay=3, nper=3, top=10,
                                   botm=0, model=m4)

    # make the package from the tables (ztop, zbotm format)
    # recarray columns: index, i, j, ztop, zbotm, wellid, losstype, pumploc,
    # qlimit, ppflag, pumpcap, rw, rskin, kskin, zpump
    node_data = np.array(
        [(0, 1, 1, 9.5, 7.1, 'well1', 'skin', -1, 0, 0, 0, 1.0, 2.0, 5.0, 6.2),
         (1, 1, 1, 7.1, 5.1, 'well1', 'skin', -1, 0, 0, 0, 0.5, 2.0, 5.0, 6.2),
         (
         2, 3, 3, 9.1, 3.7, 'well2', 'skin', -1, 0, 0, 0, 1.0, 2.0, 5.0, 4.1)],
        dtype=[('index', '<i8'), ('i', '<i8'), ('j', '<i8'),
               ('ztop', '<f8'), ('zbotm', '<f8'),
               ('wellid', 'O'), ('losstype', 'O'), ('pumploc', '<i8'),
               ('qlimit', '<i8'), ('ppflag', '<i8'), ('pumpcap', '<i8'),
               ('rw', '<f8'), ('rskin', '<f8'), ('kskin', '<f8'),
               ('zpump', '<f8')]).view(np.recarray)

    # per-stress-period pumping data keyed by period number
    stress_period_data = {0: np.array([(0, 0, 'well1', 0), (1, 0, 'well2', 0)],
                                      dtype=[('index', '<i8'), ('per', '<i8'),
                                             ('wellid', 'O'),
                                             ('qdes', '<i8')]).view(
        np.recarray),
                          1: np.array(
                              [(2, 1, 'well1', 100), (3, 1, 'well2', 1000)],
                              dtype=[('index', '<i8'), ('per', '<i8'),
                                     ('wellid', 'O'), ('qdes', '<i8')]).view(
                              np.recarray)}
    mnw2_4 = flopy.modflow.ModflowMnw2(model=m4, mnwmax=2, nodtot=3,
                                       node_data=node_data,
                                       stress_period_data=stress_period_data,
                                       itmp=[2, 2, -1],
                                       # reuse second per pumping for last stress period
                                       )
    m4.write_input()

    # make the package from the tables (k, i, j format)
    node_data = np.array(
        [(0, 3, 1, 1, 'well1', 'skin', -1, 0, 0, 0, 1.0, 2.0, 5.0, 6.2),
         (1, 2, 1, 1, 'well1', 'skin', -1, 0, 0, 0, 0.5, 2.0, 5.0, 6.2),
         (2, 1, 3, 3, 'well2', 'skin', -1, 0, 0, 0, 1.0, 2.0, 5.0, 4.1)],
        dtype=[('index', '<i8'), ('k', '<i8'), ('i', '<i8'), ('j', '<i8'),
               ('wellid', 'O'), ('losstype', 'O'), ('pumploc', '<i8'),
               ('qlimit', '<i8'), ('ppflag', '<i8'), ('pumpcap', '<i8'),
               ('rw', '<f8'), ('rskin', '<f8'), ('kskin', '<f8'),
               ('zpump', '<f8')]).view(np.recarray)
    stress_period_data = {0: np.array([(0, 0, 'well1', 0), (1, 0, 'well2', 0)],
                                      dtype=[('index', '<i8'), ('per', '<i8'),
                                             ('wellid', 'O'),
                                             ('qdes', '<i8')]).view(
        np.recarray),
                          1: np.array(
                              [(2, 1, 'well1', 100), (3, 1, 'well2', 1000)],
                              dtype=[('index', '<i8'), ('per', '<i8'),
                                     ('wellid', 'O'), ('qdes', '<i8')]).view(
                              np.recarray)}
    mnw2_4 = flopy.modflow.ModflowMnw2(model=m4, mnwmax=2, nodtot=3,
                                       node_data=node_data,
                                       stress_period_data=stress_period_data,
                                       itmp=[2, 2, -1],
                                       # reuse second per pumping for last stress period
                                       )
    # the (k, i, j) locations must round-trip through the package
    spd = m4.mnw2.stress_period_data[0]
    inds = spd.k, spd.i, spd.j
    assert np.array_equal(np.array(inds).transpose(),
                          np.array([(2, 1, 1), (1, 3, 3)]))
    m4.write_input()

    # make the package from the objects
    mnw2fromobj = flopy.modflow.ModflowMnw2(model=m4, mnwmax=2,
                                            mnw=mnw2_4.mnw,
                                            itmp=[2, 2, -1],
                                            # reuse second per pumping for last stress period
                                            )
    # verify that the two input methods produce the same results
    assert np.array_equal(mnw2_4.stress_period_data[1],
                          mnw2fromobj.stress_period_data[1])
def test_export():
    """t027 test export of MNW2 Package to netcdf files"""
    try:
        import netCDF4
    except ImportError:
        # BUGFIX: was a bare 'except:' which also swallows SystemExit and
        # KeyboardInterrupt; netCDF4 is optional, so only ImportError is
        # expected here and the netCDF assertions below are skipped.
        netCDF4 = None
    m = flopy.modflow.Modflow.load('MNW2-Fig28.nam', model_ws=mf2005pth,
                                   load_only=['dis', 'bas6', 'mnwi', 'mnw2',
                                              'wel'], verbose=True,
                                   check=False)
    # netDF4 tests
    if netCDF4 is not None:
        m.wel.export(os.path.join(cpth, 'MNW2-Fig28_well.nc'))
        m.mnw2.export(os.path.join(cpth, 'MNW2-Fig28.nc'))
        fpth = os.path.join(cpth, 'MNW2-Fig28.nc')
        nc = netCDF4.Dataset(fpth)
        # spot-check exported qdes and rw values at one cell
        assert np.array_equal(nc.variables['mnw2_qdes'][:, 0, 29, 40],
                              np.array([0., -10000., -10000.], dtype='float32'))
        assert np.sum(nc.variables['mnw2_rw'][:, :, 29, 40]) - 5.1987 < 1e-4
    # need to add shapefile test
def test_checks():
    """t027 test MNW2 Package checks in FloPy"""
    # deliberately load MNWI without MNW2 to trigger the check warning
    model = flopy.modflow.Modflow.load('MNW2-Fig28.nam', model_ws=mf2005pth,
                                       load_only=['dis', 'bas6', 'mnwi', 'wel'],
                                       verbose=True, check=False)
    result = model.check()
    descriptions = '.'.join(result.summary_array.desc)
    assert 'MNWI package present without MNW2 package.' in descriptions
if __name__ == '__main__':
    # run an individual test while debugging; uncomment as needed
    #test_line_parse()
    #test_load()
    #test_make_package()
    test_export()
    #test_checks()
    pass
|
bdestombe/flopy-1
|
autotest/t027_test.py
|
Python
|
bsd-3-clause
| 7,694
|
[
"NetCDF"
] |
6d731c06059e4467b52cc3fb4f2684d3b248d3206d07de85fa44dc2e2e9dc2be
|
# Copyright (c) 2016 GeoSpark
#
# Released under the MIT License (MIT)
# See the LICENSE file, or visit http://opensource.org/licenses/MIT
# Inspired by: https://thoughtsbyclayg.blogspot.co.uk/2008/10/parsing-list-of-numbers-in-python.html
# return a set of selected values when a string in the form:
# 1-4,6
# would return:
# 1,2,3,4,6
# as expected...
def parse_disjoint_range(range_string):
    """
    Expand a comma-separated selection such as ``"1-4,6"`` into the sorted
    list ``[1, 2, 3, 4, 6]``.

    Each comma-separated token is either a single integer or a ``lo-hi``
    range (endpoints inclusive, either order accepted).  Tokens that fit
    neither form are collected instead of raising.

    Returns a ``(sorted_values, invalid_tokens)`` tuple where
    ``invalid_tokens`` is the set of unparseable token strings.
    """
    chosen = set()
    bad_tokens = set()

    for token in (part.strip() for part in range_string.split(',')):
        try:
            # plain integer token
            chosen.add(int(token))
            continue
        except ValueError:
            pass
        try:
            endpoints = [int(piece) for piece in token.split('-')]
            if len(endpoints) != 2:
                raise ValueError
            lo, hi = sorted(endpoints)
            chosen |= set(range(lo, hi + 1))
        except ValueError:
            bad_tokens.add(token)

    return sorted(chosen), bad_tokens
|
GeoSpark/ILI9341-font-packer
|
src/range_parser.py
|
Python
|
mit
| 992
|
[
"VisIt"
] |
ad7e4134f9ebe96312f7006dc4cfccda7b65923efac8df4fda990b8dfc5fbf2e
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
import warnings
from pymatgen.analysis.ewald import EwaldSummation, EwaldMinimizer
from pymatgen.io.vasp.inputs import Poscar
import numpy as np
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class EwaldSummationTest(unittest.TestCase):
    """Checks EwaldSummation energies and forces against reference values."""

    def setUp(self):
        # silence deprecation/user warnings during the tests
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.simplefilter("default")

    def test_init(self):
        # LiFePO4 test structure; oxidation states decorated manually
        filepath = os.path.join(test_dir, 'POSCAR')
        p = Poscar.from_file(filepath, check_for_POTCAR=False)
        original_s = p.structure
        s = original_s.copy()
        s.add_oxidation_state_by_element({"Li": 1, "Fe": 2,
                                          "P": 5, "O": -2})
        ham = EwaldSummation(s, compute_forces=True)
        # reference energies for the decorated structure
        self.assertAlmostEqual(ham.real_space_energy, -502.23549897772602, 4)
        self.assertAlmostEqual(ham.reciprocal_space_energy, 6.1541071599534654, 4)
        self.assertAlmostEqual(ham.point_energy, -620.22598358035918, 4)
        self.assertAlmostEqual(ham.total_energy, -1123.00766, 1)
        self.assertAlmostEqual(ham.forces[0, 0], -1.98818620e-01, 4)
        self.assertAlmostEqual(sum(sum(abs(ham.forces))), 915.925354346, 4,
                               "Forces incorrect")
        # per-site matrices must sum to the corresponding totals
        self.assertAlmostEqual(sum(sum(ham.real_space_energy_matrix)),
                               ham.real_space_energy, 4)
        self.assertAlmostEqual(sum(sum(ham.reciprocal_space_energy_matrix)),
                               ham.reciprocal_space_energy, 4)
        self.assertAlmostEqual(sum(ham.point_energy_matrix), ham.point_energy,
                               4)
        self.assertAlmostEqual(sum(sum(ham.total_energy_matrix)) + ham._charged_cell_energy,
                               ham.total_energy, 2)
        # a structure without oxidation states must be rejected
        self.assertRaises(ValueError, EwaldSummation, original_s)

        # try sites with charge.
        charges = []
        for site in original_s:
            if site.specie.symbol == "Li":
                charges.append(1)
            elif site.specie.symbol == "Fe":
                charges.append(2)
            elif site.specie.symbol == "P":
                charges.append(5)
            else:
                charges.append(-2)

        original_s.add_site_property('charge', charges)
        ham2 = EwaldSummation(original_s)
        self.assertAlmostEqual(ham2.real_space_energy, -502.23549897772602, 4)
class EwaldMinimizerTest(unittest.TestCase):
    """Checks EwaldMinimizer permutation search and per-site energies."""

    def setUp(self):
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.simplefilter("default")

    def test_init(self):
        # random interaction matrix with two manipulation groups
        matrix = np.array([[-3., 3., 4., -0., 3., 3., 1., 14., 9., -4.],
                           [1., -3., -3., 12., -4., -1., 5., 11., 1., 12.],
                           [14., 7., 13., 15., 13., 5., -5., 10., 14., -2.],
                           [9., 13., 4., 1., 3., -4., 7., 0., 6., -4.],
                           [4., -4., 6., 1., 12., -4., -2., 13., 0., 6.],
                           [13., 7., -4., 12., -2., 9., 8., -5., 3., 1.],
                           [8., 1., 10., -4., -2., 4., 13., 12., -3., 13.],
                           [2., 11., 8., 1., -1., 5., -3., 4., 5., 0.],
                           [-0., 14., 4., 3., -1., -5., 7., -1., -1., 3.],
                           [2., -2., 10., 1., 6., -5., -3., 12., 0., 13.]])

        m_list = [[.9, 4, [1, 2, 3, 4, 8], 'a'], [-1, 2, [5, 6, 7], 'b']]

        e_min = EwaldMinimizer(matrix, m_list, 50)

        self.assertEqual(len(e_min.output_lists), 15,
                         "Wrong number of permutations returned")
        self.assertAlmostEqual(e_min.minimized_sum, 111.63, 3,
                               "Returned wrong minimum value")
        self.assertEqual(len(e_min.best_m_list), 6,
                         "Returned wrong number of permutations")

    def test_site(self):
        """Test that uses an uncharged structure"""
        filepath = os.path.join(test_dir, 'POSCAR')
        p = Poscar.from_file(filepath, check_for_POTCAR=False)
        original_s = p.structure
        s = original_s.copy()
        s.add_oxidation_state_by_element({"Li": 1, "Fe": 3,
                                          "P": 5, "O": -2})

        # Comparison to LAMMPS result
        ham = EwaldSummation(s, compute_forces=True)
        # BUGFIX: assertAlmostEquals is a deprecated alias (removed in
        # Python 3.12); use the canonical assertAlmostEqual.
        self.assertAlmostEqual(-1226.3335, ham.total_energy, 3)
        self.assertAlmostEqual(-45.8338, ham.get_site_energy(0), 3)
        self.assertAlmostEqual(-27.2978, ham.get_site_energy(8), 3)
if __name__ == "__main__":
unittest.main()
|
gVallverdu/pymatgen
|
pymatgen/analysis/tests/test_ewald.py
|
Python
|
mit
| 4,740
|
[
"LAMMPS",
"VASP",
"pymatgen"
] |
6b8d58e0407317e24ca7b8e7e1d1e64084fa216d33af5c76a3ec8a0b9473d382
|
## Copyright 2015-2017 Tom Brown (FIAS), Jonas Hoersch (FIAS)
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Functions for importing and exporting data.
"""
# make the code as Python 3 compatible as possible
from __future__ import division, absolute_import
from six import iteritems, iterkeys, string_types
from six.moves import filter, range
__author__ = "Tom Brown (FIAS), Jonas Hoersch (FIAS)"
__copyright__ = "Copyright 2015-2017 Tom Brown (FIAS), Jonas Hoersch (FIAS), GNU GPL 3"
import logging
logger = logging.getLogger(__name__)
import os
from textwrap import dedent
from glob import glob
import pandas as pd
import pypsa
import numpy as np
import math
try:
import xarray as xr
has_xarray = True
except ImportError:
has_xarray = False
class ImpExper(object):
    """Common context-manager scaffolding for importers and exporters.

    Subclasses may bind ``ds`` to an underlying store (HDFStore, xarray
    Dataset, ...) whose context is entered and exited alongside this
    object.
    """

    # underlying data store, entered/exited with this object when set
    ds = None

    def __enter__(self):
        if self.ds is not None:
            self.ds = self.ds.__enter__()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        clean_exit = exc_type is None
        if clean_exit:
            # only finalize (e.g. flush to disk) on a clean exit
            self.finish()
        if self.ds is not None:
            self.ds.__exit__(exc_type, exc_val, exc_tb)

    def finish(self):
        """Hook for subclasses to flush/close on successful exit."""
        pass
class Exporter(ImpExper):
    """Base class for network exporters; removal hooks default to no-ops."""

    def remove_static(self, list_name):
        """Delete stale static data for ``list_name`` (no-op by default)."""
        pass

    def remove_series(self, list_name, attr):
        """Delete stale series data for ``list_name``/``attr`` (no-op)."""
        pass
class Importer(ImpExper):
    # marker base class for the concrete CSV/HDF5/netCDF importers below
    pass
class ImporterCSV(Importer):
    """Read network data from a folder of CSV files."""

    def __init__(self, csv_folder_name, encoding):
        self.csv_folder_name = csv_folder_name
        self.encoding = encoding

        assert os.path.isdir(csv_folder_name), "Directory {} does not exist.".format(csv_folder_name)

    def _path(self, name):
        """Absolute path of a CSV file inside the folder."""
        return os.path.join(self.csv_folder_name, name)

    def get_attributes(self):
        """Return the one-row network attributes dict, or None if absent."""
        fn = self._path("network.csv")
        if not os.path.isfile(fn):
            return None
        return dict(pd.read_csv(fn, encoding=self.encoding).iloc[0])

    def get_snapshots(self):
        """Return the snapshots frame, or None if absent."""
        fn = self._path("snapshots.csv")
        if not os.path.isfile(fn):
            return None
        return pd.read_csv(fn, index_col=0, encoding=self.encoding, parse_dates=True)

    def get_static(self, list_name):
        """Return static component data for ``list_name``, or None."""
        fn = self._path(list_name + ".csv")
        if not os.path.isfile(fn):
            return None
        return pd.read_csv(fn, index_col=0, encoding=self.encoding)

    def get_series(self, list_name):
        """Yield (attr, frame) pairs from files named ``list_name-attr.csv``."""
        prefix = list_name + "-"
        for fn in os.listdir(self.csv_folder_name):
            if fn.startswith(prefix) and fn.endswith(".csv"):
                attr = fn[len(prefix):-4]
                df = pd.read_csv(self._path(fn), index_col=0,
                                 encoding=self.encoding, parse_dates=True)
                yield attr, df
class ExporterCSV(Exporter):
    """Write network data to a folder of CSV files."""

    def __init__(self, csv_folder_name, encoding):
        self.csv_folder_name = csv_folder_name
        self.encoding = encoding

        #make sure directory exists
        if not os.path.isdir(csv_folder_name):
            logger.warning("Directory {} does not exist, creating it"
                           .format(csv_folder_name))
            os.mkdir(csv_folder_name)

    def _path(self, name):
        """Absolute path of a CSV file inside the folder."""
        return os.path.join(self.csv_folder_name, name)

    def save_attributes(self, attrs):
        """Persist scalar network attributes as a one-row network.csv."""
        name = attrs.pop('name')
        frame = pd.DataFrame(attrs, index=pd.Index([name], name='name'))
        frame.to_csv(self._path("network.csv"), encoding=self.encoding)

    def save_snapshots(self, snapshots):
        """Persist the snapshots frame."""
        snapshots.to_csv(self._path("snapshots.csv"), encoding=self.encoding)

    def save_static(self, list_name, df):
        """Persist static component data."""
        df.to_csv(self._path(list_name + ".csv"), encoding=self.encoding)

    def save_series(self, list_name, attr, df):
        """Persist one time-varying attribute of a component type."""
        df.to_csv(self._path(list_name + "-" + attr + ".csv"),
                  encoding=self.encoding)

    def remove_static(self, list_name):
        """Remove any stale CSVs belonging to ``list_name``."""
        fns = glob(os.path.join(self.csv_folder_name, list_name) + "*.csv")
        if fns:
            for fn in fns:
                os.unlink(fn)
            logger.warning("Stale csv file(s) {} removed".format(', '.join(fns)))

    def remove_series(self, list_name, attr):
        """Remove a stale series CSV if present."""
        fn = self._path(list_name + "-" + attr + ".csv")
        if os.path.exists(fn):
            os.unlink(fn)
class ImporterHDF5(Importer):
    """Read network data from a pandas HDFStore."""

    def __init__(self, path):
        self.ds = pd.HDFStore(path, mode='r')
        # remembers each component's name index so series columns can be
        # translated back from integer positions (pypsa >= 0.13.1 layout)
        self.index = {}

    def get_attributes(self):
        """Return the network attributes from the '/network' table."""
        return dict(self.ds["/network"].reset_index().iloc[0])

    def get_snapshots(self):
        """Return the snapshots table, or None if absent."""
        if "/snapshots" in self.ds:
            return self.ds["/snapshots"]
        return None

    def get_static(self, list_name):
        """Return static component data for ``list_name``, or None.

        NOTE: relies on ``self.pypsa_version`` being assigned by the
        caller (_import_from_importer) before this runs.
        """
        key = "/" + list_name
        if key not in self.ds:
            return None
        if self.pypsa_version is None or self.pypsa_version < [0, 13, 1]:
            # old layout stored the component name as the frame index
            df = self.ds[key]
        else:
            df = self.ds[key].set_index('name')
        self.index[list_name] = df.index
        return df

    def get_series(self, list_name):
        """Yield (attr, frame) pairs for time-varying component data."""
        prefix = '/' + list_name + '_t/'
        for tab in self.ds:
            if tab.startswith(prefix):
                attr = tab[len(prefix):]
                df = self.ds[tab]
                if self.pypsa_version is not None and self.pypsa_version > [0, 13, 0]:
                    # new layout stores integer column positions
                    df.columns = self.index[list_name][df.columns]
                yield attr, df
class ExporterHDF5(Exporter):
    """Write network data to a pandas HDFStore."""

    def __init__(self, path, **kwargs):
        self.ds = pd.HDFStore(path, mode='w', **kwargs)
        # component name indices, used to encode series columns as ints
        self.index = {}

    def save_attributes(self, attrs):
        """Store scalar network attributes as a one-row '/network' table."""
        name = attrs.pop('name')
        frame = pd.DataFrame(attrs, index=pd.Index([name], name='name'))
        self.ds.put('/network', frame, format='table', index=False)

    def save_snapshots(self, snapshots):
        """Store the snapshots table."""
        self.ds.put('/snapshots', snapshots, format='table', index=False)

    def save_static(self, list_name, df):
        """Store static component data, remembering its name index."""
        df.index.name = 'name'
        self.index[list_name] = df.index
        self.ds.put('/' + list_name, df.reset_index(),
                    format='table', index=False)

    def save_series(self, list_name, attr, df):
        """Store a time-varying attribute, encoding columns as positions."""
        df.columns = self.index[list_name].get_indexer(df.columns)
        self.ds.put('/' + list_name + '_t/' + attr, df,
                    format='table', index=False)
if has_xarray:
    class ImporterNetCDF(Importer):
        """Read network data from a netCDF file or an open xarray Dataset."""

        def __init__(self, path):
            # path may be a filename or an already-open xarray Dataset
            self.path = path
            if isinstance(path, string_types):
                self.ds = xr.open_dataset(path)
            else:
                self.ds = path

        def __enter__(self):
            if isinstance(self.path, string_types):
                # NOTE(review): this calls the parent's __init__, not
                # __enter__ -- looks suspicious; confirm whether
                # super().__enter__() was intended here.
                super(ImporterNetCDF, self).__init__()
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            # only release the dataset when we opened it ourselves (path
            # given as a string); caller-supplied Datasets are left alone
            if isinstance(self.path, string_types):
                super(ImporterNetCDF, self).__exit__(exc_type, exc_val, exc_tb)

        def get_attributes(self):
            # network-level scalars live as 'network_*' Dataset attributes
            return {attr[len('network_'):]: val
                    for attr, val in iteritems(self.ds.attrs)
                    if attr.startswith('network_')}

        def get_snapshots(self):
            # snapshots are stored like any other static table
            return self.get_static('snapshots', 'snapshots')

        def get_static(self, list_name, index_name=None):
            """Return static data for ``list_name`` as a DataFrame, or None."""
            t = list_name + '_'
            i = len(t)
            if index_name is None:
                index_name = list_name + '_i'
            if index_name not in self.ds.coords:
                return None
            index = self.ds.coords[index_name].to_index().rename('name')
            df = pd.DataFrame(index=index)
            for attr in iterkeys(self.ds.data_vars):
                # variables named '<list_name>_<attr>', excluding the
                # time-varying '<list_name>_t_...' ones
                if attr.startswith(t) and attr[i:i+2] != 't_':
                    df[attr[i:]] = self.ds[attr].to_pandas()
            return df

        def get_series(self, list_name):
            """Yield (attr, frame) pairs for time-varying component data."""
            t = list_name + '_t_'
            for attr in iterkeys(self.ds.data_vars):
                if attr.startswith(t):
                    df = self.ds[attr].to_pandas()
                    df.index.name = 'name'
                    df.columns.name = 'name'
                    yield attr[len(t):], df
class ExporterNetCDF(Exporter):
def __init__(self, path, least_significant_digit=None):
self.path = path
self.least_significant_digit = least_significant_digit
self.ds = xr.Dataset()
def save_attributes(self, attrs):
self.ds.attrs.update(('network_' + attr, val)
for attr, val in iteritems(attrs))
def save_snapshots(self, snapshots):
snapshots.index.name = 'snapshots'
for attr in snapshots.columns:
self.ds['snapshots_' + attr] = snapshots[attr]
def save_static(self, list_name, df):
df.index.name = list_name + '_i'
self.ds[list_name + '_i'] = df.index
for attr in df.columns:
self.ds[list_name + '_' + attr] = df[attr]
def save_series(self, list_name, attr, df):
df.index.name = 'snapshots'
df.columns.name = list_name + '_t_' + attr + '_i'
self.ds[list_name + '_t_' + attr] = df
if self.least_significant_digit is not None:
print(self.least_significant_digit)
self.ds.encoding.update({
'zlib': True,
'least_significant_digit': self.least_significant_digit
})
def finish(self):
if self.path is not None:
self.ds.to_netcdf(self.path)
def _export_to_exporter(network, exporter, basename, export_standard_types=False):
    """
    Export to exporter.

    Both static and series attributes of components are exported, but only
    if they have non-default values.

    Parameters
    ----------
    exporter : Exporter
        Initialized exporter instance
    basename : str
        Basename, used for logging
    export_standard_types : boolean, default False
        If True, then standard types are exported too (upon reimporting you
        should then set "ignore_standard_types" when initialising the network).
    """
    #exportable component types
    #what about None???? - nan is float?
    allowed_types = (float,int,bool) + string_types + tuple(np.typeDict.values())

    #first export network properties: every plain-scalar public attribute
    attrs = dict((attr, getattr(network, attr))
                 for attr in dir(network)
                 if (not attr.startswith("__") and
                     isinstance(getattr(network,attr), allowed_types)))
    exporter.save_attributes(attrs)

    #now export snapshots
    snapshots = pd.DataFrame(dict(weightings=network.snapshot_weightings),
                             index=pd.Index(network.snapshots, name="name"))
    exporter.save_snapshots(snapshots)

    exported_components = []
    for component in network.all_components - {"SubNetwork"}:

        list_name = network.components[component]["list_name"]
        attrs = network.components[component]["attrs"]

        df = network.df(component)
        pnl = network.pnl(component)

        # standard types ship with pypsa, so don't re-export them unless asked
        if not export_standard_types and component in network.standard_type_components:
            df = df.drop(network.components[component]["standard_types"].index)

        # first do static attributes
        df.index.name = "name"
        if df.empty:
            # remove any stale files left from a previous export
            exporter.remove_static(list_name)
            continue

        col_export = []
        for col in df.columns:
            # do not export derived attributes
            if col in ["sub_network", "r_pu", "x_pu", "g_pu", "b_pu"]:
                continue
            # skip columns that are entirely at their (NaN) default
            if col in attrs.index and pd.isnull(attrs.at[col, "default"]) and pd.isnull(df[col]).all():
                continue
            # skip columns that are entirely at their non-NaN default
            if (col in attrs.index
                and df[col].dtype == attrs.at[col, 'dtype']
                and (df[col] == attrs.at[col, "default"]).all()):
                continue

            col_export.append(col)

        exporter.save_static(list_name, df[col_export])

        #now do varying attributes
        for attr in pnl:
            if attr not in attrs.index:
                col_export = pnl[attr].columns
            else:
                default = attrs.at[attr, "default"]

                # only export columns that differ from the default somewhere
                if pd.isnull(default):
                    col_export = pnl[attr].columns[(~pd.isnull(pnl[attr])).any()]
                else:
                    col_export = pnl[attr].columns[(pnl[attr] != default).any()]

            if len(col_export) > 0:
                df = pnl[attr][col_export]
                exporter.save_series(list_name, attr, df)
            else:
                exporter.remove_series(list_name, attr)

        exported_components.append(list_name)

    logger.info("Exported network {} has {}".format(basename, ", ".join(exported_components)))
def import_from_csv_folder(network, csv_folder_name, encoding=None, skip_time=False):
    """
    Import network data from CSVs in a folder.

    The CSVs must follow the standard form, see ``pypsa/examples``.

    Parameters
    ----------
    csv_folder_name : string
        Name of folder
    encoding : str, default None
        Encoding to use for UTF when reading (ex. 'utf-8'). `List of Python
        standard encodings
        <https://docs.python.org/3/library/codecs.html#standard-encodings>`_
    skip_time : bool, default False
        Skip reading in time dependent attributes

    Examples
    ----------
    >>> network.import_from_csv_folder(csv_folder_name)
    """
    importer = ImporterCSV(csv_folder_name, encoding=encoding)
    with importer:
        _import_from_importer(network, importer,
                              basename=os.path.basename(csv_folder_name),
                              skip_time=skip_time)
def export_to_csv_folder(network, csv_folder_name, encoding=None, export_standard_types=False):
    """
    Export network and components to a folder of CSVs.

    Both static and series attributes of all components are exported, but
    only if they have non-default values.  If ``csv_folder_name`` does not
    already exist, it is created.

    Static attributes land in one CSV per component (e.g.
    ``generators.csv``); series attributes in one CSV per component per
    attribute (e.g. ``generators-p_set.csv``).

    Parameters
    ----------
    csv_folder_name : string
        Name of folder to which to export.
    encoding : str, default None
        Encoding to use for UTF when reading (ex. 'utf-8'). `List of Python
        standard encodings
        <https://docs.python.org/3/library/codecs.html#standard-encodings>`_
    export_standard_types : boolean, default False
        If True, then standard types are exported too (upon reimporting you
        should then set "ignore_standard_types" when initialising the network).

    Examples
    --------
    >>> network.export_to_csv_folder(csv_folder_name)
    """
    exporter = ExporterCSV(csv_folder_name=csv_folder_name, encoding=encoding)
    with exporter:
        _export_to_exporter(network, exporter,
                            basename=os.path.basename(csv_folder_name),
                            export_standard_types=export_standard_types)
def import_from_hdf5(network, path, skip_time=False):
    """
    Import network data from HDF5 store at `path`.

    Parameters
    ----------
    path : string
        Name of HDF5 store
    skip_time : bool, default False
        Skip reading in time dependent attributes
    """
    importer = ImporterHDF5(path)
    with importer:
        _import_from_importer(network, importer,
                              basename=os.path.basename(path),
                              skip_time=skip_time)
def export_to_hdf5(network, path, export_standard_types=False, **kwargs):
    """
    Export network and components to an HDF store.

    Both static and series attributes of components are exported, but only
    if they have non-default values.  If path does not already exist, it is
    created.

    Parameters
    ----------
    path : string
        Name of hdf5 file to which to export (if it exists, it is overwritten)
    export_standard_types : boolean, default False
        If True, then standard types are exported too (upon reimporting you
        should then set "ignore_standard_types" when initialising the network).
    **kwargs
        Extra arguments for pd.HDFStore to specify f.i. compression
        (default: complevel=4)

    Examples
    --------
    >>> network.export_to_hdf5(filename)
    """
    kwargs.setdefault('complevel', 4)

    exporter = ExporterHDF5(path, **kwargs)
    with exporter:
        _export_to_exporter(network, exporter,
                            basename=os.path.basename(path),
                            export_standard_types=export_standard_types)
def import_from_netcdf(network, path, skip_time=False):
    """
    Import network data from netCDF file or xarray Dataset at `path`.

    Parameters
    ----------
    path : string|xr.Dataset
        Path to netCDF dataset or instance of xarray Dataset
    skip_time : bool, default False
        Skip reading in time dependent attributes
    """
    assert has_xarray, "xarray must be installed for netCDF support."

    if isinstance(path, string_types):
        basename = os.path.basename(path)
    else:
        # an already-open Dataset has no filename to log
        basename = None
    with ImporterNetCDF(path=path) as importer:
        _import_from_importer(network, importer, basename=basename,
                              skip_time=skip_time)
def export_to_netcdf(network, path=None, export_standard_types=False,
                     least_significant_digit=None):
    """Export network and components to a netCDF file.

    Both static and series attributes of components are exported, but only
    if they have non-default values.  If path does not already exist, it is
    created.  If no path is passed, no file is exported, but the
    xarray.Dataset is still returned.

    Be aware that this cannot export boolean attributes on the Network
    class, e.g. network.my_bool = False is not supported by netCDF.

    Parameters
    ----------
    path : string|None
        Name of netCDF file to which to export (if it exists, it is
        overwritten); if None is passed, no file is exported.
    export_standard_types : boolean, default False
        If True, then standard types are exported too (upon reimporting you
        should then set "ignore_standard_types" when initialising the network).
    least_significant_digit
        This is passed to the netCDF exporter, but currently makes no
        difference to file size or float accuracy. We're working on
        improving this...

    Returns
    -------
    ds : xarray.Dataset

    Examples
    --------
    >>> network.export_to_netcdf("my_file.nc")
    """
    assert has_xarray, "xarray must be installed for netCDF support."

    basename = None if path is None else os.path.basename(path)
    with ExporterNetCDF(path, least_significant_digit) as exporter:
        _export_to_exporter(network, exporter, basename=basename,
                            export_standard_types=export_standard_types)

    # the dataset is returned even when no file was written (path=None)
    return exporter.ds
def _import_from_importer(network, importer, basename, skip_time=False):
    """
    Import network data from importer.

    Parameters
    ----------
    basename : string or None
        Name of the imported file/dataset; used only in the final log
        message. None when importing from an in-memory object.
    skip_time : bool
        Skip importing time
    """
    attrs = importer.get_attributes()
    current_pypsa_version = [int(s) for s in network.pypsa_version.split(".")]
    pypsa_version = None
    if attrs is not None:
        network.name = attrs.pop('name')
        try:
            pypsa_version = [int(s) for s in attrs.pop("pypsa_version").split(".")]
        except KeyError:
            pypsa_version = None
        # remaining attributes are set verbatim on the Network object
        for attr, val in iteritems(attrs):
            setattr(network, attr, val)
    ##https://docs.python.org/3/tutorial/datastructures.html#comparing-sequences-and-other-types
    if pypsa_version is None or pypsa_version < current_pypsa_version:
        logger.warning(dedent("""
            Importing PyPSA from older version of PyPSA than current version {}.
            Please read the release notes at https://pypsa.org/doc/release_notes.html
            carefully to prepare your network for import.
        """).format(network.pypsa_version))
    importer.pypsa_version = pypsa_version
    importer.current_pypsa_version = current_pypsa_version
    # if there is snapshots.csv, read in snapshot data
    df = importer.get_snapshots()
    if df is not None:
        network.set_snapshots(df.index)
        if "weightings" in df.columns:
            network.snapshot_weightings = df["weightings"].reindex(network.snapshots)
    imported_components = []
    # now read in other components; make sure buses and carriers come first
    for component in ["Bus", "Carrier"] + sorted(network.all_components - {"Bus", "Carrier", "SubNetwork"}):
        list_name = network.components[component]["list_name"]
        df = importer.get_static(list_name)
        if df is None:
            if component == "Bus":
                logger.error("Error, no buses found")
                return
            else:
                continue
        import_components_from_dataframe(network, df, component)
        if not skip_time:
            for attr, df in importer.get_series(list_name):
                import_series_from_dataframe(network, df, component, attr)
        logger.debug(getattr(network, list_name))
        imported_components.append(list_name)
    # BUG FIX: basename may be None (e.g. importing from an xarray Dataset,
    # see import_from_netcdf); " " + None raised a TypeError here.
    logger.info("Imported network{} has {}".format(
        " " + basename if basename else "",
        ", ".join(imported_components)))
def import_components_from_dataframe(network, dataframe, cls_name):
    """
    Import components from a pandas DataFrame.

    If columns are missing then defaults are used.
    If extra columns are added, these are left in the resulting component dataframe.

    Parameters
    ----------
    dataframe : pandas.DataFrame
        A DataFrame whose index is the names of the components and
        whose columns are the non-default attributes.
    cls_name : string
        Name of class of component, e.g. ``"Line","Bus","Generator", "StorageUnit"``

    Examples
    --------
    >>> import pandas as pd
    >>> buses = ['Berlin', 'Frankfurt', 'Munich', 'Hamburg']
    >>> network.import_components_from_dataframe(
            pd.DataFrame({"v_nom" : 380, "control" : 'PV'},
                         index=buses),
            "Bus")
    >>> network.import_components_from_dataframe(
            pd.DataFrame({"carrier" : "solar", "bus" : buses, "p_nom_extendable" : True},
                         index=[b+" PV" for b in buses]),
            "Generator")

    See Also
    --------
    pypsa.Network.madd
    """
    # Warn about column names that were renamed/retired in earlier releases.
    if cls_name == "Generator" and "source" in dataframe.columns:
        logger.warning("'source' for generators is deprecated, use 'carrier' instead.")
    if cls_name == "Generator" and "dispatch" in dataframe.columns:
        logger.warning("'dispatch' for generators is deprecated, use time-varing 'p_max_pu' for 'variable' and static 'p_max_pu' for 'flexible'.")
    if cls_name in ["Generator","StorageUnit"] and "p_max_pu_fixed" in dataframe.columns:
        logger.warning("'p_max_pu_fixed' for generators is deprecated, use static 'p_max_pu' instead.")
    if cls_name in ["Generator","StorageUnit"] and "p_min_pu_fixed" in dataframe.columns:
        logger.warning("'p_min_pu_fixed' for generators is deprecated, use static 'p_min_pu' instead.")
    if cls_name == "Bus" and "current_type" in dataframe.columns:
        logger.warning("'current_type' for buses is deprecated, use 'carrier' instead.")
    if cls_name == "Link" and "s_nom" in dataframe.columns:
        logger.warning("'s_nom*' for links is deprecated, use 'p_nom*' instead.")
    attrs = network.components[cls_name]["attrs"]
    static_attrs = attrs[attrs.static].drop("name")
    non_static_attrs = attrs[~attrs.static]
    # Clean dataframe and ensure correct types
    dataframe = pd.DataFrame(dataframe)
    dataframe.index = dataframe.index.astype(str)
    # Fill missing static attributes with their defaults; coerce present
    # columns to the declared Python type (the "typ" column of attrs).
    for k in static_attrs.index:
        if k not in dataframe.columns:
            dataframe[k] = static_attrs.at[k, "default"]
        else:
            if static_attrs.at[k, "type"] == 'string':
                # NaN in a string column would otherwise become the string "nan"
                dataframe[k] = dataframe[k].replace({np.nan: ""})
            dataframe[k] = dataframe[k].astype(static_attrs.at[k, "typ"])
    #check all the buses are well-defined
    for attr in ["bus", "bus0", "bus1"]:
        if attr in dataframe.columns:
            missing = dataframe.index[~dataframe[attr].isin(network.buses.index)]
            if len(missing) > 0:
                logger.warning("The following %s have buses which are not defined:\n%s",
                               cls_name, missing)
    non_static_attrs_in_df = non_static_attrs.index.intersection(dataframe.columns)
    old_df = network.df(cls_name)
    # Static part: drop the series-typed columns, then append to any
    # components already present on the network.
    new_df = dataframe.drop(non_static_attrs_in_df, axis=1)
    if not old_df.empty:
        new_df = pd.concat((old_df, new_df), sort=False)
    if not new_df.index.is_unique:
        logger.error("Error, new components for {} are not unique".format(cls_name))
        return
    setattr(network, network.components[cls_name]["list_name"], new_df)
    #now deal with time-dependent properties
    pnl = network.pnl(cls_name)
    for k in non_static_attrs_in_df:
        #If reading in outputs, fill the outputs
        pnl[k] = pnl[k].reindex(columns=new_df.index,
                                fill_value=non_static_attrs.at[k, "default"])
        pnl[k].loc[:,dataframe.index] = dataframe.loc[:,k].values
    setattr(network,network.components[cls_name]["list_name"]+"_t",pnl)
def import_series_from_dataframe(network, dataframe, cls_name, attr):
    """
    Import time series from a pandas DataFrame.

    Parameters
    ----------
    dataframe : pandas.DataFrame
        A DataFrame whose index is ``network.snapshots`` and
        whose columns are a subset of the relevant components.
    cls_name : string
        Name of class of component
    attr : string
        Name of time-varying series attribute

    Examples
    --------
    >>> import numpy as np
    >>> network.set_snapshots(range(10))
    >>> network.import_series_from_dataframe(
            pd.DataFrame(np.random.rand(10,4),
                         columns=network.generators.index,
                         index=range(10)),
            "Generator",
            "p_max_pu")

    See Also
    --------
    pypsa.Network.madd()
    """
    df = network.df(cls_name)
    pnl = network.pnl(cls_name)
    list_name = network.components[cls_name]["list_name"]
    # Warn about series columns that have no corresponding static component.
    diff = dataframe.columns.difference(df.index)
    if len(diff) > 0:
        logger.warning(f"Components {diff} for attribute {attr} of {cls_name} "
                       f"are not in main components dataframe {list_name}")
    attrs = network.components[cls_name]['attrs']
    expected_attrs = attrs[lambda ds: ds.type.str.contains('series')].index
    # Attributes unknown to the component schema (e.g. solver outputs) are
    # stored as-is, without snapshot/column reindexing.
    if attr not in expected_attrs:
        pnl[attr] = dataframe
        return
    attr_series = attrs.loc[attr]
    default = attr_series.default
    columns = dataframe.columns
    diff = network.snapshots.difference(dataframe.index)
    if len(diff):
        logger.warning(f"Snapshots {diff} are missing from {attr} of {cls_name}."
                       f" Filling with default value '{default}'")
        dataframe = dataframe.reindex(network.snapshots, fill_value=default)
    if not attr_series.static:
        # attribute exists for every component: pad with the default
        pnl[attr] = pnl[attr].reindex(columns=df.index | columns, fill_value=default)
    else:
        # static-by-default attribute: only keep explicitly provided columns
        pnl[attr] = pnl[attr].reindex(columns=(pnl[attr].columns | columns))
    pnl[attr].loc[network.snapshots, columns] = dataframe.loc[network.snapshots, columns]
def import_from_pypower_ppc(network, ppc, overwrite_zero_s_nom=None):
    """
    Import network from PYPOWER PPC dictionary format version 2.

    Converts all baseMVA to base power of 1 MVA.

    For the meaning of the pypower indices, see also pypower/idx_*.

    Parameters
    ----------
    ppc : PYPOWER PPC dict
    overwrite_zero_s_nom : Float or None, default None
        If given, branches with s_nom == 0 (unconstrained in PYPOWER)
        get this value instead.

    Examples
    --------
    >>> from pypower.api import case30
    >>> ppc = case30()
    >>> network.import_from_pypower_ppc(ppc)
    """
    version = ppc["version"]
    if int(version) != 2:
        logger.warning("Warning, importing from PYPOWER may not work if PPC version is not 2!")
    logger.warning("Warning: Note that when importing from PYPOWER, some PYPOWER features not supported: areas, gencosts, component status")
    baseMVA = ppc["baseMVA"]
    #dictionary to store pandas DataFrames of PyPower data
    pdf = {}
    # add buses
    #integer numbering will be bus names
    index = np.array(ppc['bus'][:,0],dtype=int)
    columns = ["type","Pd","Qd","Gs","Bs","area","v_mag_pu_set","v_ang_set","v_nom","zone","v_mag_pu_max","v_mag_pu_min"]
    pdf["buses"] = pd.DataFrame(index=index,columns=columns,data=ppc['bus'][:,1:len(columns)+1])
    if (pdf["buses"]["v_nom"] == 0.).any():
        logger.warning("Warning, some buses have nominal voltage of 0., setting the nominal voltage of these to 1.")
        pdf['buses'].loc[pdf['buses']['v_nom'] == 0.,'v_nom'] = 1.
    #rename controls
    controls = ["","PQ","PV","Slack"]
    pdf["buses"]["control"] = pdf["buses"].pop("type").map(lambda i: controls[int(i)])
    #add loads for any buses with Pd or Qd
    pdf['loads'] = pdf["buses"].loc[pdf["buses"][["Pd","Qd"]].any(axis=1), ["Pd","Qd"]]
    pdf['loads']['bus'] = pdf['loads'].index
    pdf['loads'].rename(columns={"Qd" : "q_set", "Pd" : "p_set"}, inplace=True)
    pdf['loads'].index = ["L"+str(i) for i in range(len(pdf['loads']))]
    #add shunt impedances for any buses with Gs or Bs
    shunt = pdf["buses"].loc[pdf["buses"][["Gs","Bs"]].any(axis=1), ["v_nom","Gs","Bs"]]
    #base power for shunt is 1 MVA, so no need to rebase here
    shunt["g"] = shunt["Gs"]/shunt["v_nom"]**2
    shunt["b"] = shunt["Bs"]/shunt["v_nom"]**2
    pdf['shunt_impedances'] = shunt.reindex(columns=["g","b"])
    pdf['shunt_impedances']["bus"] = pdf['shunt_impedances'].index
    pdf['shunt_impedances'].index = ["S"+str(i) for i in range(len(pdf['shunt_impedances']))]
    #add gens
    #it is assumed that the pypower p_max is the p_nom
    #could also do gen.p_min_pu = p_min/p_nom
    columns = "bus, p_set, q_set, q_max, q_min, v_set_pu, mva_base, status, p_nom, p_min, Pc1, Pc2, Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf".split(", ")
    index = ["G"+str(i) for i in range(len(ppc['gen']))]
    pdf['generators'] = pd.DataFrame(index=index,columns=columns,data=ppc['gen'][:,:len(columns)])
    #make sure bus name is an integer
    pdf['generators']['bus'] = np.array(ppc['gen'][:,0],dtype=int)
    #add branchs
    ## branch data
    # fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax
    columns = 'bus0, bus1, r, x, b, s_nom, rateB, rateC, tap_ratio, phase_shift, status, v_ang_min, v_ang_max'.split(", ")
    pdf['branches'] = pd.DataFrame(columns=columns,data=ppc['branch'][:,:len(columns)])
    pdf['branches']['original_index'] = pdf['branches'].index
    pdf['branches']["bus0"] = pdf['branches']["bus0"].astype(int)
    pdf['branches']["bus1"] = pdf['branches']["bus1"].astype(int)
    # s_nom = 0 indicates an unconstrained line
    zero_s_nom = pdf['branches']["s_nom"] == 0.
    if zero_s_nom.any():
        if overwrite_zero_s_nom is not None:
            pdf['branches'].loc[zero_s_nom, "s_nom"] = overwrite_zero_s_nom
        else:
            logger.warning("Warning: there are {} branches with s_nom equal to zero, "
                           "they will probably lead to infeasibilities and should be "
                           "replaced with a high value using the `overwrite_zero_s_nom` "
                           "argument.".format(zero_s_nom.sum()))
    # determine bus voltages of branches to detect transformers
    v_nom = pdf['branches'].bus0.map(pdf['buses'].v_nom)
    v_nom_1 = pdf['branches'].bus1.map(pdf['buses'].v_nom)
    # split branches into transformers and lines
    transformers = ((v_nom != v_nom_1)
                    | ((pdf['branches'].tap_ratio != 0.) & (pdf['branches'].tap_ratio != 1.)) #NB: PYPOWER has strange default of 0. for tap ratio
                    | (pdf['branches'].phase_shift != 0))
    pdf['transformers'] = pd.DataFrame(pdf['branches'][transformers])
    pdf['lines'] = pdf['branches'][~ transformers].drop(["tap_ratio", "phase_shift"], axis=1)
    #convert transformers from base baseMVA to base s_nom
    pdf['transformers']['r'] = pdf['transformers']['r']*pdf['transformers']['s_nom']/baseMVA
    pdf['transformers']['x'] = pdf['transformers']['x']*pdf['transformers']['s_nom']/baseMVA
    pdf['transformers']['b'] = pdf['transformers']['b']*baseMVA/pdf['transformers']['s_nom']
    #correct per unit impedances
    pdf['lines']["r"] = v_nom**2*pdf['lines']["r"]/baseMVA
    pdf['lines']["x"] = v_nom**2*pdf['lines']["x"]/baseMVA
    pdf['lines']["b"] = pdf['lines']["b"]*baseMVA/v_nom**2
    if (pdf['transformers']['tap_ratio'] == 0.).any():
        logger.warning("Warning, some transformers have a tap ratio of 0., setting the tap ratio of these to 1.")
        pdf['transformers'].loc[pdf['transformers']['tap_ratio'] == 0.,'tap_ratio'] = 1.
    #name them nicely
    pdf['transformers'].index = ["T"+str(i) for i in range(len(pdf['transformers']))]
    pdf['lines'].index = ["L"+str(i) for i in range(len(pdf['lines']))]
    #TODO
    ##----- OPF Data -----##
    ## generator cost data
    # 1 startup shutdown n x1 y1 ... xn yn
    # 2 startup shutdown n c(n-1) ... c0
    for component in ["Bus","Load","Generator","Line","Transformer","ShuntImpedance"]:
        import_components_from_dataframe(network,pdf[network.components[component]["list_name"]],component)
    network.generators["control"] = network.generators.bus.map(network.buses["control"])
    #for consistency with pypower, take the v_mag set point from the generators
    network.buses.loc[network.generators.bus,"v_mag_pu_set"] = np.asarray(network.generators["v_set_pu"])
def import_from_pandapower_net(network, net, extra_line_data=False):
    """
    Import network from pandapower net.

    Importing from pandapower is still in beta;
    not all pandapower data is supported.

    Unsupported features include:
    - three-winding transformers
    - switches
    - in_service status,
    - shunt impedances, and
    - tap positions of transformers.

    Parameters
    ----------
    net : pandapower network
    extra_line_data : boolean, default: False
        if True, the line data for all parameters is imported instead of only
        the type

    Examples
    --------
    >>> network.import_from_pandapower_net(net)
    OR
    >>> import pandapower as pp
    >>> import pandapower.networks as pn
    >>> net = pn.create_cigre_network_mv(with_der='all')
    >>> pp.runpp(net)
    >>> network.import_from_pandapower_net(net, extra_line_data=True)
    """
    logger.warning("Warning: Importing from pandapower is still in beta; not all pandapower data is supported.\nUnsupported features include: three-winding transformers, switches, in_service status, shunt impedances and tap positions of transformers.")
    d = {}
    d["Bus"] = pd.DataFrame({"v_nom" : net.bus.vn_kv.values,
                             "v_mag_pu_set" : 1.},
                            index=net.bus.name)
    # pandapower scales loads/generation by the 'scaling' factor
    d["Load"] = pd.DataFrame({"p_set" : (net.load.scaling*net.load.p_mw).values,
                              "q_set" : (net.load.scaling*net.load.q_mvar).values,
                              "bus" : net.bus.name.loc[net.load.bus].values},
                             index=net.load.name)
    #deal with PV generators
    # sign flip: pandapower uses consumer convention for gen p_mw here
    d["Generator"] = pd.DataFrame({"p_set" : -(net.gen.scaling*net.gen.p_mw).values,
                                   "q_set" : 0.,
                                   "bus" : net.bus.name.loc[net.gen.bus].values,
                                   "control" : "PV"},
                                  index=net.gen.name)
    d["Bus"].loc[net.bus.name.loc[net.gen.bus].values,"v_mag_pu_set"] = net.gen.vm_pu.values
    #deal with PQ "static" generators
    d["Generator"] = pd.concat((d["Generator"],pd.DataFrame({"p_set" : -(net.sgen.scaling*net.sgen.p_mw).values,
                                                             "q_set" : -(net.sgen.scaling*net.sgen.q_mvar).values,
                                                             "bus" : net.bus.name.loc[net.sgen.bus].values,
                                                             "control" : "PQ"},
                                                            index=net.sgen.name)), sort=False)
    # external grids become slack generators
    d["Generator"] = pd.concat((d["Generator"],pd.DataFrame({"control" : "Slack",
                                                             "p_set" : 0.,
                                                             "q_set" : 0.,
                                                             "bus" : net.bus.name.loc[net.ext_grid.bus].values},
                                                            index=net.ext_grid.name.fillna("External Grid"))), sort=False)
    d["Bus"].loc[net.bus.name.loc[net.ext_grid.bus].values,"v_mag_pu_set"] = net.ext_grid.vm_pu.values
    if extra_line_data == False:
        d["Line"] = pd.DataFrame({"type": net.line.std_type.values,
                                  "bus0": net.bus.name.loc[net.line.from_bus].values,
                                  "bus1": net.bus.name.loc[net.line.to_bus].values,
                                  "length": net.line.length_km.values,
                                  "num_parallel": net.line.parallel.values},
                                 index=net.line.name)
    else:
        r = net.line.r_ohm_per_km.values * net.line.length_km.values
        x = net.line.x_ohm_per_km.values * net.line.length_km.values
        # capacitance values from pandapower in nF; transformed here:
        f = net.f_hz
        b = net.line.c_nf_per_km.values * net.line.length_km.values*1e-9
        b = b*2*math.pi*f
        u = net.bus.vn_kv.loc[net.line.from_bus].values
        # NOTE(review): s_nom = V * I_max omits the sqrt(3) factor usually
        # applied for three-phase apparent power -- confirm intended.
        s_nom = u*net.line.max_i_ka.values
        d["Line"] = pd.DataFrame({"r" : r,
                                  "x" : x,
                                  "b" : b,
                                  "s_nom" : s_nom,
                                  "bus0" : net.bus.name.loc[net.line.from_bus].values,
                                  "bus1" : net.bus.name.loc[net.line.to_bus].values,
                                  "length" : net.line.length_km.values,
                                  "num_parallel" : net.line.parallel.values},
                                 index=net.line.name)
    # check, if the trafo is based on a standard-type:
    if net.trafo.std_type.any():
        d["Transformer"] = pd.DataFrame({"type" : net.trafo.std_type.values,
                                         "bus0" : net.bus.name.loc[net.trafo.hv_bus].values,
                                         "bus1" : net.bus.name.loc[net.trafo.lv_bus].values,
                                         "tap_position" : net.trafo.tap_pos.values},
                                        index=net.trafo.name)
        d["Transformer"] = d["Transformer"].fillna(0)
    # if it's not based on a standard-type - get the included values:
    else:
        # NOTE(review): sn_mva is already in MVA in recent pandapower
        # versions; the /1000. looks like a leftover from sn_kva -- confirm.
        s_nom = net.trafo.sn_mva.values/1000.
        r = net.trafo.vkr_percent.values/100.
        x = np.sqrt((net.trafo.vk_percent.values/100.)**2 - r**2)
        # NB: b and g are per unit of s_nom
        g = net.trafo.pfe_kw.values/(1000. * s_nom)
        # for some bizarre reason, some of the standard types in pandapower have i0^2 < g^2
        b = - np.sqrt(((net.trafo.i0_percent.values/100.)**2 - g**2).clip(min=0))
        d["Transformer"] = pd.DataFrame({"phase_shift" : net.trafo.shift_degree.values,
                                         "s_nom" : s_nom,
                                         "bus0" : net.bus.name.loc[net.trafo.hv_bus].values,
                                         "bus1" : net.bus.name.loc[net.trafo.lv_bus].values,
                                         "r" : r,
                                         "x" : x,
                                         "g" : g,
                                         "b" : b,
                                         "tap_position" : net.trafo.tap_pos.values},
                                        index=net.trafo.name)
        d["Transformer"] = d["Transformer"].fillna(0)
    for c in ["Bus","Load","Generator","Line","Transformer"]:
        network.import_components_from_dataframe(d[c],c)
    #amalgamate buses connected by closed switches
    bus_switches = net.switch[(net.switch.et=="b") & net.switch.closed]
    bus_switches["stays"] = bus_switches.bus.map(net.bus.name)
    bus_switches["goes"] = bus_switches.element.map(net.bus.name)
    to_replace = pd.Series(bus_switches.stays.values,bus_switches.goes.values)
    for i in to_replace.index:
        network.remove("Bus",i)
    # re-point components from the removed bus to the surviving one
    for c in network.iterate_components({"Load","Generator"}):
        c.df.bus.replace(to_replace,inplace=True)
    for c in network.iterate_components({"Line","Transformer"}):
        c.df.bus0.replace(to_replace,inplace=True)
        c.df.bus1.replace(to_replace,inplace=True)
|
FRESNA/PyPSA
|
pypsa/io.py
|
Python
|
gpl-3.0
| 41,766
|
[
"NetCDF"
] |
decafa6df53d4e17735ba4deedd115afcd42cf355862b5c05c815e13a9669f78
|
#------------------------------------------------------------------------------
# pycparser: c-to-c.py
#
# Example of using pycparser.c_generator, serving as a simplistic translator
# from C to AST and back to C.
#
# Eli Bendersky [http://eli.thegreenplace.net]
# License: BSD
#------------------------------------------------------------------------------
from __future__ import print_function
import sys
# This is not required if you've installed pycparser into
# your site-packages/ with setup.py
#
sys.path.extend(['.', '..'])
from pycparser import parse_file, c_parser, c_generator
def translate_to_c(filename):
    """Parse *filename* as C (through the preprocessor) and print the
    source re-generated from its AST.
    """
    ast = parse_file(filename, use_cpp=True)
    print(c_generator.CGenerator().visit(ast))
def _zz_test_translate():
    # internal use
    src = r'''
    void f(char * restrict joe){}
    int main(void)
    {
        unsigned int long k = 4;
        int p = - - k;
        return 0;
    }
    '''
    cp = c_parser.CParser()
    tree = cp.parse(src)
    tree.show()
    gen = c_generator.CGenerator()
    print(gen.visit(tree))

    # tracing the generator for debugging
    #~ import trace
    #~ tr = trace.Trace(countcallers=1)
    #~ tr.runfunc(gen.visit, tree)
    #~ tr.results().write_results()
#------------------------------------------------------------------------------
if __name__ == "__main__":
    #_zz_test_translate()
    if len(sys.argv) <= 1:
        print("Please provide a filename as argument")
    else:
        translate_to_c(sys.argv[1])
|
CtheSky/pycparser
|
examples/c-to-c.py
|
Python
|
bsd-3-clause
| 1,584
|
[
"VisIt"
] |
d511d1068e1c1d4edbb43b6368e80ed57ff9b1d6f78dbeff8b3f5204964a8398
|
# Copyright (C) 2006-2011, University of Maryland
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/ or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Author: James Krycka
"""
This module contains a custom About Dialog class and associated text strings
used for informational display purposes. Note that the product version is
maintained in the version.py file and therefore is imported here.
"""
import wx
try:
from agw.hyperlink import HyperLinkCtrl
except ImportError: # if it's not there, try the older location.
from wx.lib.agw.hyperlink import HyperLinkCtrl
from wx.lib.wordwrap import wordwrap
from .. import __version__ as APP_VERSION
from .utilities import resource
# Resource files.
PROG_ICON = "direfl.ico"

# Text strings used in About Dialog boxes and for other project identification
# purposes.
#
# Note that paragraphs intended to be processed by wordwrap are formatted as
# one string without newline characters.
APP_NAME = "DiRefl"
APP_TITLE = "DiRefl - Direct Inversion Reflectometry"
APP_COPYRIGHT = "(C) 2011 University of Maryland"
# Body text for the About dialog; wordwrapped at display time.
APP_DESCRIPTION = """\
The Direct Inversion Reflectometry (DiRefl) application generates a scattering \
length density (SLD) profile of a thin film or free form sample using two \
neutron scattering datasets without the need to perform a fit of the data. \
DiRefl also has a simulation capability for creating datasets from a simple \
model description of the sample material.
DiRefl applies phase reconstruction and direct inversion techniques to analyze \
the reflectivity datasets produced by the two neutron scattering experiments \
performed on a single or multi-layer sample sandwiched between incident and \
substrate layers whose characteristics are known. The only setup difference \
between the runs is that the user changes the composition of one of the \
surrounding layers.
The primary output from DiRefl is a SLD profile graph of the sample along with \
other supporting plots that can be saved or printed. Optionally, the raw data \
used to generate the plots can be saved. In addition, the user is able to \
load, edit, and save model information, load and view their reflectometry \
datasets, edit instrument and measurement settings that are used to calculate \
resolution, and adjust inversion parameters that affect the qualitative \
results of the analysis.
"""
# MIT license text shown in the About dialog.
APP_LICENSE = """\
Permission is hereby granted, free of charge, to any person obtaining a copy \
of this software and associated documentation files (the "Software"), to deal \
in the Software without restriction, including without limitation the rights \
to use, copy, modify, merge, publish, distribute, sublicense, and/ or sell \
copies of the Software, and to permit persons to whom the Software is \
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in \
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR \
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, \
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE \
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER \
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN \
THE SOFTWARE.
"""
# Contributor credits shown in the About dialog.
APP_CREDITS = """\
This program was developed jointly by the University of Maryland (UMD) and \
the National Institute of Standards and Technology (NIST). The research and \
development of the phase reconstruction and inversion algorithms was performed \
by scientists at NIST and initially coded in Fortran. The port of this code \
to Python and the design and development of the DiRefl application was a joint \
effort by UMD and NIST as part of the Distributed Data Analysis of Neutron \
Scattering Experiments (DANSE) project funded by the US National Science \
Foundation under grant DMR-0520547. Principal contributors:
Paul Kienzle, NIST
- Implementation of simulation, reconstruction, and inversion functions
including the reflectivity and resolution calculations
Charles Majkrzak, NIST
- Phase reconstruction algorithm
Norm Berk, NIST
- Phase inversion algorithm
James Krycka, UMD
- Graphical User Interface design and development
"""
# Hyperlink targets/labels used by the About dialog (see AboutDialog below).
APP_PROJECT_URL = "http://reflectometry.org/danse"
APP_PROJECT_TAG = "DANSE/Reflectometry home page"
APP_TUTORIAL_URL = "http://www.reflectometry.org/danse/packages.html"
APP_TUTORIAL_TAG = "DANSE/Reflectometry documentation"
# Lead-in sentence preceding the documentation hyperlink.
APP_TUTORIAL = """\
For the DiRefl User's Guide and related information, please visit:\
"""
#==============================================================================
class AboutDialog(wx.Dialog):
    """
    This class creates a pop-up About Dialog box with several display options.
    """

    def __init__(self,
                 parent=None,
                 id=wx.ID_ANY,
                 title="About",
                 pos=wx.DefaultPosition,
                 size=wx.DefaultSize,
                 style=wx.DEFAULT_DIALOG_STYLE,
                 show_name=True,
                 show_notice=True,
                 show_link=True,
                 show_link_docs=False,
                 info="..."
                ):
        """
        Build the dialog.

        Parameters
        ----------
        parent, id, title, pos, size, style :
            Standard wx.Dialog arguments.
        show_name : bool
            Show the "<APP_NAME> <APP_VERSION>" header line.
        show_notice : bool
            Show the copyright notice.
        show_link : bool
            Show a hyperlink to the project home page.
        show_link_docs : bool
            Show a hyperlink to the project documentation page.
        info : str
            Body text; word-wrapped to 530 pixels before display.
        """
        wx.Dialog.__init__(self, parent, id, title, pos, size, style)

        # Display the application's icon in the title bar.
        icon = wx.Icon(resource(PROG_ICON), wx.BITMAP_TYPE_ICO)
        self.SetIcon(icon)

        # Set the font for this window and all child windows (widgets) from the
        # parent window, or from the system defaults if no parent is given.
        # A dialog box does not inherit font info from its parent, so we will
        # explicitly get it from the parent and apply it to the dialog box.
        if parent is not None:
            font = parent.GetFont()
            self.SetFont(font)

        # Display program name and version.
        if show_name:
            prog = wx.StaticText(self, wx.ID_ANY,
                                 label=(APP_NAME + " " + APP_VERSION))
            font = prog.GetFont()
            font.SetPointSize(font.GetPointSize() + 1)
            font.SetWeight(wx.BOLD)
            prog.SetFont(font)

        # Display copyright notice.
        if show_notice:
            copyright = wx.StaticText(self, wx.ID_ANY, label=APP_COPYRIGHT)

        # Display hyperlink to the Reflectometry home page and/or doc page.
        if show_link:
            hyper1 = HyperLinkCtrl(self, wx.ID_ANY, label=APP_PROJECT_TAG,
                                   URL=APP_PROJECT_URL)
        if show_link_docs:
            hyper2 = HyperLinkCtrl(self, wx.ID_ANY, label=APP_TUTORIAL_TAG,
                                   URL=APP_TUTORIAL_URL)

        # Display the body of text for this about dialog box.
        info = wx.StaticText(self, wx.ID_ANY,
                             label=wordwrap(info, 530, wx.ClientDC(self)))

        # Create the OK button control.
        ok_button = wx.Button(self, wx.ID_OK, "OK")
        ok_button.SetDefault()

        # Use a vertical box sizer to manage the widget layout..
        sizer = wx.BoxSizer(wx.VERTICAL)
        if show_name:
            sizer.Add(prog, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, border=10)
        if show_notice:
            sizer.Add(copyright, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, border=10)
        sizer.Add(info, 0, wx.ALL, border=10)
        if show_link:
            sizer.Add(hyper1, 0, wx.ALL, border=10)
        if show_link_docs:
            sizer.Add(hyper2, 0, wx.ALL, border=10)
        sizer.Add(ok_button, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, border=10)

        # Finalize the sizer and establish the dimensions of the dialog box.
        self.SetSizer(sizer)
        sizer.Fit(self)
|
reflectometry/direfl
|
direfl/gui/about.py
|
Python
|
mit
| 8,853
|
[
"VisIt"
] |
a415abf87a26b23028b001f5ec4865d52b16c2a2c059ceeceb7a945187b516a8
|
# Copyright 2008-2014 by Michiel de Hoon. All rights reserved.
# Revisions copyright 2008-2015 by Peter Cock. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Parser for XML results returned by NCBI's Entrez Utilities.
This parser is used by the read() function in Bio.Entrez, and is not
intended to be used directly.
The question is how to represent an XML file as Python objects. Some
XML files returned by NCBI look like lists, others look like dictionaries,
and others look like a mix of lists and dictionaries.
My approach is to classify each possible element in the XML as a plain
string, an integer, a list, a dictionary, or a structure. The latter is a
dictionary where the same key can occur multiple times; in Python, it is
represented as a dictionary where that key occurs once, pointing to a list
of values found in the XML file.
The parser then goes through the XML and creates the appropriate Python
object for each element. The different levels encountered in the XML are
preserved on the Python side. So a subelement of a subelement of an element
is a value in a dictionary that is stored in a list which is a value in
some other dictionary (or a value in a list which itself belongs to a list
which is a value in a dictionary, and so on). Attributes encountered in
the XML are stored as a dictionary in a member .attributes of each element,
and the tag name is saved in a member .tag.
To decide which kind of Python object corresponds to each element in the
XML, the parser analyzes the DTD referred at the top of (almost) every
XML file returned by the Entrez Utilities. This is preferred over a hand-
written solution, since the number of DTDs is rather large and their
contents may change over time. About half the code in this parser deals
with parsing the DTD, and the other half with the XML itself.
"""
import re
import os
import warnings
from xml.parsers import expat
from io import BytesIO
import xml.etree.ElementTree as ET
# Importing these functions with leading underscore as not intended for reuse
from Bio._py3k import urlopen as _urlopen
from Bio._py3k import urlparse as _urlparse
from Bio._py3k import unicode
__docformat__ = "restructuredtext en"
# The following four classes are used to add a member .attributes to integers,
# strings, lists, and dictionaries, respectively.
class IntegerElement(int):
    """``int`` subclass that may carry a ``.attributes`` dict from the XML."""

    def __repr__(self):
        base = int.__repr__(self)
        if not hasattr(self, "attributes"):
            return base
        return "IntegerElement(%s, attributes=%s)" % (base, repr(self.attributes))
class StringElement(str):
    """``str`` subclass that may carry a ``.attributes`` dict from the XML."""

    def __repr__(self):
        base = str.__repr__(self)
        if not hasattr(self, "attributes"):
            return base
        return "StringElement(%s, attributes=%s)" % (base, repr(self.attributes))
class UnicodeElement(unicode):
    """``unicode`` subclass that may carry a ``.attributes`` dict from the XML."""

    def __repr__(self):
        base = unicode.__repr__(self)
        if not hasattr(self, "attributes"):
            return base
        return "UnicodeElement(%s, attributes=%s)" % (base, repr(self.attributes))
class ListElement(list):
    """``list`` subclass that may carry a ``.attributes`` dict from the XML."""

    def __repr__(self):
        base = list.__repr__(self)
        if not hasattr(self, "attributes"):
            return base
        return "ListElement(%s, attributes=%s)" % (base, repr(self.attributes))
class DictionaryElement(dict):
    """``dict`` subclass that may carry a ``.attributes`` dict from the XML."""

    def __repr__(self):
        base = dict.__repr__(self)
        if not hasattr(self, "attributes"):
            return base
        # NB: the "DictElement" label is the historical output format.
        return "DictElement(%s, attributes=%s)" % (base, repr(self.attributes))
# A StructureElement is like a dictionary, but some of its keys can have
# multiple values associated with it. These values are stored in a list
# under each key.
class StructureElement(dict):
    """Dict in which the keys named at construction accumulate values in lists."""

    def __init__(self, keys):
        dict.__init__(self)
        self.listkeys = keys
        # pre-create an empty accumulator list for every multi-valued key
        for key in keys:
            dict.__setitem__(self, key, [])

    def __setitem__(self, key, value):
        if key not in self.listkeys:
            dict.__setitem__(self, key, value)
        else:
            # multi-valued key: append rather than overwrite
            self[key].append(value)

    def __repr__(self):
        base = dict.__repr__(self)
        if not hasattr(self, "attributes"):
            return base
        # NB: the "DictElement" label is the historical output format.
        return "DictElement(%s, attributes=%s)" % (base, repr(self.attributes))
class NotXMLError(ValueError):
    """Raised when the input data do not look like XML at all."""
    def __init__(self, message):
        # Keep the underlying reason for display in __str__.
        self.msg = message
    def __str__(self):
        return "Failed to parse the XML data (%s). Please make sure that the input data are in XML format." % self.msg
class CorruptedXMLError(ValueError):
    """Raised when the data are XML but cannot be parsed (likely corrupted)."""
    def __init__(self, message):
        # Keep the underlying reason for display in __str__.
        self.msg = message
    def __str__(self):
        return "Failed to parse the XML data (%s). Please make sure that the input data are not corrupted." % self.msg
class ValidationError(ValueError):
    """Raised by validating parsers for a tag that the DTD does not define.

    Non-validating parsers never raise this error.  Bio.Entrez.read and
    Bio.Entrez.parse use validating parsers by default; pass validate=False
    to skip undeclared tags instead of raising.
    """
    def __init__(self, name):
        # Offending tag name, reported in __str__.
        self.name = name
    def __str__(self):
        return "Failed to find tag '%s' in the DTD. To skip all tags that are not represented in the DTD, please call Bio.Entrez.read or Bio.Entrez.parse with validate=False." % self.name
class DataHandler(object):
    """Expat-based handler that turns Entrez XML output into Python objects.

    The accompanying DTD (or XSD schema) is parsed first to decide whether
    each XML element becomes a string, integer, list, dictionary, or
    structure.  DTD/XSD files are cached on disk (per-user directory, falling
    back to the files shipped with Bio.Entrez) so each is downloaded at most
    once.
    """
    # NOTE: the class-level code below runs once at import time; it computes
    # the per-user cache directories and creates them on disk.
    import platform
    if platform.system() == 'Windows':
        directory = os.path.join(os.getenv("APPDATA"), "biopython")
    else: # Unix/Linux/Mac
        home = os.path.expanduser('~')
        directory = os.path.join(home, '.config', 'biopython')
        del home
    local_dtd_dir = os.path.join(directory, 'Bio', 'Entrez', 'DTDs')
    local_xsd_dir = os.path.join(directory, 'Bio', 'Entrez', 'XSDs')
    del directory
    del platform
    try:
        os.makedirs(local_dtd_dir) # use exist_ok=True on Python >= 3.2
    except OSError as exception:
        # Check if local_dtd_dir already exists, and that it is a directory.
        # Trying os.makedirs first and then checking for os.path.isdir avoids
        # a race condition.
        if not os.path.isdir(local_dtd_dir):
            raise exception
    try:
        os.makedirs(local_xsd_dir) # use exist_ok=True on Python >= 3.2
    except OSError as exception:
        if not os.path.isdir(local_xsd_dir):
            raise exception
    # Directories with the DTD/XSD files that ship with Bio.Entrez itself.
    from Bio import Entrez
    global_dtd_dir = os.path.join(str(Entrez.__path__[0]), "DTDs")
    global_xsd_dir = os.path.join(str(Entrez.__path__[0]), "XSDs")
    del Entrez
    def __init__(self, validate):
        """Create a handler; ``validate`` enables DTD validation.

        The per-tag lists below (integers, strings, lists, ...) are filled
        in by elementDecl/parse_xsd as the DTD or schema is read, and decide
        the Python type each XML element is converted to.
        """
        self.stack = []
        self.errors = []
        self.integers = []
        self.strings = []
        self.lists = []
        self.dictionaries = []
        self.structures = {}
        self.items = []
        self.dtd_urls = []  # stack of DTD URLs, for resolving relative ones
        self.validating = validate
        self.parser = expat.ParserCreate(namespace_separator=" ")
        self.parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
        self.parser.XmlDeclHandler = self.xmlDeclHandler
        self.is_schema = False
    def read(self, handle):
        """Set up the parser and let it parse the XML results"""
        # HACK: remove Bio._py3k handle conversion, since the Entrez XML parser
        # expects binary data
        if handle.__class__.__name__ == 'EvilHandleHack':
            handle = handle._handle
        if hasattr(handle, "closed") and handle.closed:
            # Should avoid a possible Segmentation Fault, see:
            # http://bugs.python.org/issue4877
            raise IOError("Can't parse a closed handle")
        try:
            self.parser.ParseFile(handle)
        except expat.ExpatError as e:
            if self.parser.StartElementHandler:
                # We saw the initial <!xml declaration, so we can be sure that
                # we are parsing XML data. Most likely, the XML file is
                # corrupted.
                raise CorruptedXMLError(e)
            else:
                # We have not seen the initial <!xml declaration, so probably
                # the input data is not in XML format.
                raise NotXMLError(e)
        try:
            return self.object
        except AttributeError:
            if self.parser.StartElementHandler:
                # We saw the initial <!xml declaration, and expat didn't notice
                # any errors, so self.object should be defined. If not, this is
                # a bug.
                raise RuntimeError("Failed to parse the XML file correctly, possibly due to a bug in Bio.Entrez. Please contact the Biopython developers at biopython-dev@biopython.org for assistance.")
            else:
                # We did not see the initial <!xml declaration, so probably
                # the input data is not in XML format.
                raise NotXMLError("XML declaration not found")
    def parse(self, handle):
        """Parse the XML in ``handle`` incrementally, yielding records.

        The top-level XML element must represent a list; each finished
        top-level record is yielded as soon as it is complete, keeping
        memory usage bounded for large result sets.
        """
        BLOCK = 1024
        while True:
            # Read in another block of the file...
            text = handle.read(BLOCK)
            if not text:
                # We have reached the end of the XML file
                if self.stack:
                    # No more XML data, but there is still some unfinished
                    # business
                    raise CorruptedXMLError("Premature end of XML stream")
                try:
                    for record in self.object:
                        yield record
                except AttributeError:
                    if self.parser.StartElementHandler:
                        # We saw the initial <!xml declaration, and expat
                        # didn't notice any errors, so self.object should be
                        # defined. If not, this is a bug.
                        raise RuntimeError("Failed to parse the XML file correctly, possibly due to a bug in Bio.Entrez. Please contact the Biopython developers at biopython-dev@biopython.org for assistance.")
                    else:
                        # We did not see the initial <!xml declaration, so
                        # probably the input data is not in XML format.
                        raise NotXMLError("XML declaration not found")
                self.parser.Parse("", True)
                self.parser = None
                return
            try:
                self.parser.Parse(text, False)
            except expat.ExpatError as e:
                if self.parser.StartElementHandler:
                    # We saw the initial <!xml declaration, so we can be sure
                    # that we are parsing XML data. Most likely, the XML file
                    # is corrupted.
                    raise CorruptedXMLError(e)
                else:
                    # We have not seen the initial <!xml declaration, so
                    # probably the input data is not in XML format.
                    raise NotXMLError(e)
            if not self.stack:
                # Haven't read enough from the XML file yet
                continue
            records = self.stack[0]
            if not isinstance(records, list):
                raise ValueError("The XML file does not represent a list. Please use Entrez.read instead of Entrez.parse")
            while len(records) > 1: # Then the top record is finished
                record = records.pop(0)
                yield record
    def xmlDeclHandler(self, version, encoding, standalone):
        """Install the content handlers once the XML declaration is seen.

        Deferring handler installation to this point lets read/parse tell
        "corrupted XML" apart from "not XML at all".
        """
        # XML declaration found; set the handlers
        self.parser.StartElementHandler = self.startElementHandler
        self.parser.EndElementHandler = self.endElementHandler
        self.parser.CharacterDataHandler = self.characterDataHandler
        self.parser.ExternalEntityRefHandler = self.externalEntityRefHandler
        self.parser.StartNamespaceDeclHandler = self.startNamespaceDeclHandler
    def startNamespaceDeclHandler(self, prefix, un):
        """Accept XML schema namespaces only; reject all other namespaces."""
        # This is an xml schema
        if "Schema" in un:
            self.is_schema = True
        else:
            raise NotImplementedError("The Bio.Entrez parser cannot handle XML data that make use of XML namespaces")
    def startElementHandler(self, name, attrs):
        """Create the Python object for this element (per the DTD/XSD) and
        push it on the stack."""
        # preprocessing the xml schema
        if self.is_schema:
            if len(attrs) == 1:
                schema = list(attrs.values())[0]
                handle = self.open_xsd_file(os.path.basename(schema))
                # if there is no local xsd file grab the url and parse the file
                if not handle:
                    handle = _urlopen(schema)
                    text = handle.read()
                    self.save_xsd_file(os.path.basename(schema), text)
                    handle.close()
                    self.parse_xsd(ET.fromstring(text))
                else:
                    self.parse_xsd(ET.fromstring(handle.read()))
                    handle.close()
        self.content = ""
        if name in self.lists:
            object = ListElement()
        elif name in self.dictionaries:
            object = DictionaryElement()
        elif name in self.structures:
            object = StructureElement(self.structures[name])
        elif name in self.items: # Only appears in ESummary
            name = str(attrs["Name"]) # convert from Unicode
            del attrs["Name"]
            itemtype = str(attrs["Type"]) # convert from Unicode
            del attrs["Type"]
            if itemtype == "Structure":
                object = DictionaryElement()
            elif name in ("ArticleIds", "History"):
                object = StructureElement(["pubmed", "medline"])
            elif itemtype == "List":
                object = ListElement()
            else:
                object = StringElement()
                object.itemname = name
                object.itemtype = itemtype
        elif name in self.strings + self.errors + self.integers:
            # Leaf elements: remember the attributes; endElementHandler will
            # build the value from the accumulated character data.
            self.attributes = attrs
            return
        else:
            # Element not found in DTD
            if self.validating:
                raise ValidationError(name)
            else:
                # this will not be stored in the record
                object = ""
        if object != "":
            object.tag = name
            if attrs:
                object.attributes = dict(attrs)
            if len(self.stack) != 0:
                current = self.stack[-1]
                try:
                    current.append(object)
                except AttributeError:
                    current[name] = object
        self.stack.append(object)
    def endElementHandler(self, name):
        """Convert the element's accumulated text to its declared type and
        attach it to the enclosing object on the stack."""
        value = self.content
        if name in self.errors:
            if value == "":
                return
            else:
                raise RuntimeError(value)
        elif name in self.integers:
            value = IntegerElement(value)
        elif name in self.strings:
            # Convert Unicode strings to plain strings if possible
            try:
                value = StringElement(value)
            except UnicodeEncodeError:
                value = UnicodeElement(value)
        elif name in self.items:
            self.object = self.stack.pop()
            if self.object.itemtype in ("List", "Structure"):
                return
            elif self.object.itemtype == "Integer" and value:
                value = IntegerElement(value)
            else:
                # Convert Unicode strings to plain strings if possible
                try:
                    value = StringElement(value)
                except UnicodeEncodeError:
                    value = UnicodeElement(value)
            name = self.object.itemname
        else:
            self.object = self.stack.pop()
            value = re.sub(r"[\s]+", "", value)
            if self.is_schema and value:
                self.object.update({'data': value})
            return
        value.tag = name
        if self.attributes:
            value.attributes = dict(self.attributes)
            del self.attributes
        current = self.stack[-1]
        if current != "":
            try:
                current.append(value)
            except AttributeError:
                current[name] = value
    def characterDataHandler(self, content):
        """Accumulate character data for the current element."""
        self.content += content
    def parse_xsd(self, root):
        """Walk an XSD tree, classifying each named element as a dictionary
        (it declares attributes) or a list (it does not)."""
        is_dictionary = False
        name = ""
        for child in root:
            for element in child.getiterator():
                if "element" in element.tag:
                    if "name" in element.attrib:
                        name = element.attrib['name']
                if "attribute" in element.tag:
                    is_dictionary = True
            if is_dictionary:
                self.dictionaries.append(name)
                is_dictionary = False
            else:
                self.lists.append(name)
    def elementDecl(self, name, model):
        """This callback function is called for each element declaration:
        <!ELEMENT name (...)>
        encountered in a DTD. The purpose of this function is to determine
        whether this element should be regarded as a string, integer, list
        dictionary, structure, or error."""
        if name.upper() == "ERROR":
            self.errors.append(name)
            return
        if name == 'Item' and model == (expat.model.XML_CTYPE_MIXED,
                                        expat.model.XML_CQUANT_REP,
                                        None, ((expat.model.XML_CTYPE_NAME,
                                                expat.model.XML_CQUANT_NONE,
                                                'Item',
                                                ()
                                                ),
                                               )
                                        ):
            # Special case. As far as I can tell, this only occurs in the
            # eSummary DTD.
            self.items.append(name)
            return
        # First, remove ignorable parentheses around declarations
        while (model[0] in (expat.model.XML_CTYPE_SEQ,
                            expat.model.XML_CTYPE_CHOICE)
               and model[1] in (expat.model.XML_CQUANT_NONE,
                                expat.model.XML_CQUANT_OPT)
               and len(model[3]) == 1):
            model = model[3][0]
        # PCDATA declarations correspond to strings
        if model[0] in (expat.model.XML_CTYPE_MIXED,
                        expat.model.XML_CTYPE_EMPTY):
            self.strings.append(name)
            return
        # List-type elements
        if (model[0] in (expat.model.XML_CTYPE_CHOICE,
                         expat.model.XML_CTYPE_SEQ) and
            model[1] in (expat.model.XML_CQUANT_PLUS,
                         expat.model.XML_CQUANT_REP)):
            self.lists.append(name)
            return
        # This is the tricky case. Check which keys can occur multiple
        # times. If only one key is possible, and it can occur multiple
        # times, then this is a list. If more than one key is possible,
        # but none of them can occur multiple times, then this is a
        # dictionary. Otherwise, this is a structure.
        # In 'single' and 'multiple', we keep track which keys can occur
        # only once, and which can occur multiple times.
        single = []
        multiple = []
        # The 'count' function is called recursively to make sure all the
        # children in this model are counted. Error keys are ignored;
        # they raise an exception in Python.
        def count(model):
            quantifier, name, children = model[1:]
            if name is None:
                if quantifier in (expat.model.XML_CQUANT_PLUS,
                                  expat.model.XML_CQUANT_REP):
                    for child in children:
                        multiple.append(child[2])
                else:
                    for child in children:
                        count(child)
            elif name.upper() != "ERROR":
                if quantifier in (expat.model.XML_CQUANT_NONE,
                                  expat.model.XML_CQUANT_OPT):
                    single.append(name)
                elif quantifier in (expat.model.XML_CQUANT_PLUS,
                                    expat.model.XML_CQUANT_REP):
                    multiple.append(name)
        count(model)
        if len(single) == 0 and len(multiple) == 1:
            self.lists.append(name)
        elif len(multiple) == 0:
            self.dictionaries.append(name)
        else:
            self.structures.update({name: multiple})
    def open_dtd_file(self, filename):
        """Return a binary handle on a cached DTD file, or None if absent.

        The per-user cache directory is tried first, then the DTDs shipped
        with Bio.Entrez.
        """
        path = os.path.join(DataHandler.local_dtd_dir, filename)
        try:
            handle = open(path, "rb")
        except IOError:
            pass
        else:
            return handle
        path = os.path.join(DataHandler.global_dtd_dir, filename)
        try:
            handle = open(path, "rb")
        except IOError:
            pass
        else:
            return handle
        return None
    def open_xsd_file(self, filename):
        """Return a binary handle on a cached XSD file, or None if absent.

        The per-user cache directory is tried first, then the XSDs shipped
        with Bio.Entrez.
        """
        path = os.path.join(DataHandler.local_xsd_dir, filename)
        try:
            handle = open(path, "rb")
        except IOError:
            pass
        else:
            return handle
        path = os.path.join(DataHandler.global_xsd_dir, filename)
        try:
            handle = open(path, "rb")
        except IOError:
            pass
        else:
            return handle
        return None
    def save_dtd_file(self, filename, text):
        """Cache DTD contents in the per-user directory; warn on failure."""
        path = os.path.join(DataHandler.local_dtd_dir, filename)
        try:
            handle = open(path, "wb")
        except IOError:
            warnings.warn("Failed to save %s at %s" % (filename, path))
        else:
            handle.write(text)
            handle.close()
    def save_xsd_file(self, filename, text):
        """Cache XSD contents in the per-user directory; warn on failure."""
        path = os.path.join(DataHandler.local_xsd_dir, filename)
        try:
            handle = open(path, "wb")
        except IOError:
            warnings.warn("Failed to save %s at %s" % (filename, path))
        else:
            handle.write(text)
            handle.close()
    def externalEntityRefHandler(self, context, base, systemId, publicId):
        """The purpose of this function is to load the DTD locally, instead
        of downloading it from the URL specified in the XML. Using the local
        DTD results in much faster parsing. If the DTD is not found locally,
        we try to download it. If new DTDs become available from NCBI,
        putting them in Bio/Entrez/DTDs will allow the parser to see them."""
        urlinfo = _urlparse(systemId)
        # Following attribute requires Python 2.5+
        # if urlinfo.scheme=='http':
        if urlinfo[0] in ['http', 'https', 'ftp']:
            # Then this is an absolute path to the DTD.
            url = systemId
        elif urlinfo[0] == '':
            # Then this is a relative path to the DTD.
            # Look at the parent URL to find the full path.
            try:
                source = self.dtd_urls[-1]
            except IndexError:
                # Assume the default URL for DTDs if the top parent
                # does not contain an absolute path
                source = "http://www.ncbi.nlm.nih.gov/dtd/"
            else:
                source = os.path.dirname(source)
            # urls always have a forward slash, don't use os.path.join
            url = source.rstrip("/") + "/" + systemId
        else:
            raise ValueError("Unexpected URL scheme %r" % (urlinfo[0]))
        self.dtd_urls.append(url)
        # First, try to load the local version of the DTD file
        location, filename = os.path.split(systemId)
        handle = self.open_dtd_file(filename)
        if not handle:
            # DTD is not available as a local file. Try accessing it through
            # the internet instead.
            try:
                handle = _urlopen(url)
            except IOError:
                raise RuntimeError("Failed to access %s at %s" % (filename, url))
            text = handle.read()
            handle.close()
            self.save_dtd_file(filename, text)
            handle = BytesIO(text)
        parser = self.parser.ExternalEntityParserCreate(context)
        parser.ElementDeclHandler = self.elementDecl
        parser.ParseFile(handle)
        handle.close()
        self.dtd_urls.pop()
        return 1
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/Entrez/Parser.py
|
Python
|
apache-2.0
| 24,770
|
[
"Biopython"
] |
d7632667570d67099604fe34bfab448ca930c7aab748ec8f26ee10c4d26f9e37
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
import logging
import os
from PyQt4 import QtCore, QtGui
from openlp.core.lib import MediaManagerItem, ItemCapabilities, Receiver, SettingsManager, ServiceItemContext, \
Settings, UiStrings, build_icon, check_item_selected, check_directory_exists, create_thumb, translate, \
validate_thumb
from openlp.core.lib.ui import critical_error_message_box
from openlp.core.utils import AppLocation, delete_file, locale_compare, get_images_filter
log = logging.getLogger(__name__)
class ImageMediaItem(MediaManagerItem):
    """
    This is the custom media manager item for images.
    """
    log.info(u'Image Media Item loaded')
    def __init__(self, parent, plugin, icon):
        """Set up the media manager item and enable desktop drag-and-drop."""
        self.IconPath = u'images/image'
        MediaManagerItem.__init__(self, parent, plugin, icon)
        self.quickPreviewAllowed = True
        self.hasSearch = True
        QtCore.QObject.connect(Receiver.get_receiver(), QtCore.SIGNAL(u'live_theme_changed'), self.liveThemeChanged)
        # Allow DnD from the desktop
        self.listView.activateDnD()
    def retranslateUi(self):
        """Set the translated strings for prompts, file masks, and actions."""
        self.onNewPrompt = translate('ImagePlugin.MediaItem',
            'Select Image(s)')
        file_formats = get_images_filter()
        self.onNewFileMasks = u'%s;;%s (*.*) (*)' % (file_formats, UiStrings().AllFiles)
        self.replaceAction.setText(UiStrings().ReplaceBG)
        self.replaceAction.setToolTip(UiStrings().ReplaceLiveBG)
        self.resetAction.setText(UiStrings().ResetBG)
        self.resetAction.setToolTip(UiStrings().ResetLiveBG)
    def requiredIcons(self):
        """Declare which toolbar icons this media item needs."""
        MediaManagerItem.requiredIcons(self)
        self.hasFileIcon = True
        self.hasNewIcon = False
        self.hasEditIcon = False
        self.addToServiceItem = True
    def initialise(self):
        """Prepare the list view and load the saved image list."""
        log.debug(u'initialise')
        self.listView.clear()
        self.listView.setIconSize(QtCore.QSize(88, 50))
        # Thumbnails are cached in the plugin's data directory.
        self.servicePath = os.path.join(AppLocation.get_section_data_path(self.settingsSection), u'thumbnails')
        check_directory_exists(self.servicePath)
        self.loadList(Settings().value(self.settingsSection + u'/images files'), True)
    def addListViewToToolBar(self):
        """Add the list view to the toolbar plus the replace-background action."""
        MediaManagerItem.addListViewToToolBar(self)
        self.listView.addAction(self.replaceAction)
    def addEndHeaderBar(self):
        """Create the replace/reset live background toolbar actions."""
        self.replaceAction = self.toolbar.addToolbarAction(u'replaceAction',
            icon=u':/slides/slide_blank.png', triggers=self.onReplaceClick)
        self.resetAction = self.toolbar.addToolbarAction(u'resetAction',
            icon=u':/system/system_close.png', visible=False, triggers=self.onResetClick)
    def onDeleteClick(self):
        """
        Remove an image item from the list
        """
        # Turn off auto preview triggers.
        self.listView.blockSignals(True)
        if check_item_selected(self.listView, translate('ImagePlugin.MediaItem','You must select an image to delete.')):
            # Delete from the highest row down so indexes stay valid.
            row_list = [item.row() for item in self.listView.selectedIndexes()]
            row_list.sort(reverse=True)
            self.application.set_busy_cursor()
            self.main_window.displayProgressBar(len(row_list))
            for row in row_list:
                text = self.listView.item(row)
                if text:
                    # Remove the cached thumbnail along with the list entry.
                    delete_file(os.path.join(self.servicePath, text.text()))
                self.listView.takeItem(row)
                self.main_window.incrementProgressBar()
            SettingsManager.setValue(self.settingsSection + u'/images files', self.getFileList())
            self.main_window.finishedProgressBar()
            self.application.set_normal_cursor()
        self.listView.blockSignals(False)
    def loadList(self, images, initialLoad=False):
        """Populate the list view with image files, building thumbnails.

        Missing files get a "deleted" icon; the progress bar is only shown
        when this is not the initial load at startup.
        """
        self.application.set_busy_cursor()
        if not initialLoad:
            self.main_window.displayProgressBar(len(images))
        # Sort the images by its filename considering language specific
        # characters.
        images.sort(cmp=locale_compare, key=lambda filename: os.path.split(unicode(filename))[1])
        for imageFile in images:
            filename = os.path.split(unicode(imageFile))[1]
            thumb = os.path.join(self.servicePath, filename)
            if not os.path.exists(unicode(imageFile)):
                icon = build_icon(u':/general/general_delete.png')
            else:
                # Reuse the cached thumbnail when it is still up to date.
                if validate_thumb(unicode(imageFile), thumb):
                    icon = build_icon(thumb)
                else:
                    icon = create_thumb(unicode(imageFile), thumb)
            item_name = QtGui.QListWidgetItem(filename)
            item_name.setIcon(icon)
            item_name.setToolTip(imageFile)
            item_name.setData(QtCore.Qt.UserRole, imageFile)
            self.listView.addItem(item_name)
            if not initialLoad:
                self.main_window.incrementProgressBar()
        if not initialLoad:
            self.main_window.finishedProgressBar()
        self.application.set_normal_cursor()
    def generateSlideData(self, service_item, item=None, xmlVersion=False,
        remote=False, context=ServiceItemContext.Service):
        """Build a service item from the selected images.

        Returns False when nothing is selected or when the images no longer
        exist (optionally asking the user whether to continue with the
        remaining ones); True on success.
        """
        background = QtGui.QColor(Settings().value(self.settingsSection + u'/background color'))
        if item:
            items = [item]
        else:
            items = self.listView.selectedItems()
            if not items:
                return False
        service_item.title = unicode(self.plugin.nameStrings[u'plural'])
        service_item.add_capability(ItemCapabilities.CanMaintain)
        service_item.add_capability(ItemCapabilities.CanPreview)
        service_item.add_capability(ItemCapabilities.CanLoop)
        service_item.add_capability(ItemCapabilities.CanAppend)
        # force a nonexistent theme
        service_item.theme = -1
        missing_items = []
        missing_items_filenames = []
        for bitem in items:
            filename = bitem.data(QtCore.Qt.UserRole)
            if not os.path.exists(filename):
                missing_items.append(bitem)
                missing_items_filenames.append(filename)
        for item in missing_items:
            items.remove(item)
        # We cannot continue, as all images do not exist.
        if not items:
            if not remote:
                critical_error_message_box(
                    translate('ImagePlugin.MediaItem', 'Missing Image(s)'),
                    translate('ImagePlugin.MediaItem', 'The following image(s) no longer exist: %s') %
                    u'\n'.join(missing_items_filenames))
            return False
        # We have missing as well as existing images. We ask what to do.
        elif missing_items and QtGui.QMessageBox.question(self,
            translate('ImagePlugin.MediaItem', 'Missing Image(s)'),
            translate('ImagePlugin.MediaItem', 'The following image(s) no longer exist: %s\n'
            'Do you want to add the other images anyway?') % u'\n'.join(missing_items_filenames),
            QtGui.QMessageBox.StandardButtons(QtGui.QMessageBox.No | QtGui.QMessageBox.Yes)) == QtGui.QMessageBox.No:
            return False
        # Continue with the existing images.
        for bitem in items:
            filename = bitem.data(QtCore.Qt.UserRole)
            name = os.path.split(filename)[1]
            service_item.add_from_image(filename, name, background)
        return True
    def onResetClick(self):
        """
        Called to reset the Live background with the image selected,
        """
        self.resetAction.setVisible(False)
        self.live_controller.display.resetImage()
    def liveThemeChanged(self):
        """
        Triggered by the change of theme in the slide controller
        """
        self.resetAction.setVisible(False)
    def onReplaceClick(self):
        """
        Called to replace Live background with the image selected.
        """
        if check_item_selected(self.listView,
            translate('ImagePlugin.MediaItem', 'You must select an image to replace the background with.')):
            background = QtGui.QColor(Settings().value(self.settingsSection + u'/background color'))
            item = self.listView.selectedIndexes()[0]
            bitem = self.listView.item(item.row())
            filename = bitem.data(QtCore.Qt.UserRole)
            if os.path.exists(filename):
                if self.live_controller.display.directImage(filename, background):
                    self.resetAction.setVisible(True)
                else:
                    critical_error_message_box(UiStrings().LiveBGError,
                        translate('ImagePlugin.MediaItem', 'There was no display item to amend.'))
            else:
                critical_error_message_box(UiStrings().LiveBGError,
                    translate('ImagePlugin.MediaItem', 'There was a problem replacing your background, '
                        'the image file "%s" no longer exists.') % filename)
    def search(self, string, showError):
        """Return [path, filename] pairs whose filename contains ``string``
        (case-insensitive)."""
        files = Settings().value(self.settingsSection + u'/images files')
        results = []
        string = string.lower()
        for file in files:
            filename = os.path.split(unicode(file))[1]
            if filename.lower().find(string) > -1:
                results.append([file, filename])
        return results
|
marmyshev/transitions
|
openlp/plugins/images/lib/mediaitem.py
|
Python
|
gpl-2.0
| 11,415
|
[
"Brian"
] |
5744683bf0ada2f92e877116fafffd989bfb2ad5a286f15c2501807b051c4767
|
# Package OpenMM serialized systems in Folding@Home project directory structure.
#
# John D. Chodera <choderaj@mskcc.org> - 16 Mar 2013
# PARAMETERS
import sys
from ast import literal_eval
# Process only these targets, if specified.
# e.g. -targets '["SRC_HUMAN_PK0_P12931", "ABL1_HUMAN_PK0_P00519"]'
# NOTE(review): only ValueError is caught below.  sys.argv.index raises
# ValueError when '-targets' is absent (the intended "not given" case), but
# if '-targets' is the LAST argument, sys.argv[...+1] raises an uncaught
# IndexError — confirm whether that should also fall back to False.
try:
    process_only_these_targets = literal_eval( sys.argv[ sys.argv.index('-targets') + 1 ] )
except ValueError:
    process_only_these_targets = False
if process_only_these_targets:
    print 'Processing only these targets:'
    print process_only_these_targets
# Verbose output
verbose = True
# Number of clones
nclones = 10
# If True, try to tgz the whole thing.
archive = False
#
# PACKAGE RUN
#
def generateRun(project_directory, source_directory, run, nclones, verbose=False):
    """
    Build Folding@Home RUN and CLONE subdirectories from (possibly compressed) OpenMM serialized XML files.
    ARGUMENTS
    project_directory (string) - base project directory to place RUN in
    source_directory (string) - source directory for OpenMM serialized XML files
    run (int) - run index
    nclones (int) - number of clones to generate

    Side effects: creates RUN%d under project_directory containing template.txt,
    sequence-identity.txt, system.xml, integrator.xml, protein.pdb, system.pdb
    and one state%d.xml per clone.  This function is designed to run inside a
    multiprocessing worker: all exceptions are caught, printed, and swallowed
    so one failed RUN does not abort the pool.
    """
    if verbose: print "Building RUN %d" % run
    try:
        import simtk.openmm
        import os, os.path, shutil
        # Determine directory and pathnames.
        rundir = os.path.join(project_directory, 'RUN%d' % run)
        template_filename = os.path.join(rundir, 'template.txt')
        seqid_filename = os.path.join(rundir, 'sequence-identity.txt')
        system_filename = os.path.join(rundir, 'system.xml')
        integrator_filename = os.path.join(rundir, 'integrator.xml')
        protein_structure_filename = os.path.join(rundir, 'protein.pdb')
        system_structure_filename = os.path.join(rundir, 'system.pdb')
        protein_structure_filename_source = os.path.join(source_directory, 'implicit-refined.pdb')
        system_structure_filename_source = os.path.join(source_directory, 'explicit-refined.pdb')
        # Return if this directory has already been set up.
        # NOTE(review): if rundir exists but is incomplete, execution falls
        # through and regenerates the missing files in place.
        if os.path.exists(rundir):
            if os.path.exists(template_filename) and os.path.exists(seqid_filename) and os.path.exists(system_filename) and os.path.exists(integrator_filename) and os.path.exists(protein_structure_filename) and os.path.exists(system_structure_filename): return
        else:
            # Construct run directory if it does not exist.
            os.makedirs(rundir)
        # Write template information.
        [filepath, template_name] = os.path.split(source_directory)
        outfile = open(template_filename, 'w')
        outfile.write(template_name + '\n')
        outfile.close()
        # Copy the protein and system structure pdbs
        shutil.copyfile(protein_structure_filename_source, protein_structure_filename)
        shutil.copyfile(system_structure_filename_source, system_structure_filename)
        # Read system, integrator, and state.
        def readFileContents(filename):
            # Read a source file, transparently falling back to its .gz twin.
            import os.path
            fullpath = os.path.join(source_directory, filename)
            if os.path.exists(fullpath):
                infile = open(fullpath, 'r')
            elif os.path.exists(fullpath+'.gz'):
                import gzip
                infile = gzip.open(fullpath+'.gz', 'r')
            else:
                raise IOError('File %s not found' % filename)
            contents = infile.read()
            infile.close()
            return contents
        def writeFileContents(filename, contents):
            # Write a string out to a file, overwriting any existing content.
            outfile = open(filename, 'w')
            outfile.write(contents)
            outfile.close()
        import simtk.openmm
        system = simtk.openmm.XmlSerializer.deserialize(readFileContents('explicit-system.xml'))
        state = simtk.openmm.XmlSerializer.deserialize(readFileContents('explicit-state.xml'))
        # Substitute default box vectors.
        box_vectors = state.getPeriodicBoxVectors()
        system.setDefaultPeriodicBoxVectors(*box_vectors)
        # Write sequence identity.
        contents = readFileContents(os.path.join(source_directory, 'sequence-identity.txt'))
        writeFileContents(seqid_filename, contents)
        # Integrator settings.
        import simtk.unit as units
        constraint_tolerance = 1.0e-5
        timestep = 2.0 * units.femtoseconds
        collision_rate = 5.0 / units.picosecond
        temperature = 300.0 * units.kelvin
        # Create new integrator to use.
        integrator = simtk.openmm.LangevinIntegrator(temperature, collision_rate, timestep)
        # TODO: Make sure MonteCarloBarostat temperature matches set temperature.
        # Serialize System.
        writeFileContents(system_filename, simtk.openmm.XmlSerializer.serialize(system))
        # Serialize Integrator
        writeFileContents(integrator_filename, simtk.openmm.XmlSerializer.serialize(integrator))
        # Create Context so we can randomize velocities.
        # The Reference platform is used so workers do not compete for GPUs.
        platform = simtk.openmm.Platform.getPlatformByName('Reference')
        context = simtk.openmm.Context(system, integrator, platform)
        context.setPositions(state.getPositions())
        context.setVelocities(state.getVelocities())
        box_vectors = state.getPeriodicBoxVectors()
        context.setPeriodicBoxVectors(*box_vectors)
        # Create clones with different random initial velocities.
        for clone_index in range(nclones):
            context.setVelocitiesToTemperature(temperature)
            state = context.getState(getPositions=True, getVelocities=True, getForces=True, getEnergy=True, getParameters=True, enforcePeriodicBox=True)
            state_filename = os.path.join(rundir, 'state%d.xml' % clone_index)
            writeFileContents(state_filename, simtk.openmm.XmlSerializer.serialize(state))
        # Clean up.
        del context, integrator, state, system
    except Exception as e:
        # Deliberate best-effort: print the traceback and return so the
        # multiprocessing pool continues with the remaining RUNs.
        import traceback
        print traceback.format_exc()
        print str(e)
        return
if __name__ == '__main__':
    #
    # GET ABSOLUTE PATHS
    #
    import os.path
    # Input files.
    targets_directory = os.path.abspath("targets") # target sequences for modeling
    templates_directory = os.path.abspath("templates") # template structures for use in modeling
    models_directory = os.path.abspath("models")
    # Output files.
    projects_directory = os.path.abspath("projects") # FAH projects directory
    #
    # READ TEMPLATE AND TARGET INDICES
    #
    targets_index_filename = os.path.join(targets_directory, 'targets.txt')
    infile = open(targets_index_filename, 'r')
    targets = [ line.strip() for line in infile ]
    infile.close()
    print '%d target sequences' % len(targets)
    templates_index_filename = os.path.join(templates_directory, 'templates.txt')
    infile = open(templates_index_filename, 'r')
    templates = [ line.strip() for line in infile ]
    infile.close()
    print '%d template structures' % len(templates)
    #
    # SET UP PROJECTS
    #
    import os
    if not os.path.exists(projects_directory):
        os.makedirs(projects_directory)
    for target in targets:
        # Process only specified targets if directed.
        if process_only_these_targets and (target not in process_only_these_targets): continue
        target_directory = os.path.join(models_directory, target)
        if not os.path.exists(target_directory): continue
        print "-------------------------------------------------------------------------"
        print "Building FAH OpenMM project for target %s" % (target)
        print "-------------------------------------------------------------------------"
        #
        # BUILD A LIST OF VALID TEMPLATES
        #
        # Process all templates.
        if verbose: print "Building list of valid templates..."
        import os.path
        valid_templates = list()
        for template in templates:
            # Check to make sure all files needed are present.
            # Each XML file may exist either uncompressed or gzipped.
            is_valid = True
            filenames = ['explicit-system.xml', 'explicit-state.xml', 'explicit-integrator.xml']
            for filename in filenames:
                fullpath = os.path.join(target_directory, template, filename)
                if not (os.path.exists(fullpath) or os.path.exists(fullpath+'.gz')):
                    is_valid = False
            # Exclude those that are not unique by clustering.
            unique_by_clustering = os.path.exists(os.path.join(target_directory, template, 'unique_by_clustering'))
            if not unique_by_clustering:
                is_valid = False
            # TODO: Exclude if final potential energies from explicit solvent equilibration are too high.
            # Append if valid.
            if is_valid:
                valid_templates.append(template)
        nvalid = len(valid_templates)
        if verbose: print "%d valid unique initial starting conditions found" % nvalid
        #
        # SORT BY SEQUENCE IDENTITY
        #
        # Higher sequence identity templates become lower-numbered RUNs.
        if verbose: print "Sorting templates in order of decreasing sequence identity..."
        import numpy
        sequence_identities = numpy.zeros([nvalid], numpy.float32)
        for (template_index, template) in enumerate(valid_templates):
            filename = os.path.join(target_directory, template, 'sequence-identity.txt')
            infile = open(filename, 'r')
            contents = infile.readline().strip()
            infile.close()
            sequence_identity = float(contents)
            sequence_identities[template_index] = sequence_identity
        sorted_indices = numpy.argsort(-sequence_identities)
        valid_templates = [ valid_templates[index] for index in sorted_indices ]
        if verbose:
            print "Sorted"
            print sequence_identities[sorted_indices]
        #
        # CREATE PROJECT DIRECTORY
        #
        project_directory = os.path.join(projects_directory, target)
        # NOTE(review): this checks/creates projects_directory (already created
        # above), not project_directory — looks like it was meant to create the
        # per-target directory.  Harmless in practice because generateRun's
        # os.makedirs(rundir) creates missing parents, but confirm the intent.
        if not os.path.exists(projects_directory):
            os.makedirs(projects_directory)
        #
        # BUILD RUNS IN PARALLEL
        #
        if verbose: print "Building RUNs in parallel..."
        import multiprocessing
        pool = multiprocessing.Pool()
        results = list()
        for (run_index, template) in enumerate(valid_templates):
            source_directory = os.path.join(target_directory, template)
            result = pool.apply_async(generateRun, args=(project_directory, source_directory, run_index, nclones, verbose))
            results.append(result)
        pool.close()
        pool.join()
        #
        # ARCHIVE
        #
        if archive:
            if verbose: print "Generating archive ..."
            # 'commands' is Python 2 only; subprocess replaces it on Python 3.
            import commands
            archive_filename = os.path.join(projects_directory, target + '.tgz')
            project_directory = os.path.join(projects_directory, target)
            commands.getoutput('tar zcf %s %s' % (archive_filename, project_directory))
|
choderalab/Ensembler2
|
scripts/attic/package-for-fah-openmm.py
|
Python
|
gpl-2.0
| 11,059
|
[
"OpenMM"
] |
e2e33e6c00459a78fcbfaa9b339810878cad487496bdf589bdafe9e26f5a9659
|
import theano
import theano.tensor as T
# from theano.tensor.shared_randomstreams import RandomStreams
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams # much faster
import numpy as np
# Fixed seed so sampling is reproducible across runs; the theano RNG is
# seeded from the numpy RNG so both stay in sync.
numpy_rng = np.random.RandomState(123)
theano_rng = RandomStreams(numpy_rng.randint(2**30))
## samplers: each function maps a parameter tensor to a same-shaped tensor of samples
def bernoulli(a):
    """Draw 0/1 samples; *a* holds the elementwise success probabilities."""
    return theano_rng.binomial(n=1, p=a, size=a.shape, dtype=theano.config.floatX)
def gaussian(a, var=1.0):
    """Draw normal samples with mean *a* and variance *var* (not std or precision)."""
    return theano_rng.normal(size=a.shape, avg=a, std=T.sqrt(var), dtype=theano.config.floatX)
def multinomial(a):
    """Draw one-hot samples along the last axis of *a*.

    Axes of *a*: 0 = minibatches, 1 = units, 2 = states.
    """
    # theano_rng.multinomial (like theano.nnet.softmax) expects a 2D matrix of
    # shape (minibatches * units, states), so fold the first two axes together.
    flat = a.reshape((a.shape[0] * a.shape[1], a.shape[2]))
    samples = theano_rng.multinomial(n=1, pvals=flat, dtype=theano.config.floatX)
    # restore the original 3D shape
    return samples.reshape(a.shape)
def exponential(a):
    """Draw exponential samples with rate *a* (inverse-CDF method)."""
    u = theano_rng.uniform(size=a.shape, dtype=theano.config.floatX)
    return (-1 / a) * T.log(1 - u)
def truncated_exponential(a, maximum=1.0):
    """Draw exponential samples with rate *a*, truncated to [0, maximum]."""
    u = theano_rng.uniform(size=a.shape, dtype=theano.config.floatX)
    return (-1 / a) * T.log(1 - u * (1 - T.exp(-a * maximum)))
def truncated_exponential_mean(a, maximum=1.0):
    """Mean of the truncated exponential with rate *a* on [0, maximum].

    The closed form (1/a) + maximum/(1 - exp(maximum*a)) is numerically
    unstable near a = 0 (even a = 0.001 is already problematic), so a
    Taylor expansion around 0 is used for small |a| instead.
    """
    exact = (1 / a) + (maximum / (1 - T.exp(maximum * a)))
    # Series around a = 0; the next term (a**7 / 1209600) adds no useful accuracy.
    series = 0.5 - (1. / 12) * a + (1. / 720) * a ** 3 - (1. / 30240) * a ** 5
    return T.switch(T.abs_(a) > 0.5, exact, series)
def laplacian(b, mu=0.0):
    """Draw Laplace samples with scale *b* and location *mu*.

    NOTE: the Laplace distribution is only in the exponential family
    when mu == 0.
    """
    u = theano_rng.uniform(size=b.shape, dtype=theano.config.floatX)
    return mu - b * T.sgn(u - 0.5) * T.log(1 - 2 * T.abs_(u - 0.5))
## approximate gamma sampler
# Two approximations for the gamma function are defined.
# Windschitl is very fast, but problematic close to 0, and using the reflection formula
# causes discontinuities.
# Lanczos on the other hand is extremely accurate, but slower.
def _log_gamma_windschitl(z):
    """Compute log(gamma(z)) with Windschitl's approximation (fast, poor near 0)."""
    inner = z * T.sinh(1 / z) + 1 / (810 * (z ** 6))
    return 0.5 * (T.log(2 * np.pi) - T.log(z) + z * (2 * T.log(z) - 2 + T.log(inner)))
def _log_gamma_ratio_windschitl(z, k):
    """Compute log(gamma(z + k) / gamma(z)) with Windschitl's approximation."""
    return _log_gamma_windschitl(z + k) - _log_gamma_windschitl(z)
def _log_gamma_lanczos(z):
    """Compute log(gamma(z)) with the Lanczos approximation (accurate, slower)."""
    # optimised by nouiz. thanks!
    assert z.dtype.startswith("float")
    # The reflection formula is applied for all z < 0.5 (not just negative z)
    # to improve accuracy in that region.
    reflected = 1 - z
    # T.switch evaluates both branches, so for z > 1 the reflected path would
    # see a negative argument and misbehave. Clamp those entries to a dummy
    # value of 1; the switch at the end discards them anyway.
    reflected = T.switch(reflected < 0, 1, reflected)
    log_pi = np.asarray(np.log(np.pi), dtype=z.dtype)
    via_reflection = log_pi - T.log(T.sin(np.pi * z)) - _log_gamma_lanczos_sub(reflected)
    direct = _log_gamma_lanczos_sub(z)
    return T.switch(z < 0.5, via_reflection, direct)
def _log_gamma_lanczos_sub(z):  # expanded version
    """Core of the Lanczos approximation of log(gamma(z)), valid for z >= 0.5.

    Fix: the original also computed ``pi = np.asarray(np.pi, dtype=z.dtype)``
    which was never used; the dead local has been removed.
    """
    # optimised by nouiz. thanks!
    # Coefficients used by the GNU Scientific Library.
    # Note that vectorising this sum and using .sum() turns out to be really
    # slow, possibly because the dimension across which is summed is tiny —
    # hence the explicit unrolled loop.
    g = 7
    p = np.array([0.99999999999980993, 676.5203681218851, -1259.1392167224028,
                  771.32342877765313, -176.61502916214059, 12.507343278686905,
                  -0.13857109526572012, 9.9843695780195716e-6,
                  1.5056327351493116e-7], dtype=z.dtype)
    z = z - 1
    x = p[0]
    for i in range(1, g + 2):
        x += p[i] / (z + i)
    t = z + g + 0.5
    log_sqrt_2pi = np.asarray(np.log(np.sqrt(2 * np.pi)), dtype=z.dtype)
    return log_sqrt_2pi + (z + 0.5) * T.log(t) - t + T.log(x)
def _log_gamma_ratio_lanczos(z, k):
    """Compute log(gamma(z + k) / gamma(z)) with the Lanczos approximation."""
    return _log_gamma_lanczos(z + k) - _log_gamma_lanczos(z)
def gamma_approx(k, theta=1):
    """Sample approximately from gamma(k, theta) via Wilson-Hilferty.

    The gamma function itself is also approximated (Lanczos), so the whole
    computation can run on the GPU.
    """
    lmbda = 1 / 3.0  # exponent prescribed by Wilson and Hilferty
    mu = T.exp(_log_gamma_ratio_lanczos(k, lmbda))
    sigma = T.sqrt(T.exp(_log_gamma_ratio_lanczos(k, 2 * lmbda)) - mu ** 2)
    normal_samples = theano_rng.normal(size=k.shape, avg=mu, std=sigma, dtype=theano.config.floatX)
    # T.abs_ is technically incorrect, but without it the cube below can yield
    # negative samples, which the gamma distribution cannot produce. For very
    # small shape parameters k the sample distribution is roughly symmetric
    # around 0, so folding the negative half onto the positive half still
    # gives a decent approximation.
    return theta * T.abs_(normal_samples ** 3)
|
gupta-abhay/morb-theano
|
samplers.py
|
Python
|
unlicense
| 6,056
|
[
"Gaussian"
] |
088d605e08905200462883a15cc13748dacfc221cb71051d2f15a0c79c3e82a8
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from ansible import context
from ansible.errors import AnsibleError
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.token import GalaxyToken
from ansible.utils import context_objects as co
@pytest.fixture(autouse=True)
def reset_cli_args():
    """Reset the global CLI-args singleton around every test.

    Fix: the fixture previously passed ``autouse='function'``, confusing the
    *scope* value with the boolean *autouse* flag; it only worked because a
    non-empty string is truthy. ``autouse=True`` states the intent, and the
    scope already defaults to 'function'.
    """
    co.GlobalCLIArgs._Singleton__instance = None
    # Required to initialise the GalaxyAPI object
    context.CLIARGS._store = {'ignore_certs': False}
    yield
    co.GlobalCLIArgs._Singleton__instance = None
def test_api_no_auth():
    """A client without credentials produces an empty auth header when not required."""
    galaxy_api = GalaxyAPI(None, "test", "https://galaxy.ansible.com")
    assert galaxy_api._auth_header(required=False) == {}
def test_api_no_auth_but_required():
    """Requesting a required auth header without credentials raises AnsibleError."""
    expected = ("No access token or username set. A token can be set with --api-key, with 'ansible-galaxy login', "
                "or set in ansible.cfg.")
    galaxy_api = GalaxyAPI(None, "test", "https://galaxy.ansible.com")
    with pytest.raises(AnsibleError, match=expected):
        galaxy_api._auth_header()
def test_api_token_auth():
    """A GalaxyToken yields a 'Token <value>' Authorization header."""
    galaxy_api = GalaxyAPI(None, "test", "https://galaxy.ansible.com",
                           token=GalaxyToken(token=u"my_token"))
    assert galaxy_api._auth_header() == {'Authorization': 'Token my_token'}
def test_api_basic_auth_password():
    """Username + password yield a base64 Basic Authorization header."""
    galaxy_api = GalaxyAPI(None, "test", "https://galaxy.ansible.com", username=u"user", password=u"pass")
    header = galaxy_api._auth_header()
    assert header == {'Authorization': 'Basic dXNlcjpwYXNz'}
def test_api_basic_auth_no_password():
    """A username alone still yields a Basic header with an empty password."""
    galaxy_api = GalaxyAPI(None, "test", "https://galaxy.ansible.com", username=u"user")
    header = galaxy_api._auth_header()
    assert header == {'Authorization': 'Basic dXNlcjo='}
|
amenonsen/ansible
|
test/units/galaxy/test_api.py
|
Python
|
gpl-3.0
| 1,934
|
[
"Galaxy"
] |
4065f149ad36517e56a06361e48474b9b2aa089cba16a878359e127eb022d393
|
import pysam
"""
This script is for filtering out soft-clipped reads that are speciously mapped.
It is of particular concern especially if no attempt has been made to filter
reads during mapping for duplicate regions such as are found between chloroplast
and mitochondrial regions. Even if filtering is applied, this filter will remove
soft clipped reads that are suspcious.
"""
import logging
import getopt
import sys
# Map verbosity names to stdlib logging levels.
# NOTE(review): LEVELS appears unused in this file — confirm before removing.
LEVELS = {'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
# Default to WARNING; the debug line below is only visible if the level is lowered.
logging.basicConfig(level=logging.WARNING)
logging.debug("\tLogging level is set to debug\n")
def usage():
    """Print the command-line help text and return it as a string."""
    help_text = """
pysam_cigar_filter.py -s or --sam_file <sam or bam file>
-l --length <Minimum acceptable match length int default=20>
-o --out_prefix <out_prefix>
-h --help [returns help option]
"""
    print(help_text)
    return help_text
def messages(case, variable):
    """Build the status message for *case*; print progress reports.

    Parameters
    ----------
    case : str
        "Reference_Trouble" (repeated reference name in the SAM header;
        *variable* is the offending name) or "mapped_reads" (*variable* is
        [total_read_counter, mapped_read_counter]).

    Returns
    -------
    str or None
        The formatted message, or None for an unknown case.

    Fix: the "Reference_Trouble" branch built its message but the function
    never returned it, so ``assert ..., messages("Reference_Trouble", name)``
    in alignment_file() always got None as its failure text.
    """
    message = None
    if case == "Reference_Trouble":
        message = """
There are repeated reference names in sam header.
This must be fixed. The repeated name is : %s
""" % (variable)
    elif case == "mapped_reads":
        total_read_counter, mapped_read_counter = variable
        message = """
Total of %i reads processed
Total of %i reads excluded because of soft clipping
Total of %i reads included.
""" % (total_read_counter, (total_read_counter - mapped_read_counter), mapped_read_counter)
        print(message)
    return message
def main_argv_parse(argv):
    """Parse the command-line arguments for the CIGAR filter.

    Parameters
    ----------
    argv : list of str
        Full argument vector (argv[0] is the program name).

    Returns
    -------
    dict
        Keys: 'output_prefix' (str), 'sam_file' (str), 'length' (int,
        default 20).

    Fix: the short-option spec was "h:s:l:o:" (and "help=" for the long
    form), which made -h/--help demand an argument, so asking for help in a
    mixed option list raised getopt.error instead of printing usage. -h
    takes no argument.
    """
    logging.debug("inside main_argv_parse sys.argv: %s" % (argv[1:]))
    argv_dict = {"output_prefix": '', "sam_file": '', "length": 20}
    # Fast path: a lone -h/--help prints usage and exits cleanly.
    if len(argv[1:]) == 1 and (argv[1] == '-h' or argv[1] == '--help'):
        usage()
        exit(0)
    try:
        opts, args = getopt.gnu_getopt(argv[1:], "hs:l:o:",
                                       ["help", "output_prefix=", "sam_file=", "length="])
        logging.debug("opts: %s\nargs: %s " % (opts, args))
    except getopt.error:
        logging.critical("getopt.error argv: %s" % (" ".join(argv)))
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-o", "--output_prefix"):
            argv_dict["output_prefix"] = arg
        elif opt in ("-s", "--sam_file"):
            argv_dict["sam_file"] = arg
        elif opt in ("-l", "--length"):
            argv_dict["length"] = int(arg)
        elif opt in ("-h", "--help"):
            usage()
            exit(0)
        else:
            print("\n Option not recognized %s\n\n" % (opt))
            usage()
            sys.exit(1)
    return argv_dict
def alignment_file(sam):
    """Open *sam* (.sam or .bam) and build per-reference bookkeeping dicts.

    Returns [handle, ref_len_dict, max_ref_pos, min_ref_pos], where
    ref_len_dict maps reference name -> sequence length, and max_ref_pos /
    min_ref_pos map reference name -> [position, [reads...]] trackers used
    to keep candidate contig-edge reads.
    """
    extension = sam.split(".")[-1]
    if extension == "sam":
        sam_file = pysam.AlignmentFile(sam, 'r')
    elif extension == "bam":
        sam_file = pysam.AlignmentFile(sam, 'rb')
    else:
        print("\n\n\tNo sam or bam extension detected for %s\n\n" % (sam))
        usage()
        exit(1)
    ref_len_dict = {}
    # tracker layout: {ref_name: [current extreme position, [reads at it]]}
    max_ref_pos = {}
    min_ref_pos = {}
    for SQ in sam_file.header["SQ"]:
        # SQ["SN"] is the reference name in the header dict, SQ["LN"] its length.
        # A repeated name in the header is fatal.
        assert (SQ["SN"] not in ref_len_dict), messages("Reference_Trouble", SQ["SN"])
        ref_len_dict[SQ["SN"]] = SQ["LN"]
        max_ref_pos[SQ["SN"]] = [20, []]
        min_ref_pos[SQ["SN"]] = [1000, []]
        # One loop fills both trackers since they share the same key set.
    return [sam_file, ref_len_dict, max_ref_pos, min_ref_pos]
def cigar_read_filter(read, length,ref_len_dict , max_ref_pos, min_ref_pos, cutoff=5):
"""
Decide whether a read passes the soft-clip filter.

Returns the read itself when it passes, None otherwise. As a side effect,
candidate contig-edge reads are stored in max_ref_pos / min_ref_pos
(ref name -> [position, [reads]]) for a later second pass.

BAM CIGAR operation ids used by pysam (we only test for 4 and 0):
cigartuples returns a list of tuples formatted (ID_Field, Len)
M BAM_CMATCH 0
I BAM_CINS 1
D BAM_CDEL 2
N BAM_CREF_SKIP 3
S BAM_CSOFT_CLIP 4
H BAM_CHARD_CLIP 5
P BAM_CPAD 6
= BAM_CEQUAL 7
X BAM_CDIFF 8
B BAM_CBACK 9

*cutoff* is the number of soft-clipped bases tolerated at a single end
(small clips are treated as leftover adapter and allowed).
"""
ret_read=False
# Reads shorter than the minimum aligned length are dropped outright.
if length <= read.reference_length :
soft_clip=4
# NOTE(review): `match` is assigned but never used in this function.
match=0
cigar=read.cigartuples
start=cigar[0]
end=cigar[-1]
# soft clippling by definition occurs at either end.
# if it occurs at both ends it will be excluded as poor quality mapping.
# soft clipping will only be allowed on the first or last mapped base pair.
if (start[0] == soft_clip and end[0]== soft_clip ):
pass
# if soft clipping is at the end for the last base, we want to allow allow that for last base.
elif end[0] == soft_clip :
if end[1] < cutoff:
ret_read=True
elif max_ref_pos[read.reference_name][0]< read.reference_end:
# New right-most end seen for this contig: restart the candidate list.
max_ref_pos[read.reference_name]=[read.reference_end,[read]]
elif max_ref_pos[read.reference_name][0]== read.reference_end:
# Ties with the current right-most end join the candidate list.
max_ref_pos[read.reference_name][1].append(read)
else:
pass
# if the read is soft clipped at the 3prime end less than the
# the maximum there is nothing to do with.
# if reads do not start mapping at the first base of the contig we want to be able to catch
# the reads that are the very first ones mapped and allow soft clipping.
elif start[0]== soft_clip :
if start[1] < cutoff:
ret_read=True
elif read.reference_name in min_ref_pos:
if read.reference_start == 0:
# A read starting at position 0 proves the absolute minimum;
# stop tracking this contig's left edge.
ret_read=True
del min_ref_pos[read.reference_name]
logging.debug("Absolute Minimum Found for %s == 0" %(read.reference_name))
elif read.reference_start < min_ref_pos[read.reference_name][0]:
logging.debug("New Minimum found for %s == new %i old %i " %(read.reference_name, read.reference_start, min_ref_pos[read.reference_name][0]))
min_ref_pos[read.reference_name]=[read.reference_start,[read]]
elif read.reference_start == min_ref_pos[read.reference_name][0] :
# print(read)
min_ref_pos[read.reference_name][1].append(read)
elif read.reference_name not in min_ref_pos and read.reference_start == 0 :
ret_read=True
else:
# No soft clipping at either end: the read passes.
ret_read=True
if ret_read is True:
return read
else :
return None
def out_from_read_dict(out, read_dict, mapped_read_counter):
    """Write every read stored in *read_dict* to *out*.

    *read_dict* maps contig -> [position, [reads...]]. Returns
    *mapped_read_counter* incremented by the number of reads written.
    """
    for contig in read_dict:
        position_and_reads = read_dict[contig]
        for read in position_and_reads[1]:
            out.write(read)
            mapped_read_counter += 1
    return mapped_read_counter
def soft_clip_filter(sam_file, out, length, ref_len_dict, max_ref_pos, min_ref_pos):
    """Iterate *sam_file* and write reads that pass the soft-clip filter to *out*.

    Reads soft-clipped at both ends are dropped; one end may be soft-clipped
    up to 5 bp to allow for incompletely removed adapters (forcing a global
    alignment there would introduce artificial noise and poorly called
    bases). Candidate contig-edge reads collected in min_ref_pos /
    max_ref_pos are flushed at the end, and a summary message is printed.

    Fix: the pass test was ``if out_read is not None != 0``, a chained
    comparison that only worked by accident — it evaluates as
    ``(out_read is not None) and (None != 0)``. It is now the intended
    ``out_read is not None``.
    """
    mapped_read_counter = 0
    total_read_counter = 0
    for read in sam_file:
        total_read_counter += 1
        out_read = cigar_read_filter(read, length, ref_len_dict, max_ref_pos, min_ref_pos)
        if out_read is not None:
            logging.debug("read passed")
            out.write(out_read)
            mapped_read_counter += 1
    # Flush the reads held back as contig-edge candidates.
    mapped_read_counter = out_from_read_dict(out, min_ref_pos, mapped_read_counter)
    mapped_read_counter = out_from_read_dict(out, max_ref_pos, mapped_read_counter)
    messages("mapped_reads", [total_read_counter, mapped_read_counter])
if __name__ == "__main__" :
# Parse CLI arguments, open the alignment, filter, then sort the output BAM.
argv=main_argv_parse(sys.argv)
logging.debug("argv :")
logging.debug(argv)
sam_file , ref_len_dict, max_ref_pos, min_ref_pos = alignment_file(argv["sam_file"])
logging.debug("argv[\"length\"]")
logging.debug(argv["length"])
# Output BAM reuses the input header so references stay consistent.
out_bam=pysam.AlignmentFile(str(argv["output_prefix"])+".bam", "wb", header=sam_file.header)
soft_clip_filter( sam_file, out_bam ,argv["length"], ref_len_dict, max_ref_pos, min_ref_pos)
out_bam.close()
sam_file.close()
# Coordinate-sort the filtered BAM into sorted_<prefix>.bam.
pysam.sort("-o", "sorted_"+str(argv["output_prefix"])+".bam" , str(argv["output_prefix"])+".bam" )
|
NDHall/pysam_tools
|
cigar_filter/pysam_cigar_filter.py
|
Python
|
mit
| 9,273
|
[
"pysam"
] |
9152173b3a40c51f4820333a537f51bfe2fb9bd4f6a30a94214723344ad7784a
|
# nmda.py ---
#
# Filename: nmda.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Wed Mar 17 14:07:20 2010 (+0530)
# Version:
# Last-Updated: Sat Oct 15 19:08:44 2011 (+0530)
# By: subha
# Update #: 292
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
import unittest
import uuid
import moose
# numpy is optional; it is only needed for the quantitative comparison
# against the NEURON reference trace in testGkChanges.
has_numpy = True
try:
import numpy
except ImportError:
has_numpy = False
# NOTE(review): low-precision value of pi; math.pi would be more accurate.
pi = 3.141592
class TestNMDAChan(unittest.TestCase):
"""Test the MOOSE NMDAChan object against a NEURON reference simulation.

Builds two passive compartments connected by an NMDA synapse, drives the
presynaptic one with a current pulse, and compares the postsynaptic Vm
trace with data recorded from an equivalent NEURON model
(neuron_nmda.dat).
"""
def __init__(self, *args):
unittest.TestCase.__init__(self, *args)
self.testId = 0
# simulation settings
self.simdt = 1e-5
self.simtime = 0.5
# compartment properties
self.dia = 15e-6
self.len = 20e-6
self.ra = 250.0 * 1e-2
self.g_pas = 2e-5 * 1e4
self.e_pas = -65e-3
self.cm = 0.9e-6 * 1e4
# stimulus for presynaptic compartment
self.stim_amp = 0.1e-9
self.stim_dur = 20e-3
self.stim_delay = 20e-3
# parameters for the NMDA channel
self.tau1 = 130.5e-3
self.tau2 = 5e-3
self.MgConc = 1.5
self.saturation = 0.25 # NMDA_saturation_fact. This is multiplied with weight of the NMDA object (equivalent to Gbar in MOOSE.
self.NMDA_weight = 0.25e-3 * 1e-6 # uS->S : This is the weight specified for the NetConn object in NEURON.
self.Gbar = 1.0e-4 # Not to be used except for calculating saturation
def setUp(self):
# A fresh uuid-named container per test keeps MOOSE elements isolated.
self.testId = uuid.uuid4().int
self.container = moose.Neutral('testNMDA_%d' % (self.testId))
self.nmda = moose.NMDAChan('nmda', self.container)
self.nmda_gk = moose.Table('nmda_gk', self.container)
self.nmda_gk.stepMode = 3
self.nmda_gk.connect('inputRequest', self.nmda, 'Gk')
self.nmda_unblocked = moose.Table('nmda_unblocked', self.container)
self.nmda_unblocked.stepMode = 3
self.nmda_unblocked.connect('inputRequest', self.nmda, 'unblocked')
def setParameters(self):
# Copy the channel parameters declared in __init__ onto the MOOSE object.
self.nmda.tau1 = self.tau1
self.nmda.tau2 = self.tau2
self.nmda.Gbar = self.Gbar
self.nmda.MgConc = self.MgConc
self.nmda.saturation = self.saturation
def testSetGet(self):
# Round-trip check: every parameter written is read back unchanged.
self.setParameters()
self.assertAlmostEqual(self.nmda.tau1, self.tau1)
self.assertAlmostEqual(self.nmda.tau2, self.tau2)
self.assertAlmostEqual(self.nmda.Gbar, self.Gbar)
self.assertAlmostEqual(self.nmda.MgConc, self.MgConc)
self.assertAlmostEqual(self.nmda.saturation, self.saturation)
def setupNetwork(self):
# somaA (stimulated) --spike--> NMDA channel on somaB.
self.setParameters()
self.somaA = moose.Compartment('a', self.container)
self.somaA.Rm = 1.0 /(self.g_pas * self.len * self.dia * pi)
self.somaA.Ra = self.ra / (self.dia * self.dia * pi / 4.0)
self.somaA.Cm = self.cm * self.len * self.dia * pi
self.somaA.Em = self.e_pas
self.somaA.initVm = self.e_pas
self.pulsegen = moose.PulseGen('pulsegen', self.container)
self.pulsegen.firstLevel = self.stim_amp
self.pulsegen.firstDelay = self.stim_delay
self.pulsegen.firstWidth = self.stim_dur
# secondDelay is pushed far beyond simtime so only one pulse fires.
self.pulsegen.secondDelay = 1e9
self.pulsegen.connect('outputSrc', self.somaA, 'injectMsg')
self.somaB = moose.Compartment('b', self.container)
self.somaB.Rm = 1.0 /(self.g_pas * self.len * self.dia * pi)
self.somaB.Ra = self.ra / (self.dia * self.dia * pi / 4.0)
self.somaB.Cm = self.cm * self.len * self.dia * pi
self.somaB.Em = self.e_pas
self.somaB.initVm = self.e_pas
self.vmA = moose.Table('Vm_A', self.container)
self.vmA.stepMode = 3
self.vmA.connect('inputRequest', self.somaA, 'Vm')
self.vmB = moose.Table('Vm_B', self.container)
self.vmB.stepMode = 3
self.vmB.connect('inputRequest', self.somaB, 'Vm')
self.nmda.connect('channel', self.somaB, 'channel')
self.spikegen = moose.SpikeGen('spike', self.container)
self.spikegen.threshold = 0.0
self.spikegen.delay = 0.05e-3
self.spikegen.edgeTriggered = 1
self.spikegen.connect('event', self.nmda, 'synapse')
print 'Connecting spikegen:', self.somaA.connect('VmSrc', self.spikegen, 'Vm')
self.assertEqual(self.nmda.numSynapses, 1)
self.nmda.setWeight(0, self.NMDA_weight)
self.assertAlmostEqual(self.NMDA_weight, self.nmda.getWeight(0))
def testGkChanges(self):
# Run the simulation, dump traces, and compare Vm(B) with the NEURON data.
self.setupNetwork()
moose.context.setClock(0, self.simdt)
moose.context.setClock(1, self.simdt)
moose.context.setClock(2, self.simdt)
moose.context.reset()
moose.context.step(self.simtime)
outfile = open('moose_nmda.dat', 'w')
t = 0.0
tvec = []
for ii in range(len(self.nmda_gk)):
outfile.write('%g %g %g %g\n' % (t, self.vmA[ii], self.vmB[ii], self.nmda_gk[ii]/self.nmda_unblocked[ii] if self.nmda_unblocked[ii] != 0 else self.nmda_gk[ii])) # We save nmda_gk/nmda_unblocked as neuron does not compute the gk correctly.
tvec.append(t)
t += self.simdt
outfile.close()
# NOTE(review): loadtxt appears to run even when has_numpy is False —
# confirm it sits inside the guard in the original indentation.
nrn_data = numpy.loadtxt('neuron_nmda.dat').transpose()
if has_numpy:
interpolated_vb = numpy.interp(tvec, nrn_data[0]*1e-3, nrn_data[2]*1e-3)
error_vec = interpolated_vb - numpy.array(self.vmB)
square_error = error_vec * error_vec
rms_error = numpy.sqrt(square_error.sum() / len(error_vec))
relative_error = rms_error / interpolated_vb.max()
self.assertTrue(numpy.abs(relative_error) < 1e-3) # We just choose arbitrarily that relative error should be less than a thousandth
if __name__ == '__main__':
# Discover and run all TestNMDAChan tests.
unittest.main()
#
# nmda.py ends here
|
BhallaLab/moose-thalamocortical
|
TESTS/pymoose/test_nmda.py
|
Python
|
lgpl-2.1
| 6,693
|
[
"MOOSE",
"NEURON"
] |
a1e1fd37f53f5e5eb107d2c658e98ace5f639314f275c0d0f3b399c4c908b629
|
#!/usr/bin/env python
# Copyright 2017.
# Michael A. DeJesus, Chaitra Ambadipudi, and Thomas R. Ioerger.
#
#
# This file is part of TRANSIT.
#
# TRANSIT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License.
#
#
# TRANSIT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with TRANSIT. If not, see <http://www.gnu.org/licenses/>.
import sys
import glob
import os
import time
import math
import re
import shutil
import platform
import gzip
try:
import wx
import wx.lib.filebrowsebutton
hasWx = True
except Exception as e:
hasWx = False
from pytpp.tpp_tools import *
if hasWx:
class TPPIcon(wx.StaticBitmap):
    """A static bitmap that shows *tooltip* text when hovered."""

    def __init__(self, panel, flag, bmp, tooltip=""):
        wx.StaticBitmap.__init__(self, panel, flag, bmp)
        self.SetToolTip(wx.ToolTip(tooltip))
class MyForm(wx.Frame):
def __init__(self,vars):
"""Build the TPP main window: a scrolled panel of input fields plus
Start/Quit buttons and the status list (see InitFiles/InitList)."""
self.vars = vars
initialize_globals(self.vars)
wx.Frame.__init__(self, None, wx.ID_ANY, "TPP: Tn-Seq PreProcessor") # v%s" % vars.version
# Add a panel so it looks the correct on all platforms
panel = wx.ScrolledWindow( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( -1,-1 ), wx.HSCROLL|wx.VSCROLL )
panel.SetScrollRate( 5, 5 )
panel.SetMaxSize( wx.Size( -1, 1000 ) )
sizer = wx.BoxSizer(wx.VERTICAL)
self.list_ctrl = None
self.InitMenu()
self.InitFiles(panel,sizer)
# Start kicks off the mapping pipeline; Quit closes the window.
buttonrow = wx.BoxSizer(wx.HORIZONTAL)
btn = wx.Button(panel, label="Start")
btn.Bind(wx.EVT_BUTTON, self.map_reads)
buttonrow.Add(btn,0,0,0,10)
btn = wx.Button(panel, label="Quit")
btn.Bind(wx.EVT_BUTTON, self.OnQuit)
buttonrow.Add(btn,0,0,0,10)
sizer.Add(buttonrow,0,0,0)
self.InitList(panel,sizer)
panel.SetSizer(sizer)
# self.SetSize((1305, 700))
self.SetSize((900, 750))
#self.SetTitle('Simple menu')
self.Centre()
#self.Show(True)
# pid of the external mapping process; None until Start is pressed.
self.pid = None
#
def InitFiles(self,panel,sizer):
"""Populate *sizer* with one labelled input row per TPP setting
(reference genome, fastq files, output prefix, protocol, transposon,
primer, read limits, BWA options, BarSeq catalog), pre-filled from
self.vars. Each row gets a TPPIcon tooltip explaining the field."""
vars = self.vars
# Define
bmp = wx.ArtProvider.GetBitmap(wx.ART_INFORMATION, wx.ART_OTHER, (16, 16))
# REFERENCE
sizer3 = wx.BoxSizer(wx.HORIZONTAL)
label3 = wx.StaticText(panel, label='Choose a reference genome (FASTA) (REQUIRED):',size=(330,-1))
sizer3.Add(label3,0,wx.ALIGN_CENTER_VERTICAL,0)
self.picker3 = wx.lib.filebrowsebutton.FileBrowseButton(panel, id=wx.ID_ANY, dialogTitle='Please select the reference genome', fileMode=wx.FD_OPEN, fileMask='*.fna;*.fasta;*.fa', size=(400,30), startDirectory=os.path.dirname(vars.ref), initialValue=vars.ref, labelText='')
sizer3.Add(self.picker3, proportion=1, flag=wx.EXPAND|wx.ALL, border=5)
sizer3.Add(TPPIcon(panel, wx.ID_ANY, bmp, "Select a reference genome in FASTA format (can be a multi-contig fasta file)."), flag=wx.CENTER, border=0)
sizer3.Add((10, 1), 0, wx.EXPAND)
sizer.Add(sizer3,0,wx.EXPAND,0)
# REPLICON ID NAMES
sizer_replicon_ids = wx.BoxSizer(wx.HORIZONTAL)
label_replicon_ids = wx.StaticText(panel, label='ID names for each replicon: \n(if genome has multiple contigs)',size=(340,-1))
sizer_replicon_ids.Add(label_replicon_ids,0,wx.ALIGN_CENTER_VERTICAL,0)
self.replicon_ids = wx.TextCtrl(panel,value=vars.replicon_ids,size=(400,30))
# NOTE(review): proportion=1.0 passes a float where wx expects an int — confirm intended.
sizer_replicon_ids.Add(self.replicon_ids, proportion=1.0, flag=wx.EXPAND|wx.ALL, border=5)
sizer_replicon_ids.Add(TPPIcon(panel, wx.ID_ANY, bmp, "Specify names of each contig within the reference genome separated by commas (if using wig_gb_to_csv.py you must use the contig names in the Genbank file). Only required if there are multiple contigs; can leave blank if there is just one sequence.\nEnter 'auto' for autogenerated ids."), flag=wx.CENTER, border=0)
sizer_replicon_ids.Add((10, 1), 0, wx.EXPAND)
sizer.Add(sizer_replicon_ids,0,wx.EXPAND,0)
# READS 1
sizer1 = wx.BoxSizer(wx.HORIZONTAL)
label1 = wx.StaticText(panel, label='Choose the Fastq file for read 1 (REQUIRED):',size=(330,-1))
sizer1.Add(label1,0,wx.ALIGN_CENTER_VERTICAL,0)
self.picker1 = wx.lib.filebrowsebutton.FileBrowseButton(panel, id=wx.ID_ANY, dialogTitle='Please select the .fastq file for read 1', fileMode=wx.FD_OPEN, fileMask='*.fastq;*.fq;*.reads;*.fasta;*.fa;*.fastq.gz', size=(400,30), startDirectory=os.path.dirname(vars.fq1), initialValue=vars.fq1, labelText='',changeCallback=self.OnChanged2)
sizer1.Add(self.picker1, proportion=1, flag=wx.EXPAND|wx.ALL, border=5)
sizer1.Add(TPPIcon(panel, wx.ID_ANY, bmp, "Select a file containing the reads in .FASTQ (or compressed FASTQ) format."), flag=wx.CENTER, border=0)
sizer1.Add((10, 1), 0, wx.EXPAND)
sizer.Add(sizer1,0,wx.EXPAND,0)
# READS 2
sizer2 = wx.BoxSizer(wx.HORIZONTAL)
label2 = wx.StaticText(panel, label='Choose the Fastq file for read 2:',size=(330,-1))
sizer2.Add(label2,0,wx.ALIGN_CENTER_VERTICAL,0)
self.picker2 = wx.lib.filebrowsebutton.FileBrowseButton(panel, id=wx.ID_ANY, dialogTitle='Please select the .fastq file for read 2', fileMode=wx.FD_OPEN, fileMask='*.fastq;*.fq;*.reads;*.fasta;*.fa;*.fastq.gz', size=(400,30), startDirectory=os.path.dirname(vars.fq2), initialValue=vars.fq2, labelText='', changeCallback=self.OnChanged2)
sizer2.Add(self.picker2, proportion=1, flag=wx.EXPAND|wx.ALL, border=5)
sizer2.Add(TPPIcon(panel, wx.ID_ANY, bmp, "Select a file containing the pair-end reads in .FASTQ (or compressed FASTQ) format. Optional."), flag=wx.CENTER, border=0)
sizer2.Add((10, 1), 0, wx.EXPAND)
sizer.Add(sizer2,0,wx.EXPAND,0)
# OUTPUT PREFIX
sizer5 = wx.BoxSizer(wx.HORIZONTAL)
label5 = wx.StaticText(panel, label='Prefix to use for output filenames (REQUIRED):',size=(340,-1))
sizer5.Add(label5,0,wx.ALIGN_CENTER_VERTICAL,0)
self.base = wx.TextCtrl(panel,value=vars.base,size=(400,30))
sizer5.Add(self.base, proportion=1.0, flag=wx.EXPAND|wx.ALL, border=5)
sizer5.Add(TPPIcon(panel, wx.ID_ANY, bmp, "Select a prefix that will be used when writing output files"), flag=wx.CENTER, border=0)
sizer5.Add((10, 1), 0, wx.EXPAND)
sizer.Add(sizer5,0,wx.EXPAND,0)
# PROTOCOL
sizer_protocol = wx.BoxSizer(wx.HORIZONTAL)
label_protocol = wx.StaticText(panel, label='Protocol used:',size=(340,-1))
sizer_protocol.Add(label_protocol,0,wx.ALIGN_CENTER_VERTICAL,0)
self.protocol = wx.ComboBox(panel,choices=['Sassetti','Mme1', 'Tn5'],size=(400,30))
self.protocol.SetStringSelection(vars.protocol)
sizer_protocol.Add(self.protocol, proportion=1, flag=wx.EXPAND|wx.ALL, border=5)
protocol_tooltip_text = """Select which protocol used to prepare the sequencing samples. Default values will populate the other fields.
The Sassetti protocol generally assumes the reads include the primer prefix and part of the transposon sequence, followed by genomic sequence. It also assumes reads are sequenced in the forward direction. Barcodes are in read 2, along with genomic DNA from the other end of the fragment.
The Mme1 protocol generally assumes reads do NOT include the primer prefix, and that the reads are sequenced in the reverse direction"""
sizer_protocol.Add(TPPIcon(panel, wx.ID_ANY, bmp, protocol_tooltip_text), flag=wx.CENTER, border=0)
sizer_protocol.Add((10, 1), 0, wx.EXPAND)
sizer.Add(sizer_protocol,0,wx.EXPAND,0)
self.Bind(wx.EVT_COMBOBOX, self.OnProtocolSelection, id=self.protocol.GetId())
# TRANSPOSON
sizer8 = wx.BoxSizer(wx.HORIZONTAL)
label8 = wx.StaticText(panel, label='Transposon used:',size=(340,-1))
sizer8.Add(label8,0,wx.ALIGN_CENTER_VERTICAL,0)
self.transposon = wx.ComboBox(panel,choices=['Himar1','Tn5', 'pre-trimmed','[Custom]'],size=(400,30))
self.transposon.SetStringSelection(vars.transposon)
sizer8.Add(self.transposon, proportion=1, flag=wx.EXPAND|wx.ALL, border=5)
sizer8.Add(TPPIcon(panel, wx.ID_ANY, bmp, "Select the transposon used to construct the TnSeq libraries. This will automatically populate the primer prefix field. Select custom to specify your own sequence."), flag=wx.CENTER, border=0)
sizer8.Add((10, 1), 0, wx.EXPAND)
sizer.Add(sizer8,0,wx.EXPAND,0)
# PRIMER SEQUENCE
sizer4 = wx.BoxSizer(wx.HORIZONTAL)
label4 = wx.StaticText(panel, label='Primer sequence:',size=(340,-1))
sizer4.Add(label4,0,wx.ALIGN_CENTER_VERTICAL,0)
self.prefix = wx.TextCtrl(panel,value=str(vars.prefix), size=(400,30))
sizer4.Add(self.prefix, proportion=1, flag=wx.EXPAND|wx.ALL, border=5)
sizer4.Add(TPPIcon(panel, wx.ID_ANY, bmp, "If present in the reads, specify the primer sequence. If it has been stripped away already, leave this field empty."), flag=wx.CENTER, border=0)
sizer4.Add((10, 1), 0, wx.EXPAND)
sizer.Add(sizer4,0,wx.EXPAND,0)
self.Bind(wx.EVT_COMBOBOX, self.OnTransposonSelection, id=self.transposon.GetId())
self.prefix.Bind(wx.EVT_TEXT, self.OnChangePrimerPrefix)
# MAX READS
sizer6 = wx.BoxSizer(wx.HORIZONTAL)
label6 = wx.StaticText(panel, label='Max reads (leave blank to use all):',size=(340,-1))
sizer6.Add(label6,0,wx.ALIGN_CENTER_VERTICAL,0)
self.maxreads = wx.TextCtrl(panel,value=str(vars.maxreads),size=(150,30)) # or "" if not defined? can't write to tpp.cfg
sizer6.Add(self.maxreads, proportion=1, flag=wx.EXPAND|wx.ALL, border=5)
sizer6.Add(TPPIcon(panel, wx.ID_ANY, bmp, "Maximum reads to use from the reads files. Useful for running only a portion of very large number of reads. Leave blank to use all the reads."), flag=wx.CENTER, border=0)
sizer6.Add((10, 1), 0, wx.EXPAND)
sizer.Add(sizer6,0,wx.EXPAND,0)
# MISMATCHES
sizer7 = wx.BoxSizer(wx.HORIZONTAL)
label7 = wx.StaticText(panel, label='Mismatches allowed in Tn prefix:',size=(340,-1))
sizer7.Add(label7,0,wx.ALIGN_CENTER_VERTICAL,0)
self.mismatches = wx.TextCtrl(panel,value=str(vars.mm1),size=(150,30))
sizer7.Add(self.mismatches, proportion=1, flag=wx.EXPAND|wx.ALL, border=5)
sizer7.Add(TPPIcon(panel, wx.ID_ANY, bmp, "Number of mismatches allowed in the tn-prefix before discarding the read."), flag=wx.CENTER, border=0)
sizer7.Add((10, 1), 0, wx.EXPAND)
sizer.Add(sizer7,0,wx.EXPAND,0)
# PRIMER_START_WINDOW
sizer_primer_start = wx.BoxSizer(wx.HORIZONTAL)
label_primer_start = wx.StaticText(panel, label='Start of window to look for prefix (Tn terminus):', size=(340,-1))
sizer_primer_start.Add(label_primer_start,0,wx.ALIGN_CENTER_VERTICAL,0)
primer_start_window = "%s,%s" % (vars.primer_start_window[0],vars.primer_start_window[1])
self.primer_start = wx.TextCtrl(panel,value=primer_start_window,size=(150,30))
sizer_primer_start.Add(self.primer_start, proportion=1, flag=wx.EXPAND|wx.ALL, border=5)
sizer_primer_start.Add(TPPIcon(panel, wx.ID_ANY, bmp, "Region in read 1 to search for start of prefix seq (i.e. end of transposon)."), flag=wx.CENTER, border=0)
sizer_primer_start.Add((10, 1), 0, wx.EXPAND)
sizer.Add(sizer_primer_start,0,wx.EXPAND,0)
# # WINDOW SIZE # [RJ] This block is to add the acceptance of a set window size for setting P,Q parameters
# sizer_window_size = wx.BoxSizer(wx.HORIZONTAL)
# label_window_size = wx.StaticText(panel, label='Window size for Tn prefix in read:', size=(340,-1))
# sizer_window_size.Add(label_window_size,0,wx.ALIGN_CENTER_VERTICAL,0)
# self.window_size = wx.TextCtrl(panel,value=str(vars.window_size),size=(150,30))
# sizer_window_size.Add(self.window_size, proportion=1, flag=wx.EXPAND|wx.ALL, border=5)
# sizer_window_size.Add(TPPIcon(panel, wx.ID_ANY, bmp, "Window size for extract_staggered() to look for start of Tn prefix."), flag=wx.CENTER, border=0)
# sizer_window_size.Add((10, 1), 0, wx.EXPAND)
# sizer.Add(sizer_window_size,0,wx.EXPAND,0)
# BWA
sizer0 = wx.BoxSizer(wx.HORIZONTAL)
label0 = wx.StaticText(panel, label='BWA executable (REQUIRED):',size=(330,-1))
sizer0.Add(label0,0,wx.ALIGN_CENTER_VERTICAL,0)
self.picker0 = wx.lib.filebrowsebutton.FileBrowseButton(panel, id = wx.ID_ANY, size=(400,30), dialogTitle='Path to BWA', fileMode=wx.FD_OPEN, fileMask='bwa*', startDirectory=os.path.dirname(vars.bwa), initialValue=vars.bwa, labelText='')
sizer0.Add(self.picker0, proportion=1, flag=wx.EXPAND|wx.ALL, border=5)
sizer0.Add(TPPIcon(panel, wx.ID_ANY, bmp, "Specify a path to the BWA executable (including the executable)."), flag=wx.CENTER, border=0)
sizer0.Add((10, 1), 0, wx.EXPAND)
sizer.Add(sizer0,0,wx.EXPAND,0)
self.bwa_alg = wx.ComboBox(panel,choices=["use algorithm 'aln'", "use algorithm 'mem'"],size=(200,30))
if vars.bwa_alg=='aln': self.bwa_alg.SetSelection(0)
else: self.bwa_alg.SetSelection(1) # default
sizer0.Add(self.bwa_alg, proportion=0.5, flag=wx.EXPAND|wx.ALL, border=5) ##
self.bwa_alg.Bind(wx.EVT_COMBOBOX, self.OnBwaAlgSelection, id=self.bwa_alg.GetId())
sizer0.Add(TPPIcon(panel, wx.ID_ANY, bmp, "'mem' is considered to do a better job at mapping reads, but 'aln' is available as an alternative."), flag=wx.CENTER, border=0)
sizer0.Add((10, 1), 0, wx.EXPAND)
#sizer.Add(sizer0,0,wx.EXPAND,0)
# BWA FLAGS
# NOTE(review): sizer8/label8 are reused here, shadowing the TRANSPOSON
# row's locals above — harmless but worth renaming for clarity.
sizer8 = wx.BoxSizer(wx.HORIZONTAL)
label8 = wx.StaticText(panel, label='BWA flags (Optional)',size=(340,-1))
sizer8.Add(label8,0,wx.ALIGN_CENTER_VERTICAL,0)
self.flags = wx.TextCtrl(panel,value=vars.flags,size=(400,30))
sizer8.Add(self.flags, proportion=1, flag=wx.EXPAND|wx.ALL, border=5)
sizer8.Add(TPPIcon(panel, wx.ID_ANY, bmp, "Use this textbox to enter any desired flags for the BWA alignment. For example, to limit the number of mismatches to 1, type: -k 1. See the BWA documentation for all possible flags."), flag=wx.CENTER, border=0)
sizer8.Add((10, 1), 0, wx.EXPAND)
sizer.Add(sizer8,0,wx.EXPAND,0)
# BARSEQ CATALOG
sizer9 = wx.BoxSizer(wx.HORIZONTAL)
label9 = wx.StaticText(panel, label='BarSeq Catalog file:',size=(120,-1))
sizer9.Add(label9,0,wx.ALIGN_CENTER_VERTICAL,0)
self.barseq_select = wx.ComboBox(panel,choices=['this is not a Barseq dataset','read catalog file','write catalog file'],size=(200,30)) ## # does a BoxSizer use wx.HORIZONTAL, not wx.EXPAND?
self.barseq_select.SetSelection(0)
sizer9.Add(self.barseq_select, proportion=0.5, flag=wx.EXPAND|wx.ALL, border=5) ##
self.picker9 = wx.lib.filebrowsebutton.FileBrowseButton(panel, id=wx.ID_ANY, dialogTitle='Please select the Barseq catalog filename', fileMode=wx.FD_OPEN, size=(400,30), startDirectory=os.path.dirname(vars.fq2), initialValue="", labelText='', ) # no need for this: changeCallback=self.OnChanged9 ; initialValue set below ; no file mask
sizer9.Add(self.picker9, proportion=1, flag=wx.EXPAND|wx.ALL, border=5)
if vars.barseq_catalog_in!=None:
self.barseq_select.SetSelection(1)
self.picker9.SetValue(vars.barseq_catalog_in)
if vars.barseq_catalog_out!=None:
self.barseq_select.SetSelection(2)
self.picker9.SetValue(vars.barseq_catalog_out)
sizer9.Add(TPPIcon(panel, wx.ID_ANY, bmp, "Select a filename for BarSeq catalog."), flag=wx.CENTER, border=0)
sizer9.Add((10, 1), 0, wx.EXPAND)
sizer.Add(sizer9,0,wx.EXPAND,0)
#
def OnBwaAlgSelection(self, event):
if 'aln' in self.bwa_alg.GetValue():
self.vars.bwa_alg = "aln"
elif 'mem' in self.bwa_alg.GetValue():
self.vars.bwa_alg = "mem"
else:
self.vars.bwa_alg = "[Custom]"
#
def OnTransposonSelection(self, event):
if self.transposon.GetValue()=="Tn5":
self.prefix.SetValue("TAAGAGACAG")
self.transposon.SetStringSelection("Tn5")
self.vars.transposon = "Tn5"
elif self.transposon.GetValue()=="Himar1":
self.prefix.SetValue("ACTTATCAGCCAACCTGTTA")
self.transposon.SetStringSelection("Himar1")
self.vars.transposon = "Himar1"
elif self.transposon.GetValue()=="pre-trimmed":
self.transposon.SetValue("pre-trimmed")
self.transposon.SetStringSelection("pre-trimmed")
self.vars.transposon = "pre-trimmed"
self.prefix.SetValue('""')
else:
self.transposon.SetValue("[Custom]")
self.transposon.SetStringSelection("[Custom]")
self.vars.transposon = "[Custom]"
#
def OnProtocolSelection(self, event):
self.vars.transposon = self.protocol.GetValue()
if self.protocol.GetValue()=="Tn5":
self.prefix.SetValue("TAAGAGACAG")
self.transposon.SetStringSelection("Tn5")
self.vars.transposon = "Tn5"
elif self.protocol.GetValue()=="Sassetti":
self.prefix.SetValue("ACTTATCAGCCAACCTGTTA")
self.transposon.SetStringSelection("Himar1")
self.vars.transposon = "Himar1"
elif self.protocol.GetValue()=="Mme1":
self.prefix.SetValue('""')
self.transposon.SetStringSelection("pre-trimmed")
self.vars.transposon = ""
#
def OnChanged(self, str_path):
print("changed")
value = os.path.basename(str_path).split('.')[0]
if '_R1' in value or '_R2':
value = value.split('_')[0]
#self.base.SetValue(value)
#
def OnChanged2(self, event):
value2 = os.path.basename(self.picker2.GetValue()).split('.')[0]
value1 = os.path.basename(self.picker1.GetValue()).split('.')[0]
value = os.path.commonprefix([value1, value2])
#self.base.SetValue(value)
#self.base.Refresh()
#
def OnChangePrimerPrefix(self, event):
#self.transposon.SetValue("[Custom]")
pass
#
def InitList(self,panel,sizer):
self.list_ctrl = wx.ListCtrl(panel, size=(500,210), style=wx.LC_REPORT|wx.BORDER_SUNKEN)
self.list_ctrl.InsertColumn(0, 'Dataset (*.tn_stats)',width=300)
self.list_ctrl.InsertColumn(1, 'total reads',wx.LIST_FORMAT_RIGHT,width=125)
self.list_ctrl.InsertColumn(2, 'Tn prefix', wx.LIST_FORMAT_RIGHT,width=125)
self.list_ctrl.InsertColumn(3, 'R1_mapped', wx.LIST_FORMAT_RIGHT,width=90)
self.list_ctrl.InsertColumn(4, 'R2_mapped', wx.LIST_FORMAT_RIGHT,width=90)
self.list_ctrl.InsertColumn(5, 'mapped\nreads', wx.LIST_FORMAT_RIGHT,width=90)
self.list_ctrl.InsertColumn(6, 'template\ncount', wx.LIST_FORMAT_RIGHT,width=90)
self.list_ctrl.InsertColumn(7, 'TAs hit', wx.LIST_FORMAT_RIGHT,width=90)
self.list_ctrl.InsertColumn(8, 'insertion\ndensity',wx.LIST_FORMAT_RIGHT,width=90)
self.list_ctrl.InsertColumn(9, 'NZmean', wx.LIST_FORMAT_RIGHT,width=90)
self.list_ctrl.InsertColumn(10, 'maxcount', wx.LIST_FORMAT_RIGHT,width=90)
self.list_ctrl.InsertColumn(11, 'primer', wx.LIST_FORMAT_RIGHT,width=90)
self.list_ctrl.InsertColumn(12, 'vector',wx.LIST_FORMAT_RIGHT,width=90)
sizer.Add(self.list_ctrl, 0, wx.ALL|wx.EXPAND, 10)
#
def InitMenu(self):
menubar = wx.MenuBar()
fileMenu = wx.Menu()
quit_menuitem = fileMenu.Append(wx.ID_EXIT, 'Quit', 'Quit application')
self.Bind(wx.EVT_MENU, self.OnQuit, quit_menuitem)
menubar.Append(fileMenu, '&File')
self.SetMenuBar(menubar)
#
def addNewDataset(self, event):
dlg = wx.FileDialog(
self, message="Choose a file",
defaultDir=".",
defaultFile="",
wildcard="*.wig",
style=wx.FD_OPEN | wx.FD_MULTIPLE | wx.FD_CHANGE_DIR
)
if dlg.ShowModal() == wx.ID_OK:
paths = dlg.GetPaths()
for path in paths:
print("analyzing dataset:",path)
analyze_dataset(path)
dlg.Destroy()
self.update_dataset_list()
#
def update_dataset_list(self):
if self.list_ctrl==None: return
self.list_ctrl.DeleteAllItems()
self.index = 0
datasets = []
for fname in glob.glob("*.tn_stats"):
filedate = os.path.getmtime(fname)
datasets.append((filedate,fname))
datasets.sort(reverse=True)
for (filedate,fname) in datasets:
stats = self.read_stats_file(fname)
ntrim = stats.get("TGTTA_reads","?")
if ntrim=="?": ntrim = stats.get("trimmed_reads","?")
vals = [stats.get("total_reads","?"),ntrim,stats.get("reads1_mapped", "?"),stats.get("reads2_mapped","?"),stats.get("mapped_reads","?"),stats.get("template_count","?"), stats.get("TAs_hit","?"), stats.get("density", "?"), stats.get("NZ_mean", "?"), stats.get("max_count", "?"), stats.get("primer_matches:","?"),stats.get("vector_matches:","?")]
dsname = "[%s] %s" % (time.strftime("%m/%d/%y",time.localtime(filedate)),fname[:fname.rfind('.')])
self.add_data(dsname, vals)
#
def read_stats_file(self,fname):
stats = {}
for line in open(fname):
w = line.rstrip().split()
val = ""
if len(w)>2: val = w[2]
stats[w[1]] = val
return stats
#
def add_data(self, dataset,vals):
self.list_ctrl.InsertItem(self.index, dataset)
for i in range(1, len(vals)+1):
self.list_ctrl.SetItem(self.index, i, vals[i-1])
self.index += 1
#
    def OnQuit(self, e):
        """Handle File -> Quit: record the quit action and close the window.

        Returns:
            0 (presumably unused by the caller — kept for symmetry with map_reads).
        """
        print("Quitting TPP. Good bye.")
        # NOTE(review): vars.action appears to be read by the caller after the
        # window closes (map_reads sets it to "start") — confirm against caller.
        self.vars.action = "quit"
        self.Close()
        return 0
#
    def map_reads(self,event):
        """Harvest every control on the panel into self.vars and signal "start".

        Reads the BWA path, FASTQ/reference paths, base name, primer prefix,
        mismatch count, flags, algorithm choice, replicon ids, primer-start
        window, max reads and BarSeq options; stores them all on self.vars,
        sets vars.action = "start" and closes the window.
        """
        # add bwa path, prefix
        bwapath = self.picker0.GetValue()
        fq1, fq2, ref, base, prefix, maxreads = self.picker1.GetValue(), self.picker2.GetValue(), self.picker3.GetValue(), self.base.GetValue(), self.prefix.GetValue(), self.maxreads.GetValue()
        mm1 = self.mismatches.GetValue()
        # Fall back to 1 mismatch when the field is empty or non-numeric.
        try: mm1 = int(mm1)
        except Exception: mm1 = 1
        self.vars.flags = self.flags.GetValue()
        self.vars.transposon = self.transposon.GetStringSelection()
        self.vars.protocol = self.protocol.GetValue()
        self.vars.bwa = bwapath
        self.vars.fq1 = fq1
        self.vars.fq2 = fq2
        self.vars.ref = ref
        self.vars.base = base
        self.vars.mm1 = mm1
        self.vars.prefix = prefix
        #self.vars.window_size = int(self.window_size.GetValue())
        # Map the combo-box label back onto the bwa algorithm keyword.
        if 'aln' in self.bwa_alg.GetValue():
            self.vars.bwa_alg = 'aln'
        elif 'mem' in self.bwa_alg.GetValue():
            self.vars.bwa_alg = 'mem'
        self.vars.replicon_ids = self.replicon_ids.GetValue().split(',')
        # Optional "start,end" window restricting where the primer may begin.
        v = self.primer_start.GetValue()
        if v!="":
            v = v.split(',')
            self.vars.primer_start_window = (int(v[0]),int(v[1]))
        # An empty max-reads field means "no limit" (-1).
        if maxreads == '': self.vars.maxreads = -1
        else: self.vars.maxreads = int(maxreads)
        # BarSeq combo: 0 = not a BarSeq dataset, 1 = read catalog, 2 = write catalog.
        barseq_select = self.barseq_select.GetSelection()
        self.vars.barseq_catalog_in = self.vars.barseq_catalog_out = None
        if barseq_select==1: self.vars.barseq_catalog_in = self.picker9.GetValue()
        if barseq_select==2: self.vars.barseq_catalog_out = self.picker9.GetValue()
        self.vars.action = "start"
        self.Close()
        return 0
|
mad-lab/transit
|
src/pytpp/tpp_gui.py
|
Python
|
gpl-3.0
| 25,744
|
[
"BWA"
] |
290f8f3b9a28535a2abfb24b39c4f5de2b7f3bb60a1c90b52d17f6be4efced49
|
#!/usr/bin/env python
"""
This script demonstrates how one can script the Mayavi application by
subclassing the application, create a new scene and create a few
simple modules.
This should be run as::
$ python test.py
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005-2007, Enthought, Inc.
# License: BSD Style.
# Standard library imports
from os.path import join, abspath, dirname
# Enthought library imports
from mayavi.plugins.app import Mayavi
from mayavi.scripts.util import get_data_dir
class MyApp(Mayavi):
    def run(self):
        """Executed once the application GUI has started.

        Builds a demo scene from 'heart.vtk': a text banner, an outline,
        axes, (optionally) orientation axes, three grid planes and an
        image plane widget.  All Mayavi-specific imports happen here.
        """
        from mayavi.sources.vtk_file_reader import VTKFileReader
        from mayavi.modules.outline import Outline
        from mayavi.modules.axes import Axes
        from mayavi.modules.grid_plane import GridPlane
        from mayavi.modules.image_plane_widget import ImagePlaneWidget
        from mayavi.modules.text import Text
        script = self.script
        script.new_scene()
        # Load the bundled VTK (old style) sample data file.
        reader = VTKFileReader()
        reader.initialize(join(get_data_dir(dirname(abspath(__file__))), 'heart.vtk'))
        script.add_source(reader)
        # Banner text in the upper part of the scene.
        banner = Text(text='MayaVi rules!', x_position=0.2,
                      y_position=0.9, width=0.8)
        banner.property.color = 1, 1, 0  # bright yellow
        script.add_module(banner)
        # Outline and axes around the data.
        script.add_module(Outline())
        script.add_module(Axes())
        # Orientation axes need VTK 4.5+; skip quietly on older builds.
        try:
            from mayavi.modules.orientation_axes import OrientationAxes
        except ImportError:
            pass
        else:
            orient = OrientationAxes()
            orient.marker.set_viewport(0.0, 0.8, 0.2, 1.0)
            script.add_module(orient)
        # Three grid planes, one per axis (a fresh GridPlane defaults to 'x').
        script.add_module(GridPlane())
        plane_y = GridPlane()
        plane_y.grid_plane.axis = 'y'
        script.add_module(plane_y)
        plane_z = GridPlane()
        script.add_module(plane_z)
        plane_z.grid_plane.axis = 'z'  # set after add_module, as in the original
        # Image plane widget, sliced through the middle of the data.
        ipw = ImagePlaneWidget()
        script.add_module(ipw)
        ipw.ipw.slice_position = 16
if __name__ == '__main__':
    # Instantiate the application and enter its main loop.
    app = MyApp()
    app.main()
|
dmsurti/mayavi
|
examples/mayavi/interactive/subclassing_mayavi_application.py
|
Python
|
bsd-3-clause
| 2,818
|
[
"Mayavi",
"VTK"
] |
c7b6697b83adc1b442806d4d2573bce9461ed097f8c78277680f26da6087a700
|
#!/usr/bin/env python
"""
==============================
orcaPose.py - Orca Pose Client
==============================
Reads Vicon pose data via Orca
"""
import sys, time, struct
import array as pyarray # So as not to conflict with numpy's array
from socket import *
from numpy import *
class poseHandler:
def __init__(self, proj, shared_data):
"""
Opens socket, subscribes to multicast group, and calculates
local vs. Vicon clock offset (to ensure most recent data).
Hostnames and ports are read in from the robot description file.
"""
### Connect to Orca:
try:
self.POS2D_GROUP = proj.robot_data['OrcaPositionGroup'][0]
self.POS2D_PORT = int(proj.robot_data['OrcaPositionPort'][0])
except KeyError, ValueError:
print "(POSE) ERROR: Cannot find Orca network settings ('OrcaPositionGroup', 'OrcaPositionPort') in robot description file."
sys.exit(-1)
# Open up sockets
print '(POSE) Subscribing to Orca multicast stream...'
self.pos2d_sock = socket(AF_INET, SOCK_DGRAM)
self.pos2d_sock.bind(('', self.POS2D_PORT))
# Join group
group_bin = inet_pton(AF_INET, self.POS2D_GROUP)
mreq = group_bin + struct.pack('=I', INADDR_ANY)
self.pos2d_sock.setsockopt(IPPROTO_IP, IP_ADD_MEMBERSHIP, mreq)
# Calculate clock offset
data = self.pos2d_sock.recv(1500)
now = time.time()
packet_doubles = pyarray.array('d')
packet_doubles.fromstring(data)
time_stamp = packet_doubles[0] + (1e-6)*packet_doubles[1]
self.time_offset = time_stamp - now
print "(POSE) Detected time delay of %fsec." % self.time_offset
print "(POSE) OK! We've successfully connected."
self.cached_pose = None
def getPose(self, cached=False):
"""
Gets the most recent pose reading from the multicast stream.
"""
if not cached or self.cached_pose is None:
#TODO: It would be nice if we could actually just flush the socket...
MIN_DELAY = 0.01 # seconds
time_stamp = 0.0
now = time.time() + self.time_offset
while (now-time_stamp)>MIN_DELAY:
data = self.pos2d_sock.recv(1500)
#print "Packet size: " + str(len(data))
packet_doubles = pyarray.array('d')
packet_doubles.fromstring(data)
time_stamp = packet_doubles[0] + (1e-6)*packet_doubles[1]
pos_x = packet_doubles[2]
pos_y = packet_doubles[3]
pos_o = packet_doubles[4]
self.cached_pose = array([pos_x, pos_y, pos_o])
return self.cached_pose
|
jadecastro/LTLMoP
|
src/lib/handlers/pose/_orcaPose.py
|
Python
|
gpl-3.0
| 2,767
|
[
"ORCA"
] |
e9e733c186a94da42d6f7952a5895c83cc411d01075504897bc5bbd0ad7bdff2
|
#!/usr/bin/env python
# This code is licensed under the New BSD License
# 2009, Alexander Artemenko <svetlyak.40wt@gmail.com>
# For other contacts, visit http://aartemenko.com
import unittest
from task_tests import *
if __name__ == '__main__':
    # Discover and run every TestCase pulled in via `from task_tests import *`.
    unittest.main()
|
svetlyak40wt/gtdzen
|
tests/testsuite.py
|
Python
|
bsd-3-clause
| 267
|
[
"VisIt"
] |
bc17b0eef164c363424b8a2f6af1d1db12ecd2c91c4365632ee2220dfe50f180
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkGenericDataObjectWriter(SimpleVTKClassModuleBase):
    """DeVIDE wrapper module around vtk.vtkGenericDataObjectWriter."""
    def __init__(self, module_manager):
        # One input of type vtkGenericDataObject, no outputs.
        writer = vtk.vtkGenericDataObjectWriter()
        SimpleVTKClassModuleBase.__init__(
            self, module_manager, writer,
            'Writing vtkGenericDataObject.',
            ('vtkGenericDataObject',), (),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
|
nagyistoce/devide
|
modules/vtk_basic/vtkGenericDataObjectWriter.py
|
Python
|
bsd-3-clause
| 520
|
[
"VTK"
] |
46f8fab86bbcec048605128328ec7ccbc0a949f57d9ae017c1562eac51fbc513
|
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
str = type('')
import sys
import pytest
from time import sleep
from gpiozero.pins.mock import MockPin, MockPWMPin
from gpiozero import *
def setup_function(function):
    """Install MockPWMPin for tests that need PWM pins, MockPin otherwise."""
    import gpiozero.devices
    # dirty, but it does the job
    pwm_tests = {
        'test_robot',
        'test_ryanteck_robot',
        'test_camjam_kit_robot',
        'test_led_borg',
        'test_snow_pi_initial_value_pwm',
    }
    if function.__name__ in pwm_tests:
        gpiozero.devices.pin_factory = MockPWMPin
    else:
        gpiozero.devices.pin_factory = MockPin
def teardown_function(function):
    # Release every mock pin so each test starts from a clean slate.
    MockPin.clear_pins()
def test_composite_output_on_off():
pin1 = MockPin(2)
pin2 = MockPin(3)
pin3 = MockPin(4)
with CompositeOutputDevice(OutputDevice(pin1), OutputDevice(pin2), foo=OutputDevice(pin3)) as device:
device.on()
assert all((pin1.state, pin2.state, pin3.state))
device.off()
assert not any((pin1.state, pin2.state, pin3.state))
def test_composite_output_toggle():
pin1 = MockPin(2)
pin2 = MockPin(3)
pin3 = MockPin(4)
with CompositeOutputDevice(OutputDevice(pin1), OutputDevice(pin2), foo=OutputDevice(pin3)) as device:
device.toggle()
assert all((pin1.state, pin2.state, pin3.state))
device[0].off()
device.toggle()
assert pin1.state
assert not pin2.state
assert not pin3.state
def test_composite_output_value():
pin1 = MockPin(2)
pin2 = MockPin(3)
pin3 = MockPin(4)
with CompositeOutputDevice(OutputDevice(pin1), OutputDevice(pin2), foo=OutputDevice(pin3)) as device:
assert device.value == (0, 0, 0)
device.toggle()
assert device.value == (1, 1, 1)
device.value = (1, 0, 1)
assert device[0].is_active
assert not device[1].is_active
assert device[2].is_active
def test_led_board_on_off():
pin1 = MockPin(2)
pin2 = MockPin(3)
pin3 = MockPin(4)
with LEDBoard(pin1, pin2, foo=pin3) as board:
assert isinstance(board[0], LED)
assert isinstance(board[1], LED)
assert isinstance(board[2], LED)
assert board.active_high
assert board[0].active_high
assert board[1].active_high
assert board[2].active_high
board.on()
assert all((pin1.state, pin2.state, pin3.state))
board.off()
assert not any((pin1.state, pin2.state, pin3.state))
board[0].on()
assert board.value == (1, 0, 0)
assert pin1.state
assert not pin2.state
assert not pin3.state
board.toggle()
assert board.value == (0, 1, 1)
assert not pin1.state
assert pin2.state
assert pin3.state
board.toggle(0,1)
assert board.value == (1, 0, 1)
assert pin1.state
assert not pin2.state
assert pin3.state
board.off(2)
assert board.value == (1, 0, 0)
assert pin1.state
assert not pin2.state
assert not pin3.state
board.on(1)
assert board.value == (1, 1, 0)
assert pin1.state
assert pin2.state
assert not pin3.state
board.off(0,1)
assert board.value == (0, 0, 0)
assert not pin1.state
assert not pin2.state
assert not pin3.state
board.on(1,2)
assert board.value == (0, 1, 1)
assert not pin1.state
assert pin2.state
assert pin3.state
board.toggle(0)
assert board.value == (1, 1, 1)
assert pin1.state
assert pin2.state
assert pin3.state
def test_led_board_active_low():
pin1 = MockPin(2)
pin2 = MockPin(3)
pin3 = MockPin(4)
with LEDBoard(pin1, pin2, foo=pin3, active_high=False) as board:
assert not board.active_high
assert not board[0].active_high
assert not board[1].active_high
assert not board[2].active_high
board.on()
assert not any ((pin1.state, pin2.state, pin3.state))
board.off()
assert all((pin1.state, pin2.state, pin3.state))
board[0].on()
assert board.value == (1, 0, 0)
assert not pin1.state
assert pin2.state
assert pin3.state
board.toggle()
assert board.value == (0, 1, 1)
assert pin1.state
assert not pin2.state
assert not pin3.state
def test_led_board_value():
pin1 = MockPin(2)
pin2 = MockPin(3)
pin3 = MockPin(4)
with LEDBoard(pin1, pin2, foo=pin3) as board:
assert board.value == (0, 0, 0)
board.value = (0, 1, 0)
assert board.value == (0, 1, 0)
board.value = (1, 0, 1)
assert board.value == (1, 0, 1)
def test_led_board_pwm_value():
pin1 = MockPWMPin(2)
pin2 = MockPWMPin(3)
pin3 = MockPWMPin(4)
with LEDBoard(pin1, pin2, foo=pin3, pwm=True) as board:
assert board.value == (0, 0, 0)
board.value = (0, 1, 0)
assert board.value == (0, 1, 0)
board.value = (0.5, 0, 0.75)
assert board.value == (0.5, 0, 0.75)
def test_led_board_pwm_bad_value():
pin1 = MockPWMPin(2)
pin2 = MockPWMPin(3)
pin3 = MockPWMPin(4)
with LEDBoard(pin1, pin2, foo=pin3, pwm=True) as board:
with pytest.raises(ValueError):
board.value = (-1, 0, 0)
with pytest.raises(ValueError):
board.value = (0, 2, 0)
def test_led_board_initial_value():
pin1 = MockPin(2)
pin2 = MockPin(3)
pin3 = MockPin(4)
with LEDBoard(pin1, pin2, foo=pin3, initial_value=0) as board:
assert board.value == (0, 0, 0)
with LEDBoard(pin1, pin2, foo=pin3, initial_value=1) as board:
assert board.value == (1, 1, 1)
def test_led_board_pwm_initial_value():
pin1 = MockPWMPin(2)
pin2 = MockPWMPin(3)
pin3 = MockPWMPin(4)
with LEDBoard(pin1, pin2, foo=pin3, pwm=True, initial_value=0) as board:
assert board.value == (0, 0, 0)
with LEDBoard(pin1, pin2, foo=pin3, pwm=True, initial_value=1) as board:
assert board.value == (1, 1, 1)
with LEDBoard(pin1, pin2, foo=pin3, pwm=True, initial_value=0.5) as board:
assert board.value == (0.5, 0.5, 0.5)
def test_led_board_pwm_bad_initial_value():
pin1 = MockPWMPin(2)
pin2 = MockPWMPin(3)
pin3 = MockPWMPin(4)
with pytest.raises(ValueError):
LEDBoard(pin1, pin2, foo=pin3, pwm=True, initial_value=-1)
with pytest.raises(ValueError):
LEDBoard(pin1, pin2, foo=pin3, pwm=True, initial_value=2)
def test_led_board_nested():
pin1 = MockPin(2)
pin2 = MockPin(3)
pin3 = MockPin(4)
with LEDBoard(pin1, LEDBoard(pin2, pin3)) as board:
assert list(led.pin for led in board.leds) == [pin1, pin2, pin3]
assert board.value == (0, (0, 0))
board.value = (1, (0, 1))
assert pin1.state
assert not pin2.state
assert pin3.state
def test_led_board_bad_blink():
pin1 = MockPin(2)
pin2 = MockPin(3)
pin3 = MockPin(4)
with LEDBoard(pin1, LEDBoard(pin2, pin3)) as board:
with pytest.raises(ValueError):
board.blink(fade_in_time=1, fade_out_time=1)
with pytest.raises(ValueError):
board.blink(fade_out_time=1)
with pytest.raises(ValueError):
board.pulse()
@pytest.mark.skipif(hasattr(sys, 'pypy_version_info'),
reason='timing is too random on pypy')
def test_led_board_blink_background():
pin1 = MockPin(2)
pin2 = MockPin(3)
pin3 = MockPin(4)
with LEDBoard(pin1, LEDBoard(pin2, pin3)) as board:
board.blink(0.1, 0.1, n=2)
board._blink_thread.join() # naughty, but ensures no arbitrary waits in the test
test = [
(0.0, False),
(0.0, True),
(0.1, False),
(0.1, True),
(0.1, False)
]
pin1.assert_states_and_times(test)
pin2.assert_states_and_times(test)
pin3.assert_states_and_times(test)
@pytest.mark.skipif(hasattr(sys, 'pypy_version_info'),
reason='timing is too random on pypy')
def test_led_board_blink_foreground():
pin1 = MockPin(2)
pin2 = MockPin(3)
pin3 = MockPin(4)
with LEDBoard(pin1, LEDBoard(pin2, pin3)) as board:
board.blink(0.1, 0.1, n=2, background=False)
test = [
(0.0, False),
(0.0, True),
(0.1, False),
(0.1, True),
(0.1, False)
]
pin1.assert_states_and_times(test)
pin2.assert_states_and_times(test)
pin3.assert_states_and_times(test)
@pytest.mark.skipif(hasattr(sys, 'pypy_version_info'),
reason='timing is too random on pypy')
def test_led_board_blink_control():
pin1 = MockPin(2)
pin2 = MockPin(3)
pin3 = MockPin(4)
with LEDBoard(pin1, LEDBoard(pin2, pin3)) as board:
board.blink(0.1, 0.1, n=2)
# make sure the blink thread's started
while not board._blink_leds:
sleep(0.00001) # pragma: no cover
board[1][0].off() # immediately take over the second LED
board._blink_thread.join() # naughty, but ensures no arbitrary waits in the test
test = [
(0.0, False),
(0.0, True),
(0.1, False),
(0.1, True),
(0.1, False)
]
pin1.assert_states_and_times(test)
pin3.assert_states_and_times(test)
print(pin2.states)
pin2.assert_states_and_times([(0.0, False), (0.0, True), (0.0, False)])
@pytest.mark.skipif(hasattr(sys, 'pypy_version_info'),
reason='timing is too random on pypy')
def test_led_board_blink_take_over():
pin1 = MockPin(2)
pin2 = MockPin(3)
pin3 = MockPin(4)
with LEDBoard(pin1, LEDBoard(pin2, pin3)) as board:
board[1].blink(0.1, 0.1, n=2)
board.blink(0.1, 0.1, n=2) # immediately take over blinking
board[1]._blink_thread.join()
board._blink_thread.join()
test = [
(0.0, False),
(0.0, True),
(0.1, False),
(0.1, True),
(0.1, False)
]
pin1.assert_states_and_times(test)
pin2.assert_states_and_times(test)
pin3.assert_states_and_times(test)
@pytest.mark.skipif(hasattr(sys, 'pypy_version_info'),
reason='timing is too random on pypy')
def test_led_board_blink_control_all():
pin1 = MockPin(2)
pin2 = MockPin(3)
pin3 = MockPin(4)
with LEDBoard(pin1, LEDBoard(pin2, pin3)) as board:
board.blink(0.1, 0.1, n=2)
# make sure the blink thread's started
while not board._blink_leds:
sleep(0.00001) # pragma: no cover
board[0].off() # immediately take over all LEDs
board[1][0].off()
board[1][1].off()
board._blink_thread.join() # blink should terminate here anyway
test = [
(0.0, False),
(0.0, True),
(0.0, False),
]
pin1.assert_states_and_times(test)
pin2.assert_states_and_times(test)
pin3.assert_states_and_times(test)
def test_led_board_blink_interrupt_on():
pin1 = MockPin(2)
pin2 = MockPin(3)
pin3 = MockPin(4)
with LEDBoard(pin1, LEDBoard(pin2, pin3)) as board:
board.blink(1, 0.1)
sleep(0.2)
board.off() # should interrupt while on
pin1.assert_states([False, True, False])
pin2.assert_states([False, True, False])
pin3.assert_states([False, True, False])
def test_led_board_blink_interrupt_off():
pin1 = MockPin(2)
pin2 = MockPin(3)
pin3 = MockPin(4)
with LEDBoard(pin1, LEDBoard(pin2, pin3)) as board:
board.blink(0.1, 1)
sleep(0.2)
board.off() # should interrupt while off
pin1.assert_states([False, True, False])
pin2.assert_states([False, True, False])
pin3.assert_states([False, True, False])
@pytest.mark.skipif(hasattr(sys, 'pypy_version_info'),
reason='timing is too random on pypy')
def test_led_board_fade_background():
pin1 = MockPWMPin(2)
pin2 = MockPWMPin(3)
pin3 = MockPWMPin(4)
with LEDBoard(pin1, LEDBoard(pin2, pin3, pwm=True), pwm=True) as board:
board.blink(0, 0, 0.2, 0.2, n=2)
board._blink_thread.join()
test = [
(0.0, 0),
(0.04, 0.2),
(0.04, 0.4),
(0.04, 0.6),
(0.04, 0.8),
(0.04, 1),
(0.04, 0.8),
(0.04, 0.6),
(0.04, 0.4),
(0.04, 0.2),
(0.04, 0),
(0.04, 0.2),
(0.04, 0.4),
(0.04, 0.6),
(0.04, 0.8),
(0.04, 1),
(0.04, 0.8),
(0.04, 0.6),
(0.04, 0.4),
(0.04, 0.2),
(0.04, 0),
]
pin1.assert_states_and_times(test)
pin2.assert_states_and_times(test)
pin3.assert_states_and_times(test)
def test_led_bar_graph_value():
pin1 = MockPin(2)
pin2 = MockPin(3)
pin3 = MockPin(4)
with LEDBarGraph(pin1, pin2, pin3) as graph:
assert isinstance(graph[0], LED)
assert isinstance(graph[1], LED)
assert isinstance(graph[2], LED)
assert graph.active_high
assert graph[0].active_high
assert graph[1].active_high
assert graph[2].active_high
graph.value = 0
assert graph.value == 0
assert not any((pin1.state, pin2.state, pin3.state))
graph.value = 1
assert graph.value == 1
assert all((pin1.state, pin2.state, pin3.state))
graph.value = 1/3
assert graph.value == 1/3
assert pin1.state and not (pin2.state or pin3.state)
graph.value = -1/3
assert graph.value == -1/3
assert pin3.state and not (pin1.state or pin2.state)
pin1.state = True
pin2.state = True
assert graph.value == 1
pin3.state = False
assert graph.value == 2/3
pin3.state = True
pin1.state = False
assert graph.value == -2/3
def test_led_bar_graph_active_low():
pin1 = MockPin(2)
pin2 = MockPin(3)
pin3 = MockPin(4)
with LEDBarGraph(pin1, pin2, pin3, active_high=False) as graph:
assert not graph.active_high
assert not graph[0].active_high
assert not graph[1].active_high
assert not graph[2].active_high
graph.value = 0
assert graph.value == 0
assert all((pin1.state, pin2.state, pin3.state))
graph.value = 1
assert graph.value == 1
assert not any((pin1.state, pin2.state, pin3.state))
graph.value = 1/3
assert graph.value == 1/3
assert not pin1.state and pin2.state and pin3.state
graph.value = -1/3
assert graph.value == -1/3
assert not pin3.state and pin1.state and pin2.state
def test_led_bar_graph_pwm_value():
pin1 = MockPWMPin(2)
pin2 = MockPWMPin(3)
pin3 = MockPWMPin(4)
with LEDBarGraph(pin1, pin2, pin3, pwm=True) as graph:
assert isinstance(graph[0], PWMLED)
assert isinstance(graph[1], PWMLED)
assert isinstance(graph[2], PWMLED)
graph.value = 0
assert graph.value == 0
assert not any((pin1.state, pin2.state, pin3.state))
graph.value = 1
assert graph.value == 1
assert all((pin1.state, pin2.state, pin3.state))
graph.value = 1/3
assert graph.value == 1/3
assert pin1.state and not (pin2.state or pin3.state)
graph.value = -1/3
assert graph.value == -1/3
assert pin3.state and not (pin1.state or pin2.state)
graph.value = 1/2
assert graph.value == 1/2
assert (pin1.state, pin2.state, pin3.state) == (1, 0.5, 0)
pin1.state = 0
pin3.state = 1
assert graph.value == -1/2
def test_led_bar_graph_bad_value():
pin1 = MockPin(2)
pin2 = MockPin(3)
pin3 = MockPin(4)
with LEDBarGraph(pin1, pin2, pin3) as graph:
with pytest.raises(ValueError):
graph.value = -2
with pytest.raises(ValueError):
graph.value = 2
def test_led_bar_graph_bad_init():
pin1 = MockPin(2)
pin2 = MockPin(3)
pin3 = MockPin(4)
with pytest.raises(TypeError):
LEDBarGraph(pin1, pin2, foo=pin3)
with pytest.raises(ValueError):
LEDBarGraph(pin1, pin2, pin3, initial_value=-2)
with pytest.raises(ValueError):
LEDBarGraph(pin1, pin2, pin3, initial_value=2)
def test_led_bar_graph_initial_value():
pin1 = MockPin(2)
pin2 = MockPin(3)
pin3 = MockPin(4)
with LEDBarGraph(pin1, pin2, pin3, initial_value=1/3) as graph:
assert graph.value == 1/3
assert pin1.state and not (pin2.state or pin3.state)
with LEDBarGraph(pin1, pin2, pin3, initial_value=-1/3) as graph:
assert graph.value == -1/3
assert pin3.state and not (pin1.state or pin2.state)
def test_led_bar_graph_pwm_initial_value():
pin1 = MockPWMPin(2)
pin2 = MockPWMPin(3)
pin3 = MockPWMPin(4)
with LEDBarGraph(pin1, pin2, pin3, pwm=True, initial_value=0.5) as graph:
assert graph.value == 0.5
assert (pin1.state, pin2.state, pin3.state) == (1, 0.5, 0)
with LEDBarGraph(pin1, pin2, pin3, pwm=True, initial_value=-0.5) as graph:
assert graph.value == -0.5
assert (pin1.state, pin2.state, pin3.state) == (0, 0.5, 1)
def test_led_borg():
pins = [MockPWMPin(n) for n in (17, 27, 22)]
with LedBorg() as board:
assert [device.pin for device in board._leds] == pins
def test_pi_liter():
pins = [MockPin(n) for n in (4, 17, 27, 18, 22, 23, 24, 25)]
with PiLiter() as board:
assert [device.pin for device in board] == pins
def test_pi_liter_graph():
pins = [MockPin(n) for n in (4, 17, 27, 18, 22, 23, 24, 25)]
with PiLiterBarGraph() as board:
board.value = 0.5
assert [pin.state for pin in pins] == [1, 1, 1, 1, 0, 0, 0, 0]
pins[4].state = 1
assert board.value == 5/8
def test_traffic_lights():
red_pin = MockPin(2)
amber_pin = MockPin(3)
green_pin = MockPin(4)
with TrafficLights(red_pin, amber_pin, green_pin) as board:
board.red.on()
assert board.red.value
assert not board.amber.value
assert not board.yellow.value
assert not board.green.value
assert red_pin.state
assert not amber_pin.state
assert not green_pin.state
board.amber.on()
assert amber_pin.state
board.yellow.off()
assert not amber_pin.state
with TrafficLights(red=red_pin, yellow=amber_pin, green=green_pin) as board:
board.yellow.on()
assert not board.red.value
assert board.amber.value
assert board.yellow.value
assert not board.green.value
assert not red_pin.state
assert amber_pin.state
assert not green_pin.state
board.amber.off()
assert not amber_pin.state
def test_traffic_lights_bad_init():
with pytest.raises(ValueError):
TrafficLights()
red_pin = MockPin(2)
amber_pin = MockPin(3)
green_pin = MockPin(4)
yellow_pin = MockPin(5)
with pytest.raises(ValueError):
TrafficLights(red=red_pin, amber=amber_pin, yellow=yellow_pin, green=green_pin)
def test_pi_traffic():
pins = [MockPin(n) for n in (9, 10, 11)]
with PiTraffic() as board:
assert [device.pin for device in board] == pins
def test_pi_stop():
with pytest.raises(ValueError):
PiStop()
with pytest.raises(ValueError):
PiStop('E')
pins_a = [MockPin(n) for n in (7, 8, 25)]
with PiStop('A') as board:
assert [device.pin for device in board] == pins_a
pins_aplus = [MockPin(n) for n in (21, 20, 16)]
with PiStop('A+') as board:
assert [device.pin for device in board] == pins_aplus
pins_b = [MockPin(n) for n in (10, 9, 11)]
with PiStop('B') as board:
assert [device.pin for device in board] == pins_b
pins_bplus = [MockPin(n) for n in (13, 19, 26)]
with PiStop('B+') as board:
assert [device.pin for device in board] == pins_bplus
pins_c = [MockPin(n) for n in (18, 15, 14)]
with PiStop('C') as board:
assert [device.pin for device in board] == pins_c
pins_d = [MockPin(n) for n in (2, 3, 4)]
with PiStop('D') as board:
assert [device.pin for device in board] == pins_d
def test_snow_pi():
pins = [MockPin(n) for n in (23, 24, 25, 17, 18, 22, 7, 8, 9)]
with SnowPi() as board:
assert [device.pin for device in board.leds] == pins
def test_snow_pi_initial_value():
with SnowPi() as board:
assert all(device.pin.state == False for device in board.leds)
with SnowPi(initial_value=False) as board:
assert all(device.pin.state == False for device in board.leds)
with SnowPi(initial_value=True) as board:
assert all(device.pin.state == True for device in board.leds)
with SnowPi(initial_value=0.5) as board:
assert all(device.pin.state == True for device in board.leds)
def test_snow_pi_initial_value_pwm():
pins = [MockPWMPin(n) for n in (23, 24, 25, 17, 18, 22, 7, 8, 9)]
with SnowPi(pwm=True, initial_value=0.5) as board:
assert [device.pin for device in board.leds] == pins
assert all(device.pin.state == 0.5 for device in board.leds)
def test_traffic_lights_buzzer():
    """TrafficLightsBuzzer composes lights, buzzer and button correctly.

    Turning on one component must not disturb the others, and the button
    must report active when its pin is driven low (active-low wiring).
    """
    red_pin = MockPin(2)
    amber_pin = MockPin(3)
    green_pin = MockPin(4)
    buzzer_pin = MockPin(5)
    button_pin = MockPin(6)
    with TrafficLightsBuzzer(
            TrafficLights(red_pin, amber_pin, green_pin),
            Buzzer(buzzer_pin),
            Button(button_pin)) as board:
        board.lights.red.on()
        board.buzzer.on()
        # Only the components explicitly switched on change state.
        assert red_pin.state
        assert not amber_pin.state
        assert not green_pin.state
        assert buzzer_pin.state
        button_pin.drive_low()
        assert board.button.is_active
def test_fish_dish():
    """FishDish wires its lights, buzzer and button to the documented pins."""
    expected = [MockPin(number) for number in (9, 22, 4, 8, 7)]
    with FishDish() as board:
        actual = [led.pin for led in board.lights]
        actual.append(board.buzzer.pin)
        actual.append(board.button.pin)
        assert actual == expected
def test_traffic_hat():
    """TrafficHat wires its lights, buzzer and button to the documented pins."""
    expected = [MockPin(number) for number in (24, 23, 22, 5, 25)]
    with TrafficHat() as board:
        actual = [led.pin for led in board.lights]
        actual.append(board.buzzer.pin)
        actual.append(board.button.pin)
        assert actual == expected
def test_robot():
    """Robot drives its four PWM motor pins for every movement command.

    Pins are ordered (left fwd, left back, right fwd, right back); each
    command is checked against the raw pin states and the (left, right)
    value tuple. The assertions are order-sensitive: each command builds
    on the state left by the previous one.
    """
    pins = [MockPWMPin(n) for n in (2, 3, 4, 5)]
    with Robot((2, 3), (4, 5)) as robot:
        assert (
            [device.pin for device in robot.left_motor] +
            [device.pin for device in robot.right_motor]) == pins
        assert robot.value == (0, 0)
        robot.forward()
        assert [pin.state for pin in pins] == [1, 0, 1, 0]
        assert robot.value == (1, 1)
        robot.backward()
        assert [pin.state for pin in pins] == [0, 1, 0, 1]
        assert robot.value == (-1, -1)
        # Fractional speed scales the forward pins.
        robot.forward(0.5)
        assert [pin.state for pin in pins] == [0.5, 0, 0.5, 0]
        assert robot.value == (0.5, 0.5)
        robot.left()
        assert [pin.state for pin in pins] == [0, 1, 1, 0]
        assert robot.value == (-1, 1)
        robot.right()
        assert [pin.state for pin in pins] == [1, 0, 0, 1]
        assert robot.value == (1, -1)
        # reverse() flips the current direction (was turning right).
        robot.reverse()
        assert [pin.state for pin in pins] == [0, 1, 1, 0]
        assert robot.value == (-1, 1)
        robot.stop()
        assert [pin.state for pin in pins] == [0, 0, 0, 0]
        assert robot.value == (0, 0)
        # The value tuple is directly assignable as well.
        robot.value = (-1, -1)
        assert robot.value == (-1, -1)
        robot.value = (0.5, 1)
        assert robot.value == (0.5, 1)
        robot.value = (0, -0.5)
        assert robot.value == (0, -0.5)
def test_ryanteck_robot():
    """RyanteckRobot drives its motors via PWM on GPIO 17, 18, 22 and 23."""
    expected = [MockPWMPin(number) for number in (17, 18, 22, 23)]
    with RyanteckRobot() as board:
        actual = [device.pin for motor in board for device in motor]
        assert actual == expected
def test_camjam_kit_robot():
    """CamJamKitRobot drives its motors via PWM on GPIO 9, 10, 7 and 8."""
    expected = [MockPWMPin(number) for number in (9, 10, 7, 8)]
    with CamJamKitRobot() as board:
        actual = [device.pin for motor in board for device in motor]
        assert actual == expected
def test_energenie_bad_init():
    """Energenie rejects a missing or out-of-range socket number."""
    with pytest.raises(ValueError):
        Energenie()
    for invalid_socket in (0, 5):
        with pytest.raises(ValueError):
            Energenie(invalid_socket)
def test_energenie():
    """Energenie devices toggle the 4 code pins and pulse the enable pin.

    Pins 0-3 carry the socket/state code, pin 4 is unused here and pin 5
    is the transmit-enable line, pulsed 0.1s after the code is set and
    released at 0.25s.
    """
    pins = [MockPin(n) for n in (17, 22, 23, 27, 24, 25)]
    with Energenie(1, initial_value=True) as device1, \
            Energenie(2, initial_value=False) as device2:
        assert repr(device1) == '<gpiozero.Energenie object on socket 1>'
        assert repr(device2) == '<gpiozero.Energenie object on socket 2>'
        assert device1.value
        assert not device2.value
        # Fix: a list comprehension used purely for side effects is an
        # anti-pattern -- use a plain loop to reset the recorded states.
        for pin in pins:
            pin.clear_states()
        device1.on()
        assert device1.value
        pins[0].assert_states_and_times([(0.0, False), (0.0, True)])
        pins[1].assert_states_and_times([(0.0, True), (0.0, True)])
        pins[2].assert_states_and_times([(0.0, True), (0.0, True)])
        pins[3].assert_states_and_times([(0.0, False), (0.0, True)])
        pins[4].assert_states_and_times([(0.0, False)])
        pins[5].assert_states_and_times([(0.0, False), (0.1, True), (0.25, False)])
        for pin in pins:
            pin.clear_states()
        device2.on()
        assert device2.value
        pins[0].assert_states_and_times([(0.0, True), (0.0, False)])
        pins[1].assert_states_and_times([(0.0, True), (0.0, True)])
        pins[2].assert_states_and_times([(0.0, True), (0.0, True)])
        pins[3].assert_states_and_times([(0.0, True), (0.0, True)])
        pins[4].assert_states_and_times([(0.0, False)])
        pins[5].assert_states_and_times([(0.0, False), (0.1, True), (0.25, False)])
        device1.close()
        assert repr(device1) == '<gpiozero.Energenie object closed>'
|
lurch/python-gpiozero
|
tests/test_boards.py
|
Python
|
bsd-3-clause
| 25,935
|
[
"Amber"
] |
048f173fc0d38692bcab02f98d155b42a53286315622bf1559a54a3dad3bdade
|
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import StateData
import CogHQLoader, MintInterior
from toontown.toonbase import ToontownGlobals
from direct.gui import DirectGui
from toontown.toonbase import TTLocalizer
from toontown.toon import Toon
from direct.fsm import State
import CashbotHQExterior
import CashbotHQBossBattle
from pandac.PandaModules import DecalEffect
class CashbotCogHQLoader(CogHQLoader.CogHQLoader):
    """Hood loader for Cashbot HQ: exterior, vault lobby and mint interiors.

    Extends the generic CogHQLoader FSM with a 'mintInterior' state and
    supplies Cashbot-specific models, music and place classes.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('CashbotCogHQLoader')
    def __init__(self, hood, parentFSMState, doneEvent):
        CogHQLoader.CogHQLoader.__init__(self, hood, parentFSMState, doneEvent)
        # Register the mint-interior state and allow reaching it from the
        # base loader's existing states.
        self.fsm.addState(State.State('mintInterior', self.enterMintInterior, self.exitMintInterior, ['quietZone', 'cogHQExterior']))
        for stateName in ['start', 'cogHQExterior', 'quietZone']:
            state = self.fsm.getStateNamed(stateName)
            state.addTransition('mintInterior')
        self.musicFile = 'phase_9/audio/bgm/encntr_suit_HQ_nbrhood.ogg'
        self.cogHQExteriorModelPath = 'phase_10/models/cogHQ/CashBotShippingStation'
        self.cogHQLobbyModelPath = 'phase_10/models/cogHQ/VaultLobby'
        self.geom = None
        return
    def load(self, zoneId):
        # Load common HQ assets, then Cashbot-specific toon animations.
        CogHQLoader.CogHQLoader.load(self, zoneId)
        Toon.loadCashbotHQAnims()
    def unloadPlaceGeom(self):
        # Release the currently loaded place geometry, if any.
        if self.geom:
            self.geom.removeNode()
            self.geom = None
        CogHQLoader.CogHQLoader.unloadPlaceGeom(self)
        return
    def loadPlaceGeom(self, zoneId):
        """Load the scene geometry for the given zone's branch zone."""
        self.notify.info('loadPlaceGeom: %s' % zoneId)
        # Round down to the branch zone (multiples of 100).
        zoneId = zoneId - zoneId % 100
        if zoneId == ToontownGlobals.CashbotHQ:
            self.geom = loader.loadModel(self.cogHQExteriorModelPath)
            # Rename the link tunnel so the DNA system picks it up as the
            # tunnel to Donald's Dreamland (zone 9252).
            ddLinkTunnel = self.geom.find('**/LinkTunnel1')
            ddLinkTunnel.setName('linktunnel_dl_9252_DNARoot')
            locator = self.geom.find('**/sign_origin')
            backgroundGeom = self.geom.find('**/EntranceFrameFront')
            backgroundGeom.node().setEffect(DecalEffect.make())
            signText = DirectGui.OnscreenText(text=TTLocalizer.DonaldsDreamland[-1], font=ToontownGlobals.getSuitFont(), scale=3, fg=(0.87, 0.87, 0.87, 1), mayChange=False, parent=backgroundGeom)
            signText.setPosHpr(locator, 0, 0, 0, 0, 0, 0)
            signText.setDepthWrite(0)
        elif zoneId == ToontownGlobals.CashbotLobby:
            if config.GetBool('want-qa-regression', 0):
                self.notify.info('QA-REGRESSION: COGHQ: Visit CashbotLobby')
            self.geom = loader.loadModel(self.cogHQLobbyModelPath)
        else:
            self.notify.warning('loadPlaceGeom: unclassified zone %s' % zoneId)
        CogHQLoader.CogHQLoader.loadPlaceGeom(self, zoneId)
    def unload(self):
        CogHQLoader.CogHQLoader.unload(self)
        Toon.unloadCashbotHQAnims()
    def enterMintInterior(self, requestStatus):
        # requestStatus must carry the 'mintId' of the mint being entered.
        self.placeClass = MintInterior.MintInterior
        self.mintId = requestStatus['mintId']
        self.enterPlace(requestStatus)
    def exitMintInterior(self):
        self.exitPlace()
        self.placeClass = None
        del self.mintId
        return
    def getExteriorPlaceClass(self):
        return CashbotHQExterior.CashbotHQExterior
    def getBossPlaceClass(self):
        return CashbotHQBossBattle.CashbotHQBossBattle
|
silly-wacky-3-town-toon/SOURCE-COD
|
toontown/coghq/CashbotCogHQLoader.py
|
Python
|
apache-2.0
| 3,390
|
[
"VisIt"
] |
a426b9156942c3e4ea6a6e11b68a5e7e58395ea2892718aaa14460417a9bec9d
|
import collections
from pycparser import CParser
from pycparser import c_ast
def extractTypeAndName(n, defaultName=None):
    """Return (typename, deref_count, name) for a pycparser declaration node.

    Varargs ('...') are reported as ('int', 0, 'vararg').  Returns None for
    a bare 'void' with no pointer depth and no declared name (e.g. the
    'void' in 'f(void)').  Leading underscores are stripped from both the
    type name and the identifier.
    """
    if isinstance(n, c_ast.EllipsisParam):
        return ('int', 0, 'vararg')
    t = n.type
    d = 0
    # Unwrap pointer/array wrappers, counting dereference levels.
    while isinstance(t, c_ast.PtrDecl) or isinstance(t, c_ast.ArrayDecl):
        d += 1
        children = dict(t.children())
        t = children['type']
    if isinstance(t, c_ast.FuncDecl):
        # Function pointer: describe the pointed-to function instead.
        return extractTypeAndName(t)
    # Struct/union/enum types carry their tag in .name; plain types carry
    # a list of keywords in .names (e.g. ['unsigned', 'int']).
    if isinstance(t.type, c_ast.Struct) \
    or isinstance(t.type, c_ast.Union) \
    or isinstance(t.type, c_ast.Enum):
        typename = t.type.name
    else:
        typename = t.type.names[0]
    if typename == 'void' and d == 0 and not t.declname:
        return None
    name = t.declname or defaultName or ''
    return typename.lstrip('_'),d,name.lstrip('_')
# Lightweight records describing a parsed C function signature and each of
# its arguments: base type name, pointer-dereference count, identifier.
Function = collections.namedtuple('Function', ('type', 'derefcnt', 'name', 'args'))
Argument = collections.namedtuple('Argument', ('type', 'derefcnt', 'name'))
def Stringify(X):
    """Render a Function/Argument record as a C-like declaration string."""
    stars = X.derefcnt * '*'
    return '{} {} {}'.format(X.type, stars, X.name)
def ExtractFuncDecl(node, verbose=False):
    """Build a Function record from a c_ast.FuncDecl node.

    Unnamed arguments get synthetic names 'arg0', 'arg1', ...; a lone
    'void' parameter is dropped.  Returns None (after printing a notice)
    when the declaration itself has no name.
    """
    # The function name needs to be dereferenced.
    ftype, fderef, fname = extractTypeAndName(node)
    if not fname:
        print("Skipping function without a name!")
        print(node.show())
        return
    fargs = []
    for i, (argName, arg) in enumerate(node.args.children()):
        defname = 'arg%i' % i
        argdata = extractTypeAndName(arg, defname)
        # extractTypeAndName returns None for a bare 'void' parameter.
        if argdata is not None:
            a = Argument(*argdata)
            fargs.append(a)
    Func = Function(ftype, fderef, fname, fargs)
    if verbose:
        print(Stringify(Func) + '(' + ','.join(Stringify(a) for a in Func.args) + ');')
    return Func
def ExtractAllFuncDecls(ast, verbose=False):
    """Collect every function declaration in *ast*.

    Returns a dict mapping function name -> Function record.
    """
    Functions = {}
    class FuncDefVisitor(c_ast.NodeVisitor):
        def visit_FuncDecl(self, node, *a):
            f = ExtractFuncDecl(node, verbose)
            # Fix: ExtractFuncDecl returns None for declarations it skips
            # (functions without a name); guard against the resulting
            # AttributeError on f.name.
            if f is not None:
                Functions[f.name] = f
    FuncDefVisitor().visit(ast)
    return Functions
def ExtractFuncDeclFromSource(source):
    """Parse a C declaration string and return the first Function found.

    Returns None implicitly when parsing fails or no function is declared;
    parse errors are printed but deliberately swallowed.
    """
    try:
        p = CParser()
        # A trailing ';' makes a bare prototype a valid translation unit.
        ast = p.parse(source + ';')
        funcs = ExtractAllFuncDecls(ast)
        # NOTE(review): when several functions are declared, "first" is
        # whatever dict iteration yields first -- unspecified on older
        # Pythons; confirm callers only pass single declarations.
        for name, func in funcs.items():
            return func
    except Exception as e:
        import traceback
        traceback.print_exc()
        # eat it
|
pwndbg/pwndbg
|
pwndbg/funcparser.py
|
Python
|
mit
| 2,371
|
[
"VisIt"
] |
ee69a0d714616f8f0387015510540ba8bbda920574712074298010426572c44a
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import numpy as np
import pickle
from pymatgen.util.testing import PymatgenTest
from pymatgen.core.periodic_table import Element, Specie
from pymatgen.core.sites import Site, PeriodicSite
from pymatgen.core.lattice import Lattice
from pymatgen.core.composition import Composition
"""
Created on Jul 17, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jul 17, 2012"
class SiteTest(PymatgenTest):
    """Unit tests for pymatgen.core.sites.Site (non-periodic sites)."""
    def setUp(self):
        # Four representative sites: ordered, disordered (two species),
        # one with extra properties, and a dummy ("X") species.
        self.ordered_site = Site("Fe", [0.25, 0.35, 0.45])
        self.disordered_site = Site({"Fe": 0.5, "Mn": 0.5},
                                    [0.25, 0.35, 0.45])
        self.propertied_site = Site("Fe2+", [0.25, 0.35, 0.45],
                                    {'magmom': 5.1, 'charge': 4.2})
        self.dummy_site = Site("X", [0, 0, 0])
    def test_properties(self):
        # A disordered site has no single .specie; it must raise.
        self.assertRaises(AttributeError, getattr, self.disordered_site,
                          'specie')
        self.assertIsInstance(self.ordered_site.specie, Element)
        self.assertEqual(self.propertied_site.magmom, 5.1)
        self.assertEqual(self.propertied_site.charge, 4.2)
    def test_to_from_dict(self):
        # Round-tripping through as_dict/from_dict preserves identity,
        # occupancies and attached properties.
        d = self.disordered_site.as_dict()
        site = Site.from_dict(d)
        self.assertEqual(site, self.disordered_site)
        self.assertNotEqual(site, self.ordered_site)
        d = self.propertied_site.as_dict()
        site = Site.from_dict(d)
        self.assertEqual(site.magmom, 5.1)
        self.assertEqual(site.charge, 4.2)
        d = self.dummy_site.as_dict()
        site = Site.from_dict(d)
        self.assertEqual(site.species_and_occu, self.dummy_site.species_and_occu)
    def test_hash(self):
        # NOTE(review): these exact hash values are implementation-defined
        # by Site.__hash__ -- a regression pin, not a contract.
        self.assertEqual(self.ordered_site.__hash__(), 26)
        self.assertEqual(self.disordered_site.__hash__(), 51)
    def test_cmp(self):
        self.assertTrue(self.ordered_site > self.disordered_site)
    def test_distance(self):
        osite = self.ordered_site
        self.assertAlmostEqual(np.linalg.norm([0.25, 0.35, 0.45]),
                               osite.distance_from_point([0, 0, 0]))
        self.assertAlmostEqual(osite.distance(self.disordered_site), 0)
    def test_pickle(self):
        o = pickle.dumps(self.propertied_site)
        self.assertEqual(pickle.loads(o), self.propertied_site)
class PeriodicSiteTest(PymatgenTest):
    """Unit tests for pymatgen.core.sites.PeriodicSite."""
    def setUp(self):
        # All sites live in a 10 Angstrom cubic lattice unless a test
        # builds its own lattice.
        self.lattice = Lattice.cubic(10.0)
        self.si = Element("Si")
        self.site = PeriodicSite("Fe", [0.25, 0.35, 0.45],
                                 self.lattice)
        self.site2 = PeriodicSite({"Si": 0.5}, [0, 0, 0], self.lattice)
        self.assertEqual(self.site2.species_and_occu,
                         Composition({Element('Si'): 0.5}),
                         "Inconsistent site created!")
        self.propertied_site = PeriodicSite(Specie("Fe", 2),
                                            [0.25, 0.35, 0.45],
                                            self.lattice,
                                            properties={'magmom': 5.1,
                                                        'charge': 4.2})
        self.dummy_site = PeriodicSite("X", [0, 0, 0], self.lattice)
    def test_properties(self):
        """
        Test the properties for a site
        """
        # Fractional (a, b, c) and cartesian (x, y, z) coordinates.
        self.assertEqual(self.site.a, 0.25)
        self.assertEqual(self.site.b, 0.35)
        self.assertEqual(self.site.c, 0.45)
        self.assertEqual(self.site.x, 2.5)
        self.assertEqual(self.site.y, 3.5)
        self.assertEqual(self.site.z, 4.5)
        self.assertTrue(self.site.is_ordered)
        self.assertFalse(self.site2.is_ordered)
        self.assertEqual(self.propertied_site.magmom, 5.1)
        self.assertEqual(self.propertied_site.charge, 4.2)
    def test_distance(self):
        other_site = PeriodicSite("Fe", np.array([0, 0, 0]), self.lattice)
        self.assertAlmostEqual(self.site.distance(other_site), 6.22494979899,
                               5)
    def test_distance_from_point(self):
        self.assertNotAlmostEqual(self.site.distance_from_point([0.1, 0.1,
                                                                 0.1]),
                                  6.22494979899, 5)
        self.assertAlmostEqual(self.site.distance_from_point([0.1, 0.1, 0.1]),
                               6.0564015718906887, 5)
    def test_distance_and_image(self):
        other_site = PeriodicSite("Fe", np.array([1, 1, 1]), self.lattice)
        (distance, image) = self.site.distance_and_image(other_site)
        self.assertAlmostEqual(distance, 6.22494979899, 5)
        self.assertTrue(([-1, -1, -1] == image).all())
        (distance, image) = self.site.distance_and_image(other_site, [1, 0, 0])
        self.assertAlmostEqual(distance, 19.461500456028563, 5)
        # Test that old and new distance algo give the same ans for
        # "standard lattices"
        lattice = Lattice(np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))
        site1 = PeriodicSite("Fe", np.array([0.01, 0.02, 0.03]), lattice)
        site2 = PeriodicSite("Fe", np.array([0.99, 0.98, 0.97]), lattice)
        self.assertAlmostEqual(get_distance_and_image_old(site1, site2)[0],
                               site1.distance_and_image(site2)[0])
        # For a highly skewed lattice the old algorithm may overestimate.
        lattice = Lattice.from_parameters(1, 0.01, 1, 10, 10, 10)
        site1 = PeriodicSite("Fe", np.array([0.01, 0.02, 0.03]), lattice)
        site2 = PeriodicSite("Fe", np.array([0.99, 0.98, 0.97]), lattice)
        self.assertTrue(get_distance_and_image_old(site1, site2)[0] >
                        site1.distance_and_image(site2)[0])
        site2 = PeriodicSite("Fe", np.random.rand(3), lattice)
        (dist_old, jimage_old) = get_distance_and_image_old(site1, site2)
        (dist_new, jimage_new) = site1.distance_and_image(site2)
        self.assertTrue(dist_old - dist_new > -1e-8,
                        "New distance algo should give smaller answers!")
        self.assertFalse((abs(dist_old - dist_new) < 1e-8) ^
                         (jimage_old == jimage_new).all(),
                         "If old dist == new dist, images must be the same!")
        latt = Lattice.from_parameters(3.0, 3.1, 10.0, 2.96, 2.0, 1.0)
        site = PeriodicSite("Fe", [0.1, 0.1, 0.1], latt)
        site2 = PeriodicSite("Fe", [0.99, 0.99, 0.99], latt)
        (dist, img) = site.distance_and_image(site2)
        self.assertAlmostEqual(dist, 0.15495358379511573)
        self.assertEqual(list(img), [-11, 6, 0])
    def test_is_periodic_image(self):
        other = PeriodicSite("Fe", np.array([1.25, 2.35, 4.45]), self.lattice)
        self.assertTrue(self.site.is_periodic_image(other),
                        "This other site should be a periodic image.")
        other = PeriodicSite("Fe", np.array([1.25, 2.35, 4.46]), self.lattice)
        self.assertFalse(self.site.is_periodic_image(other),
                         "This other site should not be a periodic image.")
        other = PeriodicSite("Fe", np.array([1.25, 2.35, 4.45]),
                             Lattice.rhombohedral(2, 60))
        self.assertFalse(self.site.is_periodic_image(other),
                         "Different lattices should not be periodic images.")
    def test_equality(self):
        other_site = PeriodicSite("Fe", np.array([1, 1, 1]), self.lattice)
        self.assertTrue(self.site.__eq__(self.site))
        self.assertFalse(other_site.__eq__(self.site))
        self.assertFalse(self.site.__ne__(self.site))
        self.assertTrue(other_site.__ne__(self.site))
    def test_as_from_dict(self):
        d = self.site2.as_dict()
        site = PeriodicSite.from_dict(d)
        self.assertEqual(site, self.site2)
        self.assertNotEqual(site, self.site)
        d = self.propertied_site.as_dict()
        site3 = PeriodicSite({"Si": 0.5, "Fe": 0.5}, [0, 0, 0], self.lattice)
        d = site3.as_dict()
        site = PeriodicSite.from_dict(d)
        self.assertEqual(site.species_and_occu, site3.species_and_occu)
        d = self.dummy_site.as_dict()
        site = PeriodicSite.from_dict(d)
        self.assertEqual(site.species_and_occu, self.dummy_site.species_and_occu)
    def test_to_unit_cell(self):
        # Fractional coordinates are wrapped back into [0, 1).
        site = PeriodicSite("Fe", np.array([1.25, 2.35, 4.46]), self.lattice)
        site = site.to_unit_cell
        val = [0.25, 0.35, 0.46]
        self.assertArrayAlmostEqual(site.frac_coords, val)
def get_distance_and_image_old(site1, site2, jimage=None):
    """Distance between two periodic sites (legacy algorithm).

    If *jimage* is None, the periodic image of ``site2`` nearest to
    ``site1`` is chosen by rounding the fractional separation; otherwise
    the supplied image (in lattice-vector translations, e.g. [1, 0, 0])
    is used as-is.

    Args:
        site1: reference site (provides the lattice).
        site2: site to measure the distance to.
        jimage: optional explicit periodic image of site2.

    Returns:
        (distance, jimage): the distance and the periodic translation of
        site2 for which it applies.

    .. note::
        Assumes the primitive cell vectors are sufficiently not skewed
        such that \|a\|cos(ab_angle) < \|b\| for all cell-vector pairs;
        this condition is NOT checked.
    """
    frac_sep = site2.frac_coords - site1.frac_coords
    if jimage is None:
        # Nearest image: round each fractional component to the closest
        # whole lattice translation and shift by its negation.
        jimage = -np.array(np.around(frac_sep), int)
    cart_sep = site1.lattice.get_cartesian_coords(jimage + frac_sep)
    return np.linalg.norm(cart_sep), jimage
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    # Allow the test module to be executed directly.
    import unittest
    unittest.main()
|
matk86/pymatgen
|
pymatgen/core/tests/test_sites.py
|
Python
|
mit
| 10,437
|
[
"pymatgen"
] |
d40e6bc11c330a4e2fb371ac966da052d599e729bc2d9b0a348bbac2f0297396
|
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014, 2015, 2016 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
import six
from inspirehep.modules.references.processors import (
_split_refextract_authors_str, ReferenceBuilder)
def test_reference_builder_no_uids():
    """Exercise every ReferenceBuilder setter except the UID helpers.

    Invalid values (non-numeric number/year, unparsable pubnote,
    non-arXiv report number) must either be ignored or be demoted to
    raw references / plain report numbers rather than raise.
    """
    rb = ReferenceBuilder()
    rb.set_number('oops')
    rb.set_number('1')
    rb.set_texkey('book')
    rb.add_title('Awesome Paper')
    rb.add_raw_reference('[1] Awesome Paper', 'arXiv')
    rb.add_misc('misc 0')
    rb.add_misc('misc 1')
    rb.add_author('Cox, Brian')
    rb.add_author('O Briain, Dara', 'ed.')
    rb.set_pubnote('Nucl.Phys.,B360,362')
    # An unparsable pubnote falls back to a raw_reference entry.
    rb.set_pubnote('BAD PUBNOTE')
    rb.set_year('oops')
    rb.set_year('1991')
    rb.add_url('http://example.com')
    rb.set_publisher('Elsevier')
    rb.add_collaboration('ALICE')
    # arXiv-style report numbers are normalised into arxiv_eprints;
    # anything else stays as a publication_info reportnumber.
    rb.add_report_number('hep-th/0603001')
    rb.add_report_number('hep-th/0603002')
    rb.add_report_number('arXiv:0705.0016 [hep-th]')
    rb.add_report_number('0705.0017 [hep-th]')
    rb.add_report_number('NOT ARXIV')
    expected = {
        'number': 1,
        'texkey': 'book',
        'titles': [{'title': 'Awesome Paper'}],
        'raw_reference': [
            {'value': '[1] Awesome Paper', 'format': 'text',
             'source': 'arXiv'},
            {'value': 'BAD PUBNOTE', 'format': 'text',
             'source': 'reference_builder'}
        ],
        'misc': ['misc 0', 'misc 1'],
        'authors': [{'full_name': 'Cox, Brian'},
                    {'full_name': 'O Briain, Dara', 'role': 'ed.'}],
        'publication_info': {
            'journal_title': 'Nucl.Phys.',
            'journal_volume': 'B360',
            'page_start': '362',
            'artid': '362',
            'reportnumber': 'NOT ARXIV',
            'year': 1991
        },
        'collaboration': ['ALICE'],
        'imprint': {'publisher': 'Elsevier'},
        'arxiv_eprints': ['arXiv:hep-th/0603001', 'arXiv:hep-th/0603002',
                          'arXiv:0705.0016', 'arXiv:0705.0017'],
        'urls': [{'value': 'http://example.com'}]
    }
    assert expected == rb.obj
def test_curation():
    """curate() flips curated_relation from False to True."""
    builder = ReferenceBuilder()
    builder.set_record({'$ref': 'http://example.com'})
    expected = {
        'record': {'$ref': 'http://example.com'},
        'curated_relation': False,
    }
    assert builder.obj == expected
    builder.curate()
    expected['curated_relation'] = True
    assert builder.obj == expected
def test_reference_builder_add_uid():
    """add_uid dispatches each identifier string to the right field.

    Empty/None/unrecognised strings are ignored; the rest are classified
    by pattern: arXiv eprint, ISBN (normalised), CNUM, DOI (with the
    resolver prefix stripped) and handle.
    """
    rb = ReferenceBuilder()
    rb.add_uid(None)
    rb.add_uid('')
    rb.add_uid('thisisarandomstring')
    # arXiv eprint variations
    rb.add_uid('hep-th/0603001')
    rb.add_uid('0705.0017 [something-th]')
    # isbn
    rb.add_uid('1449344852')
    # cnum
    rb.add_uid('C87-11-11')
    # doi
    rb.add_uid('http://dx.doi.org/10.3972/water973.0145.db')
    # handle
    rb.add_uid('hdl:10443/1646')
    expected = {
        'arxiv_eprints': ['arXiv:hep-th/0603001', 'arXiv:0705.0017'],
        'publication_info': {
            'isbn': '978-1-4493-4485-6',
            'cnum': 'C87-11-11',
        },
        'dois': ['10.3972/water973.0145.db'],
        'persistent_identifiers': ['hdl:10443/1646']
    }
    assert expected == rb.obj
def test_refextract_authors():
    """_split_refextract_authors_str splits multi-author strings.

    'et al.' tails and joining 'and's must be dropped and unicode names
    preserved.
    """
    author_strings = [
        'Butler, D., Demarque, P., & Smith, H. A.',
        'Cenko, S. B., Kasliwal, M. M., Perley, D. A., et al.',
        'J. Kätlne et al.',  # Also test some unicode cases.
        u'J. Kätlne et al.',
        'Hoaglin D. C., Mostellar F., Tukey J. W.',
        'V.M. Zhuravlev, S.V. Chervon, and V.K. Shchigolev',
        'Gómez R, Reilly P, Winicour J and Isaacson R'
    ]
    expected = [
        ['Butler, D.', 'Demarque, P.', 'Smith, H. A.'],
        ['Cenko, S. B.', 'Kasliwal, M. M.', 'Perley, D. A.'],
        ['J. Kätlne'],
        ['J. Kätlne'],
        ['Hoaglin D. C.', 'Mostellar F.', 'Tukey J. W.'],
        ['V.M. Zhuravlev', 'S.V. Chervon', 'V.K. Shchigolev'],
        ['Gómez R', 'Reilly P', 'Winicour J', 'Isaacson R']
    ]
    for idx, authors_str in enumerate(author_strings):
        # Expect that the function returns correct unicode representations.
        # Fix: under Python 3 these literals are already text and have no
        # .decode(); only decode genuine byte strings (Python 2).
        expected_authors = [
            e if isinstance(e, six.text_type) else e.decode('utf8')
            for e in expected[idx]
        ]
        assert _split_refextract_authors_str(authors_str) == expected_authors
|
jacenkow/inspire-next
|
tests/unit/references/test_processors.py
|
Python
|
gpl-2.0
| 5,167
|
[
"Brian"
] |
db44cb85fa824e9703a72fff3e3b72507bebce7f126e554f6d970d3eac29e25b
|
"""
This package contains the XML handlers to read the NineML files and related
functions/classes, the NineML base meta-class (a meta-class is a factory that
generates classes) to generate a class for each NineML cell description (eg.
a 'Purkinje' class for an NineML containing a declaration of a Purkinje
cell), and the base class for each of the generated cell classes.
Author: Thomas G. Close (tclose@oist.jp)
Copyright: 2012-2014 Thomas G. Close.
License: This file is part of the "NineLine" package, which is released under
the MIT Licence, see LICENSE for details.
"""
from builtins import next
from builtins import object
from itertools import chain
import numpy as np
import quantities as pq
import neo
import nineml
from nineml.abstraction import Dynamics, Regime
from nineml.user import Property, Initial
from pype9.utils.mpi import mpi_comm, is_mpi_master
from nineml.exceptions import NineMLNameError
from pype9.annotations import PYPE9_NS
from pype9.exceptions import (
Pype9RuntimeError, Pype9AttributeError, Pype9DimensionError,
Pype9UsageError, Pype9BuildMismatchError, Pype9NoActiveSimulationError,
Pype9RegimeTransitionsNotRecordedError)
import logging
from .with_synapses import WithSynapses
logger = logging.Logger("Pype9")
# Helps to ensure that generated build names don't clash with built-in types
# e.g. 'Izhikevich'
BUILD_NAME_SUFFIX = '9ML'
class CellMetaClass(type):
    """
    Metaclass for creating simulator-specific cell classes from 9ML Dynamics
    classes. Instantiating a CellMetaClass with a ``nineml.Dynamics`` instance
    will generate, compile and load the required simulator-specific code and
    create a class that can be used to instantiate dynamics objects.
    Parameters
    ----------
    component_class : nineml.Dynamics
        The 9ML component class to create the Cell class for
    name : str
        The name of the cell class, which is used for the generated simulator
        code. If None, the name of the component_class is used. Note, names
        must be unique among classes loaded within the same simulation script.
    """
    # NOTE(review): cls.Simulation, cls.CodeGenerator, cls.BaseCellClass and
    # cls._built_types are not defined here -- presumably supplied by
    # simulator-specific subclasses of this metaclass; confirm against them.
    def __new__(cls, component_class, build_url=None, build_version=None,
                build_base_dir=None, code_generator=None, build_mode='lazy',
                **kwargs):
        # Grab the url before the component class is cloned
        url = (build_url if build_url is not None else component_class.url)
        # Clone component class so annotations can be added to it and not bleed
        # into the calling code.
        component_class = component_class.clone()
        # If the component class is not already wrapped in a WithSynapses
        # object, wrap it in one before passing to the code template generator
        if not isinstance(component_class, WithSynapses):
            component_class = WithSynapses.wrap(component_class)
        # Extract name from component class and append build_version if
        # provided
        name = component_class.name + BUILD_NAME_SUFFIX
        if build_version is not None:
            name += build_version
        if code_generator is None:
            try:
                code_generator = cls.Simulation.active().code_generator
            except Pype9NoActiveSimulationError:
                code_generator = cls.CodeGenerator(base_dir=build_base_dir)
        # Get transformed build class
        build_component_class = code_generator.transform_for_build(
            name=name, component_class=component_class, **kwargs)
        # Reuse a previously built class of the same name if (and only if)
        # its build component class is equivalent; otherwise fail loudly.
        try:
            Cell = cls._built_types[name]
        except KeyError:
            build = True
        else:
            if not Cell.build_component_class.equals(
                    build_component_class, annotations_ns=[PYPE9_NS]):
                serial_kwargs = {'format': 'yaml', 'version': 2,
                                 'to_str': True}
                raise Pype9BuildMismatchError(
                    "Cannot build '{}' cell dynamics as name clashes with "
                    "non-equal component class that was previously loaded. "
                    "Use 'build_version' option to differentiate between "
                    "them (will be appended to the built name)\n\n"
                    "This (url:{})\n-------------------\n{}\n{}"
                    "\nPrevious (url:{})\n-------------------\n{}\n{}\n"
                    "Mismatch\n-------------------\n{}\n\n"
                    .format(name,
                            build_component_class.url,
                            build_component_class.serialize(**serial_kwargs),
                            build_component_class.dynamics.serialize(
                                **serial_kwargs),
                            Cell.build_component_class.url,
                            Cell.build_component_class.serialize(
                                **serial_kwargs),
                            Cell.build_component_class.dynamics.serialize(
                                **serial_kwargs),
                            build_component_class.find_mismatch(
                                Cell.build_component_class,
                                annotations_ns=[PYPE9_NS])))
            build = False
        if build:
            # Only build the components on the root node
            if is_mpi_master():
                # Generate and compile cell class
                code_generator.generate(component_class=build_component_class,
                                        url=url, build_mode=build_mode,
                                        **kwargs)
            # Make slave nodes wait for the root node to finish building
            mpi_comm.barrier()
            # Load newly built model
            code_generator.load_libraries(name, url)
            # Create class member dict of new class
            dct = {'name': name,
                   'component_class': component_class,
                   'build_component_class': build_component_class,
                   'code_generator': code_generator,
                   'unit_handler': code_generator.UnitHandler(component_class),
                   'Simulation': cls.Simulation}
            # Create new class using Type.__new__ method
            Cell = super(CellMetaClass, cls).__new__(
                cls, name, (cls.BaseCellClass,), dct)
            # Save Cell class to allow it to save it being built again
            cls._built_types[name] = Cell
        return Cell
    def __init__(self, component_class, **kwargs):
        # This initializer is empty, but since I have changed the signature of
        # the __new__ method in the deriving metaclasses it complains otherwise
        # (not sure if there is a more elegant way to do this).
        pass
class Cell(object):
"""
Base class for all cell classes created from the CellMetaClass. It defines
all methods that can be called on cell model objects.
Parameters
----------
prototype_ : DynamicsProperties
A dynamics properties object used as the "prototype" for the cell
regime_ : str
Name of regime the cell will be initiated in
kwargs : dict(str, nineml.Quantity)
Properties and initial state variables to initiate the cell with. These
will override properties/initial-values in the prototype
"""
    def __init__(self, *args, **kwargs):
        """Initialise the cell from a prototype, regime and overrides.

        See the class docstring for the meaning of ``prototype_``,
        ``regime_`` and the remaining keyword arguments.
        """
        self._in_array = kwargs.pop('_in_array', False)
        # Flag to determine whether the cell has been initialized or not
        # (it makes a difference to how the state of the cell is updated,
        # either saved until the 'initialze' method is called or directly
        # set to the state)
        sim = self.Simulation.active()
        self._t_start = sim.t_start
        self._t_stop = None
        if self.in_array:
            # Array cells skip 9ML bookkeeping: values go straight to the
            # simulator and are assumed to already be in the right units.
            for k, v in kwargs.items():
                self._set(k, v)  # Values should be in the right units.
            self._regime_index = None
        else:
            # These position arguments are a little more complex to retrieve
            # due to Python 2's restriction of **kwargs only following
            # *args.
            # Get prototype argument
            if len(args) >= 1:
                prototype = args[0]
                if 'prototype_' in kwargs:
                    raise Pype9UsageError(
                        "Cannot provide prototype as (1st) argument ({}) and "
                        "keyword arg ({})".format(prototype,
                                                  kwargs['prototype_']))
            else:
                prototype = kwargs.pop('prototype_', self.component_class)
            # Get regime argument
            if len(args) >= 2:
                regime = args[1]
                if 'regime_' in kwargs:
                    raise Pype9UsageError(
                        "Cannot provide regime as (2nd) argument ({}) and "
                        "keyword arg ({})".format(regime, kwargs['regime_']))
            else:
                try:
                    regime = kwargs.pop('regime_')
                except KeyError:
                    regime = None
            if regime is None:
                # A single-regime class needs no explicit choice.
                if self.component_class.num_regimes == 1:
                    regime = next(self.component_class.regime_names)
                else:
                    raise Pype9UsageError(
                        "Need to specify initial regime using 'regime_' "
                        "keyword arg for component class with multiple "
                        "regimes ('{}')".format(
                            self.component_class.regime_names))
            if len(args) > 2:
                raise Pype9UsageError(
                    "Only two non-keyword arguments ('prototype_' and "
                    "'regime_' permitted in Cell __init__ (provided: {})"
                    .format(', '.join(args)))
            self.set_regime(regime)
            # Split remaining kwargs into state-variable initial values and
            # component properties, converting pq.Quantity where needed.
            properties = []
            initial_values = []
            for name, qty in kwargs.items():
                if isinstance(qty, pq.Quantity):
                    qty = self.unit_handler.from_pq_quantity(qty)
                if name in self.component_class.state_variable_names:
                    initial_values.append(nineml.Initial(name, qty))
                else:
                    properties.append(nineml.Property(name, qty))
            self._nineml = nineml.DynamicsProperties(
                name=self.name + '_properties',
                definition=prototype,
                properties=properties, initial_values=initial_values,
                check_initial_values=True)
            # Set up references from parameter names to internal variables and
            # set parameters
            for p in chain(self.properties, self.initial_values):
                qty = p.quantity
                if qty.value.nineml_type != 'SingleValue':
                    raise Pype9UsageError(
                        "Only SingleValue quantities can be used to initiate "
                        "individual cell classes ({})".format(p))
                self._set(p.name, float(self.unit_handler.scale_value(qty)))
            sim.register_cell(self)
    @property
    def component_class(self):
        # The 9ML component class of the underlying DynamicsProperties.
        return self._nineml.component_class
    @property
    def in_array(self):
        # True when this cell is an element of a simulator array (set via
        # the private '_in_array' kwarg in __init__).
        return self._in_array
    def _flag_created(self, flag):
        """
        Dis/Enable the override of setattr so that only properties of the 9ML
        component can be set
        """
        # Bypass this class's own __setattr__ so the flag itself can be
        # written without triggering the parameter-name validation.
        super(Cell, self).__setattr__('_created', flag)
    def __contains__(self, varname):
        # A name is "in" the cell if it is a parameter or a state variable
        # of the 9ML component class.
        return varname in chain(self.component_class.parameter_names,
                                self.component_class.state_variable_names)
    def __getattr__(self, varname):
        """
        Gets the value of parameters and state variables
        """
        # Only active once __init__ has flagged the instance as created;
        # before that, attribute lookups fall through normally.
        if self._created:
            if varname not in self:
                raise Pype9AttributeError(
                    "'{}' is not an attribute nor parameter or state variable "
                    "of the '{}' component class ('{}')"
                    .format(varname, self.component_class.name,
                            "', '".join(chain(
                                self.component_class.parameter_names,
                                self.component_class.state_variable_names))))
            # Read the raw simulator value, then re-attach 9ML units.
            val = self._get(varname)
            qty = self.unit_handler.assign_units(
                val, self.component_class.element(
                    varname, child_types=Dynamics.nineml_children).dimension)
            return qty
def __setattr__(self, varname, val):
    """
    Sets the value of parameters and state variables

    Parameters
    ----------
    varname : str
        Name of the of the parameter or state variable
    val : float | pq.Quantity | nineml.Quantity
        The value to set
    """
    if self._created:
        # Once the __init__ method has set all the members
        if varname not in self:
            raise Pype9AttributeError(
                "'{}' is not a parameter or state variable of the '{}'"
                " component class ('{}')"
                .format(varname, self.component_class.name,
                        "', '".join(chain(
                            self.component_class.parameter_names,
                            self.component_class.state_variable_names))))
        # Normalise python-quantities values to 9ML quantities before the
        # dimensionality check
        if isinstance(val, pq.Quantity):
            qty = self.unit_handler.from_pq_quantity(val)
        else:
            qty = val
        if qty.units.dimension != self.component_class.dimension_of(
                varname):
            raise Pype9DimensionError(
                "Attempting so set '{}', which has dimension {} to "
                "{}, which has dimension {}".format(
                    varname,
                    self.component_class.dimension_of(varname), qty,
                    qty.units.dimension))
        if not self.in_array:
            # Set the quantity in the nineml class
            if varname in self.component_class.state_variable_names:
                self._nineml.set(Initial(varname, qty))
            else:
                self._nineml.set(Property(varname, qty))
        # Set the value in the simulator
        self._set(varname, float(self.unit_handler.scale_value(qty)))
    else:
        # Until _created is flagged, fall back to plain attribute assignment
        # so __init__ can set up internal members
        super(Cell, self).__setattr__(varname, val)
def set_regime(self, regime):
    """Set the current dynamics regime of the cell by name (or index)."""
    # NOTE(review): an integer `regime` (as the comment below says PyNN
    # passes) will not match any entry of regime_names, so it appears it
    # would be rejected by this guard before the int() branch is ever
    # reached -- confirm the intended path for integer regimes.
    if regime not in self.component_class.regime_names:
        raise Pype9UsageError(
            "'{}' is not a name of a regime in '{} cells "
            "(regimes are '{}')".format(
                regime, self.name,
                "', '".join(self.component_class.regime_names)))
    try:
        # If regime is an integer (as it will be when passed from PyNN)
        index = int(regime)
    except ValueError:
        # If the regime is the regime name
        index = self.regime_index(regime)
    # Bypass __setattr__: the regime index is not a 9ML parameter
    super(Cell, self).__setattr__('_regime_index', index)
    self._set_regime()
def get(self, varname):
    """
    Gets the 9ML property associated with the varname
    """
    return self._nineml.prop(varname)
def set(self, **kwargs):
    """Set multiple parameters/state variables given as keyword arguments."""
    for k, v in kwargs.items():
        # Delegates to __setattr__, which validates names and dimensions
        setattr(self, k, v)
def __dir__(self):
    """
    Append the property names to the list of attributes of a cell object
    """
    # NOTE(review): `super(self.__class__, self)` is a known pitfall -- for a
    # subclass instance it does not refer to this class's parent; consider
    # `super(Cell, self)` (as used elsewhere in this class) -- confirm.
    return list(set(chain(
        dir(super(self.__class__, self)),
        self.component_class.parameter_names,
        self.state_variable_names)))
@property
def properties(self):
    """
    The set of component_class properties (parameter values).
    """
    return self._nineml.properties
@property
def initial_values(self):
    """Initial values of the state variables."""
    return self._nineml.initial_values
@property
def property_names(self):
    """Names of the 9ML properties."""
    return self._nineml.property_names
@property
def state_variable_names(self):
    """Names of the state variables of the component class."""
    return self.component_class.state_variable_names
@property
def event_receive_port_names(self):
    """Names of the event receive ports of the component class."""
    return self.component_class.event_receive_port_names
def __repr__(self):
    """Debug representation naming the underlying 9ML component class."""
    cls_name = type(self).__name__
    comp_name = self._nineml.component_class.name
    return '{}(component_class="{}")'.format(cls_name, comp_name)
def serialize(self, document, **kwargs):  # @UnusedVariable
    """Serialize the underlying 9ML properties into `document`."""
    return self._nineml.serialize(document, **kwargs)
@property
def used_units(self):
    """Units referenced by the underlying 9ML properties."""
    return self._nineml.used_units
@classmethod
def regime_index(cls, name):
    """
    Returns the index of the regime corresponding to 'name' as used in the
    code generation (useful when creating arrays to set a population regime
    values)
    """
    return cls.build_component_class.index_of(
        cls.build_component_class.regime(name))
@classmethod
def from_regime_index(cls, index):
    """
    The reciprocal of regime_index, returns the regime name from its index
    """
    return cls.build_component_class.from_index(
        index, Regime.nineml_type,
        nineml_children=Dynamics.nineml_children).name
def initialize(self):
    """Reapply the stored initial values and initial regime to the
    simulator state."""
    for iv in self._nineml.initial_values:
        # Goes through __setattr__, which also pushes to the simulator
        setattr(self, iv.name, iv.quantity)
    self._set_regime()
def write(self, file, **kwargs):  # @ReservedAssignment
    """Write the 9ML properties to `file` (not available for cells that
    belong to an array)."""
    if self.in_array:
        raise Pype9UsageError(
            "Can only write cell properties when they are not in an "
            "array")
    self._nineml.write(file, **kwargs)
def reset_recordings(self):
    """Reset all recordings (backend-specific)."""
    raise NotImplementedError("Should be implemented by derived class")
def clear_recorders(self):
    """
    Clears all recorders and recordings
    """
    # Bypass __setattr__, which only permits 9ML parameter/state names
    super(Cell, self).__setattr__('_recorders', {})
    super(Cell, self).__setattr__('_recordings', {})
def _initialize_local_recording(self):
    """Create the recorder containers lazily on first use."""
    if not hasattr(self, '_recorders'):
        self.clear_recorders()
def record(self, port_name, t_start=None):
    """
    Specify the recording of a send port or state-variable before the
    simulation.
    """
    raise NotImplementedError("Should be implemented by derived class")
def record_regime(self):
    """
    Returns the current regime at each timestep. Periods spent in each
    regimes can be retrieved with the ``regime_epochs`` method.
    """
    raise NotImplementedError("Should be implemented by derived class")
def recording(self, port_name, t_start=None):
    """
    Return recorded data as a dictionary containing one numpy array for
    each neuron, ids as keys.

    Parameters
    ----------
    port_name : str
        Name of the port to retrieve the recording for
    """
    raise NotImplementedError("Should be implemented by derived class")
def recordings(self, t_start=None):
    """
    Collect all recorded signals and spike trains into a neo.Segment.

    Parameters
    ----------
    t_start : float | None
        If given, mentioned in the segment description and forwarded to
        ``recording`` so signals are trimmed to start at that time.

    Returns
    -------
    neo.Segment
        Segment holding analog signals, spike trains and, if recorded,
        the regime epochs.
    """
    # BUG FIX: the description format string previously contained a single
    # placeholder while two arguments were supplied, so the optional
    # "from {t_start}" clause was silently discarded by str.format.
    seg = neo.Segment(description="Simulation of '{}' cell{}".format(
        self._nineml.name,
        (' from {}'.format(t_start) if t_start is not None else '')))
    for port_name in self._recorders:
        # The regime trace is exposed via regime_epochs(), not as a signal
        if port_name == self.code_generator.REGIME_VARNAME:
            continue
        sig = self.recording(port_name, t_start=t_start)
        if isinstance(sig, neo.AnalogSignal):
            seg.analogsignals.append(sig)
        else:
            seg.spiketrains.append(sig)
    try:
        seg.epochs.append(self.regime_epochs())
    except Pype9RegimeTransitionsNotRecordedError:
        # Regime transitions simply weren't recorded; not an error here
        pass
    return seg
def _regime_recording(self):
    """Return the raw regime-index trace (backend-specific)."""
    raise NotImplementedError("Should be implemented by derived class")
def regime_epochs(self):
    """
    Retrieves the periods spent in each regime during the simulation
    in a neo.core.EpochArray
    """
    try:
        rec = self._regime_recording()
    except KeyError:
        raise Pype9RegimeTransitionsNotRecordedError(
            "Regime transitions not recorded, call 'record_regime' before"
            " simulation")
    cc = self.build_component_class
    # Map the integer regime indices used by generated code back to names
    index_map = dict((cc.index_of(r), r.name) for r in cc.regimes)
    # Sample indices at which the recorded regime index changes
    trans_inds = np.nonzero(
        np.asarray(rec[1:]) != np.asarray(rec[:-1]))[0] + 1
    # Insert initial regime
    trans_inds = np.insert(trans_inds, 0, 0)
    labels = [index_map[int(rec[int(i)])] for i in trans_inds]
    times = rec.times[trans_inds]
    # Append t_stop so the final epoch gets a duration as well
    epochs = np.append(times, rec.t_stop) * times.units
    durations = epochs[1:] - epochs[:-1]
    return neo.Epoch(
        times=times, durations=durations, labels=labels,
        name='{}_regimes'.format(self.name))
def play(self, port_name, signal, properties=None):
    """
    Plays an analog signal or train of events into a port of the
    cell

    Parameters
    ----------
    port_name : str
        The name of the port to play the signal into
    signal : neo.AnalogSignal | neo.SpikeTrain
        The signal to play into the cell
    properties : list(nineml.Property) | None
        Connection properties when playing into a event receive port
        with static connection properties
    """
    # BUG FIX: default changed from a shared mutable `[]` to None (the
    # classic mutable-default pitfall); implementations should treat None
    # as "no properties".
    raise NotImplementedError("Should be implemented by derived class")
def connect(self, sender, send_port_name, receive_port_name, delay,
            properties=None):
    """
    Connects an event send port from other into an event receive port in
    the cell

    Parameters
    ----------
    sender : pype9.simulator.base.cells.Cell
        The sending cell to connect the from
    send_port_name : str
        Name of the port in the sending cell to connect to
    receive_port_name : str
        Name of the receive port in the current cell to connect from
    delay : nineml.Quantity (time)
        The delay of the connection
    properties : list(nineml.Property) | None
        The connection properties of the event port
    """
    # BUG FIX: default changed from a shared mutable `[]` to None (the
    # classic mutable-default pitfall); implementations should treat None
    # as "no properties".
    raise NotImplementedError("Should be implemented by derived class")
def _check_connection_properties(self, port_name, properties):
    """Validate connection `properties` against the connection parameter
    set declared for `port_name` (matching names and dimensions)."""
    props_by_name = dict((p.name, p) for p in properties)
    try:
        param_set = self._nineml.component_class.connection_parameter_set(
            port_name)
    except NineMLNameError:
        return  # No parameter set, so no need to check
    params_by_name = dict((p.name, p) for p in param_set.parameters)
    if set(props_by_name) != set(params_by_name):
        raise Pype9RuntimeError(
            "Mismatch between provided property and parameter names:"
            "\nParameters: '{}'\nProperties: '{}'"
            .format("', '".join(params_by_name.keys()),
                    "', '".join(props_by_name.keys())))
    for prop in properties:
        param = params_by_name[prop.name]
        if param.dimension != prop.units.dimension:
            raise Pype9RuntimeError(
                "Dimension of property '{}' ({}) does not match that of "
                "the corresponding parameter ({})"
                .format(prop.name, prop.units.dimension,
                        param.dimension))
def _kill(self, t_stop):
    """
    Caches recording data and sets all references to the actual
    simulator object to None ahead of a simulator reset. This allows cell
    data to be accessed after a simulation has completed, and potentially
    a new simulation to have been started.
    """
    # TODO: Cache has not been implemented yet
    # Bypass __setattr__ ('_t_stop' is not a 9ML parameter/state variable)
    super(Cell, self).__setattr__('_t_stop', t_stop)
def is_dead(self):
    """Whether this cell belongs to a simulation that has already stopped."""
    return self._t_stop is not None
def _trim_spike_train(self, train, t_start):
    """Drop spikes that occurred before `t_start`."""
    return train[train >= t_start]
def _trim_analog_signal(self, signal, t_start, interval):
    """
    Trim the leading samples of `signal` so that it starts at `t_start`.

    Parameters
    ----------
    signal : neo.AnalogSignal
        The recorded signal to trim
    t_start : pq.Quantity (time)
        Requested start time; must fall exactly on a sample
    interval : pq.Quantity (time)
        Sampling interval of the signal
    """
    sim_start = self.unit_handler.to_pq_quantity(self._t_start)
    offset = (t_start - sim_start)
    if offset > 0.0 * pq.s:
        offset_index = offset / interval
        # The requested start must line up exactly with a sample boundary
        if round(offset_index) != offset_index:
            # BUG FIX: previous message was garbled ("... needs to and ...
            # must be ..."); rewritten as a single coherent sentence.
            raise Pype9UsageError(
                "Difference between recording start time ({}) and "
                "simulation start time ({}) must be an integer "
                "multiple of the sampling interval ({})".format(
                    t_start, sim_start, interval))
        signal = signal[int(offset_index):]
    return signal
# This has to go last to avoid clobbering the property decorators
def property(self, name):
    """Return the 9ML property named `name`.

    Note: this method shadows the builtin `property` within the class
    body, which is why it must be defined after all @property uses above.
    """
    return self._nineml.property(name)
|
tclose/PyPe9
|
pype9/simulate/common/cells/base.py
|
Python
|
mit
| 24,986
|
[
"NEURON"
] |
532298413a4673e9007bf5183e1101acfa6a2fefd9bf92d83af0e85911126464
|
#!/usr/bin/python
# (C) 2013, Markus Wildi, markus.wildi@bluewin.ch
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Or visit http://www.gnu.org/licenses/gpl.html.
#
__author__ = 'markus.wildi@bluewin.ch'
import logging
import sys
import os
class Logger(object):
"""Define the logger for rts2saf
:var debug: enable more debug output with --debug and --level
:var logformat: format string
:var args: command line arguments or their defaults
"""
def __init__(self,
debug=False,
logformat='%(asctime)s:%(name)s:%(levelname)s:%(message)s',
args=None):
self.debug=debug
self.logformat=logformat
self.args=args
logFile = os.path.join(self.args.toPath, self.args.logfile)
ok= True
if os.access(logFile, os.W_OK):
logging.basicConfig(filename=logFile, level=self.args.level.upper(), format=self.logformat)
else:
if not os.path.isdir(self.args.toPath):
os.mkdir(self.args.toPath)
if os.access(self.args.toPath, os.W_OK):
try:
logging.basicConfig(filename=logFile, level=self.args.level.upper(), format=self.logformat)
except Exception, e:
print 'Logger: can not log to file: {}, trying to log to console, error: {}'.format(logFile, e)
# ugly
args.level= 'DEBUG'
else:
ok = False
logPath='/tmp/rts2saf_log'
logFile=os.path.join(logPath, self.args.logfile)
if not os.path.isdir(logPath):
os.mkdir(logPath)
try:
logging.basicConfig(filename=logFile, level=self.args.level.upper(), format=self.logformat)
except Exception, e:
print 'Logger: can not log to file: {}, trying to log to console, error: {}'.format(logFile, e)
# ugly
args.level= 'DEBUG'
self.logger = logging.getLogger()
# ToDo: simplify that
if args.level in 'DEBUG':
toconsole=True
else:
toconsole=args.toconsole
if toconsole:
# http://www.mglerner.com/blog/?p=8
soh = logging.StreamHandler(sys.stdout)
soh.setLevel(args.level)
self.logger.addHandler(soh)
if ok:
self.logger.info('logging to: {0} '.format(logFile, self.args.logfile))
else:
self.logger.warn('logging to: {0} instead of {1}'.format(logFile, os.path.join(self.args.toPath, self.args.logfile)))
|
RTS2/rts2
|
scripts/rts2saf/rts2saf/log.py
|
Python
|
lgpl-3.0
| 3,360
|
[
"VisIt"
] |
ed188c6ddddbb5aaeb177dfba6297de331a842f5cc32bef1702a62ef58b762da
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2014 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
##
from stoqlib.gui.editors.baseeditor import BaseEditor
from stoqlib.domain.till import TillClosedView
from stoqlib.lib.translation import stoqlib_gettext
_ = stoqlib_gettext
class TillDetailsDialog(BaseEditor):
    """Read-only dialog showing the details of a closed till: responsibles
    for opening/closing, the dates, cash amounts and observations."""
    gladefile = "TillDetailsDialog"
    model_type = TillClosedView
    title = _(u"Till Details")
    size = (-1, 230)
    # Glade widget names proxied to the same-named TillClosedView attributes
    proxy_widgets = ('observations',
                     'responsible_open_name',
                     'responsible_close_name',
                     'opening_date',
                     'closing_date',
                     'initial_cash_amount',
                     'final_cash_amount')
    def setup_proxies(self):
        # Bind each widget in proxy_widgets to the model attribute of the
        # same name (BaseEditor machinery)
        self.add_proxy(self.model, self.proxy_widgets)
|
andrebellafronte/stoq
|
stoqlib/gui/dialogs/tilldetails.py
|
Python
|
gpl-2.0
| 1,624
|
[
"VisIt"
] |
c33d8df536549d8524e535f30464fe141a5ba65a1045e94e5c31251d0d7abd3a
|
"""Test all components for instatiation issues."""
# pylint: disable=unused-argument,redefined-outer-name
from inspect import isclass
import pytest
from bowtie import App
from bowtie import control, visual, html
from bowtie._component import COMPONENT_REGISTRY
from bowtie.tests.utils import reset_uuid, server_check
def create_components():
    """Create components for this test.

    Instantiates every public controller (except Upload), visual and HTML
    component exposed by bowtie, checking each is registered.
    """
    reset_uuid()
    # pylint: disable=protected-access
    def _instantiate_all(module, base, skip=()):
        # Instantiate every public subclass of `base` found in `module`
        made = []
        for attr_name in dir(module):
            if attr_name.startswith('_') or attr_name in skip:
                continue
            candidate = getattr(module, attr_name)
            if isclass(candidate) and issubclass(candidate, base):
                made.append(candidate())
        for instance in made:
            assert COMPONENT_REGISTRY[instance._uuid] == instance
        return made
    controllers = _instantiate_all(control, control._Controller,
                                   skip=('Upload',))
    visuals = _instantiate_all(visual, visual._Visual)
    htmls = _instantiate_all(html, html._HTML)
    return controllers, visuals, htmls
# NOTE(review): executed at import time -- presumably to fail fast on
# component instantiation/registration errors even when the fixture below
# is not used; confirm.
create_components()
@pytest.fixture
def components(build_reset, monkeypatch):
    """App with all components."""
    controllers, visuals, htmls = create_components()
    app = App(__name__, rows=len(visuals), sidebar=True)
    for controller in controllers:
        # pylint: disable=protected-access
        assert COMPONENT_REGISTRY[controller._uuid] == controller
        app.add_sidebar(controller)
    for vis in visuals:
        # pylint: disable=protected-access
        assert COMPONENT_REGISTRY[vis._uuid] == vis
        app.add(vis)
    for htm in htmls:
        # pylint: disable=protected-access
        assert COMPONENT_REGISTRY[htm._uuid] == htm
        app.add_sidebar(htm)
    # NOTE(review): visuals are counted twice here while controllers and
    # htmls are counted once -- presumably App.add registers an extra
    # component per visual; confirm.
    assert len(COMPONENT_REGISTRY) == len(controllers) + 2 * len(visuals) + len(htmls)
    # pylint: disable=protected-access
    app._build()
    # run second time to make sure nothing weird happens with subsequent builds
    app._build()
    with server_check(app) as server:
        yield server
def test_components(components, chrome_driver):
    """Test that no components cause an error."""
    chrome_driver.get('http://localhost:9991')
    chrome_driver.implicitly_wait(5)
    browser_logs = chrome_driver.get_log('browser')
    severe_entries = (entry for entry in browser_logs
                      if entry['level'] == 'SEVERE')
    # Fail on the first severe browser-console message, if any
    for entry in severe_entries:
        raise Exception(entry['message'])
|
jwkvam/bowtie
|
bowtie/tests/test_components.py
|
Python
|
mit
| 2,786
|
[
"Bowtie"
] |
61a818aacc3296bd7386d9d381c1c407b282fb1be6125a43919ad20c6c414d1c
|
from tool_shed.base.twilltestcase import ShedTwillTestCase, common, os
import logging, time
log = logging.getLogger(__name__)
# Fixture metadata shared by every test in this module
repository_name = 'filtering_1410'
repository_description = "Galaxy's filtering tool"
repository_long_description = "Long description of Galaxy's filtering repository"
category_name = 'Test 1410 - Galaxy Update Manager'
category_description = 'Functional test suite to test the update manager.'
# The bare string below documents the overall plan; each numbered step maps
# to one test_00xx method of TestUpdateManager
'''
1. Create and populate the filtering_1410 repository.
2. Install filtering_1410 to Galaxy.
3. Upload a readme file.
4. Verify that the browse page now shows an update available.
'''
class TestUpdateManager( ShedTwillTestCase ):
    '''Test the Galaxy update manager.'''
    # Note: each test method has a docstring plus a second bare string
    # (a plain statement, kept as-is) describing the step it implements.
    def test_0000_initiate_users( self ):
        """Create necessary user accounts and login as an admin user."""
        """
        Create all the user accounts that are needed for this test script to run independently of other tests.
        Previously created accounts will not be re-created.
        """
        self.logout()
        self.login( email=common.test_user_1_email, username=common.test_user_1_name )
        test_user_1 = self.test_db_util.get_user( common.test_user_1_email )
        assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email
        test_user_1_private_role = self.test_db_util.get_private_role( test_user_1 )
        self.logout()
        self.login( email=common.admin_email, username=common.admin_username )
        admin_user = self.test_db_util.get_user( common.admin_email )
        assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
        admin_user_private_role = self.test_db_util.get_private_role( admin_user )
        self.galaxy_logout()
        self.galaxy_login( email=common.admin_email, username=common.admin_username )
        galaxy_admin_user = self.test_db_util.get_galaxy_user( common.admin_email )
        assert galaxy_admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
        galaxy_admin_user_private_role = self.test_db_util.get_galaxy_private_role( galaxy_admin_user )
    def test_0005_create_filtering_repository( self ):
        '''Create and populate the filtering_1410 repository.'''
        '''
        We are at step 1 - Create and populate the filtering_1410 repository.
        Create filtering_1410 and upload the tool tarball to it.
        '''
        category = self.create_category( name=category_name, description=category_description )
        self.logout()
        self.login( email=common.test_user_1_email, username=common.test_user_1_name )
        repository = self.get_or_create_repository( name=repository_name,
                                                    description=repository_description,
                                                    long_description=repository_long_description,
                                                    owner=common.test_user_1_name,
                                                    category_id=self.security.encode_id( category.id ) )
        self.upload_file( repository,
                          filename='filtering/filtering_1.1.0.tar',
                          filepath=None,
                          valid_tools_only=True,
                          uncompress_file=True,
                          remove_repo_files_not_in_tar=True,
                          commit_message="Uploaded filtering 1.1.0",
                          strings_displayed=[],
                          strings_not_displayed=[] )
    def test_0010_install_filtering_repository( self ):
        '''Install the filtering_1410 repository.'''
        '''
        We are at step 2 - Install filtering_1410 to Galaxy.
        Install the filtering repository to Galaxy.
        '''
        self.galaxy_logout()
        self.galaxy_login( email=common.admin_email, username=common.admin_username )
        self.install_repository( 'filtering_1410',
                                 common.test_user_1_name,
                                 category_name,
                                 new_tool_panel_section_label='test_1410' )
        installed_repository = self.test_db_util.get_installed_repository_by_name_owner( 'filtering_1410', common.test_user_1_name )
        strings_displayed = [ 'filtering_1410',
                              "Galaxy's filtering tool",
                              'user1',
                              self.url.replace( 'http://', '' ),
                              installed_repository.installed_changeset_revision ]
        self.display_galaxy_browse_repositories_page( strings_displayed=strings_displayed )
        strings_displayed.extend( [ 'Installed tool shed repository', 'Valid tools', 'Filter1' ] )
        self.display_installed_repository_manage_page( installed_repository, strings_displayed=strings_displayed )
        self.verify_tool_metadata_for_installed_repository( installed_repository )
    def test_0015_upload_readme_file( self ):
        '''Upload readme.txt to filtering_1410.'''
        '''
        We are at step 3 - Upload a readme file.
        Upload readme.txt. This will have the effect of making the installed changeset revision not be the most recent downloadable revision,
        but without generating a second downloadable revision. Then sleep for 3 seconds to make sure the update manager picks up the new
        revision.
        '''
        self.logout()
        self.login( email=common.test_user_1_email, username=common.test_user_1_name )
        repository = self.test_db_util.get_repository_by_name_and_owner( repository_name, common.test_user_1_name )
        self.upload_file( repository,
                          filename='readme.txt',
                          filepath=None,
                          valid_tools_only=True,
                          uncompress_file=True,
                          remove_repo_files_not_in_tar=False,
                          commit_message="Uploaded readme.txt",
                          strings_displayed=[],
                          strings_not_displayed=[] )
    def test_0020_check_for_displayed_update( self ):
        '''Browse installed repositories and verify update.'''
        '''
        We are at step 4 - Verify that the browse page now shows an update available.
        The browse page should now show filtering_1410 as installed, but with a yellow box indicating that there is an update available.
        '''
        # Wait 3 seconds, just to be sure we're past hours_between_check.
        time.sleep( 3 )
        self.galaxy_logout()
        self.galaxy_login( email=common.admin_email, username=common.admin_username )
        repository = self.test_db_util.get_repository_by_name_and_owner( repository_name, common.test_user_1_name )
        self.update_tool_shed_status()
        ok_title = r'title=\"Updates are available in the Tool Shed for this revision\"'
        updates_icon = '/static/images/icon_warning_sml.gif'
        strings_displayed = [ ok_title, updates_icon ]
        self.display_galaxy_browse_repositories_page( strings_displayed=strings_displayed )
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/test/tool_shed/functional/test_1410_update_manager.py
|
Python
|
gpl-3.0
| 7,266
|
[
"Galaxy"
] |
2a04fa7ce3e931f23020981f9833f064e72ebf71fba3d10f7a7d7fdbfa415491
|
from __future__ import unicode_literals, print_function, division
import argparse
import ast
import functools
import heapq
import importlib
import inspect
import json
import logging
from contextlib import contextmanager
from collections import defaultdict
import operator
import os
import sys
import textwrap
from . import ast_utils
from . import utils
import timeit
import traceback
import itertools
from .utils import memoized, MISSING
from .compat import PY2, BUILTINS_NAME, indent, open
try:
import configparser
except ImportError:
import ConfigParser as configparser
try:
from UserDict import UserDict
except ImportError:
from collections import UserDict
PROJECT_NAME = 'greentype'
CONFIG_NAME = '{}.cfg'.format(PROJECT_NAME)
# Common VCS/cache directories that are never descended into when scanning
EXCLUDED_DIRECTORIES = frozenset(['.svn', 'CVS', '.bzr', '.hg', '.git', '__pycache__'])
LOG = logging.getLogger(__name__)
# Library-style logging: stay silent unless the host application attaches
# handlers, and do not bubble records up to the root logger
LOG.propagate = False
LOG.setLevel(logging.DEBUG)
LOG.addHandler(logging.NullHandler())
# TODO: it's better to be Trie, views better to be immutable
class Index(defaultdict):
    """A defaultdict whose keys()/values()/items() views accept an
    optional key-filtering predicate (lazy generators when filtered)."""
    def items(self, key_filter=None):
        return ((key, self[key]) for key in self.keys(key_filter))
    def values(self, key_filter=None):
        return (self[key] for key in self.keys(key_filter))
    def keys(self, key_filter=None):
        every_key = super(Index, self).keys()
        if key_filter is None:
            return every_key
        return (key for key in every_key if key_filter(key))
class StatisticUnit(object):
    """Accumulates (value, metadata) samples and reports min/max/mean.

    Each sample is stored with a monotonically increasing counter that acts
    as a tie breaker in case the metadata does not support comparison.
    """
    __slots__ = ('_items', '_counter')
    def __init__(self):
        self._counter = itertools.count()
        # tie breaker if item metadata doesn't support comparison
        self._items = []
    def add(self, item, meta_data=None):
        """Record one sample together with optional metadata."""
        self._items.append((item, next(self._counter), meta_data))
    def min(self, key=lambda x: x):
        """Return (smallest item, its metadata) according to `key`."""
        smallest = min(self._items, key=lambda entry: key(entry[0]))
        return smallest[0], smallest[2]
    def max(self, key=lambda x: x):
        """Return (largest item, its metadata) according to `key`."""
        largest = max(self._items, key=lambda entry: key(entry[0]))
        return largest[0], largest[2]
    def mean(self):
        """Arithmetic mean of the recorded item values."""
        total = sum(entry[0] for entry in self._items)
        return total / len(self._items)
    def as_dict(self):
        """Summary dict with 'min', 'min_meta', 'max', 'max_meta', 'mean'."""
        summary = {}
        summary['min'], summary['min_meta'] = self.min()
        summary['max'], summary['max_meta'] = self.max()
        summary['mean'] = self.mean()
        return summary
    def __str__(self):
        lo, lo_meta = self.min()
        hi, hi_meta = self.max()
        return 'min={} ({}) ' \
               'max={} ({}) ' \
               'mean={}'.format(lo, lo_meta, hi, hi_meta, self.mean())
    def __repr__(self):
        return str(self)
class Config(UserDict):
    """Configuration similar to the one used in Flask."""
    def __init__(self, defaults, section_name):
        # Copy so later merges never mutate the caller's defaults dict
        UserDict.__init__(self, defaults.copy())
        self.section_name = section_name
    def merge(self, other):
        """Merge mapping `other` into this config; its values win."""
        self.data = utils.dict_merge(self.data, other, override=True)
    def update_from_object(self, obj):
        """Adopt attributes of `obj` (e.g. an argparse namespace) whose
        names match existing config keys."""
        filtered = {k: v for k, v in vars(obj).items() if k in self.data}
        self.merge(filtered)
    def update_from_cfg_file(self, path):
        """Read options from the [section_name] section of an INI file at
        `path`, coercing each to the type of the corresponding default."""
        # Note that only source roots are supported for now
        config = configparser.ConfigParser()
        # Normalize option names to upper case to match our UPPER_CASE keys
        config.optionxform = str.upper
        config.read(path)
        # location of config determine project root
        filtered = {}
        for name in config.options(self.section_name):
            if name not in self:
                continue
            # Coerce according to the type of the existing default value;
            # list-valued options use ':' as separator
            if isinstance(self[name], list):
                filtered[name] = config.get(self.section_name, name).split(':')
            elif isinstance(self[name], bool):
                filtered[name] = config.getboolean(self.section_name, name)
            elif isinstance(self[name], int):
                filtered[name] = config.getint(self.section_name, name)
            elif isinstance(self[name], float):
                filtered[name] = config.getfloat(self.section_name, name)
            else:
                filtered[name] = config.get(self.section_name, name)
        self.merge(filtered)
class GreenTypeAnalyzer(object):
def __init__(self, target_path):
    """Set up configuration, empty indexes and statistics for the project
    rooted at (or containing) `target_path`."""
    defaults = {
        'FOLLOW_IMPORTS': True,
        'BUILTINS': list(sys.builtin_module_names),
        'TARGET_NAME': None,
        'TARGET_PATH': None,
        'PROJECT_ROOT': None,
        'PROJECT_NAME': None,
        'SOURCE_ROOTS': [],
        'EXCLUDE': [],
        'INCLUDE': [],
        'VERBOSE': False,
        'QUIET': False,
        'ANALYZE_BUILTINS': True
    }
    if PY2:
        # Extra modules analyzed reflectively when running under Python 2
        defaults['BUILTINS'] += ['_socket', 'datetime', '_collections']
    self.indexes = {
        'MODULE_INDEX': Index(None),
        'CLASS_INDEX': Index(None),
        'FUNCTION_INDEX': Index(None),
        'PARAMETER_INDEX': Index(None),
        'CLASS_ATTRIBUTE_INDEX': Index(set)
    }
    if PY2:
        self.register_class(PY2_FAKE_OBJECT)
    self.config = Config(defaults, PROJECT_NAME)
    target_path = os.path.abspath(target_path)
    self.config['TARGET_PATH'] = target_path
    # A file target implies its containing directory is the project root
    if os.path.isfile(target_path):
        project_root = os.path.dirname(target_path)
    elif os.path.isdir(target_path):
        project_root = target_path
    else:
        raise ValueError('Unrecognized target "{}". '
                         'Should be either file or directory.'.format(target_path))
    self.config['PROJECT_ROOT'] = project_root
    self.config['PROJECT_NAME'] = os.path.basename(target_path)
    # Modules that failed to resolve or parse; they are never retried
    self._broken_modules = set()
    self.statistics = defaultdict(StatisticUnit)
    # Plain integer counters (not StatisticUnit) for project-wide totals
    self.statistics['total_project_expressions'] = 0
    self.statistics['total_project_parameter_refs'] = 0
    self.statistics['total_project_parameters'] = 0
def statistics_report(self):
    """Build a StatisticsReport over the collected statistics."""
    return StatisticsReport(self)
@property
def target_path(self):
    """Absolute path of the analysis target (file or directory)."""
    return self.config['TARGET_PATH']
@property
def project_name(self):
    """Base name of the target path, used as the project name."""
    return self.config['PROJECT_NAME']
@property
def project_root(self):
    """Absolute project root directory."""
    return self.config['PROJECT_ROOT']
@property
def source_roots(self):
    """Absolute source roots, with the project root guaranteed first."""
    roots = self._absolutize_paths(self.config['SOURCE_ROOTS'])
    if self.project_root not in roots:
        roots.insert(0, self.project_root)
    return roots
@property
def included(self):
    """Absolute, normalized INCLUDE paths."""
    return self._absolutize_paths(self.config['INCLUDE'])
@property
def excluded(self):
    """Absolute, normalized EXCLUDE paths."""
    return self._absolutize_paths(self.config['EXCLUDE'])
def _project_definitions(self, defs):
    """Filter `defs` down to definitions whose module lies inside the
    project (definitions without a module are dropped)."""
    return [d for d in defs if d.module and self.is_inside_project(d.module.path)]
def _absolutize_paths(self, paths):
    """Return `paths` normalized, resolving relative entries against the
    project root."""
    return [
        os.path.normpath(p if os.path.isabs(p)
                         else os.path.join(self.project_root, p))
        for p in paths]
def is_excluded(self, path):
    """Whether `path` is excluded from analysis; a match against the
    INCLUDE list overrides any EXCLUDE match."""
    path = os.path.abspath(path)
    # TODO: use globs/regexes for greater flexibility
    matches_include = any(path.startswith(prefix) for prefix in self.included)
    matches_exclude = any(path.startswith(prefix) for prefix in self.excluded)
    return matches_exclude and not matches_include
def is_inside_project(self, path):
    """Whether `path` lies under the project root and is not excluded."""
    path = os.path.abspath(path)
    return path.startswith(self.project_root) and not self.is_excluded(path)
@property
def project_modules(self):
    """Indexed modules that belong to the project."""
    return self._project_definitions(self.indexes['MODULE_INDEX'].values())
@property
def project_classes(self):
    """Indexed classes that belong to the project."""
    return self._project_definitions(self.indexes['CLASS_INDEX'].values())
@property
def project_functions(self):
    """Indexed functions that belong to the project."""
    return self._project_definitions(self.indexes['FUNCTION_INDEX'].values())
@property
def project_parameters(self):
    """Indexed parameters that belong to the project."""
    return self._project_definitions(self.indexes['PARAMETER_INDEX'].values())
def invalidate_indexes(self):
    """Empty all indexes (the index objects themselves are kept)."""
    for index in self.indexes.values():
        index.clear()
def report(self, msg, verbose=False):
    """Print `msg` to stdout (honouring QUIET/VERBOSE) and log it.

    verbose-only messages are printed only when VERBOSE is set.
    """
    if not self.config['QUIET']:
        if not verbose or self.config['VERBOSE']:
            print(msg)
    LOG.info(msg)
def report_error(self, msg):
    """Print `msg` to stderr and log it as an error."""
    print(msg, file=sys.stderr)
    LOG.error(msg)
def index_project(self):
    """Walk the target and index every Python source module that lies
    inside the project, pruning excluded directories."""
    self.report('Indexing project "{}" starting from "{}".'.format(self.project_root,
                                                                   self.target_path))
    self.report('Source roots: {}.'.format(', '.join(self.source_roots)), verbose=True)
    LOG.debug('Python path: %s', sys.path)
    if os.path.isfile(self.target_path):
        if not utils.is_python_source_module(self.target_path):
            raise ValueError('Not a valid Python module "{}" '
                             '(should end with .py).'.format(self.target_path))
        self.index_module(path=self.target_path)
    elif os.path.isdir(self.target_path):
        for dirpath, dirnames, filenames in os.walk(self.target_path):
            # Iterate a copy because dirnames is mutated below
            for name in dirnames[:]:
                abs_path = os.path.abspath(os.path.join(dirpath, name))
                if name in EXCLUDED_DIRECTORIES:
                    LOG.debug('Excluded directory "%s". Skipping.', abs_path)
                    # Prune in place so os.walk does not descend into it
                    dirnames.remove(name)
            for name in filenames:
                abs_path = os.path.abspath(os.path.join(dirpath, name))
                if not utils.is_python_source_module(abs_path) or \
                        not self.is_inside_project(abs_path):
                    continue
                self.index_module(abs_path)
def index_module(self, path=None, name=None):
    """Index a single module given its `path` or dotted `name`.

    Returns the indexed module, or None when the module is broken,
    excluded or unresolvable. Broken modules are remembered and skipped
    on later calls. With FOLLOW_IMPORTS set, imported modules are indexed
    recursively.
    """
    if name is None and path is None:
        raise ValueError('Either module name or module path should be given.')
    if name in self._broken_modules or path in self._broken_modules:
        return None
    # Resolve the missing one of (path, name) from the other
    if path is not None:
        try:
            name = self.path_to_module_name(path)
        except ValueError:
            LOG.warning('Module "%s" is unreachable from sources roots', path)
            self._broken_modules.add(path)
            return None
    else:
        try:
            path = self.module_name_to_path(name)
        except ValueError:
            LOG.warning('Module %s is not found under source roots', name)
            self._broken_modules.add(name)
            return None
    # Already indexed?
    loaded = self.indexes['MODULE_INDEX'].get(name)
    if loaded:
        return loaded
    if self.is_excluded(path):
        LOG.debug('File "%s" is explicitly excluded from project', path)
        return None
    try:
        module_indexed = SourceModuleIndexer(self, path, name).run()
    except SyntaxError:
        self.report_error('Syntax error during indexing of "{}". '
                          'Wrong Python version?'.format(path))
        LOG.error(traceback.format_exc())
        self._broken_modules.add(path)
        return None
    if self.config['FOLLOW_IMPORTS']:
        for imp in module_indexed.imports:
            if not imp.import_from or imp.star_import:
                # for imports of form
                # >>> for import foo.bar
                # or
                # >>> from foo.bar import *
                # go straight to 'foo.bar'
                self.index_module(name=imp.imported_name)
            else:
                # in case of import of form
                # >>> from foo.bar import baz [as quux]
                # try to index both modules: foo.bar.baz and foo.bar
                # the latter is needed if baz is top-level name in foo/bar/__init__.py
                self.index_module(name=imp.imported_name)
                self.index_module(name=utils.qname_tail(imp.imported_name))
    return module_indexed
def path_to_module_name(self, path):
    """Translate an absolute file path into a dotted module name.

    The path must live under one of the source roots (or a sys.path
    entry), and every intermediate directory must be a package (contain
    an ``__init__.py``).  Raises ValueError otherwise.
    """
    path = os.path.abspath(path)
    roots = self.source_roots + [p for p in sys.path if p not in ('', '.', os.getcwd())]
    for src_root in roots:
        # Bug fix: a bare startswith() also matched sibling directories
        # sharing a textual prefix (root "src" matched "src2/mod.py"),
        # producing a relpath with ".." segments and a garbage dotted
        # name.  Require an exact match or a path-separator boundary.
        # os.path.join(src_root, '') appends the trailing separator.
        if not (path == src_root or path.startswith(os.path.join(src_root, ''))):
            continue
        # check that on all way up to module correct packages with __init__ exist
        relative = os.path.relpath(path, src_root)
        if not all(os.path.exists(os.path.join(dir, '__init__.py'))
                   for dir in utils.parent_directories(path, src_root)):
            continue
        dir_name, base_name = os.path.split(relative)
        if base_name == '__init__.py':
            prepared = dir_name
        else:
            prepared, _ = os.path.splitext(relative)
        # critical on Windows: foo.bar.py and foo.Bar.py are the same module
        prepared = os.path.normcase(prepared)
        return prepared.replace(os.path.sep, '.').strip('.')
    raise ValueError('Unresolved module: path="{}"'.format(path))
def module_name_to_path(self, module_name):
    """Locate the source file that defines *module_name*.

    Searches the source roots followed by sys.path entries; a dotted
    name may denote either a package (``__init__.py``) or a plain
    module (``<name>.py``).  Raises ValueError when nothing matches.
    """
    rel_path = os.path.normcase(os.path.join(*module_name.split('.')))
    search_roots = self.source_roots + [p for p in sys.path
                                        if p not in ('', '.', os.getcwd())]
    for root in search_roots:
        base = os.path.join(root, rel_path)
        # Prefer the package form over the plain-module form.
        for candidate in (os.path.join(base, '__init__.py'), base + '.py'):
            if os.path.isfile(candidate):
                break
        else:
            continue
        # Every directory between the root and the file must be a package.
        if all(os.path.exists(os.path.join(d, '__init__.py'))
               for d in utils.parent_directories(candidate, root)):
            return candidate
    raise ValueError('Unresolved module: name="{}"'.format(module_name))
def index_builtins(self):
    """Reflectively index the classes of every configured built-in module."""
    def qualifier(obj):
        # __qualname__ is unavailable on Python 2.
        return obj.__name__ if PY2 else obj.__qualname__

    for module_name in self.config['BUILTINS']:
        LOG.debug('Reflectively analyzing %s', module_name)
        module = importlib.import_module(module_name, None)
        for module_attr in vars(module).values():
            if not inspect.isclass(module_attr):
                continue
            qname = '{}.{}'.format(module_name, qualifier(module_attr))
            base_names = tuple(qualifier(base) for base in module_attr.__bases__)
            attributes = set(vars(module_attr))
            self.register_class(ClassDef(qname, None, None, base_names, attributes))
@memoized(guard_value=None)
def resolve_name(self, name, module, type='class'):
    """Resolve name using indexes and following import if it's necessary.

    Looks *name* up as a built-in, as a definition local to *module*,
    and finally through the module's top-level imports (indexing the
    imported modules on demand).  Returns the matching definition or
    None.
    """
    if type == 'class':
        index = self.indexes['CLASS_INDEX']
    elif type == 'function':
        index = self.indexes['FUNCTION_INDEX']
    elif type == 'module':
        index = self.indexes['MODULE_INDEX']
    else:
        raise ValueError('Unknown definition type. Should be one of: class, function, module')

    def check_loaded(qname):
        # Returns the indexed definition, or None implicitly.
        if qname in index:
            return index[qname]

    # already properly qualified name or built-in
    df = check_loaded(name) or check_loaded(BUILTINS_NAME + '.' + name)
    if df:
        return df
    # not built-in
    if module:
        # name defined in the same module
        df = check_loaded('{}.{}'.format(module.qname, name))
        if df:
            return df
        # name is imported
        for imp in module.imports:
            if imp.imports_name(name):
                qname = utils.qname_merge(imp.local_name, name)
                # TODO: more robust qualified name handling
                qname = qname.replace(imp.local_name, imp.imported_name, 1)
                # Case 1:
                # >>> import some.module as alias
                # index some.module, then check some.module.Base
                # Case 2:
                # >>> from some.module import Base as alias
                # index some.module, then check some.module.Base
                # if not found index some.module.Base, then check some.module.Base again
                df = check_loaded(qname)
                if df:
                    return df
                if not imp.import_from:
                    module_loaded = self.index_module(name=imp.imported_name)
                    if module_loaded and module_loaded is not module:
                        # drop local name (alias) for imports like
                        # import module as alias
                        # print(alias.MyClass.InnerClass())
                        top_level_name = utils.qname_drop(name, imp.local_name)
                        df = self.resolve_name(top_level_name, module_loaded, type)
                        if df:
                            return df
                        LOG.info('Module %s referenced as "import %s" in "%s" loaded '
                                 'successfully, but definition of %s not found',
                                 imp.imported_name, imp.imported_name, module.path, qname)
                else:
                    # first, interpret import like 'from module import Name'
                    module_name = utils.qname_tail(imp.imported_name)
                    module_loaded = self.index_module(name=module_name)
                    if module_loaded and module_loaded is not module:
                        top_level_name = utils.qname_drop(qname, module_name)
                        df = self.resolve_name(top_level_name, module_loaded, type)
                        if df:
                            return df
                    # then, as 'from package import module'
                    module_loaded = self.index_module(name=imp.imported_name)
                    if module_loaded and module_loaded is not module:
                        top_level_name = utils.qname_drop(name, imp.local_name)
                        df = self.resolve_name(top_level_name, module_loaded, type)
                        if df:
                            return df
                        LOG.info('Module %s referenced as "from %s import %s" in "%s" loaded '
                                 'successfully, but definition of %s not found',
                                 imp.imported_name, utils.qname_tail(imp.imported_name),
                                 utils.qname_head(imp.imported_name), module.path,
                                 qname)
            elif imp.star_import:
                module_loaded = self.index_module(name=imp.imported_name)
                if module_loaded and module_loaded is not module:
                    # no aliased allowed with 'from module import *' so we check directly
                    # for name searched in the first place
                    df = self.resolve_name(name, module_loaded, type)
                    if df:
                        return df
    LOG.warning('Cannot resolve name %s in module "%s"', name,
                module.path if module else '<undefined>')
@memoized
def _resolve_bases(self, class_def):
    """Return the set of all transitively resolved base ClassDefs of *class_def*."""
    LOG.debug('Resolving bases for %s', class_def.qname)
    bases = set()
    for name in class_def.bases:
        # A base with the same (unqualified) name as the class itself
        # would recurse forever without flow-sensitive analysis.
        if name == class_def.name:
            LOG.warning("Class %s uses base with the same name. Not supported "
                        "until flow-insensitive analysis is done.", class_def.qname)
            continue
        base_def = self.resolve_name(name, class_def.module, 'class')
        if base_def:
            bases.add(base_def)
            # Include the base's own (transitive) bases as well.
            bases.update(self._resolve_bases(base_def))
        else:
            LOG.warning('Base class %s of %s not found', name, class_def.qname)
    return bases
def infer_parameter_types(self):
    """Suggest candidate classes for each project parameter that has
    at least one accessed attribute, recording per-parameter timing.
    """
    for parameter in self.project_parameters:
        if not parameter.attributes:
            continue
        with utils.timer() as timing:
            parameter.suggested_types = self.suggest_classes(parameter.attributes)
        self.statistics['one_parameter_time'].add(timing.elapsed,
                                                  list(parameter.attributes))
def suggest_classes(self, accessed_attrs):
    """Suggest classes whose own plus inherited attributes cover all of
    *accessed_attrs*, dropping subclasses when a superclass also fits.
    """
    def unite(sets):
        return functools.reduce(set.union, sets, set())

    # NOTE(review): currently unused helper — kept for symmetry with unite().
    def intersect(sets):
        return functools.reduce(set.intersection, sets) if sets else set()

    # More fair algorithm because it considers newly discovered bases classes as well
    index = self.indexes['CLASS_ATTRIBUTE_INDEX']
    # Initial candidates: every class declaring at least one accessed attribute.
    candidates = unite(index[attr] for attr in accessed_attrs)
    self.statistics['initial_candidates'].add(len(candidates), list(accessed_attrs))
    suitable = set()
    checked = set()
    total = 0
    while candidates:
        candidate = candidates.pop()
        total += 1
        checked.add(candidate)
        bases = self._resolve_bases(candidate)
        # register number of base classes for statistics
        self.statistics['class_bases'].add(len(bases), candidate.qname)
        available_attrs = unite(b.attributes for b in bases) | candidate.attributes
        # A candidate fits when the accessed attributes are a subset of
        # everything it declares or inherits.
        if accessed_attrs <= available_attrs:
            suitable.add(candidate)
        # new classes could be added to index during call to _resolve_bases(),
        # so we have to check them as well
        if not self.config['FOLLOW_IMPORTS']:
            for base in bases:
                if base in checked:
                    continue
                if any(attr in base.attributes for attr in accessed_attrs):
                    candidates.add(base)
    self.statistics['total_candidates'].add(total, list(accessed_attrs))
    # remove subclasses if their superclasses are suitable also
    for cls in suitable.copy():
        if any(base in suitable for base in self._resolve_bases(cls)):
            suitable.remove(cls)
    return suitable
def discover_project_config(self):
    """Walk upwards from the target path and apply the first config file found."""
    for directory in utils.parent_directories(self.target_path, strict=False):
        candidate = os.path.join(directory, CONFIG_NAME)
        if not os.path.exists(candidate):
            continue
        self.report('Found config file at "{}".'.format(candidate), verbose=True)
        self.config.update_from_cfg_file(candidate)
        # The directory holding the config file becomes the project root.
        self.config['PROJECT_ROOT'] = os.path.dirname(candidate)
        return
def register_class(self, class_def):
    """Add *class_def* to the class index and the attribute inverted index.

    Noise attributes ('__doc__', and '__init__' on Python 3) are stripped
    from every class except the root object classes.
    """
    self.indexes['CLASS_INDEX'][class_def.qname] = class_def
    root_qnames = (BUILTINS_NAME + '.object', PY2_FAKE_OBJECT.qname)
    if class_def.qname not in root_qnames:
        class_def.attributes -= {'__doc__'}
        # Dropping __init__ is safe only on Python 3.
        if not PY2:
            class_def.attributes.discard('__init__')
    attribute_index = self.indexes['CLASS_ATTRIBUTE_INDEX']
    for attribute in class_def.attributes:
        attribute_index[attribute].add(class_def)
def register_function(self, func_def):
    """Index *func_def* and each of its parameters by qualified name."""
    self.indexes['FUNCTION_INDEX'][func_def.qname] = func_def
    for parameter in func_def.parameters:
        self.indexes['PARAMETER_INDEX'][parameter.qname] = parameter
def register_module(self, module_def):
    """Index *module_def* under its qualified name."""
    self.indexes['MODULE_INDEX'][module_def.qname] = module_def
@classmethod
def main(cls):
    """Command-line entry point: parse arguments, run the analysis,
    and write the resulting report to the chosen output file.
    """
    parser = argparse.ArgumentParser()
    # dest names in UPPER_CASE map directly onto config keys via
    # config.update_from_object(args) below.
    parser.add_argument('--src-roots', type=lambda x: x.split(':'), default=[],
                        dest='SOURCE_ROOTS',
                        help='Sources roots separated by colon.')
    parser.add_argument('--exclude', type=lambda x: x.split(':'), default=[],
                        dest='EXCLUDE',
                        help='Files excluded from indexing process.')
    parser.add_argument('--include', type=lambda x: x.split(':'), default=[],
                        dest='INCLUDE',
                        help='Files included to indexing process.')
    parser.add_argument('-t', '--target', default='',
                        dest='TARGET_NAME',
                        help='Target qualifier to restrict output.')
    parser.add_argument('-L', '--follow-imports', action='store_true',
                        dest='FOLLOW_IMPORTS',
                        help='Follow imports during indexing.')
    parser.add_argument('-B', '--no-builtins', action='store_false',
                        dest='ANALYZE_BUILTINS',
                        help='Not analyze built-in modules reflectively first.')
    parser.add_argument('--with-samples', action='store_true',
                        help='Include samples in report.')
    parser.add_argument('-d', '--dump-params', action='store_true',
                        help='Dump parameters qualified by target.')
    parser.add_argument('-v', '--verbose', action='count', default=0,
                        dest='verbose_level',
                        help='Enable verbose output.')
    # str('-') keeps the default a native str on both Python 2 and 3.
    parser.add_argument('-o', '--output', type=argparse.FileType(mode='w'), default=str('-'),
                        help='File, where to write report.')
    # TODO: detect piping
    parser.add_argument('-q', '--quiet', action='store_true',
                        dest='QUIET',
                        help='Print only report in console.')
    parser.add_argument('--json', action='store_true',
                        help='Dump analysis results in JSON.')
    parser.add_argument('path',
                        help='Path to single Python module or directory.')
    args = parser.parse_args()
    # -vv (and above) also mirrors INFO-level log records to the console.
    if args.verbose_level > 1:
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        LOG.addHandler(console)
    args.VERBOSE = args.verbose_level > 0
    analyzer = cls(os.path.abspath(os.path.expanduser(args.path)))
    analyzer.config['VERBOSE'] = args.VERBOSE
    # Project config is applied first so that command-line flags win.
    analyzer.discover_project_config()
    analyzer.config.update_from_object(args)
    if analyzer.config['ANALYZE_BUILTINS']:
        analyzer.index_builtins()
    with utils.timer() as t:
        analyzer.index_project()
    analyzer.report('Built indexes in {:.5f} s.'.format(t.elapsed))
    with utils.timer() as t:
        analyzer.infer_parameter_types()
    analyzer.report('Inferred types for parameters in {:.5f} s.'.format(t.elapsed))
    statistics = analyzer.statistics_report()
    LOG.info('Writing report to "%s"', args.output.name)
    with args.output as f:
        if args.json:
            report = statistics.format_json(with_samples=args.with_samples)
        else:
            report = statistics.format_text(with_samples=args.with_samples,
                                            dump_params=args.dump_params)
        f.write(report)
class Definition(object):
    """Base class for every indexed entity (module, class, function, parameter).

    Identity is determined solely by the qualified name, so definitions
    can live in sets and serve as dictionary keys.
    """

    def __init__(self, qname, node, module):
        self.qname = qname    # fully qualified dotted name
        self.node = node      # AST node, or None for reflective definitions
        self.module = module  # owning ModuleDef, or None

    @property
    def name(self):
        """Unqualified (last) component of the qualified name."""
        return self.qname.rpartition('.')[2]

    @property
    def physical(self):
        """Whether this definition was produced from parsed source code."""
        return self.module is not None and self.node is not None

    def __str__(self):
        return '{}({})'.format(type(self).__name__, self.qname)

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        return isinstance(other, Definition) and self.qname == other.qname

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        return hash(self.qname)
class ModuleDef(Definition):
    """Definition of a single source module; acts as its own owning module."""

    def __init__(self, qname, node, path, imports):
        # By convention a module is its own enclosing module.
        super(ModuleDef, self).__init__(qname, node, self)
        self.path = path        # absolute path of the source file
        self.imports = imports  # top-level Import records only

    def __str__(self):
        return 'module {} at "{}"'.format(self.qname, self.path)
class Import(object):
    """One imported name as it appears in a top-level import statement."""

    def __init__(self, imported_name, local_name, import_from, star_import=False):
        # A star import only occurs in a 'from ... import *' statement,
        # and every non-star import must bind a local name.
        assert not (star_import and not import_from)
        assert not (local_name is None and not star_import)
        self.imported_name = imported_name  # fully qualified imported name
        self.local_name = local_name        # name visible in the importing module
        self.star_import = star_import
        self.import_from = import_from      # True for 'from ... import ...'

    def imports_name(self, name):
        """Check whether *name* is made accessible through this import."""
        # Names brought in by a star import cannot be matched by qualifier.
        return (not self.star_import
                and utils.qname_qualified_by(name, self.local_name))
class ClassDef(Definition):
    """Definition of a class: its base-class names and declared attributes."""

    def __init__(self, qname, node, module, bases, attributes):
        super(ClassDef, self).__init__(qname, node, module)
        self.bases = bases            # iterable of base-class *names*, not ClassDefs
        self.attributes = attributes  # set of attribute names declared on the class

    def __str__(self):
        return 'class {}({})'.format(self.qname, ', '.join(self.bases))


# Sentinel root class for Python 2, where old-style classes do not list
# 'object' among their bases.
PY2_FAKE_OBJECT = ClassDef('PY2_FAKE_OBJECT', None, None, (), {'__doc__', '__module__'})
class FunctionDef(Definition):
    """Definition of a function or method with its parameter definitions."""

    def __init__(self, qname, node, module, parameters):
        super(FunctionDef, self).__init__(qname, node, module)
        self.parameters = parameters

    def unbound_parameters(self):
        """Parameters excluding a leading 'self' receiver, if present."""
        params = self.parameters
        # Heuristic: a first parameter literally named 'self' is treated
        # as the bound receiver of a method.
        if params and params[0].name == 'self':
            return params[1:]
        return params

    def __str__(self):
        signature = ', '.join(p.name for p in self.parameters)
        return 'def {}({})'.format(self.qname, signature)
class ParameterDef(Definition):
    """Definition of a function parameter together with usage statistics."""

    def __init__(self, qname, attributes, node, module):
        super(ParameterDef, self).__init__(qname, node, module)
        self.attributes = attributes   # attribute names accessed on the parameter
        self.suggested_types = set()   # ClassDef candidates, inferred later
        # Usage counters, filled in from a UsagesCollector pass.
        self.used = 0
        self.used_as_argument = 0
        self.used_as_operand = 0
        self.returned = 0

    def __str__(self):
        text = '{}::{}'.format(self.qname, StructuralType(self.attributes))
        if self.suggested_types:
            suggested = ' | '.join(cls.qname for cls in self.suggested_types)
            text = '{} ~ {}'.format(text, suggested)
        return text
class StructuralType(object):
    """Lightweight 'duck type': the bag of attributes a value must expose."""

    def __init__(self, attributes):
        self.attributes = attributes

    def __str__(self):
        return '{' + ', '.join(self.attributes) + '}'

    def __repr__(self):
        return str(self)
class AttributesCollector(ast.NodeVisitor):
    """Collect accessed attributes for specified qualifier."""

    def collect(self, node):
        """Visit *node* and return the set of discovered attribute names."""
        self.attributes = set()
        self.visit(node)
        return self.attributes

    def visit(self, node):
        # Accept a plain list of statements (e.g. a function body) in
        # addition to a single AST node.
        if isinstance(node, list):
            for child in node:
                self.visit(child)
        else:
            super(AttributesCollector, self).visit(node)
class SimpleAttributesCollector(AttributesCollector):
    """Collect attributes accessed directly on a given name.

    No alias or scope analysis is performed.  Subscriptions are mapped to
    their special methods (__getitem__ and friends); other operators and
    special methods are not considered.
    """

    def __init__(self, name, read_only=True):
        super(SimpleAttributesCollector, self).__init__()
        self.name = name
        self.read_only = read_only

    def visit_Attribute(self, node):
        qualifier = node.value
        if isinstance(qualifier, ast.Name) and qualifier.id == self.name:
            # Reads are always recorded; writes only when read_only is off.
            if not self.read_only or isinstance(node.ctx, ast.Load):
                self.attributes.add(node.attr)
        else:
            self.visit(qualifier)

    def visit_Subscript(self, node):
        qualifier = node.value
        if isinstance(qualifier, ast.Name) and qualifier.id == self.name:
            for ctx_cls, method in ((ast.Load, '__getitem__'),
                                    (ast.Store, '__setitem__'),
                                    (ast.Del, '__delitem__')):
                if isinstance(node.ctx, ctx_cls):
                    self.attributes.add(method)
                    break
class UsagesCollector(AttributesCollector):
    """Count the different ways the value bound to a given name is used."""

    def __init__(self, name):
        super(UsagesCollector, self).__init__()
        self.name = name

    def collect(self, node):
        self.used_as_argument = 0
        self.used_as_operand = 0
        self.used = 0
        self.returned = 0
        self.visit(node)

    def visit_Name(self, node):
        if node.id != self.name:
            return
        # Bindings and deletions are not value usages.
        if isinstance(node.ctx, (ast.Store, ast.Del, ast.Param)):
            return
        self.used += 1
        # keywords lhs is identifier (raw str) and lhs is value
        parent = ast_utils.node_parent(node)
        if isinstance(parent, (ast.keyword, ast.Call)):
            self.used_as_argument += 1
        if isinstance(parent, ast.Return):
            self.returned += 1
        if isinstance(parent, (ast.BinOp, ast.UnaryOp)):
            self.used_as_operand += 1
class SourceModuleIndexer(ast.NodeVisitor):
    """AST visitor that indexes one module: its top-level imports, classes,
    functions and parameters, registering each discovered definition with
    the owning analyzer.
    """

    def __init__(self, analyzer, path, name=None):
        self.analyzer = analyzer
        self.indexes = analyzer.indexes
        self.module_path = os.path.abspath(path)
        if name is None:
            self.module_name = analyzer.path_to_module_name(path)
        else:
            self.module_name = name
        self.scopes_stack = []  # enclosing definitions, innermost last
        self.module_def = None
        self.depth = 0
        self.root = None        # module AST root, set by run()

    def register(self, definition):
        """Dispatch *definition* to the matching analyzer registry.

        Raises TypeError for definition kinds this indexer cannot handle.
        """
        if isinstance(definition, ClassDef):
            self.analyzer.register_class(definition)
        elif isinstance(definition, FunctionDef):
            self.analyzer.register_function(definition)
        else:
            # Bug fix: the raise used to be unconditional (no 'else'), so
            # a TypeError was raised even after a successful registration.
            raise TypeError('Unknown definition: {}'.format(definition))

    def qualified_name(self, node):
        """Qualified name of *node* relative to the current scope owner."""
        node_name = ast_utils.node_name(node)
        scope_owner = self.parent_scope()
        if scope_owner:
            return scope_owner.qname + '.' + node_name
        return node_name

    def run(self):
        """Parse the module, walk its AST and return the ModuleDef."""
        LOG.debug('Indexing module "%s"', self.module_path)
        # let ast deal with encoding by itself
        with open(self.module_path, mode='br') as f:
            self.root = ast.parse(f.read(), self.module_path)
        ast_utils.interlink_ast(self.root)
        self.visit(self.root)
        return self.module_def

    def visit(self, node):
        self.depth += 1
        try:
            # Gather project-wide statistics: count value expressions and
            # references to parameters of any enclosing function.
            if isinstance(node, ast.expr) and self.analyzer.is_inside_project(self.module_path):
                if not hasattr(node, 'ctx'):
                    self.analyzer.statistics['total_project_expressions'] += 1
                elif not isinstance(node.ctx, (ast.Store, ast.Del, ast.Param)):
                    self.analyzer.statistics['total_project_expressions'] += 1
                if isinstance(node, ast.Name):
                    for definition in reversed(self.scopes_stack):
                        if isinstance(definition, FunctionDef) and node.id in \
                                {p.name for p in definition.parameters}:
                            self.analyzer.statistics['total_project_parameter_refs'] += 1
                            break
            super(SourceModuleIndexer, self).visit(node)
        finally:
            self.depth -= 1

    def parent_scope(self):
        """Innermost enclosing definition, or None at module top level."""
        if self.scopes_stack:
            return self.scopes_stack[-1]
        return None

    @contextmanager
    def scope_owner(self, definition):
        """Temporarily push *definition* as the current scope owner."""
        self.scopes_stack.append(definition)
        try:
            yield
        finally:
            self.scopes_stack.pop()

    def visit_Module(self, node):
        self.module_def = module_def = self.module_discovered(node)
        with self.scope_owner(module_def):
            self.generic_visit(node)

    def visit_ClassDef(self, node):
        class_def = self.class_discovered(node)
        with self.scope_owner(class_def):
            self.generic_visit(node)

    def visit_FunctionDef(self, node):
        func_def = self.function_discovered(node)
        with self.scope_owner(func_def):
            self.generic_visit(node)

    def module_discovered(self, node):
        """Create and register a ModuleDef with its top-level imports."""
        imports = []
        # inspect only top-level imports
        for child in ast.iter_child_nodes(node):
            if isinstance(child, ast.Import):
                for alias in child.names:
                    imports.append(Import(alias.name, alias.asname or alias.name, False))
            elif isinstance(child, ast.ImportFrom):
                if child.level:
                    package_path = self.module_path
                    # correctly handle absolute/relative names, drives etc.
                    for _ in range(child.level):
                        package_path = os.path.dirname(package_path)
                    package = self.analyzer.path_to_module_name(package_path)
                else:
                    package = ''
                if child.module and package:
                    target_module = package + '.' + child.module
                elif child.module:
                    target_module = child.module
                elif package:
                    target_module = package
                else:
                    raise Exception('Malformed ImportFrom statement: '
                                    'file="{}" module={}, level={}'.format(self.module_path,
                                                                           child.module,
                                                                           child.level))
                for alias in child.names:
                    if alias.name == '*':
                        imports.append(Import(target_module, None, True, True))
                    else:
                        imported_name = '{}.{}'.format(target_module, alias.name)
                        imports.append(
                            Import(imported_name, alias.asname or alias.name, True, False))
        module_def = ModuleDef(self.module_name, node, self.module_path, imports)
        self.analyzer.register_module(module_def)
        return module_def

    def class_discovered(self, node):
        """Create and register a ClassDef for *node*."""
        class_name = self.qualified_name(node)
        bases_names = []
        if not node.bases:
            # Implicit root: Python 2 old-style classes have no 'object'
            # base, so a sentinel root class is substituted there.
            if PY2:
                bases_names.append(PY2_FAKE_OBJECT.qname)
            else:
                bases_names.append(BUILTINS_NAME + '.object')
        for expr in node.bases:
            base_name = ast_utils.attributes_chain_to_name(expr)
            if base_name is None:
                LOG.warning('Class %s in module %s uses computed bases. Not supported.',
                            class_name, self.module_def.path)
                continue
            bases_names.append(base_name)

        # Only top-level functions and assignments are inspected
        class ClassAttributeCollector(AttributesCollector):
            def visit_FunctionDef(self, func_node):
                self.attributes.add(func_node.name)
                # Attributes assigned to 'self' inside __init__ also count
                # as class attributes.
                if ast_utils.node_name(func_node) == '__init__':
                    self_attributes = SimpleAttributesCollector('self', read_only=False).collect(
                        func_node)
                    self.attributes.update(self_attributes)

            def visit_Assign(self, assign_node):
                target = assign_node.targets[0]
                if isinstance(target, ast.Name):
                    self.attributes.add(target.id)

        class_attributes = ClassAttributeCollector().collect(node)
        class_def = ClassDef(class_name, node, self.module_def, bases_names, class_attributes)
        self.analyzer.register_class(class_def)
        return class_def

    def function_discovered(self, node):
        """Create and register a FunctionDef with its ParameterDefs."""
        func_name = self.qualified_name(node)
        args = node.args
        # Drop the implicit receiver of a non-static method.
        if isinstance(self.parent_scope(), ClassDef) and \
                not ast_utils.decorated_with(node, 'staticmethod'):
            declared_params = args.args[1:]
        else:
            declared_params = args.args[:]
        # Python += update lists inplace
        declared_params += [args.vararg, args.kwarg]
        # TODO: filter out parameter patterns in Python 2?
        total_parameters = len(args.args) + bool(args.vararg) + bool(args.kwarg)
        if not PY2:
            declared_params += args.kwonlyargs
            total_parameters += len(args.kwonlyargs)
        if self.analyzer.is_inside_project(self.module_path):
            self.analyzer.statistics['total_project_parameters'] += total_parameters
        parameters = []
        for arg in declared_params:
            # *args and **kwargs may be None
            if arg is None:
                continue
            if isinstance(arg, str):
                # vararg/kwarg are plain strings on some Python versions
                param_name = arg
            elif PY2:
                if isinstance(arg, ast.Name):
                    param_name = arg.id
                else:
                    # Python 2 tuple-unpacking parameters are not supported.
                    LOG.warning('Function %s uses argument patterns. Skipped.', func_name)
                    continue
            else:
                param_name = arg.arg
            attributes = SimpleAttributesCollector(param_name).collect(node.body)
            param_qname = func_name + '.' + param_name
            param = ParameterDef(param_qname, attributes, None, self.module_def)
            # Record how the parameter's value itself is used in the body.
            collector = UsagesCollector(param_name)
            collector.collect(node.body)
            param.used_as_argument = collector.used_as_argument
            param.used_as_operand = collector.used_as_operand
            param.used = collector.used
            param.returned = collector.returned
            parameters.append(param)
        func_def = FunctionDef(func_name, node, self.module_def, parameters)
        self.analyzer.register_function(func_def)
        return func_def
class StatisticsReport(object):
    """Aggregates analysis results and renders them as text or JSON."""

    def __init__(self, analyzer):
        self.analyzer = analyzer
        # Qualified-name prefix used to restrict dumped definitions.
        self.prefix = analyzer.config['TARGET_NAME'] or ''
        self.modules = list(analyzer.indexes['MODULE_INDEX'].values())
        self.classes = list(analyzer.indexes['CLASS_INDEX'].values())
        self.functions = list(analyzer.indexes['FUNCTION_INDEX'].values())
        self.parameters = list(analyzer.indexes['PARAMETER_INDEX'].values())
        self.project_modules = list(self.analyzer.project_modules)
        self.project_classes = list(self.analyzer.project_classes)
        self.project_functions = list(self.analyzer.project_functions)
        self.project_parameters = list(self.analyzer.project_parameters)

    def _filter_name_prefix(self, definitions):
        """Keep only definitions whose qualified name starts with the prefix."""
        return [d for d in definitions if d.qname.startswith(self.prefix)]

    @property
    def attributeless_parameters(self):
        """Parameters that have no accessed attributes."""
        return [p for p in self.project_parameters if not p.attributes]

    @property
    def undefined_type_parameters(self):
        """Parameters with accessed attributes but no inferred types."""
        return [p for p in self.project_parameters if p.attributes and not p.suggested_types]

    @property
    def exact_type_parameters(self):
        """Parameters with exactly one inferred type."""
        return [p for p in self.project_parameters if len(p.suggested_types) == 1]

    @property
    def scattered_type_parameters(self):
        """Parameters with more than one inferred type."""
        return [p for p in self.project_parameters if len(p.suggested_types) > 1]

    @property
    def unused_parameters(self):
        """Parameters that have no attributes and whose values are not
        used directly in the function.
        """
        return [p for p in self.project_parameters if not p.used]

    def most_attributes_parameters(self, n):
        """Top *n* parameters by number of accessed attributes."""
        return heapq.nlargest(n, self.project_parameters, key=lambda x: len(x.attributes))

    def most_types_parameters(self, n):
        """Top *n* scattered-type parameters by number of suggested types."""
        return heapq.nlargest(n, self.scattered_type_parameters,
                              key=lambda x: len(x.suggested_types))

    def as_dict(self, with_samples=False, sample_size=20):
        """Build the nested report dictionary; MISSING entries are filtered out."""
        def sample(items):
            # MISSING markers are removed by deep_filter() at the end.
            if not with_samples:
                return MISSING
            return list(items)[:sample_size]

        def rate(items, population, sample_items=None, with_samples=with_samples):
            # NOTE(review): relies on true division; presumably the module
            # enables it on Python 2 (e.g. via __future__) — confirm.
            d = {
                'total': len(items),
                'rate': len(items) / len(population) if items else 0
            }
            if with_samples:
                if sample_items is None:
                    sample_items = items
                d['sample'] = sample(sample_items)
            return d

        d = {
            'project_name': self.analyzer.project_name,
            'project_root': self.analyzer.project_root,
            'indexed': {
                'total': {
                    'modules': len(self.modules),
                    'classes': len(self.classes),
                    'functions': len(self.functions),
                    'parameters': len(self.parameters),
                },
                'in_project': {
                    'modules': len(self.project_modules),
                    'classes': len(self.project_classes),
                    'functions': len(self.project_functions),
                    'parameters': len(self.project_parameters),
                }
            },
            'project_statistics': {
                'parameters': {
                    'accessed_attributes': {
                        'max': max(len(p.attributes) for p in self.project_parameters) \
                            if self.project_parameters else 0,
                        'top': sample(self.most_attributes_parameters(sample_size))
                    },
                    'attributeless': {
                        'total': len(self.attributeless_parameters),
                        'rate': len(self.attributeless_parameters) / len(self.project_parameters) \
                            if self.attributeless_parameters else 0,
                        'sample': sample(self.attributeless_parameters),
                        'usages': {
                            'argument': rate(
                                items=[p for p in self.attributeless_parameters
                                       if p.used_as_argument > 0],
                                population=self.attributeless_parameters,
                                with_samples=False
                            ),
                            'operand': rate(
                                items=[p for p in self.attributeless_parameters
                                       if p.used_as_operand > 0],
                                population=self.attributeless_parameters,
                                with_samples=False
                            ),
                            'returned': rate(
                                items=[p for p in self.attributeless_parameters if p.returned > 0],
                                population=self.attributeless_parameters,
                                with_samples=False
                            ),
                            'unused': rate(
                                items=self.unused_parameters,
                                population=self.attributeless_parameters
                            )
                        }
                    },
                    'undefined_type': rate(
                        items=self.undefined_type_parameters,
                        population=self.project_parameters
                    ),
                    'exact_type': rate(
                        items=self.exact_type_parameters,
                        population=self.project_parameters
                    ),
                    'scattered_type': rate(
                        items=self.scattered_type_parameters,
                        population=self.project_parameters,
                        sample_items=self.most_types_parameters(sample_size)
                    )
                },
                'additional': {name: unit.as_dict() if isinstance(unit, StatisticUnit) else unit
                               for name, unit in self.analyzer.statistics.items()}
            }
        }
        return utils.deep_filter(lambda x: x is not MISSING, d)

    def format_json(self, with_samples=False, sample_size=20, expand_definitions=True):
        """Render the report as pretty-printed JSON.

        When *expand_definitions* is true, definition objects are
        serialized as small dictionaries instead of failing JSON encoding.
        """
        class Dumper(json.JSONEncoder):
            def default(self, o):
                if expand_definitions:
                    if isinstance(o, ParameterDef):
                        return {
                            'qualified_name': o.qname,
                            'accessed_attributes': list(o.attributes),
                            'suggested_classes': [cls.qname for cls in o.suggested_types]
                        }
                    elif isinstance(o, ClassDef):
                        return {
                            'qualified_name': o.qname,
                            'bases': list(o.bases),
                            'declared_attributes': list(o.attributes)
                        }
                    elif isinstance(o, FunctionDef):
                        return {
                            'qualified_name': o.qname,
                            'parameters': [p.name for p in o.parameters]
                        }
                    elif isinstance(o, ModuleDef):
                        return {
                            'qualified_name': o.qname,
                            'path': o.path
                        }
                return super(Dumper, self).default(o)

        return json.dumps(self.as_dict(with_samples, sample_size), cls=Dumper, indent=2)

    def format_text(self, with_samples=True, samples_size=20, dump_classes=False,
                    dump_functions=False, dump_params=False):
        """Render the report as human-readable text."""
        d = self.as_dict(with_samples, samples_size)
        formatted = '\nTotal indexed: ' \
                    '{} modules, ' \
                    '{} classes, ' \
                    '{} functions with {} parameters'.format(
            d['indexed']['total']['modules'],
            d['indexed']['total']['classes'],
            d['indexed']['total']['functions'],
            d['indexed']['total']['parameters'])
        formatted += '\nIn project: ' \
                     '{} modules, ' \
                     '{} classes, ' \
                     '{} functions with {} parameters'.format(
            d['indexed']['in_project']['modules'],
            d['indexed']['in_project']['classes'],
            d['indexed']['in_project']['functions'],
            d['indexed']['in_project']['parameters'])
        if with_samples:
            formatted += self._format_list(
                header='Most frequently accessed parameters (top {}):'.format(samples_size),
                items=d['project_statistics']['parameters']['accessed_attributes']['top'],
                prefix_func=lambda x: '{:3} attributes'.format(len(x.attributes))
            )
        stat = d['project_statistics']['parameters']
        formatted += textwrap.dedent("""
Parameters statistic:
{} ({:.2%}) parameters have no attributes (types cannot be inferred):
However, of them:
- {:.2%} passed as arguments to other function
- {:.2%} used as operands in arithmetic or logical expressions
- {:.2%} returned from function
- {:.2%} unused
{} ({:.2%}) parameters with accessed attributes, but with no inferred type,
{} ({:.2%}) parameters with accessed attributes and exactly one inferred type,
{} ({:.2%}) parameters with accessed attributes and more than one inferred type
""".format(
            stat['attributeless']['total'], stat['attributeless']['rate'],
            stat['attributeless']['usages']['argument']['rate'],
            stat['attributeless']['usages']['operand']['rate'],
            stat['attributeless']['usages']['returned']['rate'],
            stat['attributeless']['usages']['unused']['rate'],
            stat['undefined_type']['total'], stat['undefined_type']['rate'],
            stat['exact_type']['total'], stat['exact_type']['rate'],
            stat['scattered_type']['total'], stat['scattered_type']['rate']
        ))
        if with_samples:
            formatted += self._format_list(
                header='Parameters with scattered type (top {}):'.format(samples_size),
                items=stat['scattered_type']['sample'],
                prefix_func=lambda x: '{:3} types'.format(len(x.suggested_types))
            )
            formatted += self._format_list(
                header='Parameters with accessed attributes, '
                       'but with no suggested classes (first {}):'.format(samples_size),
                items=stat['undefined_type']['sample']
            )
            formatted += self._format_list(
                header='Parameters that have no attributes and not used directly '
                       'elsewhere (first {}):'.format(samples_size),
                items=stat['attributeless']['usages']['unused']['sample']
            )
            formatted += self._format_list(
                header='Parameters with definitively inferred types '
                       '(first {}):'.format(samples_size),
                items=stat['exact_type']['sample'],
            )
        if dump_classes:
            classes = self._filter_name_prefix(self.project_classes)
            formatted += self._format_list(header='Classes:', items=classes)
        if dump_functions:
            functions = self._filter_name_prefix(self.project_functions)
            formatted += self._format_list(header='Functions:', items=functions)
        if dump_params:
            parameters = self._filter_name_prefix(self.project_parameters)
            chunks = []
            for param in sorted(parameters, key=operator.attrgetter('qname')):
                chunks.append(textwrap.dedent("""\
Parameter {}:
- used: {:d} times
- passed to other function: {:d} times
- used in arithmetic and logical expressions {:d} times
- returned: {:d} times
""".format(param,
           param.used,
           param.used_as_argument,
           param.used_as_operand,
           param.returned)))
            formatted += self._format_list(header='Parameters:', items=chunks)
        formatted += self._format_list(
            header='Additional statistics:',
            items=('{}: {}'.format(k, str(v)) for k, v in self.analyzer.statistics.items())
        )
        return formatted

    def __str__(self):
        return self.format_text()

    def __repr__(self):
        return 'Statistic(project="{}")'.format(self.analyzer.project_root)

    def _format_list(self, items, header=None, prefix_func=None, indentation=' '):
        """Format *items* as an indented list block, one entry per line.

        When *prefix_func* is given, each item gets a computed prefix and
        multi-line items have their tails aligned under the first line.
        """
        formatted = '\n'
        if header is not None:
            formatted += '{}\n'.format(header)
        if not items:
            formatted += indentation + 'none'
        else:
            blocks = []
            for item in items:
                item_text = str(item)
                if prefix_func is not None:
                    prefix = '{}{} : '.format(indentation, prefix_func(item))
                    lines = item_text.splitlines()
                    first_line, remaining_lines = lines[0], lines[1:]
                    block = '{}{}'.format(prefix, first_line)
                    if remaining_lines:
                        indented_tail = indent('\n'.join(remaining_lines), ' ' * len(prefix))
                        blocks.append('{}\n{}'.format(block, indented_tail))
                    else:
                        blocks.append(block)
                else:
                    blocks.append(indent(item_text, indentation))
            formatted += '\n'.join(blocks)
        return formatted + '\n'
|
east825/green-type
|
greentype/core.py
|
Python
|
mit
| 56,441
|
[
"VisIt"
] |
4df68976cf77fe3feed7790a5bda22b7cb2106b8a4033082c8350b797cd135e1
|
# Django settings for hth project.

# Error reports go to these addresses (500s to ADMINS, 404s to MANAGERS).
ADMINS = (
    ('Brian Rutledge', 'bhrutledge@gmail.com'),
)
MANAGERS = ADMINS

# Outgoing mail identification.
EMAIL_SUBJECT_PREFIX = '[HtH] '
SERVER_EMAIL = 'django@hallelujahthehills.com'

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'

# Site-wide date display format plus project-specific formats used for
# rendering date ranges (year/month/day granularity).
DATE_FORMAT = 'F j, Y'
DATE_RANGE_YEAR_FORMAT = '(n/j/y) - (n/j/y)'
DATE_RANGE_MONTH_FORMAT = '(n/j) - (n/j, Y)'
DATE_RANGE_DAY_FORMAT = '(F j)-(j, Y)'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader'
)

TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.request',
    'django.core.context_processors.debug',
    'django.core.context_processors.media',
    'django.core.context_processors.static',
    'django.contrib.auth.context_processors.auth',
    'debugged.core.context_processors.current_site',
    'debugged.bandsite.context_processors.forms',
)

# NOTE: UpdateCacheMiddleware must stay first and FetchFromCacheMiddleware
# last for Django's per-site caching to work as documented.
MIDDLEWARE_CLASSES = (
    'django.middleware.cache.UpdateCacheMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
    'honeypot.middleware.HoneypotMiddleware',
    'django.middleware.cache.FetchFromCacheMiddleware',
)

# Per-site cache: pages are cached for 10 minutes, anonymous visitors only.
CACHE_MIDDLEWARE_SECONDS = 60 * 10
CACHE_MIDDLEWARE_KEY_PREFIX = 'hth'
CACHE_MIDDLEWARE_ANONYMOUS_ONLY = True

ROOT_URLCONF = 'hth.urls'

INSTALLED_APPS = (
    'grappelli',
    'filebrowser',
    'flatblocks',
    'honeypot',
    'django.contrib.admin',
    'django.contrib.admindocs',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.flatpages',
    'django.contrib.staticfiles',
    'debugged.core',
    'debugged.attachments',
    'debugged.contacts',
    'debugged.calendar',
    'debugged.discography',
    'debugged.posts',
    'debugged.bandsite',
    'debugged.management',
)

# django-grappelli admin skin title.
GRAPPELLI_ADMIN_TITLE = 'Hallelujah The Hills'

# django-filebrowser: which file categories each selector may pick.
FILEBROWSER_SELECT_FORMATS = {
    'File': ['Folder', 'Image', 'Document', 'Audio'],
    'Image': ['Image'],
    'Audio': ['Audio'],
    'Document': ['Document'],
    'image': ['Image'],
    'file': ['Folder', 'Image', 'Document', 'Audio'],
}

# django-filebrowser: image versions generated on upload.
FILEBROWSER_VERSIONS = {
    'fb_thumb': {'verbose_name': 'Admin Thumbnail', 'width': 60, 'height': 60, 'opts': 'crop upscale'},
    'thumbnail': {'verbose_name': 'Thumbnail', 'width': 150, 'height': 150, 'opts': 'crop'},
    'medium': {'verbose_name': 'Medium', 'width': 480, 'height': '', 'opts': ''},
    'large': {'verbose_name': 'Large', 'width': 800, 'height': 800, 'opts': ''},
}
FILEBROWSER_ADMIN_VERSIONS = ['thumbnail', 'medium', 'large']
FILEBROWSER_ADMIN_THUMBNAIL = 'fb_thumb'
FILEBROWSER_CONVERT_FILENAME = False

# django-honeypot spam trap: the fake form field bots tend to fill in.
HONEYPOT_FIELD_NAME = 'email'

# Contact form recipients; commented entries are kept for reference.
BANDSITE_CONTACT_EMAILS = [
    {'subject': 'General', 'email': 'ryan@hallelujahthehills.com', 'name': 'Ryan Walsh'},
    #{'subject': 'PR', 'email': 'ever@tinyhuman.com', 'name': 'Ever Kipp, Tiny Human'},
    #{'subject': 'Booking', 'email': 'joe@nicodemusagency.com', 'name': 'Joe Smyth, Nicodemus Agency'},
    #{'subject': 'Website', 'email': 'brian@hallelujahthehills.com'},
]
BANDSITE_LIST_EMAIL = 'hth-list-join@hallelujahthehills.com'

# (type slug, display name) choices used by the debugged.* apps.
DEBUGGED_CONTACT_TYPES = [('artist', 'Artist'), ('band', 'Band'), ('label', 'Label')]
DEBUGGED_LOCATION_TYPES = [('club', 'Club'), ('festival', 'Festival'), ('gallery', 'Gallery'),
                           ('house', 'House'), ('movie-theater', 'Movie Theater'),
                           ('radio', 'Radio'), ('record-store', 'Record Store')]
|
bhrutledge/hallelujahthehills.com
|
hth/settings/__init__.py
|
Python
|
mit
| 4,282
|
[
"Brian"
] |
7c5d1a8826aaad76fae42a3120d4cf0a1e2c5581a717364095ae2354acf3a738
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import functools
import numpy as np
from skbio.util._decorator import experimental
from skbio.diversity._util import (_validate_counts_matrix,
_validate_otu_ids_and_tree,
_vectorize_counts_and_tree)
from skbio.diversity._phylogenetic import _tip_distances
# The default value indicating whether normalization should be applied
# for weighted UniFrac. This is used in two locations, so set in a single
# variable to avoid the code base becoming out of sync in the event of a
# change in this default value.
_normalize_weighted_unifrac_by_default = False
@experimental(as_of="0.4.1")
def unweighted_unifrac(u_counts, v_counts, otu_ids, tree, validate=True):
    """ Compute unweighted UniFrac

    Parameters
    ----------
    u_counts, v_counts: list, np.array
        Vectors of counts/abundances of OTUs for two samples. Must be equal
        length.
    otu_ids: list, np.array
        Vector of OTU ids corresponding to tip names in ``tree``. Must be the
        same length as ``u_counts`` and ``v_counts``.
    tree: skbio.TreeNode
        Tree relating the OTUs in otu_ids. The set of tip names in the tree can
        be a superset of ``otu_ids``, but not a subset.
    validate: bool, optional
        If `False`, validation of the input won't be performed. This step can
        be slow, so if validation is run elsewhere it can be disabled here.
        However, invalid input data can lead to invalid results or error
        messages that are hard to interpret, so this step should not be
        bypassed if you're not certain that your input data are valid. See
        :mod:`skbio.diversity` for the description of what validation entails
        so you can determine if you can safely disable validation.

    Returns
    -------
    float
        The unweighted UniFrac distance between the two samples.

    Raises
    ------
    ValueError, MissingNodeError, DuplicateNodeError
        If validation fails. Exact error will depend on what was invalid.

    See Also
    --------
    weighted_unifrac
    skbio.diversity
    skbio.diversity.beta_diversity

    Notes
    -----
    Unweighted UniFrac was originally described in [1]_. A discussion of
    unweighted (qualitative) versus weighted (quantitative) diversity metrics
    is presented in [2]_. Deeper mathematical discussions of this metric are
    presented in [3]_.

    If computing unweighted UniFrac for multiple pairs of samples, using
    ``skbio.diversity.beta_diversity`` will be much faster than calling this
    function individually on each sample.

    This implementation differs from that in PyCogent (and therefore QIIME
    versions less than 2.0.0) by imposing a few additional restrictions on the
    inputs. First, the input tree must be rooted. In PyCogent, if an unrooted
    tree was provided that had a single trifurcating node (a newick convention
    for unrooted trees) that node was considered the root of the tree. Next,
    all OTU IDs must be tips in the tree. PyCogent would silently ignore OTU
    IDs that were not present in the tree. To reproduce UniFrac results from
    PyCogent with scikit-bio, ensure that your PyCogent UniFrac calculations
    are performed on a rooted tree and that all OTU IDs are present in the
    tree.

    This implementation of unweighted UniFrac is the array-based implementation
    described in [4]_.

    References
    ----------
    .. [1] Lozupone, C. & Knight, R. UniFrac: a new phylogenetic method for
       comparing microbial communities. Appl. Environ. Microbiol. 71, 8228-8235
       (2005).

    .. [2] Lozupone, C. A., Hamady, M., Kelley, S. T. & Knight, R. Quantitative
       and qualitative beta diversity measures lead to different insights into
       factors that structure microbial communities. Appl. Environ. Microbiol.
       73, 1576-1585 (2007).

    .. [3] Lozupone, C., Lladser, M. E., Knights, D., Stombaugh, J. & Knight,
       R. UniFrac: an effective distance metric for microbial community
       comparison. ISME J. 5, 169-172 (2011).

    .. [4] Hamady M, Lozupone C, Knight R. Fast UniFrac: facilitating high-
       throughput phylogenetic analyses of microbial communities including
       analysis of pyrosequencing and PhyloChip data. ISME J. 4(1):17-27
       (2010).

    Examples
    --------
    Assume we have the following abundance data for two samples, ``u`` and
    ``v``, represented as a pair of counts vectors. These counts represent the
    number of times specific Operational Taxonomic Units, or OTUs, were
    observed in each of the samples.

    >>> u_counts = [1, 0, 0, 4, 1, 2, 3, 0]
    >>> v_counts = [0, 1, 1, 6, 0, 1, 0, 0]

    Because UniFrac is a phylogenetic diversity metric, we need to know which
    OTU each count corresponds to, which we'll provide as ``otu_ids``.

    >>> otu_ids = ['OTU1', 'OTU2', 'OTU3', 'OTU4', 'OTU5', 'OTU6', 'OTU7',
    ...            'OTU8']

    We also need a phylogenetic tree that relates the OTUs to one another.

    >>> from io import StringIO
    >>> from skbio import TreeNode
    >>> tree = TreeNode.read(StringIO(
    ...             '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,'
    ...             '(OTU4:0.75,(OTU5:0.5,((OTU6:0.33,OTU7:0.62):0.5'
    ...             ',OTU8:0.5):0.5):0.5):1.25):0.0)root;'))

    We can then compute the unweighted UniFrac distance between the samples.

    >>> from skbio.diversity.beta import unweighted_unifrac
    >>> uu = unweighted_unifrac(u_counts, v_counts, otu_ids, tree)
    >>> print(round(uu, 2))
    0.37

    """
    u_node_counts, v_node_counts, _, _, tree_index =\
        _setup_pairwise_unifrac(u_counts, v_counts, otu_ids, tree, validate,
                                normalized=False, unweighted=True)
    return _unweighted_unifrac(u_node_counts, v_node_counts,
                               tree_index['length'])
@experimental(as_of="0.4.1")
def weighted_unifrac(u_counts, v_counts, otu_ids, tree,
                     normalized=_normalize_weighted_unifrac_by_default,
                     validate=True):
    """ Compute weighted UniFrac with or without branch length normalization

    Parameters
    ----------
    u_counts, v_counts: list, np.array
        Vectors of counts/abundances of OTUs for two samples. Must be equal
        length.
    otu_ids: list, np.array
        Vector of OTU ids corresponding to tip names in ``tree``. Must be the
        same length as ``u_counts`` and ``v_counts``.
    tree: skbio.TreeNode
        Tree relating the OTUs in otu_ids. The set of tip names in the tree can
        be a superset of ``otu_ids``, but not a subset.
    normalized: boolean, optional
        If ``True``, apply branch length normalization, which is described in
        [1]_. Resulting distances will then be in the range ``[0, 1]``.
    validate: bool, optional
        If `False`, validation of the input won't be performed. This step can
        be slow, so if validation is run elsewhere it can be disabled here.
        However, invalid input data can lead to invalid results or error
        messages that are hard to interpret, so this step should not be
        bypassed if you're not certain that your input data are valid. See
        :mod:`skbio.diversity` for the description of what validation entails
        so you can determine if you can safely disable validation.

    Returns
    -------
    float
        The weighted UniFrac distance between the two samples.

    Raises
    ------
    ValueError, MissingNodeError, DuplicateNodeError
        If validation fails. Exact error will depend on what was invalid.

    See Also
    --------
    unweighted_unifrac
    skbio.diversity
    skbio.diversity.beta_diversity

    Notes
    -----
    Weighted UniFrac was originally described in [1]_, which includes a
    discussion of unweighted (qualitative) versus weighted (quantitative)
    diversity metrics. Deeper mathematical discussions of this metric are
    presented in [2]_.

    If computing weighted UniFrac for multiple pairs of samples, using
    ``skbio.diversity.beta_diversity`` will be much faster than calling this
    function individually on each sample.

    This implementation differs from that in PyCogent (and therefore QIIME
    versions less than 2.0.0) by imposing a few additional restrictions on the
    inputs. First, the input tree must be rooted. In PyCogent, if an unrooted
    tree was provided that had a single trifurcating node (a newick convention
    for unrooted trees) that node was considered the root of the tree. Next,
    all OTU IDs must be tips in the tree. PyCogent would silently ignore OTU
    IDs that were not present in the tree. To reproduce UniFrac results from
    PyCogent with scikit-bio, ensure that your PyCogent UniFrac calculations
    are performed on a rooted tree and that all OTU IDs are present in the
    tree.

    This implementation of weighted UniFrac is the array-based implementation
    described in [3]_.

    References
    ----------
    .. [1] Lozupone, C. A., Hamady, M., Kelley, S. T. & Knight, R. Quantitative
       and qualitative beta diversity measures lead to different insights into
       factors that structure microbial communities. Appl. Environ. Microbiol.
       73, 1576-1585 (2007).

    .. [2] Lozupone, C., Lladser, M. E., Knights, D., Stombaugh, J. & Knight,
       R. UniFrac: an effective distance metric for microbial community
       comparison. ISME J. 5, 169-172 (2011).

    .. [3] Hamady M, Lozupone C, Knight R. Fast UniFrac: facilitating high-
       throughput phylogenetic analyses of microbial communities including
       analysis of pyrosequencing and PhyloChip data. ISME J. 4(1):17-27
       (2010).

    Examples
    --------
    Assume we have the following abundance data for two samples, ``u`` and
    ``v``, represented as a pair of counts vectors. These counts represent the
    number of times specific Operational Taxonomic Units, or OTUs, were
    observed in each of the samples.

    >>> u_counts = [1, 0, 0, 4, 1, 2, 3, 0]
    >>> v_counts = [0, 1, 1, 6, 0, 1, 0, 0]

    Because UniFrac is a phylogenetic diversity metric, we need to know which
    OTU each count corresponds to, which we'll provide as ``otu_ids``.

    >>> otu_ids = ['OTU1', 'OTU2', 'OTU3', 'OTU4', 'OTU5', 'OTU6', 'OTU7',
    ...            'OTU8']

    We also need a phylogenetic tree that relates the OTUs to one another.

    >>> from io import StringIO
    >>> from skbio import TreeNode
    >>> tree = TreeNode.read(StringIO(
    ...             '(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,'
    ...             '(OTU4:0.75,(OTU5:0.5,((OTU6:0.33,OTU7:0.62):0.5'
    ...             ',OTU8:0.5):0.5):0.5):1.25):0.0)root;'))

    Compute the weighted UniFrac distance between the samples.

    >>> from skbio.diversity.beta import weighted_unifrac
    >>> wu = weighted_unifrac(u_counts, v_counts, otu_ids, tree)
    >>> print(round(wu, 2))
    1.54

    Compute the weighted UniFrac distance between the samples including
    branch length normalization so the value falls in the range ``[0.0, 1.0]``.

    >>> wu = weighted_unifrac(u_counts, v_counts, otu_ids, tree,
    ...                       normalized=True)
    >>> print(round(wu, 2))
    0.33

    """
    u_node_counts, v_node_counts, u_total_count, v_total_count, tree_index =\
        _setup_pairwise_unifrac(u_counts, v_counts, otu_ids, tree, validate,
                                normalized=normalized, unweighted=False)
    branch_lengths = tree_index['length']
    if normalized:
        tip_indices = _get_tip_indices(tree_index)
        node_to_root_distances = _tip_distances(branch_lengths, tree,
                                                tip_indices)
        return _weighted_unifrac_normalized(u_node_counts, v_node_counts,
                                            u_total_count, v_total_count,
                                            branch_lengths,
                                            node_to_root_distances)
    else:
        return _weighted_unifrac(u_node_counts, v_node_counts,
                                 u_total_count, v_total_count,
                                 branch_lengths)[0]
def _validate(u_counts, v_counts, otu_ids, tree):
    """Validate a pair of count vectors and their OTU ids against ``tree``.

    Raises on invalid input; validation details are delegated to
    ``_validate_counts_matrix`` and ``_validate_otu_ids_and_tree``.
    """
    _validate_counts_matrix([u_counts, v_counts], suppress_cast=True)
    _validate_otu_ids_and_tree(counts=u_counts, otu_ids=otu_ids, tree=tree)
def _setup_pairwise_unifrac(u_counts, v_counts, otu_ids, tree, validate,
                            normalized, unweighted):
    """Validate and vectorize inputs for a single pairwise UniFrac call.

    Returns the per-node count vectors for both samples, the two sample
    totals (sums of the input tip counts), and the tree index produced by
    ``_vectorize_counts_and_tree``.

    NOTE(review): ``normalized`` and ``unweighted`` are not used in this
    body — presumably kept for interface symmetry with the public
    callers; confirm before removing.
    """
    if validate:
        _validate(u_counts, v_counts, otu_ids, tree)
    # temporarily store u_counts and v_counts in a 2-D array as that's what
    # _vectorize_counts_and_tree takes
    u_counts = np.asarray(u_counts)
    v_counts = np.asarray(v_counts)
    counts = np.vstack([u_counts, v_counts])
    counts_by_node, tree_index, branch_lengths = \
        _vectorize_counts_and_tree(counts, otu_ids, tree)
    # unpack counts vectors for single pairwise UniFrac calculation
    u_node_counts = counts_by_node[0]
    v_node_counts = counts_by_node[1]
    u_total_count = u_counts.sum()
    v_total_count = v_counts.sum()
    return (u_node_counts, v_node_counts, u_total_count, v_total_count,
            tree_index)
def _unweighted_unifrac(u_node_counts, v_node_counts, branch_lengths):
"""
Parameters
----------
u_node_counts, v_node_counts : np.array
Vectors indicating presense (value greater than zero) and absense
(value equal to zero) of nodes in two samples, `u` and `v`. Order is
assumed to be the same as in `branch_lengths`.
branch_lengths : np.array
Vector of branch lengths of all nodes (tips and internal nodes) in
postorder representation of their tree.
Returns
-------
float
Unweighted UniFrac distance between samples.
Notes
-----
The count vectors passed here correspond to all nodes in the tree, not
just the tips.
"""
unique_nodes = np.logical_xor(u_node_counts, v_node_counts)
observed_nodes = np.logical_or(u_node_counts, v_node_counts)
unique_branch_length = (branch_lengths * unique_nodes).sum()
observed_branch_length = (branch_lengths * observed_nodes).sum()
if observed_branch_length == 0.0:
# handle special case to avoid division by zero
return 0.0
return unique_branch_length / observed_branch_length
def _weighted_unifrac(u_node_counts, v_node_counts, u_total_count,
v_total_count, branch_lengths):
"""
Parameters
----------
u_node_counts, v_node_counts : np.array
Vectors indicating presense (value greater than zero) and absense
(value equal to zero) of nodes in two samples, `u` and `v`. Order is
assumed to be the same as in `branch_lengths`.
u_total_count, v_total_counts : int
The sum of ``u_node_counts`` and ``v_node_counts`` vectors,
respectively. This could be computed internally, but since this is a
private method and the calling function has already generated these
values, this saves an iteration over each of these vectors.
branch_lengths : np.array
Vector of branch lengths of all nodes (tips and internal nodes) in
postorder representation of their tree.
Returns
-------
float
Weighted UniFrac distance between samples.
np.array of float
Proportional abundance of each node in tree in sample `u`
np.array of float
Proportional abundance of each node in tree in sample `v`
"""
if u_total_count > 0:
# convert to relative abundances if there are any counts
u_node_proportions = u_node_counts / u_total_count
else:
# otherwise, we'll just do the computation with u_node_counts, which
# is necessarily all zeros
u_node_proportions = u_node_counts
if v_total_count > 0:
v_node_proportions = v_node_counts / v_total_count
else:
v_node_proportions = v_node_counts
wu = (branch_lengths *
np.absolute(u_node_proportions - v_node_proportions)).sum()
return wu, u_node_proportions, v_node_proportions
def _weighted_unifrac_normalized(u_node_counts, v_node_counts, u_total_count,
                                 v_total_count, branch_lengths,
                                 node_to_root_distances):
    """
    Parameters
    ----------
    u_node_counts, v_node_counts : np.array
        Per-node count vectors for samples ``u`` and ``v``. Order is
        assumed to be the same as in `branch_lengths`.
    u_total_count, v_total_count : int
        The sum of ``u_node_counts`` and ``v_node_counts`` vectors,
        respectively. This could be computed internally, but since this is a
        private method and the calling function has already generated these
        values, this saves an iteration over each of these vectors.
    branch_lengths : np.array
        Vector of branch lengths of all nodes (tips and internal nodes) in
        postorder representation of their tree.
    node_to_root_distances : np.ndarray
        Distance from each tip to the root (zero elsewhere), used for the
        branch length correction.

    Returns
    -------
    float
        Normalized weighted UniFrac distance between samples.

    Notes
    -----
    The count vectors passed here correspond to all nodes in the tree, not
    just the tips.

    """
    if u_total_count == 0.0 and v_total_count == 0.0:
        # handle special case to avoid division by zero
        return 0.0
    u, u_node_proportions, v_node_proportions = _weighted_unifrac(
        u_node_counts, v_node_counts, u_total_count, v_total_count,
        branch_lengths)
    c = _weighted_unifrac_branch_correction(
        node_to_root_distances, u_node_proportions, v_node_proportions)
    return u / c
def _setup_multiple_unifrac(counts, otu_ids, tree, validate):
    """Optionally validate inputs, then vectorize counts onto the tree.

    Only the first row of ``counts`` is passed to
    ``_validate_otu_ids_and_tree`` — presumably because every row shares
    the same OTU ids, so one row suffices; confirm against that helper.
    """
    if validate:
        _validate_otu_ids_and_tree(counts[0], otu_ids, tree)
    counts_by_node, tree_index, branch_lengths = \
        _vectorize_counts_and_tree(counts, otu_ids, tree)
    return counts_by_node, tree_index, branch_lengths
def _setup_multiple_unweighted_unifrac(counts, otu_ids, tree, validate):
    """ Create optimized pdist-compatible unweighted UniFrac function

    Parameters
    ----------
    counts : 2D array_like of ints or floats
        Matrix containing count/abundance data where each row contains counts
        of observations in a given sample.
    otu_ids: list, np.array
        Vector of OTU ids corresponding to tip names in ``tree``. Must be the
        same length as each row of ``counts``. These IDs do not need to
        be in tip order with respect to the tree.
    tree: skbio.TreeNode
        Tree relating the OTUs in otu_ids. The set of tip names in the tree can
        be a superset of ``otu_ids``, but not a subset.
    validate: bool, optional
        If `False`, validation of the input won't be performed.

    Returns
    -------
    function
        Optimized pairwise unweighted UniFrac calculator that can be passed
        to ``scipy.spatial.distance.pdist``.
    2D np.array of ints, floats
        Counts of all nodes in ``tree``.

    """
    counts_by_node, _, branch_lengths = \
        _setup_multiple_unifrac(counts, otu_ids, tree, validate)
    # Branch lengths are fixed for the whole matrix, so bind them once.
    f = functools.partial(_unweighted_unifrac, branch_lengths=branch_lengths)
    return f, counts_by_node
def _setup_multiple_weighted_unifrac(counts, otu_ids, tree, normalized,
                                     validate):
    """ Create optimized pdist-compatible weighted UniFrac function

    Parameters
    ----------
    counts : 2D array_like of ints or floats
        Matrix containing count/abundance data where each row contains counts
        of observations in a given sample.
    otu_ids: list, np.array
        Vector of OTU ids corresponding to tip names in ``tree``. Must be the
        same length as each row of ``counts``. These IDs do not need to
        be in tip order with respect to the tree.
    tree: skbio.TreeNode
        Tree relating the OTUs in otu_ids. The set of tip names in the tree can
        be a superset of ``otu_ids``, but not a subset.
    normalized: bool
        If ``True``, the returned function applies branch length
        normalization.
    validate: bool, optional
        If `False`, validation of the input won't be performed.

    Returns
    -------
    function
        Optimized pairwise weighted UniFrac calculator that can be passed
        to ``scipy.spatial.distance.pdist``.
    2D np.array of ints, floats
        Counts of all nodes in ``tree``.

    """
    counts_by_node, tree_index, branch_lengths = \
        _setup_multiple_unifrac(counts, otu_ids, tree, validate)
    tip_indices = _get_tip_indices(tree_index)
    if normalized:
        node_to_root_distances = _tip_distances(branch_lengths, tree,
                                                tip_indices)

        def f(u_node_counts, v_node_counts):
            # Sample totals are recovered from the tip entries of the
            # per-node vectors.
            u_total_count = np.take(u_node_counts, tip_indices).sum()
            v_total_count = np.take(v_node_counts, tip_indices).sum()
            u = _weighted_unifrac_normalized(
                u_node_counts, v_node_counts, u_total_count, v_total_count,
                branch_lengths, node_to_root_distances)
            return u
    else:
        def f(u_node_counts, v_node_counts):
            u_total_count = np.take(u_node_counts, tip_indices).sum()
            v_total_count = np.take(v_node_counts, tip_indices).sum()
            u, _, _ = _weighted_unifrac(u_node_counts, v_node_counts,
                                        u_total_count, v_total_count,
                                        branch_lengths)
            return u
    return f, counts_by_node
def _get_tip_indices(tree_index):
tip_indices = np.array([n.id for n in tree_index['id_index'].values()
if n.is_tip()])
return tip_indices
def _weighted_unifrac_branch_correction(node_to_root_distances,
u_node_proportions,
v_node_proportions):
"""Calculates weighted unifrac branch length correction.
Parameters
----------
node_to_root_distances : np.ndarray
1D column vector of branch lengths in post order form. There should be
positions in this vector for all nodes in the tree, but only tips
should be non-zero.
u_node_proportions, v_node_proportions : np.ndarray
Proportional abundace of observations of all nodes in the tree in
samples ``u`` and ``v``, respectively.
u_total_count, v_total_count : float
The sum of the observations in samples ``u`` and ``v``, respectively.
Returns
-------
np.ndarray
The corrected branch lengths
"""
return (node_to_root_distances.ravel() *
(u_node_proportions + v_node_proportions)).sum()
|
anderspitman/scikit-bio
|
skbio/diversity/beta/_unifrac.py
|
Python
|
bsd-3-clause
| 23,175
|
[
"scikit-bio"
] |
7a0c0117d721561b92494d3a95f59a4473cd0238ba4ae2d3950214b2bf916237
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration: drops the ``improvementissues`` field from
    # the ``visit`` model. Must run after migration 0091 of the visit app.
    dependencies = [
        ('visit', '0091_auto_20150830_0950'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='visit',
            name='improvementissues',
        ),
    ]
|
koebbe/homeworks
|
visit/migrations/0092_remove_visit_improvementissues.py
|
Python
|
mit
| 360
|
[
"VisIt"
] |
307674de9ae307138e8e9dcfd19a8e659090369979440fa645d1133637145f74
|
# -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numbers
import logging
import numpy as np
import traits.api as t
from scipy import constants
from hyperspy._signals.signal1d import Signal1D
from hyperspy.misc.elements import elements as elements_db
import hyperspy.axes
from hyperspy.decorators import only_interactive
from hyperspy.gui.eels import TEMParametersUI
from hyperspy.defaults_parser import preferences
import hyperspy.gui.messages as messagesui
from hyperspy.external.progressbar import progressbar
from hyperspy.components1d import PowerLaw
from hyperspy.misc.utils import isiterable, closest_power_of_two, underline
from hyperspy.misc.utils import without_nans
_logger = logging.getLogger(__name__)
class EELSSpectrum(Signal1D):
_signal_type = "EELS"
_alias_signal_types = ["TEM EELS"]
def __init__(self, *args, **kwargs):
    """Initialize the EELS signal and its element/subshell bookkeeping."""
    super().__init__(*args, **kwargs)
    # Attributes defaults
    self.subshells = set()  # subshell labels, e.g. 'C_K', in energy range
    self.elements = set()   # chemical element symbols declared for sample
    self.edges = list()
    # Re-declare any elements already recorded in the signal's metadata so
    # the in-memory sets stay consistent with it.
    if hasattr(self.metadata, 'Sample') and \
            hasattr(self.metadata.Sample, 'elements'):
        self.add_elements(self.metadata.Sample.elements)
    self.metadata.Signal.binned = True
def add_elements(self, elements, include_pre_edges=False):
    """Declare the elemental composition of the sample.

    The ionisation edges of the elements present in the current
    energy range will be added automatically.

    Parameters
    ----------
    elements : tuple of strings
        The symbol of the elements. Note this input must always be
        in the form of a tuple. Meaning: add_elements(('C',)) will
        work, while add_elements(('C')) will NOT work.
    include_pre_edges : bool
        If True, the ionization edges with an onset below the lower
        energy limit of the SI will be included

    Examples
    --------
    >>> s = hs.signals.EELSSpectrum(np.arange(1024))
    >>> s.add_elements(('C', 'O'))
    Adding C_K subshell
    Adding O_K subshell

    Raises
    ------
    ValueError
        If ``elements`` is not iterable or is a bare string, or if a
        symbol is not a valid chemical element.

    """
    if not isiterable(elements) or isinstance(elements, str):
        raise ValueError(
            "Input must be in the form of a tuple. For example, "
            "if `s` is the variable containing this EELS spectrum:\n "
            ">>> s.add_elements(('C',))\n"
            "See the docstring for more information.")
    for element in elements:
        # Symbols read from binary containers may arrive as bytes.
        if isinstance(element, bytes):
            element = element.decode()
        if element in elements_db:
            self.elements.add(element)
        else:
            raise ValueError(
                "%s is not a valid symbol of a chemical element"
                % element)
    # Keep the signal metadata in sync with the in-memory element set.
    if not hasattr(self.metadata, 'Sample'):
        self.metadata.add_node('Sample')
    self.metadata.Sample.elements = list(self.elements)
    if self.elements:
        self.generate_subshells(include_pre_edges)
def generate_subshells(self, include_pre_edges=False):
    """Calculate the subshells for the current energy range for the
    elements present in self.elements

    Parameters
    ----------
    include_pre_edges : bool
        If True, the ionization edges with an onset below the lower
        energy limit of the SI will be included

    """
    Eaxis = self.axes_manager.signal_axes[0].axis
    # Edges with an onset below the signal axis are skipped unless
    # include_pre_edges is set, in which case everything from 0 eV counts.
    if not include_pre_edges:
        start_energy = Eaxis[0]
    else:
        start_energy = 0.
    end_energy = Eaxis[-1]
    for element in self.elements:
        binding_energies = elements_db[element][
            'Atomic_properties']['Binding_energies']
        for shell in binding_energies:
            # Shell names ending in 'a' are skipped (same filter as the
            # original implementation).
            if shell[-1] != 'a':
                energy = binding_energies[shell]['onset_energy (eV)']
                if start_energy <= energy <= end_energy:
                    # set.add makes the previous explicit membership
                    # check redundant. (The original also accumulated an
                    # `e_shells` list that was never used; removed.)
                    self.subshells.add('%s_%s' % (element, shell))
def estimate_zero_loss_peak_centre(self, mask=None):
    """Estimate the position of the zero-loss peak.

    This function provides just a coarse estimation of the position
    of the zero-loss peak centre by computing the position of the maximum
    of the spectra. For subpixel accuracy use `estimate_shift1D`.

    Parameters
    ----------
    mask : Signal1D of bool data type.
        It must have signal_dimension = 0 and navigation_shape equal to the
        current signal. Where mask is True the shift is not computed
        and set to nan.

    Returns
    -------
    zlpc : Signal1D subclass
        The estimated position of the maximum of the ZLP peak.

    Notes
    -----
    This function only works when the zero-loss peak is the most
    intense feature in the spectrum. If it is not in most cases
    the spectrum can be cropped to meet this criterion.
    Alternatively use `estimate_shift1D`.

    See Also
    --------
    estimate_shift1D, align_zero_loss_peak

    """
    self._check_signal_dimension_equals_one()
    self._check_navigation_mask(mask)
    # Coarse estimate: position of the maximum along the signal axis.
    zlpc = self.valuemax(-1)
    if mask is not None:
        # Masked navigation positions get NaN instead of a position.
        zlpc.data[mask.data] = np.nan
    zlpc.set_signal_type("")
    title = self.metadata.General.title
    zlpc.metadata.General.title = "ZLP(%s)" % title
    return zlpc
def align_zero_loss_peak(
self,
calibrate=True,
also_align=[],
print_stats=True,
subpixel=True,
mask=None,
signal_range=None,
show_progressbar=None,
**kwargs):
"""Align the zero-loss peak.
This function first aligns the spectra using the result of
`estimate_zero_loss_peak_centre` and afterward, if subpixel is True,
proceeds to align with subpixel accuracy using `align1D`. The offset
is automatically correct if `calibrate` is True.
Parameters
----------
calibrate : bool
If True, set the offset of the spectral axis so that the
zero-loss peak is at position zero.
also_align : list of signals
A list containing other spectra of identical dimensions to
align using the shifts applied to the current spectrum.
If `calibrate` is True, the calibration is also applied to
the spectra in the list.
print_stats : bool
If True, print summary statistics of the ZLP maximum before
the aligment.
subpixel : bool
If True, perform the alignment with subpixel accuracy
using cross-correlation.
mask : Signal1D of bool data type.
It must have signal_dimension = 0 and navigation_shape equal to the
current signal. Where mask is True the shift is not computed
and set to nan.
signal_range : tuple of integers, tuple of floats. Optional
Will only search for the ZLP within the signal_range. If given
in integers, the range will be in index values. If given floats,
the range will be in spectrum values. Useful if there are features
in the spectrum which are more intense than the ZLP.
Default is searching in the whole signal.
show_progressbar : None or bool
If True, display a progress bar. If None the default is set in
`preferences`.
Examples
--------
>>>> s_ll.align_zero_loss_peak()
Aligning both the lowloss signal and another signal
>>>> s_ll.align_zero_loss_peak(also_align=[s])
Aligning within a narrow range of the lowloss signal
>>>> s_ll.align_zero_loss_peak(signal_range=(-10.,10.))
See Also
--------
estimate_zero_loss_peak_centre, align1D, estimate_shift1D.
Notes
-----
Any extra keyword arguments are passed to `align1D`. For
more information read its docstring.
"""
def substract_from_offset(value, signals):
for signal in signals:
signal.axes_manager[-1].offset -= value
def estimate_zero_loss_peak_centre(s, mask, signal_range):
if signal_range:
zlpc = s.isig[signal_range[0]:signal_range[1]].\
estimate_zero_loss_peak_centre(mask=mask)
else:
zlpc = s.estimate_zero_loss_peak_centre(mask=mask)
return zlpc
zlpc = estimate_zero_loss_peak_centre(self, mask, signal_range)
mean_ = without_nans(zlpc.data).mean()
if print_stats is True:
print()
print(underline("Initial ZLP position statistics"))
zlpc.print_summary_statistics()
for signal in also_align + [self]:
signal.shift1D(-
zlpc.data +
mean_, show_progressbar=show_progressbar)
if calibrate is True:
zlpc = estimate_zero_loss_peak_centre(self, mask, signal_range)
substract_from_offset(without_nans(zlpc.data).mean(),
also_align + [self])
if subpixel is False:
return
left, right = -3., 3.
if calibrate is False:
mean_ = without_nans(estimate_zero_loss_peak_centre(
self, mask, signal_range).data).mean()
left += mean_
right += mean_
left = (left if left > self.axes_manager[-1].axis[0]
else self.axes_manager[-1].axis[0])
right = (right if right < self.axes_manager[-1].axis[-1]
else self.axes_manager[-1].axis[-1])
self.align1D(
left,
right,
also_align=also_align,
show_progressbar=show_progressbar,
**kwargs)
zlpc = self.estimate_zero_loss_peak_centre(mask=mask)
if calibrate is True:
substract_from_offset(without_nans(zlpc.data).mean(),
also_align + [self])
    def estimate_elastic_scattering_intensity(
            self, threshold, show_progressbar=None):
        """Rough estimation of the elastic scattering intensity by
        truncation of a EELS low-loss spectrum.

        Parameters
        ----------
        threshold : {Signal1D, float, int}
            Truncation energy to estimate the intensity of the elastic
            scattering. The threshold can be provided as a signal of the same
            dimension as the input spectrum navigation space containing the
            threshold value in the energy units. Alternatively a constant
            threshold can be specified in energy/index units by passing
            float/int.
        show_progressbar : None or bool
            If True, display a progress bar. If None the default is set in
            `preferences`.

        Returns
        -------
        I0: Signal1D
            The elastic scattering intensity.

        See Also
        --------
        estimate_elastic_scattering_threshold
        """
        # TODO: Write units tests
        self._check_signal_dimension_equals_one()
        if show_progressbar is None:
            show_progressbar = preferences.General.show_progressbar
        if isinstance(threshold, numbers.Number):
            # Constant threshold: integrate every spectrum up to the same
            # energy/index in one vectorized call.
            I0 = self.isig[:threshold].integrate1D(-1)
        else:
            # Per-position threshold signal: iterate spectrum by spectrum,
            # truncating each at its own threshold value.
            I0 = self._get_navigation_signal()
            I0.axes_manager.set_signal_dimension(0)
            with progressbar(total=self.axes_manager.navigation_size,
                             disable=not show_progressbar,
                             leave=True) as pbar:
                for i, (i0, th, s) in enumerate(zip(I0._iterate_signal(),
                                                    threshold._iterate_signal(),
                                                    self)):
                    # A nan threshold marks positions where no estimate is
                    # possible (see estimate_elastic_scattering_threshold).
                    if np.isnan(th[0]):
                        i0[:] = np.nan
                    else:
                        i0[:] = s.isig[:th[0]].integrate1D(-1).data
                    pbar.update(1)
        I0.metadata.General.title = (
            self.metadata.General.title + ' elastic intensity')
        I0.set_signal_type("")
        if self.tmp_parameters.has_item('filename'):
            I0.tmp_parameters.filename = (
                self.tmp_parameters.filename +
                '_elastic_intensity')
            I0.tmp_parameters.folder = self.tmp_parameters.folder
            I0.tmp_parameters.extension = \
                self.tmp_parameters.extension
        return I0
    def estimate_elastic_scattering_threshold(self,
                                              window=10.,
                                              tol=None,
                                              window_length=5,
                                              polynomial_order=3,
                                              start=1.):
        """Calculate the first inflexion point of the spectrum derivative
        within a window.

        This method assumes that the zero-loss peak is located at position
        zero in all the spectra. Currently it looks for an inflexion point,
        that can be a local maximum or minimum. Therefore, to estimate the
        elastic scattering threshold `start` + `window` must be less than
        the first maximum for all spectra (often the bulk plasmon maximum).
        If there is more than one inflexion point in the energy window it
        selects the smoother one what, often, but not always, is a good
        choice in this case.

        Parameters
        ----------
        window : {None, float}
            If None, the search for the local inflexion point is performed
            using the full energy range. A positive float will restrict
            the search to the (0,window] energy window, where window is given
            in the axis units. If no inflexion point is found in this
            spectral range the window value is returned instead.
        tol : {None, float}
            The threshold tolerance for the derivative. If None it is
            automatically calculated as the minimum value that guarantees
            finding an inflexion point in all the spectra in the given
            energy range.
        window_length : int
            If non zero performs order three Savitzky-Golay smoothing
            to the data to avoid falling in local minima caused by
            the noise. It must be an odd integer.
        polynomial_order : int
            Savitzky-Golay filter polynomial order.
        start : float
            Position from the zero-loss peak centre from where to start
            looking for the inflexion point.

        Returns
        -------
        threshold : Signal1D
            A Signal1D of the same dimension as the input spectrum
            navigation space containing the estimated threshold. Where the
            threshold couldn't be estimated the value is set to nan.

        See Also
        --------
        estimate_elastic_scattering_intensity, align_zero_loss_peak,
        find_peaks1D_ohaver, fourier_ratio_deconvolution.

        Notes
        -----
        The main purpose of this method is to be used as input for
        `estimate_elastic_scattering_intensity`. Indeed, for currently
        achievable energy resolutions, there is not such a thing as an
        elastic scattering threshold. Therefore, please be aware of the
        limitations of this method when using it.
        """
        self._check_signal_dimension_equals_one()
        # Create threshold with the same shape as the navigation dims.
        threshold = self._get_navigation_signal().transpose(signal_axes=0)
        # Progress Bar
        axis = self.axes_manager.signal_axes[0]
        min_index, max_index = axis.value_range_to_indices(start,
                                                           start + window)
        # Need a minimum number of channels for a meaningful derivative.
        if max_index < min_index + 10:
            raise ValueError("Please select a bigger window")
        s = self.isig[min_index:max_index].deepcopy()
        if window_length:
            # Smoothed first derivative: less sensitive to noise-induced
            # local minima than the plain finite difference.
            s.smooth_savitzky_golay(polynomial_order=polynomial_order,
                                    window_length=window_length,
                                    differential_order=1)
        else:
            s = s.diff(-1)
        if tol is None:
            # Smallest tolerance that guarantees a hit in every spectrum.
            tol = np.max(np.abs(s.data).min(axis.index_in_array))
        saxis = s.axes_manager[-1]
        # First channel where the derivative magnitude drops below tol.
        inflexion = (np.abs(s.data) <= tol).argmax(saxis.index_in_array)
        threshold.data[:] = saxis.index2value(inflexion)
        # argmax == 0 means no channel satisfied the condition: mark nan.
        if isinstance(inflexion, np.ndarray):
            threshold.data[inflexion == 0] = np.nan
        else:  # Single spectrum
            if inflexion == 0:
                threshold.data[:] = np.nan
        del s
        if np.isnan(threshold.data).any():
            _logger.warning(
                "No inflexion point could be found in some positions "
                "that have been marked with nans.")
        # Create spectrum image, stop and return value
        threshold.metadata.General.title = (
            self.metadata.General.title +
            ' elastic scattering threshold')
        if self.tmp_parameters.has_item('filename'):
            threshold.tmp_parameters.filename = (
                self.tmp_parameters.filename +
                '_elastic_scattering_threshold')
            threshold.tmp_parameters.folder = self.tmp_parameters.folder
            threshold.tmp_parameters.extension = \
                self.tmp_parameters.extension
        threshold.set_signal_type("")
        return threshold
def estimate_thickness(self,
threshold,
zlp=None,):
"""Estimates the thickness (relative to the mean free path)
of a sample using the log-ratio method.
The current EELS spectrum must be a low-loss spectrum containing
the zero-loss peak. The hyperspectrum must be well calibrated
and aligned.
Parameters
----------
threshold : {Signal1D, float, int}
Truncation energy to estimate the intensity of the
elastic scattering. The threshold can be provided as a signal of
the same dimension as the input spectrum navigation space
containing the threshold value in the energy units. Alternatively a
constant threshold can be specified in energy/index units by
passing float/int.
zlp : {None, EELSSpectrum}
If not None the zero-loss peak intensity is calculated from the ZLP
spectrum supplied by integration using Simpson's rule. If None
estimates the zero-loss peak intensity using
`estimate_elastic_scattering_intensity` by truncation.
Returns
-------
s : Signal1D
The thickness relative to the MFP. It returns a Signal1D,
Signal2D or a BaseSignal, depending on the current navigation
dimensions.
Notes
-----
For details see: Egerton, R. Electron Energy-Loss
Spectroscopy in the Electron Microscope. Springer-Verlag, 2011.
"""
# TODO: Write units tests
self._check_signal_dimension_equals_one()
axis = self.axes_manager.signal_axes[0]
total_intensity = self.integrate1D(axis.index_in_array).data
if zlp is not None:
I0 = zlp.integrate1D(axis.index_in_array).data
else:
I0 = self.estimate_elastic_scattering_intensity(
threshold=threshold,).data
t_over_lambda = np.log(total_intensity / I0)
s = self._get_navigation_signal(data=t_over_lambda)
s.metadata.General.title = (self.metadata.General.title +
' $\\frac{t}{\\lambda}$')
if self.tmp_parameters.has_item('filename'):
s.tmp_parameters.filename = (
self.tmp_parameters.filename +
'_relative_thickness')
s.tmp_parameters.folder = self.tmp_parameters.folder
s.tmp_parameters.extension = \
self.tmp_parameters.extension
s.axes_manager.set_signal_dimension(0)
s.set_signal_type("")
return s
    def fourier_log_deconvolution(self,
                                  zlp,
                                  add_zlp=False,
                                  crop=False):
        """Performs fourier-log deconvolution.

        Parameters
        ----------
        zlp : EELSSpectrum
            The corresponding zero-loss peak.
        add_zlp : bool
            If True, adds the ZLP to the deconvolved spectrum
        crop : bool
            If True crop the spectrum to leave out the channels that
            have been modified to decay smoothly to zero at the sides
            of the spectrum.

        Returns
        -------
        An EELSSpectrum containing the current data deconvolved.

        Notes
        -----
        For details see: Egerton, R. Electron Energy-Loss
        Spectroscopy in the Electron Microscope. Springer-Verlag, 2011.
        """
        self._check_signal_dimension_equals_one()
        s = self.deepcopy()
        zlp_size = zlp.axes_manager.signal_axes[0].size
        self_size = self.axes_manager.signal_axes[0].size
        # Taper the spectrum edges to zero; the return value is the number
        # of channels modified by the taper (used by `crop` below).
        tapped_channels = s.hanning_taper()
        # Conservative new size to solve the wrap-around problem
        size = zlp_size + self_size - 1
        # Increase to the closest power of two to enhance the FFT
        # performance
        size = closest_power_of_two(size)
        axis = self.axes_manager.signal_axes[0]
        z = np.fft.rfft(zlp.data, n=size, axis=axis.index_in_array)
        j = np.fft.rfft(s.data, n=size, axis=axis.index_in_array)
        # Fourier-log formula: S = Z * log(J / Z); nan_to_num guards
        # against channels where the ratio is non-finite.
        j1 = z * np.nan_to_num(np.log(j / z))
        sdata = np.fft.irfft(j1, axis=axis.index_in_array)
        # Keep only the original number of channels (drop FFT padding).
        s.data = sdata[s.axes_manager._get_data_slice(
            [(axis.index_in_array, slice(None, self_size)), ])]
        if add_zlp is True:
            # Add the ZLP back over the overlapping channel range.
            if self_size >= zlp_size:
                s.data[s.axes_manager._get_data_slice(
                    [(axis.index_in_array, slice(None, zlp_size)), ])
                ] += zlp.data
            else:
                s.data += zlp.data[s.axes_manager._get_data_slice(
                    [(axis.index_in_array, slice(None, self_size)), ])]
        s.metadata.General.title = (s.metadata.General.title +
                                    ' after Fourier-log deconvolution')
        if s.tmp_parameters.has_item('filename'):
            s.tmp_parameters.filename = (
                self.tmp_parameters.filename +
                '_after_fourier_log_deconvolution')
        if crop is True:
            # Remove the channels altered by the Hanning taper.
            s.crop(axis.index_in_axes_manager,
                   None, int(-tapped_channels))
        return s
def fourier_ratio_deconvolution(self, ll,
fwhm=None,
threshold=None,
extrapolate_lowloss=True,
extrapolate_coreloss=True):
"""Performs Fourier-ratio deconvolution.
The core-loss should have the background removed. To reduce
the noise amplication the result is convolved with a
Gaussian function.
Parameters
----------
ll: EELSSpectrum
The corresponding low-loss (ll) EELSSpectrum.
fwhm : float or None
Full-width half-maximum of the Gaussian function by which
the result of the deconvolution is convolved. It can be
used to select the final SNR and spectral resolution. If
None, the FWHM of the zero-loss peak of the low-loss is
estimated and used.
threshold : {None, float}
Truncation energy to estimate the intensity of the
elastic scattering. If None the threshold is taken as the
first minimum after the ZLP centre.
extrapolate_lowloss, extrapolate_coreloss : bool
If True the signals are extrapolated using a power law,
Notes
-----
For details see: Egerton, R. Electron Energy-Loss
Spectroscopy in the Electron Microscope. Springer-Verlag, 2011.
"""
self._check_signal_dimension_equals_one()
orig_cl_size = self.axes_manager.signal_axes[0].size
if threshold is None:
threshold = ll.estimate_elastic_scattering_threshold()
if extrapolate_coreloss is True:
cl = self.power_law_extrapolation(
window_size=20,
extrapolation_size=100)
else:
cl = self.deepcopy()
if extrapolate_lowloss is True:
ll = ll.power_law_extrapolation(
window_size=100,
extrapolation_size=100)
else:
ll = ll.deepcopy()
ll.hanning_taper()
cl.hanning_taper()
ll_size = ll.axes_manager.signal_axes[0].size
cl_size = self.axes_manager.signal_axes[0].size
# Conservative new size to solve the wrap-around problem
size = ll_size + cl_size - 1
# Increase to the closest multiple of two to enhance the FFT
# performance
size = int(2 ** np.ceil(np.log2(size)))
axis = ll.axes_manager.signal_axes[0]
if fwhm is None:
fwhm = float(ll.get_current_signal().estimate_peak_width()())
_logger.info("FWHM = %1.2f" % fwhm)
I0 = ll.estimate_elastic_scattering_intensity(threshold=threshold)
I0 = I0.data
if ll.axes_manager.navigation_size > 0:
I0_shape = list(I0.shape)
I0_shape.insert(axis.index_in_array, 1)
I0 = I0.reshape(I0_shape)
from hyperspy.components1d import Gaussian
g = Gaussian()
g.sigma.value = fwhm / 2.3548
g.A.value = 1
g.centre.value = 0
zl = g.function(
np.linspace(axis.offset,
axis.offset + axis.scale * (size - 1),
size))
z = np.fft.rfft(zl)
jk = np.fft.rfft(cl.data, n=size, axis=axis.index_in_array)
jl = np.fft.rfft(ll.data, n=size, axis=axis.index_in_array)
zshape = [1, ] * len(cl.data.shape)
zshape[axis.index_in_array] = jk.shape[axis.index_in_array]
cl.data = np.fft.irfft(z.reshape(zshape) * jk / jl,
axis=axis.index_in_array)
cl.data *= I0
cl.crop(-1, None, int(orig_cl_size))
cl.metadata.General.title = (self.metadata.General.title +
' after Fourier-ratio deconvolution')
if cl.tmp_parameters.has_item('filename'):
cl.tmp_parameters.filename = (
self.tmp_parameters.filename +
'after_fourier_ratio_deconvolution')
return cl
    def richardson_lucy_deconvolution(self, psf, iterations=15, mask=None,
                                      show_progressbar=None):
        """1D Richardson-Lucy Poissonian deconvolution of
        the spectrum by the given kernel.

        Parameters
        ----------
        iterations: int
            Number of iterations of the deconvolution. Note that
            increasing the value will increase the noise amplification.
        psf: EELSSpectrum
            It must have the same signal dimension as the current
            spectrum and a spatial dimension of 0 or the same as the
            current spectrum.
        show_progressbar : None or bool
            If True, display a progress bar. If None the default is set in
            `preferences`.

        Notes
        -----
        For details on the algorithm see Gloter, A., A. Douiri,
        M. Tence, and C. Colliex. "Improving Energy Resolution of
        EELS Spectra: An Alternative to the Monochromator Solution."
        Ultramicroscopy 96, no. 3-4 (September 2003): 385-400.
        """
        # NOTE(review): the `mask` parameter is accepted but not used in
        # this implementation — confirm whether masking was intended.
        if show_progressbar is None:
            show_progressbar = preferences.General.show_progressbar
        self._check_signal_dimension_equals_one()
        ds = self.deepcopy()
        ds.data = ds.data.copy()
        ds.metadata.General.title += (
            ' after Richardson-Lucy deconvolution %i iterations' %
            iterations)
        if ds.tmp_parameters.has_item('filename'):
            ds.tmp_parameters.filename += (
                '_after_R-L_deconvolution_%iiter' % iterations)
        psf_size = psf.axes_manager.signal_axes[0].size
        # Current kernel and its peak index; both are refreshed per
        # navigation position when the PSF has a navigation dimension.
        kernel = psf()
        imax = kernel.argmax()
        j = 0
        maxval = self.axes_manager.navigation_size
        show_progressbar = show_progressbar and (maxval > 0)
        for D in progressbar(self, total=maxval,
                             disable=not show_progressbar,
                             leave=True):
            D = D.data.copy()
            if psf.axes_manager.navigation_dimension != 0:
                kernel = psf(axes_manager=self.axes_manager)
                imax = kernel.argmax()
            # View into the output signal at the current position.
            s = ds(axes_manager=self.axes_manager)
            mimax = psf_size - 1 - imax
            O = D.copy()
            # Richardson-Lucy multiplicative update:
            # O <- O * (K_mirror conv (D / (K conv O))), with the slices
            # keeping the estimate aligned with the kernel maximum.
            for i in range(iterations):
                first = np.convolve(kernel, O)[imax: imax + psf_size]
                O = O * (np.convolve(kernel[::-1],
                                     D / first)[mimax: mimax + psf_size])
            s[:] = O
            j += 1
        return ds
def _are_microscope_parameters_missing(self):
"""Check if the EELS parameters necessary to calculate the GOS
are defined in metadata. If not, in interactive mode
raises an UI item to fill the values"""
must_exist = (
'Acquisition_instrument.TEM.convergence_angle',
'Acquisition_instrument.TEM.beam_energy',
'Acquisition_instrument.TEM.Detector.EELS.collection_angle',)
missing_parameters = []
for item in must_exist:
exists = self.metadata.has_item(item)
if exists is False:
missing_parameters.append(item)
if missing_parameters:
if preferences.General.interactive is True:
par_str = "The following parameters are missing:\n"
for par in missing_parameters:
par_str += '%s\n' % par
par_str += 'Please set them in the following wizard'
is_ok = messagesui.information(par_str)
if is_ok:
self._set_microscope_parameters()
else:
return True
else:
return True
else:
return False
def set_microscope_parameters(self,
beam_energy=None,
convergence_angle=None,
collection_angle=None):
"""Set the microscope parameters that are necessary to calculate
the GOS.
If not all of them are defined, raises in interactive mode
raises an UI item to fill the values
beam_energy: float
The energy of the electron beam in keV
convengence_angle : float
The microscope convergence semi-angle in mrad.
collection_angle : float
The collection semi-angle in mrad.
"""
mp = self.metadata
if beam_energy is not None:
mp.set_item("Acquisition_instrument.TEM.beam_energy", beam_energy)
if convergence_angle is not None:
mp.set_item(
"Acquisition_instrument.TEM.convergence_angle",
convergence_angle)
if collection_angle is not None:
mp.set_item(
"Acquisition_instrument.TEM.Detector.EELS.collection_angle",
collection_angle)
self._are_microscope_parameters_missing()
    @only_interactive
    def _set_microscope_parameters(self):
        """Open a traits UI dialog to edit the EELS acquisition parameters
        and write the edited values back into metadata."""
        tem_par = TEMParametersUI()
        # Map metadata items to the corresponding tem_par trait names.
        mapping = {
            'Acquisition_instrument.TEM.convergence_angle':
            'tem_par.convergence_angle',
            'Acquisition_instrument.TEM.beam_energy':
            'tem_par.beam_energy',
            'Acquisition_instrument.TEM.Detector.EELS.collection_angle':
            'tem_par.collection_angle',
        }
        # Pre-populate the dialog with the values already in metadata.
        # NOTE(review): exec assigns to the trait named by the string
        # `value`; a setattr-based approach would avoid exec — confirm
        # before changing, as `tem_par` must be in the exec scope.
        for key, value in mapping.items():
            if self.metadata.has_item(key):
                exec('%s = self.metadata.%s' % (value, key))
        tem_par.edit_traits()
        # Re-map with the (possibly edited) trait values.
        mapping = {
            'Acquisition_instrument.TEM.convergence_angle':
            tem_par.convergence_angle,
            'Acquisition_instrument.TEM.beam_energy':
            tem_par.beam_energy,
            'Acquisition_instrument.TEM.Detector.EELS.collection_angle':
            tem_par.collection_angle,
        }
        # Only write back values the user actually defined.
        for key, value in mapping.items():
            if value != t.Undefined:
                self.metadata.set_item(key, value)
        self._are_microscope_parameters_missing()
    def power_law_extrapolation(self,
                                window_size=20,
                                extrapolation_size=1024,
                                add_noise=False,
                                fix_neg_r=False):
        """Extrapolate the spectrum to the right using a powerlaw.

        Parameters
        ----------
        window_size : int
            The number of channels from the right side of the
            spectrum that are used to estimate the power law
            parameters.
        extrapolation_size : int
            Size of the extrapolation in number of channels
        add_noise : bool
            If True, add poissonian noise to the extrapolated spectrum.
        fix_neg_r : bool
            If True, the negative values for the "components.PowerLaw"
            parameter r will be flagged and the extrapolation will be
            done with a constant zero-value.

        Returns
        -------
        A new spectrum, with the extrapolation.
        """
        # NOTE(review): `add_noise` is documented but never used in this
        # implementation — confirm whether noise should be added.
        self._check_signal_dimension_equals_one()
        axis = self.axes_manager.signal_axes[0]
        s = self.deepcopy()
        s.metadata.General.title += (
            ' %i channels extrapolated' %
            extrapolation_size)
        if s.tmp_parameters.has_item('filename'):
            s.tmp_parameters.filename += (
                '_%i_channels_extrapolated' % extrapolation_size)
        # Grow the signal axis by `extrapolation_size` channels and copy
        # the original data into the left part of the new array.
        new_shape = list(self.data.shape)
        new_shape[axis.index_in_array] += extrapolation_size
        s.data = np.zeros(new_shape)
        s.get_dimensions_from_data()
        s.data[..., :axis.size] = self.data
        # Fit A * E**-r over the right-most `window_size` channels.
        pl = PowerLaw()
        pl._axes_manager = self.axes_manager
        pl.estimate_parameters(
            s, axis.index2value(axis.size - window_size),
            axis.index2value(axis.size - 1))
        if fix_neg_r is True:
            # Zero the amplitude where the fitted exponent is negative so
            # the extrapolation stays at zero instead of diverging.
            _r = pl.r.map['values']
            _A = pl.A.map['values']
            _A[_r <= 0] = 0
            pl.A.map['values'] = _A
        # If the signal is binned we need to bin the extrapolated power law
        # what, in a first approximation, can be done by multiplying by the
        # axis step size.
        if self.metadata.Signal.binned is True:
            factor = s.axes_manager[-1].scale
        else:
            factor = 1
        # Evaluate the fitted power law on the new right-hand channels.
        s.data[..., axis.size:] = (
            factor * pl.A.map['values'][..., np.newaxis] *
            s.axes_manager.signal_axes[0].axis[np.newaxis, axis.size:] ** (
                -pl.r.map['values'][..., np.newaxis]))
        return s
    def kramers_kronig_analysis(self,
                                zlp=None,
                                iterations=1,
                                n=None,
                                t=None,
                                delta=0.5,
                                full_output=False):
        """Calculate the complex
        dielectric function from a single scattering distribution (SSD) using
        the Kramers-Kronig relations.

        It uses the FFT method as in [Egerton2011]_. The SSD is an
        EELSSpectrum instance containing SSD low-loss EELS with no zero-loss
        peak. The internal loop is devised to approximately subtract the
        surface plasmon contribution supposing an unoxidized planar surface
        and neglecting coupling between the surfaces. This method does not
        account for retardation effects, instrumental broadening and surface
        plasmon excitation in particles.

        Note that either refractive index or thickness are required.
        If both are None or if both are provided an exception is raised.

        Parameters
        ----------
        zlp: {None, number, Signal1D}
            ZLP intensity. It is optional (can be None) if `t` is None and
            `n` is not None and the thickness estimation is not required. If
            `t` is not None, the ZLP is required to perform the normalization
            and if `t` is not None, the ZLP is required to calculate the
            thickness. If the ZLP is the same for all spectra, the integral
            of the ZLP can be provided as a number. Otherwise, if the ZLP
            intensity is not the same for all spectra, it can be provided as
            i) a Signal1D of the same dimensions as the current signal
            containing the ZLP spectra for each location ii) a BaseSignal of
            signal dimension 0 and navigation_dimension equal to the current
            signal containing the integrated ZLP intensity.
        iterations: int
            Number of the iterations for the internal loop to remove the
            surface plasmon contribution. If 1 the surface plasmon
            contribution is not estimated and subtracted (the default is 1).
        n: {None, float}
            The medium refractive index. Used for normalization of the
            SSD to obtain the energy loss function. If given the thickness
            is estimated and returned. It is only required when `t` is None.
        t: {None, number, Signal1D}
            The sample thickness in nm. Used for normalization of the
            SSD to obtain the energy loss function. It is only required when
            `n` is None. If the thickness is the same for all spectra it can
            be given by a number. Otherwise, it can be provided as a
            BaseSignal with signal dimension 0 and navigation_dimension
            equal to the current signal.
        delta : float
            A small number (0.1-0.5 eV) added to the energy axis in
            specific steps of the calculation the surface loss correction to
            improve stability.
        full_output : bool
            If True, return a dictionary that contains the estimated
            thickness if `t` is None and the estimated surface plasmon
            excitation and the spectrum corrected from surface plasmon
            excitations if `iterations` > 1.

        Returns
        -------
        eps: DielectricFunction instance
            The complex dielectric function results,
            $\\epsilon = \\epsilon_1 + i*\\epsilon_2$,
            contained in an DielectricFunction instance.
        output: Dictionary (optional)
            A dictionary of optional outputs with the following keys:

            ``thickness``
                The estimated thickness in nm calculated by normalization of
                the SSD (only when `t` is None)
            ``surface plasmon estimation``
                The estimated surface plasmon excitation (only if
                `iterations` > 1.)

        Raises
        ------
        ValueError
            If both `n` and `t` are undefined (None).
        AttributeError
            If the beam_energy or the collection semi-angle are not defined
            in metadata.

        Notes
        -----
        This method is based in Egerton's Matlab code [Egerton2011]_ with
        some minor differences:

        * The integrals are performed using the Simpson rule instead of
          using a summation.
        * The wrap-around problem when computing the ffts is worked around
          by padding the signal instead of subtracting the reflected tail.

        .. [Egerton2011] Ray Egerton, "Electron Energy-Loss
           Spectroscopy in the Electron Microscope", Springer-Verlag, 2011.
        """
        output = {}
        if iterations == 1:
            # In this case s.data is not modified so there is no need to make
            # a deep copy.
            s = self.isig[0.:]
        else:
            s = self.isig[0.:].deepcopy()
        sorig = self.isig[0.:]
        # Avoid singularity at 0
        if s.axes_manager.signal_axes[0].axis[0] == 0:
            s = s.isig[1:]
            sorig = self.isig[1:]
        # Constants and units
        me = constants.value(
            'electron mass energy equivalent in MeV') * 1e3  # keV
        # Mapped parameters
        # NOTE(review): the bare excepts below swallow any error, not only
        # missing-metadata lookups — consider narrowing to AttributeError.
        try:
            e0 = s.metadata.Acquisition_instrument.TEM.beam_energy
        except:
            raise AttributeError("Please define the beam energy."
                                 "You can do this e.g. by using the "
                                 "set_microscope_parameters method")
        try:
            beta = s.metadata.Acquisition_instrument.TEM.Detector.\
                EELS.collection_angle
        except:
            raise AttributeError("Please define the collection semi-angle. "
                                 "You can do this e.g. by using the "
                                 "set_microscope_parameters method")
        axis = s.axes_manager.signal_axes[0]
        eaxis = axis.axis.copy()
        # Normalize the ZLP input into `i0`: either integrated intensity
        # data broadcastable against s.data, or a plain number.
        if isinstance(zlp, hyperspy.signal.BaseSignal):
            if (zlp.axes_manager.navigation_dimension ==
                    self.axes_manager.navigation_dimension):
                if zlp.axes_manager.signal_dimension == 0:
                    i0 = zlp.data
                else:
                    i0 = zlp.integrate1D(axis.index_in_axes_manager).data
            else:
                raise ValueError('The ZLP signal dimensions are not '
                                 'compatible with the dimensions of the '
                                 'low-loss signal')
            # Insert a singleton signal axis so i0 broadcasts over energy.
            i0 = i0.reshape(
                np.insert(i0.shape, axis.index_in_array, 1))
        elif isinstance(zlp, numbers.Number):
            i0 = zlp
        else:
            raise ValueError('The zero-loss peak input is not valid, it must be\
                in the BaseSignal class or a Number.')

        if isinstance(t, hyperspy.signal.BaseSignal):
            if (t.axes_manager.navigation_dimension ==
                    self.axes_manager.navigation_dimension) and (
                    t.axes_manager.signal_dimension == 0):
                t = t.data
                t = t.reshape(
                    np.insert(t.shape, axis.index_in_array, 1))
            else:
                raise ValueError('The thickness signal dimensions are not '
                                 'compatible with the dimensions of the '
                                 'low-loss signal')
        elif isinstance(t, np.ndarray) and t.shape and t.shape != (1,):
            raise ValueError("thickness must be a HyperSpy signal or a number,"
                             " not a numpy array.")
        # Slicer to get the signal data from 0 to axis.size
        slicer = s.axes_manager._get_data_slice(
            [(axis.index_in_array, slice(None, axis.size)), ])
        # Kinetic definitions
        ke = e0 * (1 + e0 / 2. / me) / (1 + e0 / me) ** 2
        tgt = e0 * (2 * me + e0) / (me + e0)
        rk0 = 2590 * (1 + e0 / me) * np.sqrt(2 * ke / me)
        for io in range(iterations):
            # Calculation of the ELF by normalization of the SSD
            # Norm(SSD) = Imag(-1/epsilon) (Energy Loss Funtion, ELF)
            # We start by the "angular corrections"
            Im = s.data / (np.log(1 + (beta * tgt / eaxis) ** 2)) / axis.scale
            if n is None and t is None:
                raise ValueError("The thickness and the refractive index are "
                                 "not defined. Please provide one of them.")
            elif n is not None and t is not None:
                raise ValueError("Please provide the refractive index OR the "
                                 "thickness information, not both")
            elif n is not None:
                # normalize using the refractive index.
                K = (Im / eaxis).sum(axis=axis.index_in_array) * axis.scale
                K = (K / (np.pi / 2) / (1 - 1. / n ** 2)).reshape(
                    np.insert(K.shape, axis.index_in_array, 1))
                # Calculate the thickness only if possible and required
                if zlp is not None and (full_output is True or
                                        iterations > 1):
                    te = (332.5 * K * ke / i0)
                    if full_output is True:
                        output['thickness'] = te
            elif t is not None:
                if zlp is None:
                    raise ValueError("The ZLP must be provided when the "
                                     "thickness is used for normalization.")
                # normalize using the thickness
                K = t * i0 / (332.5 * ke)
                te = t
            Im = Im / K
            # Kramers Kronig Transform:
            # We calculate KKT(Im(-1/epsilon))=1+Re(1/epsilon) with FFT
            # Follows: D W Johnson 1975 J. Phys. A: Math. Gen. 8 490
            # Use a size that is a power of two to speed up the fft and
            # make it double the closest upper value to workaround the
            # wrap-around problem.
            esize = 2 * closest_power_of_two(axis.size)
            q = -2 * np.fft.fft(Im, esize,
                                axis.index_in_array).imag / esize
            q[slicer] *= -1
            q = np.fft.fft(q, axis=axis.index_in_array)
            # Final touch, we have Re(1/eps)
            Re = q[slicer].real + 1
            # Egerton does this to correct the wrap-around problem, but in our
            # case this is not necessary because we compute the fft on an
            # extended and padded spectrum to avoid this problem.
            # Re=real(q)
            # Tail correction
            # vm=Re[axis.size-1]
            # Re[:(axis.size-1)]=Re[:(axis.size-1)]+1-(0.5*vm*((axis.size-1) /
            #  (axis.size*2-arange(0,axis.size-1)))**2)
            # Re[axis.size:]=1+(0.5*vm*((axis.size-1) /
            #  (axis.size+arange(0,axis.size)))**2)
            # Epsilon appears:
            #  We calculate the real and imaginary parts of the CDF
            e1 = Re / (Re ** 2 + Im ** 2)
            e2 = Im / (Re ** 2 + Im ** 2)
            if iterations > 1 and zlp is not None:
                # Surface losses correction:
                #  Calculates the surface ELF from a vaccumm border effect
                #  A simulated surface plasmon is subtracted from the ELF
                Srfelf = 4 * e2 / ((e1 + 1) ** 2 + e2 ** 2) - Im
                adep = (tgt / (eaxis + delta) *
                        np.arctan(beta * tgt / axis.axis) -
                        beta / 1000. /
                        (beta ** 2 + axis.axis ** 2. / tgt ** 2))
                Srfint = 2000 * K * adep * Srfelf / rk0 / te * axis.scale
                s.data = sorig.data - Srfint
                _logger.debug('Iteration number: %d / %d', io + 1, iterations)
                if iterations == io + 1 and full_output is True:
                    sp = sorig._deepcopy_with_new_data(Srfint)
                    sp.metadata.General.title += (
                        " estimated surface plasmon excitation.")
                    output['surface plasmon estimation'] = sp
                    del sp
                del Srfint
        eps = s._deepcopy_with_new_data(e1 + e2 * 1j)
        del s
        eps.set_signal_type("DielectricFunction")
        # NOTE(review): no separator is inserted between the original title
        # and 'dielectric function' — confirm whether a space is intended.
        eps.metadata.General.title = (self.metadata.General.title +
                                      'dielectric function '
                                      '(from Kramers-Kronig analysis)')
        if eps.tmp_parameters.has_item('filename'):
            eps.tmp_parameters.filename = (
                self.tmp_parameters.filename +
                '_CDF_after_Kramers_Kronig_transform')
        if 'thickness' in output:
            # Drop the singleton signal axis to return a pure
            # navigation-shaped thickness map.
            thickness = eps._get_navigation_signal(
                data=te[self.axes_manager._get_data_slice(
                    [(axis.index_in_array, 0)])])
            thickness.metadata.General.title = (
                self.metadata.General.title + ' thickness '
                '(calculated using Kramers-Kronig analysis)')
            output['thickness'] = thickness
        if full_output is False:
            return eps
        else:
            return eps, output
def create_model(self, ll=None, auto_background=True, auto_add_edges=True,
GOS=None, dictionary=None):
"""Create a model for the current EELS data.
Parameters
----------
ll : EELSSpectrum, optional
If an EELSSpectrum is provided, it will be assumed that it is
a low-loss EELS spectrum, and it will be used to simulate the
effect of multiple scattering by convolving it with the EELS
spectrum.
auto_background : boolean, default True
If True, and if spectrum is an EELS instance adds automatically
a powerlaw to the model and estimate the parameters by the
two-area method.
auto_add_edges : boolean, default True
If True, and if spectrum is an EELS instance, it will
automatically add the ionization edges as defined in the
Signal1D instance. Adding a new element to the spectrum using
the components.EELSSpectrum.add_elements method automatically
add the corresponding ionisation edges to the model.
GOS : {'hydrogenic' | 'Hartree-Slater'}, optional
The generalized oscillation strenght calculations to use for the
core-loss EELS edges. If None the Hartree-Slater GOS are used if
available, otherwise it uses the hydrogenic GOS.
dictionary : {None | dict}, optional
A dictionary to be used to recreate a model. Usually generated
using :meth:`hyperspy.model.as_dictionary`
Returns
-------
model : `EELSModel` instance.
"""
from hyperspy.models.eelsmodel import EELSModel
model = EELSModel(self,
ll=ll,
auto_background=auto_background,
auto_add_edges=auto_add_edges,
GOS=GOS,
dictionary=dictionary)
return model
|
vidartf/hyperspy
|
hyperspy/_signals/eels.py
|
Python
|
gpl-3.0
| 52,216
|
[
"Gaussian"
] |
25593f2a8b2703b005b043d915fbb332754bbeef3e73e3d35d1e4b61bbb05709
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with utility functions that act on molecule objects."""
from typing import Dict, Tuple, Union
import numpy as np
import qcelemental as qcel
from psi4 import core
from psi4.driver.p4util import temp_circular_import_blocker
from psi4.driver import qcdb
from psi4.driver.p4util.exceptions import *
def molecule_set_attr(self, name, value):
    """Replacement ``__setattr__`` for the core.Molecule class.

    Names that the molecule recognizes as Psi4 variables (per its
    ``is_variable`` method) are routed through ``set_variable``;
    every other name is stored as a plain attribute.
    """
    # Look methods up via object.__getattribute__ so this works even
    # though __getattr__/__setattr__ themselves are overridden.
    is_variable = object.__getattribute__(self, "is_variable")
    if not is_variable(name):
        object.__setattr__(self, name, value)
    else:
        setter = object.__getattribute__(self, "set_variable")
        setter(name, value)
def molecule_get_attr(self, name):
    """Replacement ``__getattr__`` for the core.Molecule class.

    Names recognized as Psi4 variables are resolved through
    ``get_variable``; anything else falls back to normal attribute
    lookup (raising AttributeError if absent, as usual).
    """
    getattribute = object.__getattribute__
    if getattribute(self, "is_variable")(name):
        return getattribute(self, "get_variable")(name)
    return getattribute(self, name)
@classmethod
def _molecule_from_string(cls,
                          molstr,
                          dtype=None,
                          name=None,
                          fix_com=None,
                          fix_orientation=None,
                          fix_symmetry=None,
                          return_dict=False,
                          enable_qm=True,
                          enable_efp=True,
                          missing_enabled_return_qm='none',
                          missing_enabled_return_efp='none',
                          verbose=1):
    """Construct a Molecule from a string specification.

    Thin wrapper over :py:func:`qcelemental.molparse.from_string`; the
    parsed QM fragment is promoted to a :py:class:`psi4.core.Molecule`.
    When `return_dict` is True the full molparse dictionary is returned
    alongside the Molecule.
    """
    molrec = qcel.molparse.from_string(
        molstr=molstr,
        dtype=dtype,
        name=name,
        fix_com=fix_com,
        fix_orientation=fix_orientation,
        fix_symmetry=fix_symmetry,
        return_processed=False,
        enable_qm=enable_qm,
        enable_efp=enable_efp,
        missing_enabled_return_qm=missing_enabled_return_qm,
        missing_enabled_return_efp=missing_enabled_return_efp,
        verbose=verbose)
    mol = core.Molecule.from_dict(molrec['qm'])
    return (mol, molrec) if return_dict else mol
@classmethod
def _molecule_from_arrays(cls,
                          geom=None,
                          elea=None,
                          elez=None,
                          elem=None,
                          mass=None,
                          real=None,
                          elbl=None,
                          name=None,
                          units='Angstrom',
                          input_units_to_au=None,
                          fix_com=None,
                          fix_orientation=None,
                          fix_symmetry=None,
                          fragment_separators=None,
                          fragment_charges=None,
                          fragment_multiplicities=None,
                          molecular_charge=None,
                          molecular_multiplicity=None,
                          comment=None,
                          provenance=None,
                          connectivity=None,
                          missing_enabled_return='error',
                          tooclose=0.1,
                          zero_ghost_fragments=False,
                          nonphysical=False,
                          mtol=1.e-3,
                          verbose=1,
                          return_dict=False):
    """Construct Molecule from unvalidated arrays and variables.

    Light wrapper around :py:func:`~qcelemental.molparse.from_arrays`
    that is a full-featured constructor to dictionary representation
    of Molecule.  This follows one step further to return a Molecule
    instance.

    Parameters
    ----------
    See :py:func:`~qcelemental.molparse.from_arrays`.

    Returns
    -------
    :py:class:`psi4.core.Molecule`
    """
    # Validation and normalization are entirely delegated to molparse;
    # only the QM domain is processed here.
    molrec = qcel.molparse.from_arrays(
        domain='qm',
        geom=geom,
        elea=elea,
        elez=elez,
        elem=elem,
        mass=mass,
        real=real,
        elbl=elbl,
        name=name,
        units=units,
        input_units_to_au=input_units_to_au,
        fix_com=fix_com,
        fix_orientation=fix_orientation,
        fix_symmetry=fix_symmetry,
        fragment_separators=fragment_separators,
        fragment_charges=fragment_charges,
        fragment_multiplicities=fragment_multiplicities,
        molecular_charge=molecular_charge,
        molecular_multiplicity=molecular_multiplicity,
        comment=comment,
        provenance=provenance,
        connectivity=connectivity,
        missing_enabled_return=missing_enabled_return,
        tooclose=tooclose,
        zero_ghost_fragments=zero_ghost_fragments,
        nonphysical=nonphysical,
        mtol=mtol,
        verbose=verbose)
    mol = core.Molecule.from_dict(molrec)
    return (mol, molrec) if return_dict else mol
@classmethod
def _molecule_from_schema(cls, molschema: Dict, return_dict: bool = False, nonphysical: bool = False, verbose: int = 1) -> Union[core.Molecule, Tuple[core.Molecule, Dict]]:
    """Construct Molecule from non-Psi4 schema.

    Light wrapper around :py:func:`~psi4.core.Molecule.from_arrays`.

    Parameters
    ----------
    molschema
        Dictionary form of Molecule following known schema.
    return_dict
        Additionally return Molecule dictionary intermediate.
    nonphysical
        Do allow masses outside an element's natural range to pass
        validation?
    verbose
        Amount of printing.

    Returns
    -------
    mol : :py:class:`psi4.core.Molecule`
    molrec : dict
        Dictionary representation of instance.
        Only provided if `return_dict` is True.
    """
    molrec = qcel.molparse.from_schema(molschema, nonphysical=nonphysical, verbose=verbose)
    qmol = core.Molecule.from_dict(molrec)
    # Preserve the exact Cartesian frame as parsed, before any
    # reorientation by the Molecule machinery.
    cart = np.array(molrec["geom"]).reshape((-1, 3))
    qmol._initial_cartesian = core.Matrix.from_array(cart)
    return (qmol, molrec) if return_dict else qmol
def dynamic_variable_bind(cls):
    """Attach extra members to the core.Molecule class at import time.

    Installs the Psi4-variable-aware attribute hooks, re-exports a set
    of methods from the pure-Python qcdb.Molecule class, and binds the
    alternate constructors defined in this module.
    """
    cls.__setattr__ = molecule_set_attr
    cls.__getattr__ = molecule_get_attr
    # Methods lifted verbatim from qcdb.Molecule.
    for method in ("to_arrays", "to_dict", "BFS", "B787", "scramble",
                   "to_string", "to_schema", "run_dftd3", "run_dftd4",
                   "run_gcp", "format_molecule_for_mol"):
        setattr(cls, method, getattr(qcdb.Molecule, method))
    # Alternate constructors defined above in this module.
    cls.from_arrays = _molecule_from_arrays
    cls.from_string = _molecule_from_string
    cls.from_schema = _molecule_from_schema


dynamic_variable_bind(core.Molecule)  # pass class type, not class instance
#
# Define geometry to be used by PSI4.
# The molecule created by this will be set in options.
#
# geometry("
# O 1.0 0.0 0.0
# H 0.0 1.0 0.0
# H 0.0 0.0 0.0
#
def geometry(geom, name="default"):
    """Create a molecule object of name *name* from the geometry in
    string *geom*.  Permitted for user use but deprecated in driver in
    favor of explicit molecule-passing.  Comments within the string are
    filtered.

    Parameters
    ----------
    geom : str
        Molecule specification; may contain both QM and EFP fragments.
    name : str
        Label given to the new molecule.

    Returns
    -------
    :py:class:`psi4.core.Molecule`
        The newly constructed (and globally activated) molecule.
    """
    molrec = qcel.molparse.from_string(
        geom, enable_qm=True, missing_enabled_return_qm='minimal', enable_efp=True, missing_enabled_return_efp='none')
    molecule = core.Molecule.from_dict(molrec['qm'])
    if "geom" in molrec["qm"]:
        # Keep the as-parsed Cartesian frame (in Bohr) around for later use.
        geom = np.array(molrec["qm"]["geom"]).reshape((-1, 3))
        if molrec["qm"]["units"] == "Angstrom":
            geom = geom / qcel.constants.bohr2angstroms
        molecule._initial_cartesian = core.Matrix.from_array(geom)
    molecule.set_name(name)
    if 'efp' in molrec:
        try:
            import pylibefp
        except ImportError as e:  # py36 ModuleNotFoundError
            raise ImportError("""Install pylibefp to use EFP functionality. `conda install pylibefp -c psi4` Or build with `-DENABLE_libefp=ON`""") from e
        #print('Using pylibefp: {} (version {})'.format(pylibefp.__file__, pylibefp.__version__))
        efpobj = pylibefp.from_dict(molrec['efp'])
        # pylibefp.core.efp rides along on molecule
        molecule.EFP = efpobj
    # Attempt to go ahead and construct the molecule
    try:
        molecule.update_geometry()
    except Exception:
        # Narrowed from a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit.  An incomplete molecule (unset
        # deferred variables) is a legitimate state; just notify.
        core.print_out("Molecule: geometry: Molecule is not complete, please use 'update_geometry'\n"
                       "                    once all variables are set.\n")
    activate(molecule)
    return molecule
def activate(mol):
    """Make *mol* the current active molecule.

    Permitted for user use but deprecated in driver in favor of
    explicit molecule-passing.
    """
    # Delegates entirely to the C-side global state.
    core.set_active_molecule(mol)
|
susilehtola/psi4
|
psi4/driver/molutil.py
|
Python
|
lgpl-3.0
| 9,874
|
[
"Psi4"
] |
700d52926611a222d6541bd59a6b0ee1f3dc6ce9a30667fa0f9ce4339b66c191
|
# -*- coding: utf-8 -*-
"""
Part of the astor library for Python AST manipulation.
License: 3-clause BSD
Copyright (c) 2008 Armin Ronacher
Copyright (c) 2012-2017 Patrick Maupin
Copyright (c) 2013-2017 Berker Peksag
This module converts an AST into Python source code.
Before being version-controlled as part of astor,
this code came from here (in 2012):
https://gist.github.com/1250562
"""
import ast
import sys
from .op_util import get_op_symbol, get_op_precedence, Precedence
from .node_util import ExplicitNodeVisitor
from .string_repr import pretty_string
from .source_repr import pretty_source
def to_source(node, indent_with=' ' * 4, add_line_information=False,
              pretty_string=pretty_string, pretty_source=pretty_source):
    """Convert an AST node tree back into Python source code.

    Useful for debugging, especially with custom ASTs not generated by
    Python itself.  The generated source may be evaluable even when the
    AST is not compilable, since an AST can carry information that plain
    source cannot.

    Each indentation level is rendered as `indent_with` (default: four
    spaces, per PEP 8).  When `add_line_information` is True, comments
    carrying the nodes' line numbers are interleaved with the output,
    which helps spot wrong lineno data on statement nodes.
    """
    gen = SourceGenerator(indent_with, add_line_information, pretty_string)
    gen.visit(node)
    chunks = gen.result
    chunks.append('\n')
    # If the very first chunk is nothing but newlines, blank it so the
    # output does not start with empty lines.
    if set(chunks[0]) == set('\n'):
        chunks[0] = ''
    return pretty_source(chunks)
def precedence_setter(AST=ast.AST, get_op_precedence=get_op_precedence,
                      isinstance=isinstance, list=list):
    """Build the module-level ``set_precedence`` helper.

    A closure is used purely for performance, to cut attribute lookups:
    set_precedence is called very frequently.
    """
    def set_precedence(value, *nodes):
        """Push the (parent's) precedence *value* down into *nodes*.

        *value* may itself be an AST operator node, in which case its
        numeric precedence is looked up first.  Each target may be an
        AST node (gets ``_pp`` set), a list (recursed into), or None
        (ignored).
        """
        if isinstance(value, AST):
            value = get_op_precedence(value)
        for target in nodes:
            if isinstance(target, list):
                set_precedence(value, *target)
            elif isinstance(target, AST):
                target._pp = value
            else:
                assert target is None, target
    return set_precedence


set_precedence = precedence_setter()
class Delimit(object):
    """A context manager that can add enclosing delimiters around the
    output of a SourceGenerator method.

    By default parentheses are added, but the enclosed code may set
    discard=True to get rid of them.
    """
    # When True, __exit__ blanks out the already-written opening
    # delimiter instead of appending the closing one.
    discard = False

    def __init__(self, tree, *args):
        """ use write instead of using result directly
            for initial data, because it may flush
            preceding data into result.

        *args may contain, in any order: a two-character delimiter
        string such as '()' (defaults to parentheses), the AST node
        being rendered, and optionally a second AST node/operator used
        for the precedence comparison.
        """
        delimiters = '()'
        node = None
        op = None
        for arg in args:
            if isinstance(arg, ast.AST):
                # First AST argument is the node; a second one is the op.
                if node is None:
                    node = arg
                else:
                    op = arg
            else:
                delimiters = arg
        tree.write(delimiters[0])
        # Remember where the opening delimiter landed so it can be
        # erased later if the parentheses turn out to be redundant.
        result = self.result = tree.result
        self.index = len(result)
        self.closing = delimiters[1]
        if node is not None:
            # Delimiters are unnecessary when this node binds at least
            # as tightly as its parent context requires.
            self.p = p = get_op_precedence(op or node)
            self.pp = pp = tree.get__pp(node)
            self.discard = p >= pp

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        result = self.result
        # self.index is the position just after the opening delimiter.
        start = self.index - 1
        if self.discard:
            result[start] = ''
        else:
            result.append(self.closing)
class SourceGenerator(ExplicitNodeVisitor):
    """This visitor is able to transform a well formed syntax tree into Python
    sourcecode.

    For more details have a look at the docstring of the `node_to_source`
    function.

    NOTE: the async-statement helper flag was historically named
    ``async``; it is renamed ``is_async`` throughout because ``async``
    is a reserved keyword since Python 3.7 and the old spelling is a
    SyntaxError there.
    """
    using_unicode_literals = False

    def __init__(self, indent_with, add_line_information=False,
                 pretty_string=pretty_string,
                 # constants
                 len=len, isinstance=isinstance, callable=callable):
        self.result = []
        self.indent_with = indent_with
        self.add_line_information = add_line_information
        self.indentation = 0  # Current indentation level
        self.new_lines = 0  # Number of lines to insert before next code
        self.colinfo = 0, 0  # index in result of string containing linefeed, and
        #                      position of last linefeed in that string
        self.pretty_string = pretty_string
        AST = ast.AST
        visit = self.visit
        newline = self.newline
        result = self.result
        append = result.append

        def write(*params):
            """ self.write is a closure for performance (to reduce the number
                of attribute lookups).
            """
            for item in params:
                if isinstance(item, AST):
                    visit(item)
                elif callable(item):
                    item()
                elif item == '\n':
                    newline()
                else:
                    if self.new_lines:
                        append('\n' * self.new_lines)
                        self.colinfo = len(result), 0
                        append(self.indent_with * self.indentation)
                        self.new_lines = 0
                    if item:
                        append(item)
        self.write = write

    def __getattr__(self, name, defaults=dict(keywords=(),
                    _pp=Precedence.highest).get):
        """ Get an attribute of the node.
            like dict.get (returns None if doesn't exist)
        """
        if not name.startswith('get_'):
            raise AttributeError
        geta = getattr
        shortname = name[4:]
        default = defaults(shortname)

        def getter(node):
            return geta(node, shortname, default)
        # Cache the generated getter so __getattr__ fires only once per name.
        setattr(self, name, getter)
        return getter

    def delimit(self, *args):
        return Delimit(self, *args)

    def conditional_write(self, *stuff):
        if stuff[-1] is not None:
            self.write(*stuff)
            # Inform the caller that we wrote
            return True

    def newline(self, node=None, extra=0):
        self.new_lines = max(self.new_lines, 1 + extra)
        if node is not None and self.add_line_information:
            self.write('# line: %s' % node.lineno)
            self.new_lines = 1

    def body(self, statements):
        self.indentation += 1
        self.write(*statements)
        self.indentation -= 1

    def else_body(self, elsewhat):
        if elsewhat:
            self.write('\n', 'else:')
            self.body(elsewhat)

    def body_or_else(self, node):
        self.body(node.body)
        self.else_body(node.orelse)

    def visit_arguments(self, node):
        want_comma = []

        def write_comma():
            if want_comma:
                self.write(', ')
            else:
                want_comma.append(True)

        def loop_args(args, defaults):
            set_precedence(Precedence.Comma, defaults)
            padding = [None] * (len(args) - len(defaults))
            for arg, default in zip(args, padding + defaults):
                self.write(write_comma, arg)
                self.conditional_write('=', default)

        loop_args(node.args, node.defaults)
        self.conditional_write(write_comma, '*', node.vararg)
        kwonlyargs = self.get_kwonlyargs(node)
        if kwonlyargs:
            if node.vararg is None:
                self.write(write_comma, '*')
            loop_args(kwonlyargs, node.kw_defaults)
        self.conditional_write(write_comma, '**', node.kwarg)

    def statement(self, node, *params, **kw):
        self.newline(node)
        self.write(*params)

    def decorators(self, node, extra):
        self.newline(extra=extra)
        for decorator in node.decorator_list:
            self.statement(decorator, '@', decorator)

    def comma_list(self, items, trailing=False):
        set_precedence(Precedence.Comma, *items)
        for idx, item in enumerate(items):
            self.write(', ' if idx else '', item)
        self.write(',' if trailing else '')

    # Statements

    def visit_Assign(self, node):
        set_precedence(node, node.value, *node.targets)
        self.newline(node)
        for target in node.targets:
            self.write(target, ' = ')
        self.visit(node.value)

    def visit_AugAssign(self, node):
        set_precedence(node, node.value, node.target)
        self.statement(node, node.target, get_op_symbol(node.op, ' %s= '),
                       node.value)

    def visit_AnnAssign(self, node):
        set_precedence(node, node.target, node.annotation)
        set_precedence(Precedence.Comma, node.value)
        need_parens = isinstance(node.target, ast.Name) and not node.simple
        begin = '(' if need_parens else ''
        end = ')' if need_parens else ''
        self.statement(node, begin, node.target, end, ': ', node.annotation)
        self.conditional_write(' = ', node.value)

    def visit_ImportFrom(self, node):
        self.statement(node, 'from ', node.level * '.',
                       node.module or '', ' import ')
        self.comma_list(node.names)
        # Goofy stuff for Python 2.7 _pyio module
        if node.module == '__future__' and 'unicode_literals' in (
                x.name for x in node.names):
            self.using_unicode_literals = True

    def visit_Import(self, node):
        self.statement(node, 'import ')
        self.comma_list(node.names)

    def visit_Expr(self, node):
        set_precedence(node, node.value)
        self.statement(node)
        self.generic_visit(node)

    def visit_FunctionDef(self, node, is_async=False):
        # is_async: renamed from ``async`` (reserved word in Python 3.7+)
        prefix = 'async ' if is_async else ''
        self.decorators(node, 1 if self.indentation else 2)
        self.statement(node, '%sdef %s' % (prefix, node.name), '(')
        self.visit_arguments(node.args)
        self.write(')')
        self.conditional_write(' ->', self.get_returns(node))
        self.write(':')
        self.body(node.body)
        if not self.indentation:
            self.newline(extra=2)

    # introduced in Python 3.5
    def visit_AsyncFunctionDef(self, node):
        self.visit_FunctionDef(node, is_async=True)

    def visit_ClassDef(self, node):
        have_args = []

        def paren_or_comma():
            if have_args:
                self.write(', ')
            else:
                have_args.append(True)
                self.write('(')

        self.decorators(node, 2)
        self.statement(node, 'class %s' % node.name)
        for base in node.bases:
            self.write(paren_or_comma, base)
        # keywords not available in early version
        for keyword in self.get_keywords(node):
            self.write(paren_or_comma, keyword.arg or '',
                       '=' if keyword.arg else '**', keyword.value)
        self.conditional_write(paren_or_comma, '*', self.get_starargs(node))
        self.conditional_write(paren_or_comma, '**', self.get_kwargs(node))
        self.write(have_args and '):' or ':')
        self.body(node.body)
        if not self.indentation:
            self.newline(extra=2)

    def visit_If(self, node):
        set_precedence(node, node.test)
        self.statement(node, 'if ', node.test, ':')
        self.body(node.body)
        # Collapse single-If else-branches into elif clauses.
        while True:
            else_ = node.orelse
            if len(else_) == 1 and isinstance(else_[0], ast.If):
                node = else_[0]
                set_precedence(node, node.test)
                self.write('\n', 'elif ', node.test, ':')
                self.body(node.body)
            else:
                self.else_body(else_)
                break

    def visit_For(self, node, is_async=False):
        set_precedence(node, node.target)
        prefix = 'async ' if is_async else ''
        self.statement(node, '%sfor ' % prefix,
                       node.target, ' in ', node.iter, ':')
        self.body_or_else(node)

    # introduced in Python 3.5
    def visit_AsyncFor(self, node):
        self.visit_For(node, is_async=True)

    def visit_While(self, node):
        set_precedence(node, node.test)
        self.statement(node, 'while ', node.test, ':')
        self.body_or_else(node)

    def visit_With(self, node, is_async=False):
        prefix = 'async ' if is_async else ''
        self.statement(node, '%swith ' % prefix)
        if hasattr(node, "context_expr"):  # Python < 3.3
            self.visit_withitem(node)
        else:                              # Python >= 3.3
            self.comma_list(node.items)
        self.write(':')
        self.body(node.body)

    # new for Python 3.5
    def visit_AsyncWith(self, node):
        self.visit_With(node, is_async=True)

    # new for Python 3.3
    def visit_withitem(self, node):
        self.write(node.context_expr)
        self.conditional_write(' as ', node.optional_vars)

    def visit_NameConstant(self, node):
        self.write(str(node.value))

    def visit_Pass(self, node):
        self.statement(node, 'pass')

    def visit_Print(self, node):
        # XXX: python 2.6 only
        self.statement(node, 'print ')
        values = node.values
        if node.dest is not None:
            self.write(' >> ')
            values = [node.dest] + node.values
        self.comma_list(values, not node.nl)

    def visit_Delete(self, node):
        self.statement(node, 'del ')
        self.comma_list(node.targets)

    def visit_TryExcept(self, node):
        self.statement(node, 'try:')
        self.body(node.body)
        self.write(*node.handlers)
        self.else_body(node.orelse)

    # new for Python 3.3
    def visit_Try(self, node):
        self.statement(node, 'try:')
        self.body(node.body)
        self.write(*node.handlers)
        self.else_body(node.orelse)
        if node.finalbody:
            self.statement(node, 'finally:')
            self.body(node.finalbody)

    def visit_ExceptHandler(self, node):
        self.statement(node, 'except')
        if self.conditional_write(' ', node.type):
            self.conditional_write(' as ', node.name)
        self.write(':')
        self.body(node.body)

    def visit_TryFinally(self, node):
        self.statement(node, 'try:')
        self.body(node.body)
        self.statement(node, 'finally:')
        self.body(node.finalbody)

    def visit_Exec(self, node):
        dicts = node.globals, node.locals
        dicts = dicts[::-1] if dicts[0] is None else dicts
        self.statement(node, 'exec ', node.body)
        self.conditional_write(' in ', dicts[0])
        self.conditional_write(', ', dicts[1])

    def visit_Assert(self, node):
        set_precedence(node, node.test, node.msg)
        self.statement(node, 'assert ', node.test)
        self.conditional_write(', ', node.msg)

    def visit_Global(self, node):
        self.statement(node, 'global ', ', '.join(node.names))

    def visit_Nonlocal(self, node):
        self.statement(node, 'nonlocal ', ', '.join(node.names))

    def visit_Return(self, node):
        set_precedence(node, node.value)
        self.statement(node, 'return')
        self.conditional_write(' ', node.value)

    def visit_Break(self, node):
        self.statement(node, 'break')

    def visit_Continue(self, node):
        self.statement(node, 'continue')

    def visit_Raise(self, node):
        # XXX: Python 2.6 / 3.0 compatibility
        self.statement(node, 'raise')
        if self.conditional_write(' ', self.get_exc(node)):
            self.conditional_write(' from ', node.cause)
        elif self.conditional_write(' ', self.get_type(node)):
            set_precedence(node, node.inst)
            self.conditional_write(', ', node.inst)
            self.conditional_write(', ', node.tback)

    # Expressions

    def visit_Attribute(self, node):
        self.write(node.value, '.', node.attr)

    def visit_Call(self, node, len=len):
        write = self.write
        want_comma = []

        def write_comma():
            if want_comma:
                write(', ')
            else:
                want_comma.append(True)

        args = node.args
        keywords = node.keywords
        starargs = self.get_starargs(node)
        kwargs = self.get_kwargs(node)
        numargs = len(args) + len(keywords)
        numargs += starargs is not None
        numargs += kwargs is not None
        p = Precedence.Comma if numargs > 1 else Precedence.call_one_arg
        set_precedence(p, *args)
        self.visit(node.func)
        write('(')
        for arg in args:
            write(write_comma, arg)
        set_precedence(Precedence.Comma, *(x.value for x in keywords))
        for keyword in keywords:
            # a keyword.arg of None indicates dictionary unpacking
            # (Python >= 3.5)
            arg = keyword.arg or ''
            write(write_comma, arg, '=' if arg else '**', keyword.value)
        # 3.5 no longer has these
        self.conditional_write(write_comma, '*', starargs)
        self.conditional_write(write_comma, '**', kwargs)
        write(')')

    def visit_Name(self, node):
        self.write(node.id)

    def visit_JoinedStr(self, node):
        self.visit_Str(node, True)

    def visit_Str(self, node, is_joined=False):
        # embedded is used to control when we might want
        # to use a triple-quoted string.  We determine
        # if we are in an assignment and/or in an expression
        precedence = self.get__pp(node)
        embedded = ((precedence > Precedence.Expr) +
                    (precedence >= Precedence.Assign))
        # Flush any pending newlines, because we're about
        # to severely abuse the result list.
        self.write('')
        result = self.result
        # Calculate the string representing the line
        # we are working on, up to but not including
        # the string we are adding.
        res_index, str_index = self.colinfo
        current_line = self.result[res_index:]
        if str_index:
            current_line[0] = current_line[0][str_index:]
        current_line = ''.join(current_line)
        if is_joined:
            # Handle new f-strings.  This is a bit complicated, because
            # the tree can contain subnodes that recurse back to JoinedStr
            # subnodes...

            def recurse(node):
                for value in node.values:
                    if isinstance(value, ast.Str):
                        self.write(value.s)
                    elif isinstance(value, ast.FormattedValue):
                        with self.delimit('{}'):
                            self.visit(value.value)
                            if value.conversion != -1:
                                self.write('!%s' % chr(value.conversion))
                            if value.format_spec is not None:
                                self.write(':')
                                recurse(value.format_spec)
                    else:
                        kind = type(value).__name__
                        assert False, 'Invalid node %s inside JoinedStr' % kind

            index = len(result)
            recurse(node)
            mystr = ''.join(result[index:])
            del result[index:]
            self.colinfo = res_index, str_index  # Put it back like we found it
            uni_lit = False  # No formatted byte strings
        else:
            mystr = node.s
            uni_lit = self.using_unicode_literals
        mystr = self.pretty_string(mystr, embedded, current_line, uni_lit)
        if is_joined:
            mystr = 'f' + mystr
        self.write(mystr)
        lf = mystr.rfind('\n') + 1
        if lf:
            self.colinfo = len(result) - 1, lf

    def visit_Bytes(self, node):
        self.write(repr(node.s))

    def visit_Num(self, node,
                  # constants
                  new=sys.version_info >= (3, 0)):
        with self.delimit(node) as delimiters:
            s = repr(node.n)
            # Deal with infinities -- if detected, we can
            # generate them with 1e1000.
            signed = s.startswith('-')
            if s[signed].isalpha():
                im = s[-1] == 'j' and 'j' or ''
                assert s[signed:signed + 3] == 'inf', s
                s = '%s1e1000%s' % ('-' if signed else '', im)
            self.write(s)
            # The Python 2.x compiler merges a unary minus
            # with a number.  This is a premature optimization
            # that we deal with here...
            if not new and delimiters.discard:
                if signed:
                    pow_lhs = Precedence.Pow + 1
                    delimiters.discard = delimiters.pp != pow_lhs
                else:
                    op = self.get__p_op(node)
                    delimiters.discard = not isinstance(op, ast.USub)

    def visit_Tuple(self, node):
        with self.delimit(node) as delimiters:
            # Two things are special about tuples:
            #   1) We cannot discard the enclosing parentheses if empty
            #   2) We need the trailing comma if only one item
            elts = node.elts
            delimiters.discard = delimiters.discard and elts
            self.comma_list(elts, len(elts) == 1)

    def visit_List(self, node):
        with self.delimit('[]'):
            self.comma_list(node.elts)

    def visit_Set(self, node):
        with self.delimit('{}'):
            self.comma_list(node.elts)

    def visit_Dict(self, node):
        set_precedence(Precedence.Comma, *node.values)
        with self.delimit('{}'):
            for idx, (key, value) in enumerate(zip(node.keys, node.values)):
                self.write(', ' if idx else '',
                           key if key else '',
                           ': ' if key else '**', value)

    def visit_BinOp(self, node):
        op, left, right = node.op, node.left, node.right
        with self.delimit(node, op) as delimiters:
            # ** is right-associative, hence the asymmetric precedences.
            ispow = isinstance(op, ast.Pow)
            p = delimiters.p
            set_precedence((Precedence.Pow + 1) if ispow else p, left)
            set_precedence(Precedence.PowRHS if ispow else (p + 1), right)
            self.write(left, get_op_symbol(op, ' %s '), right)

    def visit_BoolOp(self, node):
        with self.delimit(node, node.op) as delimiters:
            op = get_op_symbol(node.op, ' %s ')
            set_precedence(delimiters.p + 1, *node.values)
            for idx, value in enumerate(node.values):
                self.write(idx and op or '', value)

    def visit_Compare(self, node):
        with self.delimit(node, node.ops[0]) as delimiters:
            set_precedence(delimiters.p + 1, node.left, *node.comparators)
            self.visit(node.left)
            for op, right in zip(node.ops, node.comparators):
                self.write(get_op_symbol(op, ' %s '), right)

    def visit_UnaryOp(self, node):
        with self.delimit(node, node.op) as delimiters:
            set_precedence(delimiters.p, node.operand)
            # In Python 2.x, a unary negative of a literal
            # number is merged into the number itself.  This
            # bit of ugliness means it is useful to know
            # what the parent operation was...
            node.operand._p_op = node.op
            sym = get_op_symbol(node.op)
            self.write(sym, ' ' if sym.isalpha() else '', node.operand)

    def visit_Subscript(self, node):
        set_precedence(node, node.slice)
        self.write(node.value, '[', node.slice, ']')

    def visit_Slice(self, node):
        set_precedence(node, node.lower, node.upper, node.step)
        self.conditional_write(node.lower)
        self.write(':')
        self.conditional_write(node.upper)
        if node.step is not None:
            self.write(':')
            if not (isinstance(node.step, ast.Name) and
                    node.step.id == 'None'):
                self.visit(node.step)

    def visit_Index(self, node):
        with self.delimit(node) as delimiters:
            set_precedence(delimiters.p, node.value)
            self.visit(node.value)

    def visit_ExtSlice(self, node):
        dims = node.dims
        set_precedence(node, *dims)
        self.comma_list(dims, len(dims) == 1)

    def visit_Yield(self, node):
        with self.delimit(node):
            set_precedence(get_op_precedence(node) + 1, node.value)
            self.write('yield')
            self.conditional_write(' ', node.value)

    # new for Python 3.3
    def visit_YieldFrom(self, node):
        with self.delimit(node):
            self.write('yield from ', node.value)

    # new for Python 3.5
    def visit_Await(self, node):
        with self.delimit(node):
            self.write('await ', node.value)

    def visit_Lambda(self, node):
        with self.delimit(node) as delimiters:
            set_precedence(delimiters.p, node.body)
            self.write('lambda ')
            self.visit_arguments(node.args)
            self.write(': ', node.body)

    def visit_Ellipsis(self, node):
        self.write('...')

    def visit_ListComp(self, node):
        with self.delimit('[]'):
            self.write(node.elt, *node.generators)

    def visit_GeneratorExp(self, node):
        with self.delimit(node) as delimiters:
            if delimiters.pp == Precedence.call_one_arg:
                delimiters.discard = True
            set_precedence(Precedence.Comma, node.elt)
            self.write(node.elt, *node.generators)

    def visit_SetComp(self, node):
        with self.delimit('{}'):
            self.write(node.elt, *node.generators)

    def visit_DictComp(self, node):
        with self.delimit('{}'):
            self.write(node.key, ': ', node.value, *node.generators)

    def visit_IfExp(self, node):
        with self.delimit(node) as delimiters:
            set_precedence(delimiters.p + 1, node.body, node.test)
            set_precedence(delimiters.p, node.orelse)
            self.write(node.body, ' if ', node.test, ' else ', node.orelse)

    def visit_Starred(self, node):
        self.write('*', node.value)

    def visit_Repr(self, node):
        # XXX: python 2.6 only
        with self.delimit('``'):
            self.visit(node.value)

    def visit_Module(self, node):
        self.write(*node.body)

    visit_Interactive = visit_Module

    def visit_Expression(self, node):
        self.visit(node.body)

    # Helper Nodes

    def visit_arg(self, node):
        self.write(node.arg)
        self.conditional_write(': ', node.annotation)

    def visit_alias(self, node):
        self.write(node.name)
        self.conditional_write(' as ', node.asname)

    def visit_comprehension(self, node):
        set_precedence(node, node.iter, *node.ifs)
        set_precedence(Precedence.comprehension_target, node.target)
        stmt = ' async for ' if self.get_is_async(node) else ' for '
        self.write(stmt, node.target, ' in ', node.iter)
        for if_ in node.ifs:
            self.write(' if ', if_)
|
ryfeus/lambda-packs
|
Keras_tensorflow_nightly/source2.7/astor/code_gen.py
|
Python
|
mit
| 27,485
|
[
"VisIt"
] |
533c7fb86faef7df17bc2f297320524551fceed8a93c50a4ae4053247e5ea78f
|
#!/usr/bin/env python
########################################################################
# $HeadURL$
########################################################################
__RCSID__ = "$Id$"
from DIRAC import exit as DIRACExit
from DIRAC.Core.Base import Script
Script.setUsageMessage( """
Remove the given file replica or a list of file replicas from the File Catalog
and from the storage.
Usage:
%s <LFN | fileContainingLFNs> SE [SE]
""" % Script.scriptName )
Script.parseCommandLine()
from DIRAC.Core.Utilities.List import sortList, breakListIntoChunks
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
dm = DataManager()
import os, sys
if len( sys.argv ) < 3:
Script.showHelp()
DIRACExit( -1 )
else:
inputFileName = sys.argv[1]
storageElementNames = sys.argv[2:]
if os.path.exists( inputFileName ):
inputFile = open( inputFileName, 'r' )
string = inputFile.read()
lfns = [ lfn.strip() for lfn in string.splitlines() ]
inputFile.close()
else:
lfns = [inputFileName]
for lfnList in breakListIntoChunks( sortList( lfns, True ), 500 ):
for storageElementName in storageElementNames:
res = dm.removeReplica( storageElementName, lfnList )
if not res['OK']:
print 'Error:', res['Message']
for lfn in sortList( res['Value']['Successful'].keys() ):
print 'Successfully removed %s replica of %s' % ( storageElementName, lfn )
for lfn in sortList( res['Value']['Failed'].keys() ):
message = res['Value']['Failed'][lfn]
print 'Error: failed to remove %s replica of %s: %s' % ( storageElementName, lfn, message )
|
sposs/DIRAC
|
DataManagementSystem/scripts/dirac-dms-remove-replicas.py
|
Python
|
gpl-3.0
| 1,637
|
[
"DIRAC"
] |
6937475fa3ef71c18c590a65a75dd47857fd1ddc6012ffe12860c7cf89862779
|
r"""XML-RPC Servers.
This module can be used to create simple XML-RPC servers
by creating a server and either installing functions, a
class instance, or by extending the SimpleXMLRPCServer
class.
It can also be used to handle XML-RPC requests in a CGI
environment using CGIXMLRPCRequestHandler.
The Doc* classes can be used to create XML-RPC servers that
serve pydoc-style documentation in response to HTTP
GET requests. This documentation is dynamically generated
based on the functions and methods registered with the
server.
A list of possible usage patterns follows:
1. Install functions:
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_function(pow)
server.register_function(lambda x,y: x+y, 'add')
server.serve_forever()
2. Install an instance:
class MyFuncs:
def __init__(self):
# make all of the sys functions available through sys.func_name
import sys
self.sys = sys
def _listMethods(self):
# implement this method so that system.listMethods
# knows to advertise the sys methods
return list_public_methods(self) + \
['sys.' + method for method in list_public_methods(self.sys)]
def pow(self, x, y): return pow(x, y)
def add(self, x, y) : return x + y
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(MyFuncs())
server.serve_forever()
3. Install an instance with custom dispatch method:
class Math:
def _listMethods(self):
# this method must be present for system.listMethods
# to work
return ['add', 'pow']
def _methodHelp(self, method):
# this method must be present for system.methodHelp
# to work
if method == 'add':
return "add(2,3) => 5"
elif method == 'pow':
return "pow(x, y[, z]) => number"
else:
# By convention, return empty
# string if no help is available
return ""
def _dispatch(self, method, params):
if method == 'pow':
return pow(*params)
elif method == 'add':
return params[0] + params[1]
else:
raise ValueError('bad method')
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(Math())
server.serve_forever()
4. Subclass SimpleXMLRPCServer:
class MathServer(SimpleXMLRPCServer):
def _dispatch(self, method, params):
try:
# We are forcing the 'export_' prefix on methods that are
# callable through XML-RPC to prevent potential security
# problems
func = getattr(self, 'export_' + method)
except AttributeError:
raise Exception('method "%s" is not supported' % method)
else:
return func(*params)
def export_add(self, x, y):
return x + y
server = MathServer(("localhost", 8000))
server.serve_forever()
5. CGI script:
server = CGIXMLRPCRequestHandler()
server.register_function(pow)
server.handle_request()
"""
# Written by Brian Quinlan (brian@sweetapp.com).
# Based on code written by Fredrik Lundh.
from xmlrpc.client import Fault, dumps, loads, gzip_encode, gzip_decode
from http.server import BaseHTTPRequestHandler
from functools import partial
from inspect import signature
import html
import http.server
import socketserver
import sys
import os
import re
import pydoc
import traceback
try:
import fcntl
except ImportError:
fcntl = None
def resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
    """resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d

    Walk a dotted attribute path starting at *obj*, refusing any path
    segment that begins with an underscore (raises AttributeError).
    With allow_dotted_names false, *attr* is treated as a single plain
    attribute name, like getattr(obj, attr).
    """
    parts = attr.split('.') if allow_dotted_names else [attr]
    for name in parts:
        # Reject private/dunder segments before touching the object.
        if name.startswith('_'):
            raise AttributeError(
                'attempt to access private attribute "%s"' % name
                )
        obj = getattr(obj, name)
    return obj
def list_public_methods(obj):
    """Returns a list of attribute strings, found in the specified
    object, which represent callable attributes"""
    public = []
    for name in dir(obj):
        # Skip private names; keep only attributes that are callable.
        if name.startswith('_'):
            continue
        if callable(getattr(obj, name)):
            public.append(name)
    return public
class SimpleXMLRPCDispatcher:
    """Mix-in class that dispatches XML-RPC requests.
    This class is used to register XML-RPC method handlers
    and then to dispatch them. This class doesn't need to be
    instanced directly when used by SimpleXMLRPCServer but it
    can be instanced when used by the MultiPathXMLRPCServer
    """
    def __init__(self, allow_none=False, encoding=None,
                 use_builtin_types=False):
        # Mapping of XML-RPC method name -> registered callable.
        self.funcs = {}
        # Optional instance whose methods act as a dispatch fallback.
        self.instance = None
        # Whether dumps() may marshal None (the <nil/> extension).
        self.allow_none = allow_none
        self.encoding = encoding or 'utf-8'
        # When true, loads() yields datetime/bytes instead of wrapper types.
        self.use_builtin_types = use_builtin_types
    def register_instance(self, instance, allow_dotted_names=False):
        """Registers an instance to respond to XML-RPC requests.
        Only one instance can be installed at a time.
        If the registered instance has a _dispatch method then that
        method will be called with the name of the XML-RPC method and
        its parameters as a tuple
        e.g. instance._dispatch('add',(2,3))
        If the registered instance does not have a _dispatch method
        then the instance will be searched to find a matching method
        and, if found, will be called. Methods beginning with an '_'
        are considered private and will not be called by
        SimpleXMLRPCServer.
        If a registered function matches an XML-RPC request, then it
        will be called instead of the registered instance.
        If the optional allow_dotted_names argument is true and the
        instance does not have a _dispatch method, method names
        containing dots are supported and resolved, as long as none of
        the name segments start with an '_'.
        *** SECURITY WARNING: ***
        Enabling the allow_dotted_names options allows intruders
        to access your module's global variables and may allow
        intruders to execute arbitrary code on your machine. Only
        use this option on a secure, closed network.
        """
        self.instance = instance
        self.allow_dotted_names = allow_dotted_names
    def register_function(self, function=None, name=None):
        """Registers a function to respond to XML-RPC requests.
        The optional name argument can be used to set a Unicode name
        for the function.
        """
        # decorator factory: @register_function(name=...) returns a partial
        # that will receive the function on the second call
        if function is None:
            return partial(self.register_function, name=name)
        if name is None:
            name = function.__name__
        self.funcs[name] = function
        # return the function so this method works as a plain decorator
        return function
    def register_introspection_functions(self):
        """Registers the XML-RPC introspection methods in the system
        namespace.
        see http://xmlrpc.usefulinc.com/doc/reserved.html
        """
        self.funcs.update({'system.listMethods' : self.system_listMethods,
                      'system.methodSignature' : self.system_methodSignature,
                      'system.methodHelp' : self.system_methodHelp})
    def register_multicall_functions(self):
        """Registers the XML-RPC multicall method in the system
        namespace.
        see http://www.xmlrpc.com/discuss/msgReader$1208"""
        self.funcs.update({'system.multicall' : self.system_multicall})
    def _marshaled_dispatch(self, data, dispatch_method = None, path = None):
        """Dispatches an XML-RPC method from marshalled (XML) data.
        XML-RPC methods are dispatched from the marshalled (XML) data
        using the _dispatch method and the result is returned as
        marshalled data. For backwards compatibility, a dispatch
        function can be provided as an argument (see comment in
        SimpleXMLRPCRequestHandler.do_POST) but overriding the
        existing method through subclassing is the preferred means
        of changing method dispatch behavior.
        """
        try:
            params, method = loads(data, use_builtin_types=self.use_builtin_types)
            # generate response
            if dispatch_method is not None:
                response = dispatch_method(method, params)
            else:
                response = self._dispatch(method, params)
            # wrap response in a singleton tuple
            response = (response,)
            response = dumps(response, methodresponse=1,
                             allow_none=self.allow_none, encoding=self.encoding)
        except Fault as fault:
            # a Fault raised by the handler is marshalled back verbatim
            response = dumps(fault, allow_none=self.allow_none,
                             encoding=self.encoding)
        except:
            # report exception back to server
            # (bare except: any failure, even a non-Exception, must still
            # produce a well-formed Fault response for the client)
            exc_type, exc_value, exc_tb = sys.exc_info()
            try:
                response = dumps(
                    Fault(1, "%s:%s" % (exc_type, exc_value)),
                    encoding=self.encoding, allow_none=self.allow_none,
                    )
            finally:
                # Break reference cycle
                exc_type = exc_value = exc_tb = None
        return response.encode(self.encoding, 'xmlcharrefreplace')
    def system_listMethods(self):
        """system.listMethods() => ['add', 'subtract', 'multiple']
        Returns a list of the methods supported by the server."""
        methods = set(self.funcs.keys())
        if self.instance is not None:
            # Instance can implement _listMethod to return a list of
            # methods
            if hasattr(self.instance, '_listMethods'):
                methods |= set(self.instance._listMethods())
            # if the instance has a _dispatch method then we
            # don't have enough information to provide a list
            # of methods
            elif not hasattr(self.instance, '_dispatch'):
                methods |= set(list_public_methods(self.instance))
        return sorted(methods)
    def system_methodSignature(self, method_name):
        """system.methodSignature('add') => [double, int, int]
        Returns a list describing the signature of the method. In the
        above example, the add method takes two integers as arguments
        and returns a double result.
        This server does NOT support system.methodSignature."""
        # See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html
        return 'signatures not supported'
    def system_methodHelp(self, method_name):
        """system.methodHelp('add') => "Adds two integers together"
        Returns a string containing documentation for the specified method."""
        method = None
        if method_name in self.funcs:
            method = self.funcs[method_name]
        elif self.instance is not None:
            # Instance can implement _methodHelp to return help for a method
            if hasattr(self.instance, '_methodHelp'):
                return self.instance._methodHelp(method_name)
            # if the instance has a _dispatch method then we
            # don't have enough information to provide help
            elif not hasattr(self.instance, '_dispatch'):
                try:
                    method = resolve_dotted_attribute(
                                self.instance,
                                method_name,
                                self.allow_dotted_names
                                )
                except AttributeError:
                    pass
        # Note that we aren't checking that the method actually
        # be a callable object of some kind
        if method is None:
            return ""
        else:
            return pydoc.getdoc(method)
    def system_multicall(self, call_list):
        """system.multicall([{'methodName': 'add', 'params': [2, 2]}, ...]) => \
[[4], ...]
        Allows the caller to package multiple XML-RPC calls into a single
        request.
        See http://www.xmlrpc.com/discuss/msgReader$1208
        """
        results = []
        for call in call_list:
            method_name = call['methodName']
            params = call['params']
            try:
                # XXX A marshalling error in any response will fail the entire
                # multicall. If someone cares they should fix this.
                # Per the multicall convention: a success is a one-element
                # list, a failure is a fault struct.
                results.append([self._dispatch(method_name, params)])
            except Fault as fault:
                results.append(
                    {'faultCode' : fault.faultCode,
                     'faultString' : fault.faultString}
                    )
            except:
                # bare except: any error becomes a generic fault entry
                exc_type, exc_value, exc_tb = sys.exc_info()
                try:
                    results.append(
                        {'faultCode' : 1,
                         'faultString' : "%s:%s" % (exc_type, exc_value)}
                        )
                finally:
                    # Break reference cycle
                    exc_type = exc_value = exc_tb = None
        return results
    def _dispatch(self, method, params):
        """Dispatches the XML-RPC method.
        XML-RPC calls are forwarded to a registered function that
        matches the called XML-RPC method name. If no such function
        exists then the call is forwarded to the registered instance,
        if available.
        If the registered instance has a _dispatch method then that
        method will be called with the name of the XML-RPC method and
        its parameters as a tuple
        e.g. instance._dispatch('add',(2,3))
        If the registered instance does not have a _dispatch method
        then the instance will be searched to find a matching method
        and, if found, will be called.
        Methods beginning with an '_' are considered private and will
        not be called.
        """
        try:
            # call the matching registered function
            func = self.funcs[method]
        except KeyError:
            pass
        else:
            if func is not None:
                return func(*params)
            # a registered name explicitly mapped to None blocks dispatch
            raise Exception('method "%s" is not supported' % method)
        if self.instance is not None:
            if hasattr(self.instance, '_dispatch'):
                # call the `_dispatch` method on the instance
                return self.instance._dispatch(method, params)
            # call the instance's method directly
            try:
                func = resolve_dotted_attribute(
                    self.instance,
                    method,
                    self.allow_dotted_names
                    )
            except AttributeError:
                pass
            else:
                if func is not None:
                    return func(*params)
        raise Exception('method "%s" is not supported' % method)
class SimpleXMLRPCRequestHandler(BaseHTTPRequestHandler):
    """Simple XML-RPC request handler class.
    Handles all HTTP POST requests and attempts to decode them as
    XML-RPC requests.
    """
    # Class attribute listing the accessible path components;
    # paths not on this list will result in a 404 error.
    rpc_paths = ('/', '/RPC2')
    #if not None, encode responses larger than this, if possible
    encode_threshold = 1400 #a common MTU
    #Override from StreamRequestHandler: full buffering of output
    #and no Nagle.
    wbufsize = -1
    disable_nagle_algorithm = True
    # a re to match a gzip Accept-Encoding
    aepattern = re.compile(r"""
                            \s* ([^\s;]+) \s*            #content-coding
                            (;\s* q \s*=\s* ([0-9\.]+))? #q
                            """, re.VERBOSE | re.IGNORECASE)
    def accept_encodings(self):
        # Parse the Accept-Encoding header into a {coding: q-value} dict;
        # a coding with no explicit q parameter defaults to 1.0.
        r = {}
        ae = self.headers.get("Accept-Encoding", "")
        for e in ae.split(","):
            match = self.aepattern.match(e)
            if match:
                v = match.group(3)
                v = float(v) if v else 1.0
                r[match.group(1)] = v
        return r
    def is_rpc_path_valid(self):
        # True when the request path is one of the allowed rpc_paths.
        if self.rpc_paths:
            return self.path in self.rpc_paths
        else:
            # If .rpc_paths is empty, just assume all paths are legal
            return True
    def do_POST(self):
        """Handles the HTTP POST request.
        Attempts to interpret all HTTP POST requests as XML-RPC calls,
        which are forwarded to the server's _dispatch method for handling.
        """
        # Check that the path is legal
        if not self.is_rpc_path_valid():
            self.report_404()
            return
        try:
            # Get arguments by reading body of request.
            # We read this in chunks to avoid straining
            # socket.read(); around the 10 or 15Mb mark, some platforms
            # begin to have problems (bug #792570).
            max_chunk_size = 10*1024*1024
            size_remaining = int(self.headers["content-length"])
            L = []
            while size_remaining:
                chunk_size = min(size_remaining, max_chunk_size)
                chunk = self.rfile.read(chunk_size)
                if not chunk:
                    break
                L.append(chunk)
                size_remaining -= len(L[-1])
            data = b''.join(L)
            data = self.decode_request_content(data)
            if data is None:
                return #response has been sent
            # In previous versions of SimpleXMLRPCServer, _dispatch
            # could be overridden in this class, instead of in
            # SimpleXMLRPCDispatcher. To maintain backwards compatibility,
            # check to see if a subclass implements _dispatch and dispatch
            # using that method if present.
            response = self.server._marshaled_dispatch(
                    data, getattr(self, '_dispatch', None), self.path
                )
        except Exception as e: # This should only happen if the module is buggy
            # internal error, report as HTTP server error
            self.send_response(500)
            # Send information about the exception if requested
            if hasattr(self.server, '_send_traceback_header') and \
                    self.server._send_traceback_header:
                self.send_header("X-exception", str(e))
                trace = traceback.format_exc()
                trace = str(trace.encode('ASCII', 'backslashreplace'), 'ASCII')
                self.send_header("X-traceback", trace)
            self.send_header("Content-length", "0")
            self.end_headers()
        else:
            self.send_response(200)
            self.send_header("Content-type", "text/xml")
            # gzip the response only when it exceeds the threshold AND the
            # client advertised gzip support in Accept-Encoding.
            if self.encode_threshold is not None:
                if len(response) > self.encode_threshold:
                    q = self.accept_encodings().get("gzip", 0)
                    if q:
                        try:
                            response = gzip_encode(response)
                            self.send_header("Content-Encoding", "gzip")
                        except NotImplementedError:
                            pass
            self.send_header("Content-length", str(len(response)))
            self.end_headers()
            self.wfile.write(response)
    def decode_request_content(self, data):
        #support gzip encoding of request
        # Returns the decoded body, or None after an error response has
        # already been sent (caller must then stop processing).
        encoding = self.headers.get("content-encoding", "identity").lower()
        if encoding == "identity":
            return data
        if encoding == "gzip":
            try:
                return gzip_decode(data)
            except NotImplementedError:
                self.send_response(501, "encoding %r not supported" % encoding)
            except ValueError:
                self.send_response(400, "error decoding gzip content")
        else:
            self.send_response(501, "encoding %r not supported" % encoding)
        self.send_header("Content-length", "0")
        self.end_headers()
    def report_404 (self):
        # Report a 404 error
        self.send_response(404)
        response = b'No such page'
        self.send_header("Content-type", "text/plain")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)
    def log_request(self, code='-', size='-'):
        """Selectively log an accepted request."""
        # Honors the server's logRequests flag set by SimpleXMLRPCServer.
        if self.server.logRequests:
            BaseHTTPRequestHandler.log_request(self, code, size)
class SimpleXMLRPCServer(socketserver.TCPServer,
                         SimpleXMLRPCDispatcher):
    """Simple XML-RPC server.

    Couples a TCP server with the XML-RPC dispatcher mix-in so that
    registered functions, or a single registered instance, can be
    invoked remotely.  Override the _dispatch method inherited from
    SimpleXMLRPCDispatcher to customize how calls are routed.
    """
    # Allow quick restarts without waiting for TIME_WAIT to expire.
    allow_reuse_address = True
    # Debugging aid only: when True, unhandled exceptions raised in
    # SimpleXMLRPCRequestHandler.do_POST are echoed back to the client in
    # X-exception/X-traceback headers.  Never enable in production -- it
    # leaks exception and stack-trace details.
    _send_traceback_header = False
    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None,
                 bind_and_activate=True, use_builtin_types=False):
        self.logRequests = logRequests
        # Set up dispatcher state first, then bind/activate the socket.
        SimpleXMLRPCDispatcher.__init__(self, allow_none=allow_none,
                                        encoding=encoding,
                                        use_builtin_types=use_builtin_types)
        socketserver.TCPServer.__init__(self, addr, requestHandler,
                                        bind_and_activate)
class MultiPathXMLRPCServer(SimpleXMLRPCServer):
    """Multipath XML-RPC Server

    Routes requests to one of several SimpleXMLRPCDispatcher instances
    keyed by HTTP request path, so two or more 'virtual XML-RPC servers'
    can share a single port.  The requestHandler's rpc_paths must accept
    every path that has a dispatcher assigned.
    """
    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None,
                 bind_and_activate=True, use_builtin_types=False):
        SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests,
                                    allow_none, encoding, bind_and_activate,
                                    use_builtin_types)
        # path -> SimpleXMLRPCDispatcher routing table
        self.dispatchers = {}
        self.allow_none = allow_none
        self.encoding = encoding or 'utf-8'
    def add_dispatcher(self, path, dispatcher):
        """Assign *dispatcher* to *path* and return it."""
        self.dispatchers[path] = dispatcher
        return dispatcher
    def get_dispatcher(self, path):
        """Return the dispatcher registered for *path* (KeyError if none)."""
        return self.dispatchers[path]
    def _marshaled_dispatch(self, data, dispatch_method = None, path = None):
        """Route marshalled data to the dispatcher registered for *path*."""
        try:
            dispatcher = self.dispatchers[path]
            return dispatcher._marshaled_dispatch(data, dispatch_method, path)
        except:
            # report low level exception back to server -- a bare except so
            # even an unknown path (KeyError) still yields a Fault response;
            # each dispatcher should have handled its own exceptions.
            exc_type, exc_value = sys.exc_info()[:2]
            try:
                fault = Fault(1, "%s:%s" % (exc_type, exc_value))
                encoded = dumps(fault, encoding=self.encoding,
                                allow_none=self.allow_none)
                encoded = encoded.encode(self.encoding, 'xmlcharrefreplace')
            finally:
                # Break reference cycle
                exc_type = exc_value = None
            return encoded
class CGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher):
    """Simple handler for XML-RPC data passed through CGI."""
    def __init__(self, allow_none=False, encoding=None, use_builtin_types=False):
        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding, use_builtin_types)
    def handle_xmlrpc(self, request_text):
        """Handle a single XML-RPC request"""
        # Dispatch, then emit CGI headers on text stdout and the marshalled
        # (bytes) response on the underlying binary buffer.
        response = self._marshaled_dispatch(request_text)
        print('Content-Type: text/xml')
        print('Content-Length: %d' % len(response))
        print()
        # flush the text layer before writing bytes so output stays ordered
        sys.stdout.flush()
        sys.stdout.buffer.write(response)
        sys.stdout.buffer.flush()
    def handle_get(self):
        """Handle a single HTTP GET request.
        Default implementation indicates an error because
        XML-RPC uses the POST method.
        """
        code = 400
        message, explain = BaseHTTPRequestHandler.responses[code]
        response = http.server.DEFAULT_ERROR_MESSAGE % \
            {
             'code' : code,
             'message' : message,
             'explain' : explain
            }
        response = response.encode('utf-8')
        print('Status: %d %s' % (code, message))
        print('Content-Type: %s' % http.server.DEFAULT_ERROR_CONTENT_TYPE)
        print('Content-Length: %d' % len(response))
        print()
        sys.stdout.flush()
        sys.stdout.buffer.write(response)
        sys.stdout.buffer.flush()
    def handle_request(self, request_text=None):
        """Handle a single XML-RPC request passed through a CGI post method.
        If no XML data is given then it is read from stdin. The resulting
        XML-RPC response is printed to stdout along with the correct HTTP
        headers.
        """
        # GET requests (per the CGI REQUEST_METHOD variable) get the
        # error page; everything else is treated as an XML-RPC POST.
        if request_text is None and \
            os.environ.get('REQUEST_METHOD', None) == 'GET':
            self.handle_get()
        else:
            # POST data is normally available through stdin
            try:
                length = int(os.environ.get('CONTENT_LENGTH', None))
            except (ValueError, TypeError):
                # missing or malformed CONTENT_LENGTH: read stdin to EOF
                length = -1
            if request_text is None:
                request_text = sys.stdin.read(length)
            self.handle_xmlrpc(request_text)
# -----------------------------------------------------------------------------
# Self documenting XML-RPC Server.
class ServerHTMLDoc(pydoc.HTMLDoc):
    """Class used to generate pydoc HTML document for a server"""
    def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
        """Mark up some plain text, given a context of symbols to look for.
        Each context dictionary maps object names to anchor names."""
        escape = escape or self.escape
        results = []
        here = 0
        # XXX Note that this regular expression does not allow for the
        # hyperlinking of arbitrary strings being used as method
        # names. Only methods with names consisting of word characters
        # and '.'s are hyperlinked.
        pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
                                r'RFC[- ]?(\d+)|'
                                r'PEP[- ]?(\d+)|'
                                r'(self\.)?((?:\w|\.)+))\b')
        while 1:
            match = pattern.search(text, here)
            if not match: break
            start, end = match.span()
            results.append(escape(text[here:start]))
            all, scheme, rfc, pep, selfdot, name = match.groups()
            if scheme:
                # Fix: quotes inside the URL must become &quot; so the
                # generated href="..." attribute stays well-formed
                # (pydoc's escape handles only &, < and >, not quotes).
                url = escape(all).replace('"', '&quot;')
                results.append('<a href="%s">%s</a>' % (url, url))
            elif rfc:
                url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif pep:
                url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif text[end:end+1] == '(':
                # a word followed by '(' is treated as a callable name
                results.append(self.namelink(name, methods, funcs, classes))
            elif selfdot:
                results.append('self.<strong>%s</strong>' % name)
            else:
                results.append(self.namelink(name, classes))
            here = end
        results.append(escape(text[here:]))
        return ''.join(results)
    def docroutine(self, object, name, mod=None,
                   funcs={}, classes={}, methods={}, cl=None):
        """Produce HTML documentation for a function or method object."""
        anchor = (cl and cl.__name__ or '') + '-' + name
        note = ''
        title = '<a name="%s"><strong>%s</strong></a>' % (
            self.escape(anchor), self.escape(name))
        if callable(object):
            argspec = str(signature(object))
        else:
            argspec = '(...)'
        if isinstance(object, tuple):
            # (argspec, docstring) pair supplied by XMLRPCDocGenerator
            argspec = object[0] or argspec
            docstring = object[1] or ""
        else:
            docstring = pydoc.getdoc(object)
        decl = title + argspec + (note and self.grey(
            '<font face="helvetica, arial">%s</font>' % note))
        doc = self.markup(
            docstring, self.preformat, funcs, classes, methods)
        doc = doc and '<dd><tt>%s</tt></dd>' % doc
        return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
    def docserver(self, server_name, package_documentation, methods):
        """Produce HTML documentation for an XML-RPC server."""
        # map each method name (and its object) to a page anchor
        fdict = {}
        for key, value in methods.items():
            fdict[key] = '#-' + key
            fdict[value] = fdict[key]
        server_name = self.escape(server_name)
        head = '<big><big><strong>%s</strong></big></big>' % server_name
        result = self.heading(head, '#ffffff', '#7799ee')
        doc = self.markup(package_documentation, self.preformat, fdict)
        doc = doc and '<tt>%s</tt>' % doc
        result = result + '<p>%s</p>\n' % doc
        contents = []
        method_items = sorted(methods.items())
        for key, value in method_items:
            contents.append(self.docroutine(value, key, funcs=fdict))
        result = result + self.bigsection(
            'Methods', '#ffffff', '#eeaa77', ''.join(contents))
        return result
class XMLRPCDocGenerator:
    """Generates documentation for an XML-RPC server.
    This class is designed as mix-in and should not
    be constructed directly.
    """
    def __init__(self):
        # setup variables used for HTML documentation
        self.server_name = 'XML-RPC Server Documentation'
        self.server_documentation = \
            "This server exports the following methods through the XML-RPC "\
            "protocol."
        self.server_title = 'XML-RPC Server Documentation'
    def set_server_title(self, server_title):
        """Set the HTML title of the generated server documentation"""
        self.server_title = server_title
    def set_server_name(self, server_name):
        """Set the name of the generated HTML server documentation"""
        self.server_name = server_name
    def set_server_documentation(self, server_documentation):
        """Set the documentation string for the entire server."""
        self.server_documentation = server_documentation
    def generate_html_documentation(self):
        """generate_html_documentation() => html documentation for the server
        Generates HTML documentation for the server using introspection for
        installed functions and instances that do not implement the
        _dispatch method. Alternatively, instances can choose to implement
        the _get_method_argstring(method_name) method to provide the
        argument string used in the documentation and the
        _methodHelp(method_name) method to provide the help text used
        in the documentation."""
        methods = {}
        # For each advertised method, resolve either the callable itself or
        # an (argspec, docstring) tuple describing it.
        for method_name in self.system_listMethods():
            if method_name in self.funcs:
                # explicitly registered function: document it directly
                method = self.funcs[method_name]
            elif self.instance is not None:
                method_info = [None, None] # argspec, documentation
                if hasattr(self.instance, '_get_method_argstring'):
                    method_info[0] = self.instance._get_method_argstring(method_name)
                if hasattr(self.instance, '_methodHelp'):
                    method_info[1] = self.instance._methodHelp(method_name)
                method_info = tuple(method_info)
                if method_info != (None, None):
                    # the instance supplied explicit argspec/help text
                    method = method_info
                elif not hasattr(self.instance, '_dispatch'):
                    # no explicit info and no custom dispatch: try to find
                    # the real attribute so its signature can be inspected
                    try:
                        method = resolve_dotted_attribute(
                                    self.instance,
                                    method_name
                                    )
                    except AttributeError:
                        method = method_info
                else:
                    # custom _dispatch hides the real callable; fall back to
                    # the empty (None, None) info tuple
                    method = method_info
            else:
                assert 0, "Could not find method in self.functions and no "\
                          "instance installed"
            methods[method_name] = method
        documenter = ServerHTMLDoc()
        documentation = documenter.docserver(
                                self.server_name,
                                self.server_documentation,
                                methods
                            )
        return documenter.page(html.escape(self.server_title), documentation)
class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
    """XML-RPC and documentation request handler class.

    POST requests are decoded as XML-RPC calls (inherited behavior);
    GET requests are answered with the server's generated HTML
    documentation.
    """
    def do_GET(self):
        """Handles the HTTP GET request.

        Every GET on a legal RPC path returns the server documentation.
        """
        # Illegal paths get the standard 404 reply.
        if not self.is_rpc_path_valid():
            self.report_404()
            return
        body = self.server.generate_html_documentation().encode('utf-8')
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.send_header("Content-length", str(len(body)))
        self.end_headers()
        self.wfile.write(body)
class DocXMLRPCServer( SimpleXMLRPCServer,
                        XMLRPCDocGenerator):
    """XML-RPC server that also serves HTML documentation.

    Combines SimpleXMLRPCServer's RPC handling with XMLRPCDocGenerator's
    pydoc-style documentation, served on GET requests by the default
    DocXMLRPCRequestHandler.
    """
    def __init__(self, addr, requestHandler=DocXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None,
                 bind_and_activate=True, use_builtin_types=False):
        # Initialize the RPC server first, then the documentation state.
        SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests,
                                    allow_none, encoding, bind_and_activate,
                                    use_builtin_types)
        XMLRPCDocGenerator.__init__(self)
class DocCGIXMLRPCRequestHandler( CGIXMLRPCRequestHandler,
                                  XMLRPCDocGenerator):
    """Handler for XML-RPC data and documentation requests passed through
    CGI"""
    def __init__(self):
        CGIXMLRPCRequestHandler.__init__(self)
        XMLRPCDocGenerator.__init__(self)
    def handle_get(self):
        """Handles the HTTP GET request.

        Answers every GET with the generated HTML server documentation,
        emitting CGI headers on stdout and the body on its binary buffer.
        """
        body = self.generate_html_documentation().encode('utf-8')
        print('Content-Type: text/html')
        print('Content-Length: %d' % len(body))
        print()
        # flush the text layer before writing raw bytes
        sys.stdout.flush()
        sys.stdout.buffer.write(body)
        sys.stdout.buffer.flush()
if __name__ == '__main__':
    # Demo: a small server exposing pow, add, multicall and an instance
    # whose nested attributes are reachable via dotted method names.
    import datetime
    class ExampleService:
        def getData(self):
            return '42'
        class currentTime:
            @staticmethod
            def getCurrentTime():
                return datetime.datetime.now()
    def _add(x, y):
        return x + y
    with SimpleXMLRPCServer(("localhost", 8000)) as server:
        server.register_function(pow)
        server.register_function(_add, 'add')
        server.register_instance(ExampleService(), allow_dotted_names=True)
        server.register_multicall_functions()
        print('Serving XML-RPC on localhost port 8000')
        print('It is advisable to run this example server within a secure, closed network.')
        try:
            server.serve_forever()
        except KeyboardInterrupt:
            print("\nKeyboard interrupt received, exiting.")
            sys.exit(0)
|
xyuanmu/XX-Net
|
python3.8.2/Lib/xmlrpc/server.py
|
Python
|
bsd-2-clause
| 36,665
|
[
"Brian"
] |
ae2017eb9eb0403bf04a51f3ccf17c73aa4ec47d120779f80e2f6c2beba3268b
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.