| text (string, 12-1.05M chars) | repo_name (string, 5-86) | path (string, 4-191) | language (1 class) | license (15 classes) | size (int32, 12-1.05M) | keyword (list, 1-23 items) | text_hash (string, 64) |
|---|---|---|---|---|---|---|---|
"""
Copyright (c) 2014 Brian Muller
Copyright (c) 2015 OpenBazaar
"""
import heapq
from operator import itemgetter
from protos import objects
class Node:
def __init__(self, id, ip=None, port=None, signed_pubkey=None,
vendor=False):
self.id = id
self.ip = ip
self.port = port
self.signed_pubkey = signed_pubkey
self.vendor = vendor
self.long_id = long(id.encode('hex'), 16)
def getProto(self):
n = objects.Node()
n.guid = self.id
n.signedPublicKey = self.signed_pubkey
n.vendor = self.vendor
if self.ip is not None: n.ip = self.ip
if self.port is not None: n.port = self.port
return n
def sameHomeAs(self, node):
return self.ip == node.ip and self.port == node.port
def distanceTo(self, node):
"""
Get the distance between this node and another.
"""
return self.long_id ^ node.long_id
def __iter__(self):
"""
Enables use of Node as a tuple - i.e., tuple(node) works.
"""
return iter([self.id, self.ip, self.port])
def __repr__(self):
return repr([self.long_id, self.ip, self.port])
def __str__(self):
return "%s:%s" % (self.ip, str(self.port))
class NodeHeap(object):
"""
A heap of nodes ordered by distance to a given node.
"""
def __init__(self, node, maxsize):
"""
Constructor.
@param node: The node to measure all distances from.
@param maxsize: The maximum size that this heap can grow to.
"""
self.node = node
self.heap = []
self.contacted = set()
self.maxsize = maxsize
def remove(self, peerIDs):
"""
Remove a list of peer ids from this heap. Note that while this
heap retains a constant visible size (based on the iterator), its
actual size may be quite a bit larger than what's exposed. Therefore,
removal of nodes may not change the visible size, as previously added
nodes suddenly become visible.
"""
peerIDs = set(peerIDs)
if len(peerIDs) == 0:
return
nheap = []
for distance, node in self.heap:
if node.id not in peerIDs:
heapq.heappush(nheap, (distance, node))
self.heap = nheap
def getNodeById(self, id):
for _, node in self.heap:
if node.id == id:
return node
return None
def allBeenContacted(self):
return len(self.getUncontacted()) == 0
def getIDs(self):
return [n.id for n in self]
def markContacted(self, node):
self.contacted.add(node.id)
def popleft(self):
if len(self) > 0:
return heapq.heappop(self.heap)[1]
return None
def push(self, nodes):
"""
Push nodes onto heap.
@param nodes: This can be a single item or a C{list}.
"""
if not isinstance(nodes, list):
nodes = [nodes]
for node in nodes:
if node not in self:
distance = self.node.distanceTo(node)
heapq.heappush(self.heap, (distance, node))
def __len__(self):
return min(len(self.heap), self.maxsize)
def __iter__(self):
nodes = heapq.nsmallest(self.maxsize, self.heap)
return iter(map(itemgetter(1), nodes))
def __contains__(self, node):
for distance, n in self.heap:
if node.id == n.id:
return True
return False
def getUncontacted(self):
return [n for n in self if n.id not in self.contacted]
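# --- Usage sketch (illustrative, not part of the original module) ---
# Node ids are raw byte strings; distanceTo() is the Kademlia XOR metric
# on their integer values, and NodeHeap exposes only the maxsize closest
# peers through iteration:
#
#     me = Node('a' * 20)
#     heap = NodeHeap(me, maxsize=3)
#     heap.push([Node(chr(i) * 20) for i in range(1, 6)])
#     closest = list(heap)                 # only the 3 nearest are visible
#     heap.markContacted(heap.popleft())   # pop the nearest, mark contacted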
| jorik041/Network | dht/node.py | Python | mit | 3,684 | ["Brian"] | 5776d4b759dfdf69168c32f3fdf95cd6643c5555548ddadf47fc8d7738a616b4 |
#!/usr/bin/env python
import sys
import tornado
from DIRAC import gConfig, S_OK
from DIRAC.Core.Base import Script
from DIRAC.ConfigurationSystem.Client.LocalConfiguration import LocalConfiguration
from DIRAC.FrameworkSystem.Client.Logger import gLogger
from WebAppDIRAC.Core.App import App
if __name__ == "__main__":
def disableDevMode( op ):
gConfig.setOptionValue( "/WebApp/DevelopMode", "False" )
return S_OK()
localCfg = LocalConfiguration()
localCfg.setConfigurationForWeb( "WebApp" )
localCfg.addMandatoryEntry( "/DIRAC/Setup" )
localCfg.addDefaultEntry( "/DIRAC/Security/UseServerCertificate", "yes" )
localCfg.addDefaultEntry( "LogLevel", "INFO" )
localCfg.addDefaultEntry( "LogColor", True )
localCfg.registerCmdOpt( "p", "production", "Enable production mode", disableDevMode )
result = localCfg.loadUserData()
if not result[ 'OK' ]:
gLogger.initialize( "WebApp", "/" )
gLogger.fatal( "There were errors when loading configuration", result[ 'Message' ] )
sys.exit( 1 )
app = App()
result = app.bootstrap()
if not result[ 'OK' ]:
gLogger.fatal( result[ 'Message' ] )
sys.exit( 1 )
app.run()
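# Illustrative invocation (assuming a configured DIRAC client installation):
#
#     dirac-webapp-run -p
#
# where "-p/--production" disables /WebApp/DevelopMode, as registered via
# registerCmdOpt() above.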
| zmathe/WebAppDIRAC | scripts/dirac-webapp-run.py | Python | gpl-3.0 | 1,166 | ["DIRAC"] | 526dd6705325bfc567c21f82687c65195c1688673cf44d82dd46c3ac1f232bc9 |
# Setup script for megaman: scalable manifold learning
# LICENSE: Simplified BSD https://github.com/mmp2/megaman/blob/master/LICENSE
import io
import os
import re
import sys
import subprocess
PY2 = sys.version_info[0] == 2
PY3 = not PY2
if PY3:
import importlib.machinery
def read(path, encoding='utf-8'):
path = os.path.join(os.path.dirname(__file__), path)
with io.open(path, encoding=encoding) as fp:
return fp.read()
def version(path):
"""Obtain the packge version from a python file e.g. pkg/__init__.py
See <https://packaging.python.org/en/latest/single_source_version.html>.
"""
version_file = read(path)
version_match = re.search(r"""^__version__ = ['"]([^'"]*)['"]""",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
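# Illustrative example (not part of the original setup script): the regex
# above matches a single-sourced version string such as the following
# hypothetical line in megaman/__init__.py:
#
#     __version__ = '0.1.0'
#
# so version('megaman/__init__.py') would return '0.1.0'.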
def generate_cython():
cwd = os.path.abspath(os.path.dirname(__file__))
print("Cythonizing sources")
p = subprocess.call([sys.executable,
os.path.join(cwd, 'tools', 'cythonize.py'),
'megaman'],
cwd=cwd)
if p != 0:
raise RuntimeError("Running cythonize failed!")
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('megaman')
return config
DESCRIPTION = "megaman: Manifold Learning for Millions of Points"
LONG_DESCRIPTION = """
megaman: Manifold Learning for Millions of Points
=================================================
This repository contains a scalable implementation of several manifold learning
algorithms, making use of FLANN for fast approximate nearest neighbors and
PyAMG, LOBPCG, ARPACK, and other routines for fast matrix decompositions.
For more information, visit https://github.com/mmp2/megaman
"""
NAME = "megaman"
AUTHOR = "Marina Meila"
AUTHOR_EMAIL = "mmp@stat.washington.delete_this.edu"
URL = 'https://github.com/mmp2/megaman'
DOWNLOAD_URL = 'https://github.com/mmp2/megaman'
LICENSE = 'BSD 3'
VERSION = version('megaman/__init__.py')
def setup_package():
from numpy.distutils.core import setup
old_path = os.getcwd()
local_path = os.path.dirname(os.path.abspath(sys.argv[0]))
src_path = local_path
# Run build from the source directory; old_path keeps the original cwd
# so the finally-block below can restore it.
os.chdir(src_path)
sys.path.insert(0, src_path)
cwd = os.path.abspath(os.path.dirname(__file__))
if not os.path.exists(os.path.join(cwd, 'PKG-INFO')):
# Generate Cython sources, unless building from source release
generate_cython()
try:
setup(name='megaman',
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
download_url=DOWNLOAD_URL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
version=VERSION,
license=LICENSE,
configuration=configuration,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5'])
finally:
del sys.path[0]
os.chdir(old_path)
return
if __name__ == '__main__':
setup_package()
| jmcq89/megaman | setup.py | Python | bsd-2-clause | 3,912 | ["VisIt"] | 66a2a18fd324bb1c1e0a9d1a3999152e1ad5d99afc4c35593a0cb249378cec4f |
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output
@click.command('get_current_user')
@pass_context
@custom_exception
@json_output
def cli(ctx):
"""Display information about the user associated with this Galaxy connection.
Output:
a dictionary containing information about the current user
"""
return ctx.gi.users.get_current_user()
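# Illustrative shell usage (assuming parsec is installed and a Galaxy
# connection is configured):
#
#     $ parsec users get_current_user
#     {"id": "...", "username": "...", ...}   # JSON emitted by @json_output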
| galaxy-iuc/parsec | parsec/commands/users/get_current_user.py | Python | apache-2.0 | 423 | ["Galaxy"] | 1c7fe37a3a0b95e3fa913c0775bdab3a15ca26c8dd0fed07ae3381454ce63de3 |
# -*- encoding:utf-8 -*-
from __future__ import division, absolute_import, print_function
import sys, textwrap
from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
from numpydoc.docscrape_sphinx import SphinxDocString, SphinxClassDoc
from nose.tools import *
if sys.version_info[0] >= 3:
sixu = lambda s: s
else:
sixu = lambda s: unicode(s, 'unicode_escape')
doc_txt = '''\
numpy.multivariate_normal(mean, cov, shape=None, spam=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N, N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
list of str
This is not a real return value. It exists to test
anonymous return values.
Other Parameters
----------------
spam : parrot
A parrot off its mortal coil.
Raises
------
RuntimeError
Some error
Warns
-----
RuntimeWarning
Some warning
Warnings
--------
Certain warnings apply.
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
See Also
--------
some, other, funcs
otherfunc : relationship
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss
'''
doc = NumpyDocString(doc_txt)
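# A quick illustration (not part of the original suite) of the mapping
# interface the tests below exercise:
#
#     doc['Signature']        # -> 'numpy.multivariate_normal(mean, cov, ...)'
#     doc['Parameters'][0]    # -> ('mean', '(N,) ndarray', ['Mean of the ...'])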
def test_signature():
assert doc['Signature'].startswith('numpy.multivariate_normal(')
assert doc['Signature'].endswith('spam=None)')
def test_summary():
assert doc['Summary'][0].startswith('Draw values')
assert doc['Summary'][-1].endswith('covariance.')
def test_extended_summary():
assert doc['Extended Summary'][0].startswith('The multivariate normal')
def test_parameters():
assert_equal(len(doc['Parameters']), 3)
assert_equal([n for n,_,_ in doc['Parameters']], ['mean','cov','shape'])
arg, arg_type, desc = doc['Parameters'][1]
assert_equal(arg_type, '(N, N) ndarray')
assert desc[0].startswith('Covariance matrix')
assert doc['Parameters'][0][-1][-2] == ' (1+2+3)/3'
def test_other_parameters():
assert_equal(len(doc['Other Parameters']), 1)
assert_equal([n for n,_,_ in doc['Other Parameters']], ['spam'])
arg, arg_type, desc = doc['Other Parameters'][0]
assert_equal(arg_type, 'parrot')
assert desc[0].startswith('A parrot off its mortal coil')
def test_returns():
assert_equal(len(doc['Returns']), 2)
arg, arg_type, desc = doc['Returns'][0]
assert_equal(arg, 'out')
assert_equal(arg_type, 'ndarray')
assert desc[0].startswith('The drawn samples')
assert desc[-1].endswith('distribution.')
arg, arg_type, desc = doc['Returns'][1]
assert_equal(arg, 'list of str')
assert_equal(arg_type, '')
assert desc[0].startswith('This is not a real')
assert desc[-1].endswith('anonymous return values.')
def test_notes():
assert doc['Notes'][0].startswith('Instead')
assert doc['Notes'][-1].endswith('definite.')
assert_equal(len(doc['Notes']), 17)
def test_references():
assert doc['References'][0].startswith('..')
assert doc['References'][-1].endswith('2001.')
def test_examples():
assert doc['Examples'][0].startswith('>>>')
assert doc['Examples'][-1].endswith('True]')
def test_index():
assert_equal(doc['index']['default'], 'random')
assert_equal(len(doc['index']), 2)
assert_equal(len(doc['index']['refguide']), 2)
def non_blank_line_by_line_compare(a,b):
a = textwrap.dedent(a)
b = textwrap.dedent(b)
a = [l.rstrip() for l in a.split('\n') if l.strip()]
b = [l.rstrip() for l in b.split('\n') if l.strip()]
for n,line in enumerate(a):
if not line == b[n]:
raise AssertionError("Lines %s of a and b differ: "
"\n>>> %s\n<<< %s\n" %
(n,line,b[n]))
def test_str():
non_blank_line_by_line_compare(str(doc),
"""numpy.multivariate_normal(mean, cov, shape=None, spam=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N, N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
list of str
This is not a real return value. It exists to test
anonymous return values.
Other Parameters
----------------
spam : parrot
A parrot off its mortal coil.
Raises
------
RuntimeError
Some error
Warns
-----
RuntimeWarning
Some warning
Warnings
--------
Certain warnings apply.
See Also
--------
`some`_, `other`_, `funcs`_
`otherfunc`_
relationship
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss""")
def test_sphinx_str():
sphinx_doc = SphinxDocString(doc_txt)
non_blank_line_by_line_compare(str(sphinx_doc),
"""
.. index:: random
single: random;distributions, random;gauss
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
:Parameters:
**mean** : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
**cov** : (N, N) ndarray
Covariance matrix of the distribution.
**shape** : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
:Returns:
**out** : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
list of str
This is not a real return value. It exists to test
anonymous return values.
:Other Parameters:
**spam** : parrot
A parrot off its mortal coil.
:Raises:
**RuntimeError**
Some error
:Warns:
**RuntimeWarning**
Some warning
.. warning::
Certain warnings apply.
.. seealso::
:obj:`some`, :obj:`other`, :obj:`funcs`
:obj:`otherfunc`
relationship
.. rubric:: Notes
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
.. rubric:: References
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
.. only:: latex
[1]_, [2]_
.. rubric:: Examples
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
""")
doc2 = NumpyDocString("""
Returns array of indices of the maximum values of along the given axis.
Parameters
----------
a : {array_like}
Array to look in.
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis""")
def test_parameters_without_extended_description():
assert_equal(len(doc2['Parameters']), 2)
doc3 = NumpyDocString("""
my_signature(*params, **kwds)
Return this and that.
""")
def test_escape_stars():
signature = str(doc3).split('\n')[0]
assert_equal(signature, 'my_signature(\*params, \*\*kwds)')
doc4 = NumpyDocString(
"""a.conj()
Return an array with all complex-valued elements conjugated.""")
def test_empty_extended_summary():
assert_equal(doc4['Extended Summary'], [])
doc5 = NumpyDocString(
"""
a.something()
Raises
------
LinAlgException
If array is singular.
Warns
-----
SomeWarning
If needed
""")
def test_raises():
assert_equal(len(doc5['Raises']), 1)
name,_,desc = doc5['Raises'][0]
assert_equal(name,'LinAlgException')
assert_equal(desc,['If array is singular.'])
def test_warns():
assert_equal(len(doc5['Warns']), 1)
name,_,desc = doc5['Warns'][0]
assert_equal(name,'SomeWarning')
assert_equal(desc,['If needed'])
def test_see_also():
doc6 = NumpyDocString(
"""
z(x,theta)
See Also
--------
func_a, func_b, func_c
func_d : some equivalent func
foo.func_e : some other func over
multiple lines
func_f, func_g, :meth:`func_h`, func_j,
func_k
:obj:`baz.obj_q`
:class:`class_j`: fubar
foobar
""")
assert len(doc6['See Also']) == 12
for func, desc, role in doc6['See Also']:
if func in ('func_a', 'func_b', 'func_c', 'func_f',
'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'):
assert(not desc)
else:
assert(desc)
if func == 'func_h':
assert role == 'meth'
elif func == 'baz.obj_q':
assert role == 'obj'
elif func == 'class_j':
assert role == 'class'
else:
assert role is None
if func == 'func_d':
assert desc == ['some equivalent func']
elif func == 'foo.func_e':
assert desc == ['some other func over', 'multiple lines']
elif func == 'class_j':
assert desc == ['fubar', 'foobar']
def test_see_also_print():
class Dummy(object):
"""
See Also
--------
func_a, func_b
func_c : some relationship
goes here
func_d
"""
pass
obj = Dummy()
s = str(FunctionDoc(obj, role='func'))
assert(':func:`func_a`, :func:`func_b`' in s)
assert(' some relationship' in s)
assert(':func:`func_d`' in s)
doc7 = NumpyDocString("""
Doc starts on second line.
""")
def test_empty_first_line():
assert doc7['Summary'][0].startswith('Doc starts')
def test_no_summary():
str(SphinxDocString("""
Parameters
----------"""))
def test_unicode():
doc = SphinxDocString("""
öäöäöäöäöåååå
öäöäöäööäååå
Parameters
----------
ååå : äää
ööö
Returns
-------
ååå : ööö
äää
""")
assert isinstance(doc['Summary'][0], str)
assert doc['Summary'][0] == 'öäöäöäöäöåååå'
def test_plot_examples():
cfg = dict(use_plots=True)
doc = SphinxDocString("""
Examples
--------
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3],[4,5,6])
>>> plt.show()
""", config=cfg)
assert 'plot::' in str(doc), str(doc)
doc = SphinxDocString("""
Examples
--------
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3],[4,5,6])
plt.show()
""", config=cfg)
assert str(doc).count('plot::') == 1, str(doc)
def test_class_members():
class Dummy(object):
"""
Dummy class.
"""
def spam(self, a, b):
"""Spam\n\nSpam spam."""
pass
def ham(self, c, d):
"""Cheese\n\nNo cheese."""
pass
@property
def spammity(self):
"""Spammity index"""
return 0.95
class Ignorable(object):
"""local class, to be ignored"""
pass
for cls in (ClassDoc, SphinxClassDoc):
doc = cls(Dummy, config=dict(show_class_members=False))
assert 'Methods' not in str(doc), (cls, str(doc))
assert 'spam' not in str(doc), (cls, str(doc))
assert 'ham' not in str(doc), (cls, str(doc))
assert 'spammity' not in str(doc), (cls, str(doc))
assert 'Spammity index' not in str(doc), (cls, str(doc))
doc = cls(Dummy, config=dict(show_class_members=True))
assert 'Methods' in str(doc), (cls, str(doc))
assert 'spam' in str(doc), (cls, str(doc))
assert 'ham' in str(doc), (cls, str(doc))
assert 'spammity' in str(doc), (cls, str(doc))
if cls is SphinxClassDoc:
assert '.. autosummary::' in str(doc), str(doc)
else:
assert 'Spammity index' in str(doc), str(doc)
class SubDummy(Dummy):
"""
Subclass of Dummy class.
"""
def ham(self, c, d):
"""Cheese\n\nNo cheese.\nOverloaded Dummy.ham"""
pass
def bar(self, a, b):
"""Bar\n\nNo bar"""
pass
for cls in (ClassDoc, SphinxClassDoc):
doc = cls(SubDummy, config=dict(show_class_members=True,
show_inherited_class_members=False))
assert 'Methods' in str(doc), (cls, str(doc))
assert 'spam' not in str(doc), (cls, str(doc))
assert 'ham' in str(doc), (cls, str(doc))
assert 'bar' in str(doc), (cls, str(doc))
assert 'spammity' not in str(doc), (cls, str(doc))
if cls is SphinxClassDoc:
assert '.. autosummary::' in str(doc), str(doc)
else:
assert 'Spammity index' not in str(doc), str(doc)
doc = cls(SubDummy, config=dict(show_class_members=True,
show_inherited_class_members=True))
assert 'Methods' in str(doc), (cls, str(doc))
assert 'spam' in str(doc), (cls, str(doc))
assert 'ham' in str(doc), (cls, str(doc))
assert 'bar' in str(doc), (cls, str(doc))
assert 'spammity' in str(doc), (cls, str(doc))
if cls is SphinxClassDoc:
assert '.. autosummary::' in str(doc), str(doc)
else:
assert 'Spammity index' in str(doc), str(doc)
def test_duplicate_signature():
# Duplicate function signatures occur e.g. in ufuncs, when the
# automatic mechanism adds one, and a more detailed one comes from the
# docstring itself.
doc = NumpyDocString(
"""
z(x1, x2)
z(a, theta)
""")
assert doc['Signature'].strip() == 'z(a, theta)'
class_doc_txt = """
Foo
Parameters
----------
f : callable ``f(t, y, *f_args)``
Aaa.
jac : callable ``jac(t, y, *jac_args)``
Bbb.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
Methods
-------
a
b
c
Examples
--------
For usage examples, see `ode`.
"""
def test_class_members_doc():
doc = ClassDoc(None, class_doc_txt)
non_blank_line_by_line_compare(str(doc),
"""
Foo
Parameters
----------
f : callable ``f(t, y, *f_args)``
Aaa.
jac : callable ``jac(t, y, *jac_args)``
Bbb.
Examples
--------
For usage examples, see `ode`.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
Methods
-------
a
b
c
.. index::
""")
def test_class_members_doc_sphinx():
doc = SphinxClassDoc(None, class_doc_txt)
non_blank_line_by_line_compare(str(doc),
"""
Foo
:Parameters:
**f** : callable ``f(t, y, *f_args)``
Aaa.
**jac** : callable ``jac(t, y, *jac_args)``
Bbb.
.. rubric:: Examples
For usage examples, see `ode`.
.. rubric:: Attributes
=== ==========
t (float) Current time.
y (ndarray) Current variable values.
=== ==========
.. rubric:: Methods
=== ==========
a
b
c
=== ==========
""")
if __name__ == "__main__":
import nose
nose.run()
| rubennj/pvlib-python | docs/sphinx/sphinxext/numpydoc/tests/test_docscrape.py | Python | bsd-3-clause | 19,792 | ["Gaussian"] | 608014a5a6ddb640e2a98311e98d0027c5c99abcbfacb6b44cacf551d798d9d2 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Brian C. Lane <bcl@redhat.com>
# Ignore any interruptible calls
# pylint: disable=interruptible-system-call
from pyanaconda.simpleconfig import SimpleConfigFile
from pyanaconda import simpleconfig
import unittest
import tempfile
class SimpleConfigTests(unittest.TestCase):
TEST_CONFIG = """ESSID="Example Network #1"
ESSID2="Network #2" # With a comment
COMMENT="Save this string" # Strip this comment
#SKIP=Skip this commented line
BOOT=always
KEY=VALUE # Comment "with quotes"
KEY2="A single ' inside" # And comment "with quotes"
"""
def comment_test(self):
with tempfile.NamedTemporaryFile(mode="wt") as testconfig:
testconfig.write(self.TEST_CONFIG)
testconfig.flush()
config = SimpleConfigFile(testconfig.name)
config.read()
self.assertEqual(config.get("ESSID"), "Example Network #1")
self.assertEqual(config.get("ESSID2"), "Network #2")
self.assertEqual(config.get("COMMENT"), "Save this string")
self.assertEqual(str(config), self.TEST_CONFIG)
def unquote_test(self):
self.assertEqual(simpleconfig.unquote("plain string"), "plain string")
self.assertEqual(simpleconfig.unquote('"double quote"'), "double quote")
self.assertEqual(simpleconfig.unquote("'single quote'"), "single quote")
def quote_test(self):
self.assertEqual(simpleconfig.quote("nospaces"), "nospaces")
self.assertEqual(simpleconfig.quote("plain string"), '"plain string"')
self.assertEqual(simpleconfig.quote("alwaysquote", always=True), '"alwaysquote"')
def set_and_get_test(self):
"""Setting and getting values"""
scf = SimpleConfigFile()
scf.set(('key1', 'value1'))
self.assertEqual(scf.get('key1'), 'value1')
scf.set(('KEY2', 'value2'))
self.assertEqual(scf.get('key2'), 'value2')
scf.set(('KEY3', 'value3'))
self.assertEqual(scf.get('KEY3'), 'value3')
scf.set(('key4', 'value4'))
self.assertEqual(scf.get('KEY4'), 'value4')
def unset_test(self):
scf = SimpleConfigFile()
scf.set(('key1', 'value1'))
scf.unset('key1')
self.assertEqual(scf.get('key1'), '')
def write_test(self):
with tempfile.NamedTemporaryFile(mode="wt") as testconfig:
scf = SimpleConfigFile()
scf.set(('key1', 'value1'))
scf.write(testconfig.name)
testconfig.flush()
self.assertEqual(open(testconfig.name).read(), 'KEY1=value1\n')
def read_test(self):
with tempfile.NamedTemporaryFile(mode="wt") as testconfig:
scf = SimpleConfigFile()
open(testconfig.name, 'w').write('KEY1="value1"\n')
testconfig.flush()
scf.read(testconfig.name)
self.assertEqual(scf.get('key1'), 'value1')
def read_write_test(self):
with tempfile.NamedTemporaryFile(mode="wt") as testconfig:
testconfig.write(self.TEST_CONFIG)
testconfig.flush()
scf = SimpleConfigFile()
scf.read(testconfig.name)
scf.write(testconfig.name)
testconfig.flush()
self.assertEqual(open(testconfig.name).read(), self.TEST_CONFIG)
def write_new_keys_test(self):
with tempfile.NamedTemporaryFile(mode="wt") as testconfig:
testconfig.write(self.TEST_CONFIG)
testconfig.flush()
scf = SimpleConfigFile()
scf.read(testconfig.name)
scf.set(("key1", "value1"))
scf.write(testconfig.name)
testconfig.flush()
self.assertEqual(open(testconfig.name).read(),
self.TEST_CONFIG+"KEY1=value1\n")
def remove_key_test(self):
with tempfile.NamedTemporaryFile(mode="wt") as testconfig:
testconfig.write(self.TEST_CONFIG)
testconfig.flush()
scf = SimpleConfigFile()
scf.read(testconfig.name)
self.assertEqual(scf.get("BOOT"), "always")
scf.unset("BOOT")
scf.write(testconfig.name)
testconfig.flush()
scf.reset()
scf.read(testconfig.name)
self.assertEqual(scf.get("BOOT"), "")
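# --- Usage sketch (illustrative, mirroring the tests above) ---
#
#     scf = SimpleConfigFile()
#     scf.read('/etc/sysconfig/network')   # hypothetical path
#     scf.set(('hostname', 'example'))     # keys are upper-cased on write
#     scf.write('/tmp/network.out')        # -> HOSTNAME=example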
| bcl/anaconda | tests/pyanaconda_tests/simpleconfig_test.py | Python | gpl-2.0 | 5,281 | ["Brian"] | 6dd8e63baaa41d77aef1c4773ee36e932b813d11e8925da76775ea1a4060c6a4 |
# -*- coding: utf-8 -*-
# Copyright 2008-2014 Jaap Karssenberg <jaap.karssenberg@gmail.com>
'''This module defines the ExportTemplateContext, which is a dictionary
used to set the template parameters when exporting.
Export template parameters supported::
generator
.name -- "Zim x.xx"
.user
title
navigation - links to other export pages (if not included here)
home
up
prev -- prev export file or None
next -- next export file or None
links -- links to other export pages (index & plugins / ...) - sorted dict to have Index, Home first followed by plugins
link
.name
.basename
pages -- iter over special + content
.special -- iter special pages to be included (index / plugins / ...) - support get() as well here
.content -- iter pages being exported
page
.title -- heading or basename
.name / .section / .basename
.heading
.body -- full body minus first heading
.content -- heading + body
.headings(max_level) -- iter over headings
headingsection
.level
.heading
.body
.content
.properties
.links
.backlinks
.attachments
file
.basename
.mtime
.size
options -- dict with template options (for format)
toc([page]) -- iter of headings in this page or all of pages
index([section]) -- index of full export job, not just in this page
uri(link|file)
resource(file)
anchor(page|section)
From template base::
range() / len() / sorted() / reversed()
strftime()
strfcal()
Test in a template for single page export use: "IF loop.first and loop.last"
'''
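# A minimal export-template sketch (illustrative only, assuming zim's
# "[% ... %]" template syntax) exercising some of the parameters listed
# in the docstring above:
#
#     <h1>[% page.title %]</h1>
#     [% page.body %]
#     [% IF navigation.prev %]<a href="[% uri(navigation.prev) %]">prev</a>[% END %]
#     [% index() %]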
import logging
logger = logging.getLogger('zim.export')
from zim import __version__ as ZIM_VERSION
import zim.datetimetz as datetime
from zim.utils import OrderedDict
from zim.fs import format_file_size
from zim.environ import environ
from zim.index import LINK_DIR_BACKWARD, LINK_DIR_FORWARD
from zim.notebook import Path
from zim.formats import ParseTree, ParseTreeBuilder, Visitor, \
FORMATTEDTEXT, BULLETLIST, LISTITEM, STRONG, LINK, HEADING
from zim.templates import TemplateContextDict
from zim.templates.functions import ExpressionFunction
class ExportTemplateContext(dict):
# No need to inherit from TemplateContextDict here, the template
# will do a copy first anyway to protect changing content in this
# object. This means functions and proxies can assume this dict is
safe, and only "options" is unsafe input.
#
# This object is not intended for re-use -- just instantiate a
# new one for each export page
def __init__(self, notebook, linker_factory, dumper_factory,
title, content, special=None,
home=None, up=None, prevpage=None, nextpage=None,
links=None,
index_generator=None, index_page=None,
):
'''Constructor
When exporting one notebook page per export page ("multi file"),
'C{content}' is a list of one page every time. Even for exporting
special pages, they go into 'C{content}' one at a time.
The special pages are linked in 'C{links}' so the template can
refer to them.
When exporting multiple notebook pages to a single export page
("single file"), 'C{content}' is a list of all notebook pages a
nd 'C{special}' a list.
@param notebook: L{Notebook} object
@param linker_factory: function producing L{ExportLinker} objects
@param dumper_factory: function producing L{DumperClass} objects
@param title: the export page title
@param content: list of notebook pages to be exported
@param special: list of special notebook pages to be exported if any
@param home: link to home page if any
@param up: link to parent export page if any
@param prevpage: link to previous export page if any
@param nextpage: link to next export page if any
@param links: list of links to special pages if any, links are
given as a 2-tuple of a key and a target (either a L{Path} or
a L{NotebookPathProxy})
@param index_generator: a generator function that
provides L{IndexPath} or L{Page} objects to be used for
the C{index()} function. This method should take a single
argument for the root namespace to show.
See the definition of L{Index.walk()} or L{PageSelection.index()}.
@param index_page: the current page to show in the index if any
'''
# TODO get rid of need of notebook here!
self._content = content
self._linker_factory = linker_factory
self._dumper_factory = dumper_factory
self._index_generator = index_generator or content
self._index_page = index_page
self.linker = linker_factory()
def _link(l):
if isinstance(l, basestring):
return UriProxy(l)
elif isinstance(l, Path):
return NotebookPathProxy(l)
else:
assert l is None or isinstance(l, (NotebookPathProxy, FileProxy))
return l
if special:
pages = ExportTemplatePageIter(
special=PageListProxy(notebook, special, self.dumper_factory),
content=PageListProxy(notebook, content, self.dumper_factory)
)
else:
pages = ExportTemplatePageIter(
content=PageListProxy(notebook, content, self.dumper_factory)
)
self.update({
# Parameters
'generator': {
'name': 'Zim %s' % ZIM_VERSION,
'user': environ['USER'], # TODO allow user name in prefs ?
},
'title': title,
'navigation': {
'home': _link(home),
'up': _link(up),
'prev': _link(prevpage),
'next': _link(nextpage),
},
'links': OrderedDict(), # keep order of links for iteration
'pages': pages,
# Template settings
'options': TemplateContextDict({}), # can be modified by template
# Functions
#~ 'toc': self.toc_function,
'index': self.index_function,
'pageindex': self.index_function, # backward compatibility
'uri': self.uri_function,
'anchor': self.anchor_function,
'resource': self.resource_function,
})
if links:
for k, l in links.items():
l = _link(l)
self['links'][k] = l
def dumper_factory(self, page):
'''Returns a L{DumperClass} instance for source page C{page}
Only template options defined before this method is called are
included, so only construct the "dumper" when you are about to
use it
'''
linker = self._linker_factory(source=page)
return self._dumper_factory(
linker=linker,
template_options=self['options']
)
#~ @ExpressionFunction
#~ def toc_function(self):
#~ # TODO
#~ # needs way to link heading achors in exported code (html)
#~ # pass these anchors through the parse tree
#~
#~ builder = ParseTreeBuilder()
#~ builder.start(FORMATTEDTEXT)
#~ builder.start(BULLETLIST)
#~ for page in self._content:
#~ current = 1
#~ for level, heading in ...:
#~ if level > current:
#~ for range(current, level):
#~ builder.start(BULLETLIST)
#~ current = level
#~ elif level < current:
#~ for range(level, current):
#~ builder.end(BULLETLIST)
#~ current = level
#~ builder.start(LISTITEM)
#~ builder.append(LINK, {'href': ...}, anchor)
#~ builder.end(LISTITEM)
#~ for range(1, current):
#~ builder.end(BULLETLIST)
#~
#~ builder.end(BULLETLIST)
#~ builder.end(FORMATTEDTEXT)
#~ tree = builder.get_parsetree()
#~ if not tree:
#~ return ''
#~ print "!!!", tree.tostring()
#~ dumper = self.dumper_factory(None)
#~ return ''.join(dumper.dump(tree))
@ExpressionFunction
def index_function(self, namespace=None, collapse=True, ignore_empty=True):
'''Index function for export template
@param namespace: the namespace to include
@param collapse: if C{True} only the branch of the current page
is shown, if C{False} the whole index is shown
@param ignore_empty: if C{True} empty pages (placeholders) are
not shown in the index
'''
if not self._index_generator:
return ''
builder = ParseTreeBuilder()
builder.start(FORMATTEDTEXT)
builder.start(BULLETLIST)
if self._index_page:
expanded = [self._index_page] + list(self._index_page.parents())
else:
expanded = []
stack = []
for path in self._index_generator(namespace):
if self._index_page and collapse \
and not path.parent in expanded:
continue # skip since it is not part of current path
elif ignore_empty and not (path.hascontent or path.haschildren):
continue # skip since page is empty
if not stack:
stack.append(path.parent)
elif stack[-1] != path.parent:
if path.ischild(stack[-1]):
builder.start(BULLETLIST)
stack.append(path.parent)
else:
while stack and stack[-1] != path.parent:
builder.end(BULLETLIST)
stack.pop()
builder.start(LISTITEM)
if path == self._index_page:
# Current page is marked with the strong style
builder.append(STRONG, text=path.basename)
else:
# links to other pages
builder.append(LINK,
{'type': 'page', 'href': ':'+path.name},
path.basename)
builder.end(LISTITEM)
for p in stack:
builder.end(BULLETLIST)
builder.end(FORMATTEDTEXT)
tree = builder.get_parsetree()
if not tree:
return ''
#~ print "!!!", tree.tostring()
dumper = self.dumper_factory(None)
return ''.join(dumper.dump(tree))
@ExpressionFunction
def uri_function(self, link):
if isinstance(link, UriProxy):
return link.uri
elif isinstance(link, NotebookPathProxy):
return self.linker.page_object(link._path)
elif isinstance(link, FilePathProxy):
return self.linker.file_object(link._file)
elif isinstance(link, basestring):
return self.linker.link(link)
else:
return None
@ExpressionFunction
def anchor_function(self, page):
# TODO remove prefix from anchors?
if isinstance(page, (PageProxy, NotebookPathProxy)):
return page.name
else:
return page
@ExpressionFunction
def resource_function(self, link):
return self.linker.resource(link)
class ExportTemplatePageIter(object):
def __init__(self, special=None, content=None):
self.special = special or []
self.content = content or []
def __iter__(self):
for p in self.special:
yield p
for p in self.content:
yield p
class HeadingSplitter(Visitor):
def __init__(self, max_level=None):
self.max_level = max_level or 999
self._builder = ParseTreeBuilder()
self.headings = []
def _split(self):
self._builder.end(FORMATTEDTEXT)
tree = self._builder.get_parsetree()
if tree.hascontent:
self.headings.append(tree)
self._builder = ParseTreeBuilder()
self._builder.start(FORMATTEDTEXT)
def _close(self):
tree = self._builder.get_parsetree()
if tree.hascontent:
self.headings.append(tree)
def start(self, tag, attrib=None):
if tag is HEADING and int(attrib['level']) <= self.max_level:
self._split()
self._builder.start(tag, attrib)
def end(self, tag):
self._builder.end(tag)
if tag == FORMATTEDTEXT:
self._close()
def text(self, text):
self._builder.text(text)
def append(self, tag, attrib=None, text=None):
if tag is HEADING and int(attrib['level']) <= self.max_level:
self._split()
self._builder.append(tag, attrib, text)
class PageListProxy(object):
def __init__(self, notebook, iterable, dumper_factory):
self._notebook = notebook
self._iterable = iterable
self._dumper_factory = dumper_factory
def __iter__(self):
for page in self._iterable:
dumper = self._dumper_factory(page)
yield PageProxy(self._notebook, page, dumper)
class ParseTreeProxy(object):
@property
def heading(self):
head, body = self._split_head()
return head
@property
def body(self):
try:
head, body = self._split_head()
if body:
lines = self._dumper.dump(body)
return u''.join(lines)
else:
return ''
except:
logger.exception('Exception exporting page: %s', self._page.name)
raise # will result in a "no such parameter" kind of error
@property
def content(self):
try:
if self._tree:
lines = self._dumper.dump(self._tree)
return u''.join(lines)
else:
return ''
except:
logger.exception('Exception exporting page: %s', self._page.name)
raise # will result in a "no such parameter" kind of error
def _split_head(self):
if not hasattr(self, '_severed_head'):
if self._tree:
tree = self._tree.copy()
head, level = tree.pop_heading()
self._severed_head = (head, tree) # head can be None here
else:
self._severed_head = (None, None)
return self._severed_head
class PageProxy(ParseTreeProxy):
def __init__(self, notebook, page, dumper):
self._notebook = notebook
self._page = page
self._tree = page.get_parsetree()
self._dumper = dumper
self.name = self._page.name
self.section = self._page.namespace
self.namespace = self._page.namespace # backward compat
self.basename = self._page.basename
self.properties = self._page.properties
@property
def title(self):
return self.heading or self.basename
@ExpressionFunction
def headings(self, max_level=None):
if self._tree and self._tree.hascontent:
splitter = HeadingSplitter(max_level)
self._tree.visit(splitter)
for subtree in splitter.headings:
yield HeadingProxy(self._page, subtree, self._dumper)
@property
def links(self):
links = self._notebook.index.list_links(self._page, LINK_DIR_FORWARD)
for link in links:
yield NotebookPathProxy(link.target)
@property
def backlinks(self):
links = self._notebook.index.list_links(self._page, LINK_DIR_BACKWARD)
for link in links:
yield NotebookPathProxy(link.source)
@property
def attachments(self):
dir = self._notebook.get_attachments_dir(self._page)
for basename in dir.list():
file = dir.file(basename)
if file.exists(): # is file
yield FileProxy(file, './'+basename)
class HeadingProxy(ParseTreeProxy):
def __init__(self, page, tree, dumper):
self._page = page
self._tree = tree
self._dumper = dumper
self.level = tree.get_heading_level() or 1
class FilePathProxy(object):
def __init__(self, file, relpath=None):
self._file = file
self.name = relpath or file.basename
self.basename = file.basename
class FileProxy(FilePathProxy):
@property
def mtime(self):
return datetime.datetime.fromtimestamp(float(self._file.mtime()))
@property
def size(self):
return format_file_size(self._file.size())
class NotebookPathProxy(object):
def __init__(self, path):
self._path = path
self.name = path.name
self.basename = path.basename
self.section = path.namespace
self.namespace = path.namespace # backward compat
class UriProxy(object):
def __init__(self, uri):
self.uri = uri
def __str__(self):
return self.uri
| fabricehong/zim-desktop | zim/export/template.py | Python | gpl-2.0 | 14,460 | ["VisIt"] | 40d4bb8c1c1231dec7bbddb63dafb0fe395aa210f622d5b66a700f8b6928a6c3 |
#!/usr/bin/env python
#
# MIMEHandler.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
import sys
import os
import logging
log = logging.getLogger("Thug")
import base64
import hashlib
import zipfile
import rarfile
import tempfile
try:
from cStringIO import StringIO
except ImportError:
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
SSDEEP = True
try:
import ssdeep
except ImportError:
SSDEEP = False
import bs4 as BeautifulSoup
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)))
PEEPDF = True
try:
from Analysis.peepdf.PDFCore import PDFParser, vulnsDict
except:
PEEPDF = False
from datetime import datetime
from lxml import etree
ANDROGUARD = True
try:
from androguard.core import androconf
from androguard.core.bytecodes import apk
from androguard.core.bytecodes import dvm
from androguard.core.analysis import analysis
except ImportError:
log.warning("[WARNING] Androguard not found - APK analysis disabled")
ANDROGUARD = False
class MIMEHandler(dict):
"""
MIMEHandler class is meant to allow registering MIME handlers the
same way a real browser would do.
The default handling for almost all Content-Types is not to process
the downloaded content any further, which is done by returning
True from the Content-Type handler. The method `passthrough' is the
default handler associated with almost all Content-Types, with the few
exceptions defined in the method `register_empty_handlers'.
Two ways exist for further processing downloaded content.
The first one is returning False from the Content-Type handler.
The second one is having a None Content-Type handler, which turns out
to be quite useful when an unknown Content-Type is served. In such a
case the __missing__ method will return None (thus enabling further
content processing) and log the unknown Content-Type for convenience.
This design is quite flexible because, for example, you can instantiate
your own PDF analysis system really quickly by simply defining a new
application/pdf Content-Type handler.
"""
mimetypes = ("application/download",
"application/envoy",
"application/exe",
"application/fractals",
"application/futuresplash",
"application/hta",
"application/internet-property-stream",
"application/java-archive",
"application/javascript",
"application/mac-binhex40",
"application/msword",
"application/octet-stream",
"application/oda",
"application/olescript",
"application/pdf",
"application/pics-rules",
"application/pkcs10",
"application/pkix-crl",
"application/postscript",
"application/rar",
"application/rtf",
"application/set-payment-initiation",
"application/set-registration-initiation",
"application/vnd.android.package-archive",
"application/vnd.ms-excel",
"application/vnd.ms-outlook",
"application/vnd.ms-pkicertstore",
"application/vnd.ms-pkiseccat",
"application/vnd.ms-pkistl",
"application/vnd.ms-powerpoint",
"application/vnd.ms-project",
"application/vnd.ms-works",
"application/winhlp",
"application/x-bcpio",
"application/x-bzip2",
"application/x-cdf",
"application/x-chrome-extension",
"application/x-compress",
"application/x-compressed",
"application/x-cpio",
"application/x-csh",
"application/x-director",
"application/x-dosexec",
"application/x-dvi",
"application/x-gtar",
"application/x-gzip",
"application/x-hdf",
"application/x-internet-signup",
"application/x-iphone",
"application/x-java-jnlp-file",
"application/x-javascript",
"application/x-latex",
"application/x-msaccess",
"application/x-mscardfile",
"application/x-msclip",
"application/x-msdos-program",
"application/x-msdownload",
"application/x-msmediaview",
"application/x-msmetafile",
"application/x-msmoney",
"application/x-mspublisher",
"application/x-msschedule",
"application/x-msterminal",
"application/x-mswrite",
"application/x-netcdf",
"application/x-perfmon",
"application/x-pkcs12",
"application/x-pkcs7-certificates",
"application/x-pkcs7-certreqresp",
"application/x-pkcs7-mime",
"application/x-pkcs7-signature",
"application/x-rar-compressed",
"application/x-sh",
"application/x-shar",
"application/x-shockwave-flash",
"application/x-silverlight-2",
"application/x-stuffit",
"application/x-sv4cpio",
"application/x-sv4crc",
"application/x-tar",
"application/x-tcl",
"application/x-tex",
"application/x-texinfo",
"application/x-troff",
"application/x-troff-man",
"application/x-troff-me",
"application/x-troff-ms",
"application/x-ustar",
"application/x-wais-source",
"application/x-x509-ca-cert",
"application/x-xpinstall",
"application/x-zip-compressed",
"application/ynd.ms-pkipko",
"application/zip",
"audio/basic",
"audio/mid",
"audio/mpeg",
"audio/x-aiff",
"audio/x-mpegurl",
"audio/x-ms-wma",
"audio/x-pn-realaudio",
"audio/x-wav",
"image/bmp",
"image/bmpimage/x-bmp",
"image/cis-cod",
"image/gif",
"image/ief",
"image/jpeg",
"image/pipeg",
"image/png",
"image/svg+xml",
"image/tiff",
"image/x-cmu-raster",
"image/x-cmx",
"image/x-icon",
"image/x-portable-anymap",
"image/x-portable-bitmap",
"image/x-portable-graymap",
"image/x-portable-pixmap",
"image/x-rgb",
"image/x-xbitmap",
"image/x-xpixmap",
"image/x-xwindowdump",
"message/rfc822",
"text/css",
"text/h323",
"text/html",
"text/iuls",
"text/javascript",
"text/plain",
"text/richtext",
"text/scriptlet",
"text/tab-separated-values",
"text/vnd.wap.wml",
"text/webviewhtml",
"text/x-component",
"text/x-setext",
"text/x-vcard",
"video/mpeg",
"video/quicktime",
"video/x-la-asf",
"video/x-ms-asf",
"video/x-msvideo",
"video/x-sgi-movie",
"x-world/x-vrml")
def __missing__(self, key):
_key = key.split(';')[0].strip()
if _key in self:
return self[_key]
log.warning("[MIMEHandler] Unknown MIME Type: %s", key)
return self.passthrough
def __init__(self):
for mimetype in self.mimetypes:
self[mimetype] = self.passthrough
self.handlers = list()
self.register_empty_handlers()
self.register_fallback_handlers()
self.register_zip_handlers()
self.register_rar_handlers()
self.register_pdf_handlers()
self.register_android_handlers()
self.register_java_jnlp_handlers()
def register_empty_handlers(self):
self['application/javascript'] = None
self['application/x-javascript'] = None
self['text/css'] = None
self['text/html'] = None
#self['text/plain'] = None
self['text/javascript'] = None
def register_fallback_handlers(self):
self['text/plain'] = self.handle_fallback
def register_handler(self, mimetype, handler):
self[mimetype] = handler
self.handlers.append(handler)
def register_zip_handlers(self):
self.register_handler('application/zip', self.handle_zip)
def register_rar_handlers(self):
self.register_handler('application/x-rar-compressed', self.handle_rar)
def register_pdf_handlers(self):
if PEEPDF:
self.register_handler('application/pdf', self.handle_pdf)
def register_android_handlers(self):
if ANDROGUARD:
self['application/vnd.android.package-archive'] = self.handle_android
def register_java_jnlp_handlers(self):
self['application/x-java-jnlp-file'] = self.handle_java_jnlp
def handle_fallback(self, url, content):
for handler in self.handlers:
try:
if handler(url, content):
return True
except:
pass
return False
def handle_zip(self, url, content):
fp = StringIO(content)
if not zipfile.is_zipfile(fp):
return False
try:
zipdata = zipfile.ZipFile(fp)
except: #pylint:disable=bare-except
return False
for filename in zipdata.namelist():
try:
data = zipdata.read(filename)
except: #pylint:disable=bare-except
continue
sample = log.ThugLogging.log_file(data)
if sample is None:
continue
try:
md5 = sample['md5']
except: #pylint:disable=bare-except
continue
unzipped = os.path.join(log.ThugLogging.baseDir, 'unzipped')
log.ThugLogging.store_content(unzipped, md5, data)
return True
def handle_rar(self, url, content):
fd, rfile = tempfile.mkstemp()
os.close(fd)  # close the raw descriptor from mkstemp; reopen by name
with open(rfile, 'wb') as rf:
rf.write(content)
try:
rardata = rarfile.RarFile(rfile)
except: #pylint:disable=bare-except
os.remove(rfile)
return False
for filename in rardata.namelist():
try:
data = rardata.read(filename)
except: #pylint:disable=bare-except
continue
sample = log.ThugLogging.log_file(data)
if sample is None:
continue
try:
md5 = sample['md5']
except: #pylint:disable=bare-except
continue
unzipped = os.path.join(log.ThugLogging.baseDir, 'unzipped')
log.ThugLogging.store_content(unzipped, md5, data)
os.remove(rfile)
return True
def getPeepXML(self, statsDict, url):
"""
Slightly modified version of Peepdf getPeepXML function
"""
root = etree.Element('peepdf_analysis',
url = 'http://peepdf.eternal-todo.com',
author = 'Jose Miguel Esparza')
analysisDate = etree.SubElement(root, 'date')
analysisDate.text = datetime.today().strftime('%Y-%m-%d %H:%M')
basicInfo = etree.SubElement(root, 'basic')
fileName = etree.SubElement(basicInfo, 'filename')
fileName.text = statsDict['File']
md5 = etree.SubElement(basicInfo, 'md5')
md5.text = statsDict['MD5']
sha1 = etree.SubElement(basicInfo, 'sha1')
sha1.text = statsDict['SHA1']
sha256 = etree.SubElement(basicInfo, 'sha256')
sha256.text = statsDict['SHA256']
size = etree.SubElement(basicInfo, 'size')
size.text = statsDict['Size']
detection = etree.SubElement(basicInfo, 'detection')
if statsDict['Detection'] != [] and statsDict['Detection'] is not None:
detectionRate = etree.SubElement(detection, 'rate')
detectionRate.text = '%d/%d' % (statsDict['Detection'][0], statsDict['Detection'][1])
detectionReport = etree.SubElement(detection, 'report_link')
detectionReport.text = statsDict['Detection report']
version = etree.SubElement(basicInfo, 'pdf_version')
version.text = statsDict['Version']
#binary = etree.SubElement(basicInfo, 'binary', status = statsDict['Binary'].lower())
#linearized = etree.SubElement(basicInfo, 'linearized', status = statsDict['Linearized'].lower())
encrypted = etree.SubElement(basicInfo, 'encrypted', status = statsDict['Encrypted'].lower())
if statsDict['Encryption Algorithms'] != []:
algorithms = etree.SubElement(encrypted, 'algorithms')
for algorithmInfo in statsDict['Encryption Algorithms']:
algorithm = etree.SubElement(algorithms, 'algorithm', bits = str(algorithmInfo[1]))
algorithm.text = algorithmInfo[0]
updates = etree.SubElement(basicInfo, 'updates')
updates.text = statsDict['Updates']
objects = etree.SubElement(basicInfo, 'num_objects')
objects.text = statsDict['Objects']
streams = etree.SubElement(basicInfo, 'num_streams')
streams.text = statsDict['Streams']
comments = etree.SubElement(basicInfo, 'comments')
comments.text = statsDict['Comments']
errors = etree.SubElement(basicInfo, 'errors', num = str(len(statsDict['Errors'])))
for error in statsDict['Errors']:
errorMessageXML = etree.SubElement(errors, 'error_message')
errorMessageXML.text = error
advancedInfo = etree.SubElement(root, 'advanced')
for version in range(len(statsDict['Versions'])):
statsVersion = statsDict['Versions'][version]
if version == 0:
versionType = 'original'
else:
versionType = 'update'
versionInfo = etree.SubElement(advancedInfo, 'version', num = str(version), type = versionType)
catalog = etree.SubElement(versionInfo, 'catalog')
if statsVersion['Catalog']:
catalog.set('object_id', statsVersion['Catalog'])
info = etree.SubElement(versionInfo, 'info')
if statsVersion['Info']:
info.set('object_id', statsVersion['Info'])
objects = etree.SubElement(versionInfo, 'objects', num = statsVersion['Objects'][0])
for _id in statsVersion['Objects'][1]:
_object = etree.SubElement(objects, 'object', id = str(_id))
if statsVersion['Compressed Objects']:
if _id in statsVersion['Compressed Objects'][1]:
_object.set('compressed', 'true')
else:
_object.set('compressed', 'false')
if statsVersion['Errors']:
if _id in statsVersion['Errors'][1]:
_object.set('errors', 'true')
else:
_object.set('errors', 'false')
streams = etree.SubElement(versionInfo, 'streams', num = statsVersion['Streams'][0])
for _id in statsVersion['Streams'][1]:
stream = etree.SubElement(streams, 'stream', id = str(_id))
if statsVersion['Xref Streams']:
if _id in statsVersion['Xref Streams'][1]:
stream.set('xref_stream', 'true')
else:
stream.set('xref_stream', 'false')
if statsVersion['Object Streams']:
if _id in statsVersion['Object Streams'][1]:
stream.set('object_stream', 'true')
else:
stream.set('object_stream', 'false')
if statsVersion['Encoded']:
if _id in statsVersion['Encoded'][1]:
stream.set('encoded', 'true')
if statsVersion['Decoding Errors']:
if _id in statsVersion['Decoding Errors'][1]:
stream.set('decoding_errors', 'true')
else:
stream.set('decoding_errors', 'false')
else:
stream.set('encoded', 'false')
jsObjects = etree.SubElement(versionInfo, 'js_objects')
if statsVersion['Objects with JS code']:
for _id in statsVersion['Objects with JS code'][1]:
etree.SubElement(jsObjects, 'container_object', id = str(_id))
actions = statsVersion['Actions']
events = statsVersion['Events']
vulns = statsVersion['Vulns']
elements = statsVersion['Elements']
suspicious = etree.SubElement(versionInfo, 'suspicious_elements')
if events or actions or vulns or elements:
if events:
triggers = etree.SubElement(suspicious, 'triggers')
for event in events:
trigger = etree.SubElement(triggers, 'trigger', name = event)
for _id in events[event]:
log.ThugLogging.log_exploit_event(url,
"Adobe Acrobat Reader",
"Adobe Acrobat Reader suspicious trigger: %s [object %s]" % (event, _id, )
)
etree.SubElement(trigger, 'container_object', id = str(_id))
if actions:
actionsList = etree.SubElement(suspicious, 'actions')
for action in actions:
actionInfo = etree.SubElement(actionsList, 'action', name = action)
for _id in actions[action]:
log.ThugLogging.log_exploit_event(url,
"Adobe Acrobat Reader",
"Adobe Acrobat Reader suspicious action: %s [object %s]" % (action, _id, )
)
etree.SubElement(actionInfo, 'container_object', id = str(_id))
if elements:
elementsList = etree.SubElement(suspicious, 'elements')
for element in elements:
elementInfo = etree.SubElement(elementsList, 'element', name = element)
if element in vulnsDict:
for vulnCVE in vulnsDict[element]:
if isinstance(vulnCVE, (list, tuple)):
vulnCVE=",".join(vulnCVE)
log.ThugLogging.log_exploit_event(url,
"Adobe Acrobat Reader",
"Adobe Acrobat Reader Exploit (%s)" % (vulnCVE, ),
cve = vulnCVE)
cve = etree.SubElement(elementInfo, 'cve')
cve.text = vulnCVE
for _id in elements[element]:
etree.SubElement(elementInfo, 'container_object', id = str(_id))
if vulns:
vulnsList = etree.SubElement(suspicious, 'js_vulns')
for vuln in vulns:
vulnInfo = etree.SubElement(vulnsList, 'vulnerable_function', name = vuln)
if vuln in vulnsDict:
for vulnCVE in vulnsDict[vuln]:
if isinstance(vulnCVE, (list, tuple)):
vulnCVE=",".join(vulnCVE)
log.ThugLogging.log_exploit_event(url,
"Adobe Acrobat Reader",
"Adobe Acrobat Reader Exploit (%s)" % (vulnCVE, ),
cve = vulnCVE)
cve = etree.SubElement(vulnInfo, 'cve')
cve.text = vulnCVE
for _id in vulns[vuln]:
etree.SubElement(vulnInfo, 'container_object', id = str(_id))
urls = statsVersion['URLs']
#suspiciousURLs = etree.SubElement(versionInfo, 'suspicious_urls')
if urls:
for url in urls:
urlInfo = etree.SubElement(versionInfo, 'url')
urlInfo.text = url
return etree.tostring(root, pretty_print = True)
def swf_mastah(self, pdf, statsDict, url):
"""
This code is taken from SWF Mastah by Brandon Dixon
"""
swfdir = os.path.join(log.ThugLogging.baseDir, 'dropped', 'swf')
count = 0
for version in range(len(statsDict['Versions'])): #pylint:disable=unused-variable
body = pdf.body[count]
objs = body.objects
for index in objs:
#oid = objs[index].id
#offset = objs[index].offset
#size = objs[index].size
details = objs[index].object
if details.type in ("stream", ):
#encoded_stream = details.encodedStream
decoded_stream = details.decodedStream
header = decoded_stream[:3]
# test the stream magic directly; the original list comprehension over objs
# repeated this same check once per object without using the loop variable
is_flash = header in ("CWS", "FWS")
if is_flash:
data = decoded_stream.strip()
sample = log.ThugLogging.log_file(data, url)
if sample is None:
continue
swffile = "%s.swf" % (sample["md5"], )
log.ThugLogging.store_content(swfdir, swffile, data)
log.warning("[PDF] Embedded SWF %s extracted from PDF %s", sample["md5"], statsDict["MD5"])
count += 1
def handle_pdf(self, url, content):
sample = log.ThugLogging.build_sample(content, url)
if sample is None or sample['type'] not in ('PDF', ):
return
fd, rfile = tempfile.mkstemp()
os.close(fd)  # close the descriptor returned by mkstemp; the path is reopened just below
with open(rfile, 'wb') as pdf_file:
    pdf_file.write(content)
pdfparser = PDFParser()
try:
ret, pdf = pdfparser.parse(rfile, forceMode = True, looseMode = True) #pylint:disable=unused-variable
except: #pylint:disable=bare-except
os.remove(rfile)
return False
statsDict = pdf.getStats()
analysis = self.getPeepXML(statsDict, url)
log_dir = os.path.join(log.ThugLogging.baseDir, "analysis", "pdf")
log.ThugLogging.log_peepdf(log_dir, sample, analysis)
self.swf_mastah(pdf, statsDict, url)
os.remove(rfile)
return True
def do_build_apk_report(self, a):
output = StringIO()
a.get_files_types()
output.write("[FILES] \n")
for i in a.get_files():
try:
output.write("\t%s %s %x\n" % (i, a.files[i], a.files_crc32[i], ))
except KeyError:
output.write("\t%s %x\n" % (i, a.files_crc32[i], ))
output.write("\n[PERMISSIONS] \n")
details_permissions = a.get_details_permissions()
for i in details_permissions:
output.write("\t%s %s\n" % (i, details_permissions[i], ))
output.write("\n[MAIN ACTIVITY]\n\t%s\n" % (a.get_main_activity(), ))
output.write("\n[ACTIVITIES] \n")
activities = a.get_activities()
for i in activities:
filters = a.get_intent_filters("activity", i)
output.write("\t%s %s\n" % (i, filters or "", ))
output.write("\n[SERVICES] \n")
services = a.get_services()
for i in services:
filters = a.get_intent_filters("service", i)
output.write("\t%s %s\n" % (i, filters or "", ))
output.write("\n[RECEIVERS] \n")
receivers = a.get_receivers()
for i in receivers:
filters = a.get_intent_filters("receiver", i)
output.write("\t%s %s\n" % (i, filters or "", ))
output.write("\n[PROVIDERS]\n\t%s\n\n" % (a.get_providers(), ))
vm = dvm.DalvikVMFormat(a.get_dex())
vmx = analysis.uVMAnalysis(vm)
output.write("Native code : %s\n" % (analysis.is_native_code(vmx), ))
output.write("Dynamic code : %s\n" % (analysis.is_dyn_code(vmx), ))
output.write("Reflection code : %s\n" % (analysis.is_reflection_code(vmx), ))
output.write("ASCII Obfuscation: %s\n\n" % (analysis.is_ascii_obfuscation(vm), ))
for i in vmx.get_methods():
i.create_tags()
if not i.tags.empty():
output.write("%s %s %s\n" % (i.method.get_class_name(),
i.method.get_name(),
i.tags, ))
return output
def save_apk_report(self, sample, a, url):
output = self.do_build_apk_report(a)
log_dir = os.path.join(log.ThugLogging.baseDir, "analysis", "apk")
log.ThugLogging.log_androguard(log_dir, sample, output.getvalue())
def build_apk_sample(self, data, url = None):
sample = {
"md5" : hashlib.md5(data).hexdigest(),
"sha1" : hashlib.sha1(data).hexdigest(),
"raw" : data,
"data" : base64.b64encode(data),
"type" : "APK",
}
if SSDEEP:
sample['ssdeep'] = ssdeep.hash(data)
return sample
def handle_android(self, url, content):
ret = False
fd, rfile = tempfile.mkstemp()
os.close(fd)  # avoid leaking the descriptor returned by mkstemp
with open(rfile, 'wb') as apk_file:
    apk_file.write(content)
ret_type = androconf.is_android(rfile)
if ret_type not in ("APK", ):
os.remove(rfile)
return ret
try :
a = apk.APK(rfile, zipmodule = 2)
if a.is_valid_APK():
sample = self.build_apk_sample(content, url)
self.save_apk_report(sample, a, url)
ret = True
except: #pylint:disable=bare-except
pass
os.remove(rfile)
return ret
@property
def javaWebStartUserAgent(self):
javaplugin = log.ThugVulnModules._javaplugin.split('.')
last = javaplugin.pop()
version = '%s_%s' % ('.'.join(javaplugin), last)
return "JNLP/6.0 javaws/%s (b04) Java/%s" % (version, version, )
def handle_java_jnlp(self, url, data):
headers = dict()
headers['Connection'] = 'keep-alive'
try:
soup = BeautifulSoup.BeautifulSoup(data, "lxml")
except: #pylint:disable=bare-except
return
jnlp = soup.find("jnlp")
if jnlp is None:
return
codebase = jnlp.attrs['codebase'] if 'codebase' in jnlp.attrs else ''
log.ThugLogging.add_behavior_warn(description = '[JNLP Detected]', method = 'Dynamic Analysis')
jars = soup.find_all("jar")
if not jars:
return
headers['User-Agent'] = self.javaWebStartUserAgent
for jar in jars:
try:
url = "%s%s" % (codebase, jar.attrs['href'], )
self.window._navigator.fetch(url, headers = headers, redirect_type = "JNLP")
except: #pylint:disable=bare-except
pass
def passthrough(self, url, data):
"""
The passthrough method is the default handler associated with
almost all Content-Types, with the few exceptions defined in
register_empty_handlers.
"""
return True
def get_handler(self, key):
return self[key]
|
qistoph/thug
|
src/DOM/MIMEHandler.py
|
Python
|
gpl-2.0
| 30,176
|
[
"NetCDF"
] |
701b9533ed8d8487895a6c49d5cb1806d9a0fed0a5ecdaa5f0ffdd305490d01d
|
r"""
Example of central limit theorem
--------------------------------
Figure 3.20.
An illustration of the central limit theorem. The histogram in each panel shows
the distribution of the mean value of N random variables drawn from the (0, 1)
range (a uniform distribution with :math:`\mu = 0.5` and W = 1; see eq. 3.39).
The distribution for N = 2 has a triangular shape and as N increases it becomes
increasingly similar to a Gaussian, in agreement with the central limit
theorem. The predicted normal distribution with :math:`\mu = 0.5` and
:math:`\sigma = 1/ \sqrt{12 N}` is shown by the line. Already for N = 10,
the "observed" distribution is essentially the same as the predicted
distribution.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import norm
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Generate the uniform samples
N = [2, 3, 10]
np.random.seed(42)
x = np.random.random((max(N), int(1E6)))  # shape entries must be ints in modern numpy
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 5))
fig.subplots_adjust(hspace=0.05)
for i in range(len(N)):
ax = fig.add_subplot(3, 1, i + 1)
# take the mean of the first N[i] samples
x_i = x[:N[i], :].mean(0)
# histogram the data
ax.hist(x_i, bins=np.linspace(0, 1, 101),
        histtype='stepfilled', alpha=0.5, density=True)  # 'normed' was removed in matplotlib 3.x
# plot the expected gaussian pdf
mu = 0.5
sigma = 1. / np.sqrt(12 * N[i])
dist = norm(mu, sigma)
x_pdf = np.linspace(-0.5, 1.5, 1000)
ax.plot(x_pdf, dist.pdf(x_pdf), '-k')
ax.set_xlim(0.0, 1.0)
ax.set_ylim(0.001, None)
ax.xaxis.set_major_locator(plt.MultipleLocator(0.2))
ax.yaxis.set_major_locator(plt.MaxNLocator(5))
ax.text(0.99, 0.95, r"$N = %i$" % N[i],
ha='right', va='top', transform=ax.transAxes)
if i == len(N) - 1:
ax.xaxis.set_major_formatter(plt.FormatStrFormatter('%.4f'))
ax.set_xlabel(r'$x$')
else:
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.set_ylabel('$p(x)$')
plt.show()
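# Minimal numeric check (not part of the original figure script): the standard deviation
# of the N = 10 sample means should be close to the predicted 1/sqrt(12*10) ~= 0.0913.
print("predicted sigma: %.4f, observed sigma: %.4f"
      % (1. / np.sqrt(12 * N[-1]), x[:N[-1], :].mean(0).std()))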
|
gtrichards/PHYS_T480
|
code/fig_central_limit.py
|
Python
|
mit
| 2,845
|
[
"Gaussian"
] |
16a0fd2cede79983358d30329bd12c35da0226dc27fa2e50787623252e72fc92
|
import json
from ace_overlay.widgets import AceOverlayWidget
from django import forms
from django.db.models import Q
from django.forms import ModelForm, Textarea
from guardian.shortcuts import get_objects_for_user
from accounts.queries import compile_expression
from studies.models import Lab, Response, Study
from studies.permissions import LabPermission, StudyPermission
CRITERIA_EXPRESSION_HELP_LINK = "https://lookit.readthedocs.io/en/develop/researchers-set-study-fields.html#criteria-expression"
STUDY_TYPE_HELP_LINK = "https://lookit.readthedocs.io/en/develop/researchers-manage-studies.html#editing-study-type"
PROTOCOL_CONFIG_HELP_LINK = (
"https://lookit.readthedocs.io/en/develop/researchers-create-experiment.html"
)
PROTOCOL_GENERATOR_HELP_LINK = (
"https://lookit.readthedocs.io/en/develop/researchers-protocol-generators.html"
)
class ResponseForm(ModelForm):
results = forms.CharField(
widget=AceOverlayWidget(
mode="json",
wordwrap=True,
theme="textmate",
width="100%",
height="100%",
showprintmargin=False,
),
required=False,
)
class Meta:
fields = ("study", "child", "demographic_snapshot", "results")
model = Response
STUDY_TYPE_HELP_TEXT_INITIAL = f"""<p>After selecting an experiment runner type above, you'll be asked
to provide some additional configuration information.</p>
<p>If you're not sure what to enter here, just leave the defaults (you can change this later).
For more information on experiment runner types, please
<a href={STUDY_TYPE_HELP_LINK}>see the documentation.</a></p>"""
# Leave the same for now but may change in the future
STUDY_TYPE_HELP_TEXT_EDIT = STUDY_TYPE_HELP_TEXT_INITIAL
PROTOCOL_HELP_TEXT_EDIT = f"Configure frames to use in your study and specify their order. For information on how to set up your protocol, please <a href={PROTOCOL_CONFIG_HELP_LINK}>see the documentation.</a>"
PROTOCOL_HELP_TEXT_INITIAL = f"{PROTOCOL_HELP_TEXT_EDIT} You can leave the default for now and come back to this later."
DEFAULT_GENERATOR = """function generateProtocol(child, pastSessions) {
/*
* Generate the protocol for this study.
*
* @param {Object} child
* The child currently participating in this study. Includes fields:
* givenName (string)
* birthday (Date)
* gender (string, 'm' / 'f' / 'o')
* ageAtBirth (string, e.g. '25 weeks'. One of '40 or more weeks',
* '39 weeks' through '24 weeks', 'Under 24 weeks', or
* 'Not sure or prefer not to answer')
* additionalInformation (string)
* languageList (string) space-separated list of languages child is
* exposed to (2-letter codes)
* conditionList (string) space-separated list of conditions/characteristics
* of child from registration form, as used in criteria expression
* - e.g. "autism_spectrum_disorder deaf multiple_birth"
*
* Use child.get to access these fields: e.g., child.get('givenName') returns
* the child's given name.
*
* @param {!Array<Object>} pastSessions
* List of past sessions for this child and this study, in reverse time order:
* pastSessions[0] is THIS session, pastSessions[1] the previous session,
* back to pastSessions[pastSessions.length - 1] which has the very first
* session.
*
* Each session has the following fields, corresponding to values available
* in Lookit:
*
* createdOn (Date)
* conditions
* expData
* sequence
* completed
* globalEventTimings
* completedConsentFrame (note - this list will include even "responses")
* where the user did not complete the consent form!
* demographicSnapshot
* isPreview
*
* @return {Object} Protocol specification for Lookit study; object with 'frames'
* and 'sequence' keys.
*/
var protocol = {
frames: {},
sequence: []
};
return protocol;
}
"""
class LabForm(ModelForm):
class Meta:
model = Lab
fields = [
"name",
"institution",
"principal_investigator_name",
"contact_email",
"contact_phone",
"lab_website",
"description",
"irb_contact_info",
]
help_texts = {
"contact_email": (
"This will be the reply-to address when you contact participants, so make sure it is a monitored "
"address or list that lab members can access."
)
}
class LabApprovalForm(ModelForm):
class Meta:
model = Lab
fields = [
"name",
"institution",
"principal_investigator_name",
"contact_email",
"contact_phone",
"lab_website",
"description",
"irb_contact_info",
"approved_to_test",
]
class StudyForm(ModelForm):
"""Base form for creating or editing a study"""
# Eventually when we support other experiment runner types (labjs, jspsych, etc.)
# we may do one of the following:
# - separate the 'study protocol specification' fields into their own
# form which collects various information and cleans it and sets a single 'structure' object,
# with the selected
# - creating a model to represent each study type, likely such that each study has a nullable
# relation for lookit_runner_protocol, jspsych_runner_protocol, etc.
structure = forms.CharField(
label="Protocol configuration",
widget=AceOverlayWidget(
mode="json",
wordwrap=True,
theme="textmate",
width="100%",
height="100%",
showprintmargin=False,
),
required=False,
)
# Define initial value here rather than providing actual default so that any updates don't
# require migrations: this isn't a true "default" value that would ever be used, but rather
# a helpful skeleton to guide the user
generator = forms.CharField(
label="Protocol generator",
widget=AceOverlayWidget(
mode="javascript",
wordwrap=True,
theme="textmate",
width="100%",
height="100%",
showprintmargin=False,
),
required=False,
help_text=(
"Write a Javascript function that returns a study protocol object with 'frames' and "
"'sequence' keys. This allows more flexible randomization and dependence on past sessions in "
f"complex cases. See <a href={PROTOCOL_GENERATOR_HELP_LINK}>documentation</a> for details."
),
initial=DEFAULT_GENERATOR,
)
def clean(self):
cleaned_data = super().clean()
min_age_days = self.cleaned_data.get("min_age_days")
min_age_months = self.cleaned_data.get("min_age_months")
min_age_years = self.cleaned_data.get("min_age_years")
max_age_days = self.cleaned_data.get("max_age_days")
max_age_months = self.cleaned_data.get("max_age_months")
max_age_years = self.cleaned_data.get("max_age_years")
if (min_age_years * 365 + min_age_months * 30 + min_age_days) > (
max_age_years * 365 + max_age_months * 30 + max_age_days
):
raise forms.ValidationError(
"The maximum age must be greater than the minimum age."
)
return cleaned_data
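    # Worked example (annotation, not part of the original module): for a minimum age of
    # 1 year, 2 months, 0 days and a maximum age of 1 year, 0 months, 0 days, the check
    # compares 1*365 + 2*30 + 0 = 425 against 1*365 + 0*30 + 0 = 365 and raises,
    # because the approximate minimum (425 days) exceeds the maximum (365 days).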
def clean_structure(self):
structure_text = self.cleaned_data["structure"]
# Parse edited text representation of structure object, and additionally store the
# exact text (so user can organize frames, parameters, etc. for readability)
try:
json_data = json.loads(structure_text) # loads string as json
json_data["exact_text"] = structure_text
except ValueError:  # covers json.JSONDecodeError, its subclass
raise forms.ValidationError(
"Saving protocol configuration failed due to invalid JSON! Please use valid JSON and save again. If you reload this page, all changes will be lost."
)
# Store the object which includes the exact text (not just the text)
return json_data
def clean_criteria_expression(self):
criteria_expression = self.cleaned_data["criteria_expression"]
try:
compile_expression(criteria_expression)
except Exception as e:
raise forms.ValidationError(f"Invalid criteria expression:\n{e.args[0]}")
return criteria_expression
class Meta:
model = Study
fields = [
"name",
"lab",
"image",
"short_description",
"purpose",
"compensation_description",
"exit_url",
"criteria",
"min_age_days",
"min_age_months",
"min_age_years",
"max_age_days",
"max_age_months",
"max_age_years",
"duration",
"contact_info",
"public",
"shared_preview",
"structure",
"generator",
"use_generator",
"criteria_expression",
"study_type",
]
labels = {
"short_description": "Short Description",
"purpose": "Purpose",
"exit_url": "Exit URL",
"criteria": "Participant Eligibility Description",
"contact_info": "Researcher Contact Information",
"public": "Discoverable - List this study on the 'Studies' page once you start it?",
"shared_preview": "Share preview - Allow other Lookit researchers to preview your study and give feedback?",
"study_type": "Experiment Runner Type",
"compensation_description": "Compensation",
"use_generator": "Use protocol generator (advanced)",
}
widgets = {
"short_description": Textarea(attrs={"rows": 2}),
"purpose": Textarea(attrs={"rows": 2}),
"compensation_description": Textarea(attrs={"rows": 2}),
"exit_url": Textarea(attrs={"rows": 1}),
"criteria": Textarea(
attrs={"rows": 1, "placeholder": "For 4-year-olds who love dinosaurs"}
),
"duration": Textarea(attrs={"rows": 1, "placeholder": "15 minutes"}),
"contact_info": Textarea(
attrs={
"rows": 1,
"placeholder": "Jane Smith (contact: jsmith@science.edu)",
}
),
"criteria_expression": Textarea(
attrs={
"rows": 3,
"placeholder": (
"ex: ((deaf OR hearing_impairment) OR NOT speaks_en) "
"AND "
"(age_in_days >= 365 AND age_in_days <= 1095)"
),
}
),
}
help_texts = {
"lab": "Which lab this study will be affiliated with",
"image": "Please keep your file size less than 1 MB",
"exit_url": "Specify the page where you want to send your participants after they've completed the study. (The 'Past studies' page on Lookit is a good default option.)",
"short_description": "Describe what happens during your study here. This should give families a concrete idea of what they will be doing - e.g., reading a story together and answering questions, watching a short video, playing a game about numbers.",
"purpose": "Explain the purpose of your study here. This should address what question this study answers AND why that is an interesting or important question, in layperson-friendly terms.",
"contact_info": "This should give the name of the PI for your study, and an email address where the PI or study staff can be reached with questions. Format: PIs Name (contact: youremail@lab.edu)",
"criteria": "Text shown to families - this is not used to actually verify eligibility.",
"compensation_description": "Provide a description of any compensation for participation, including when and how participants will receive it and any limitations or eligibility criteria (e.g., only one gift card per participant, being in age range for study, child being visible in consent video). Please see the Terms of Use for details on allowable compensation and restrictions. If this field is left blank it will not be displayed to participants.",
"criteria_expression": (
"Provide a relational expression indicating any criteria for eligibility besides the age range specified below."
"For more information on how to structure criteria expressions, please visit our "
f"<a href={CRITERIA_EXPRESSION_HELP_LINK}>documentation</a>."
),
}
class StudyEditForm(StudyForm):
"""Form for editing study"""
def __init__(self, user=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["structure"].help_text = PROTOCOL_HELP_TEXT_EDIT
self.fields["study_type"].help_text = STUDY_TYPE_HELP_TEXT_EDIT
# Restrict ability to edit study lab based on user permissions
can_change_lab = user.has_study_perms(
StudyPermission.CHANGE_STUDY_LAB, self.instance
)
if can_change_lab:
self.fields["lab"].help_text = (
"Which lab this study will be affiliated with. Be careful changing the lab of an existing study: "
"this will affect who can view and edit the study."
)
# Limit labs to change to: current lab, or labs this user is a member of & can create studies in
self.fields["lab"].queryset = Lab.objects.filter(
Q(
id__in=get_objects_for_user(
user,
LabPermission.CREATE_LAB_ASSOCIATED_STUDY.prefixed_codename,
).only("id")
)
| (Q(uuid=self.instance.lab.uuid))
)
else:
# Ensure we display the current lab on the edit form, even if user isn't part of this lab (which
# isn't technically possible the way permissions are set up, but in principle options should be
# current if lab can't be changed, and user's labs otherwise)
self.fields["lab"].queryset = Lab.objects.filter(
uuid=self.instance.lab.uuid
)
self.fields["lab"].disabled = True
class StudyCreateForm(StudyForm):
"""Form for creating a new study"""
def __init__(self, user=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["structure"].help_text = PROTOCOL_HELP_TEXT_INITIAL
self.fields["study_type"].help_text = STUDY_TYPE_HELP_TEXT_INITIAL
# Limit initial lab options to labs this user is a member of & can create studies in
self.fields["lab"].queryset = Lab.objects.filter(
id__in=get_objects_for_user(
user, LabPermission.CREATE_LAB_ASSOCIATED_STUDY.prefixed_codename
).only("id")
)
|
CenterForOpenScience/lookit-api
|
studies/forms.py
|
Python
|
apache-2.0
| 15,530
|
[
"VisIt"
] |
f7fc9dbf40fb28e08972cc38dadb3bf467de5d4146d421408b320cb917b4a0e0
|
# coding=utf-8
"""
Common methods for this project.
"""
from __future__ import print_function, division
# Util Methods #
import argparse
import collections
try:
    from collections.abc import Iterable  # Python 3.3+; required on 3.10+
except ImportError:  # Python 2
    from collections import Iterable
import csv
import difflib
import glob
from datetime import datetime
import re
import shutil
import errno
import fnmatch
from itertools import chain, islice
import math
import numpy as np
import os
from shutil import copy2, Error, copystat
import six
import sys
from contextlib import contextmanager
# Constants #
TPL_IO_ERR_MSG = "Couldn't read template at: '{}'"
MISSING_SEC_HEADER_ERR_MSG = "Configuration files must start with a section header such as '[main]'. Check file: {}"
BACKUP_TS_FMT = "_%Y-%m-%d_%H-%M-%S_%f"
# Boltzmann's Constant in kcal/mol Kelvin
BOLTZ_CONST = 0.0019872041
# Planck's Constant in kcal s / mol
PLANCK_CONST = 9.53707E-14
# Universal gas constant in kcal/mol K
R = 0.001985877534
XYZ_ORIGIN = np.zeros(3)
# Tolerance initially based on the double-precision machine precision (~5e-16 for float64/binary64),
# which was found to be too stringent
TOL = 0.00000000001
# similarly, use this to round away the insignificant digits!
SIG_DECIMALS = 12
# Sections for reading files
SEC_TIMESTEP = 'timestep'
SEC_NUM_ATOMS = 'dump_num_atoms'
SEC_BOX_SIZE = 'dump_box_size'
SEC_ATOMS = 'atoms_section'
# From template files
NUM_ATOMS = 'num_atoms'
HEAD_CONTENT = 'head_content'
ATOMS_CONTENT = 'atoms_content'
TAIL_CONTENT = 'tail_content'
# Lammps-specific sections
MASSES = 'Masses'
PAIR_COEFFS = 'Pair Coeffs'
ATOMS = 'Atoms'
BOND_COEFFS = 'Bond Coeffs'
BONDS = 'Bonds'
ANGLE_COEFFS = 'Angle Coeffs'
ANGLES = 'Angles'
DIHE_COEFFS = 'Dihedral Coeffs'
DIHES = 'Dihedrals'
IMPR_COEFFS = 'Improper Coeffs'
IMPRS = 'Impropers'
LAMMPS_SECTION_NAMES = [MASSES, PAIR_COEFFS, ATOMS, BOND_COEFFS, BONDS, ANGLE_COEFFS, ANGLES,
DIHE_COEFFS, DIHES, IMPR_COEFFS, IMPRS]
# PDB file info
PDB_FORMAT = '{:s}{:s}{:s}{:s}{:4d} {:8.3f}{:8.3f}{:8.3f}{:s}'
PDB_LINE_TYPE_LAST_CHAR = 6
PDB_ATOM_NUM_LAST_CHAR = 11
PDB_ATOM_TYPE_LAST_CHAR = 17
PDB_RES_TYPE_LAST_CHAR = 22
PDB_MOL_NUM_LAST_CHAR = 28
PDB_X_LAST_CHAR = 38
PDB_Y_LAST_CHAR = 46
PDB_Z_LAST_CHAR = 54
# Error Codes
# The good status code
GOOD_RET = 0
INPUT_ERROR = 1
IO_ERROR = 2
INVALID_DATA = 3
PY2 = sys.version_info[0] == 2
# PY3 = sys.version_info[0] == 3
# Exceptions #
class MdError(Exception):
pass
class InvalidDataError(MdError):
pass
class NotFoundError(MdError):
pass
class ArgumentParserError(Exception):
pass
class TemplateNotReadableError(Exception):
pass
class ThrowingArgumentParser(argparse.ArgumentParser):
def error(self, message):
raise ArgumentParserError(message)
def warning(*objs):
"""Writes a message to stderr."""
print("WARNING: ", *objs, file=sys.stderr)
# Test utilities
# From http://schinckel.net/2013/04/15/capture-and-test-sys.stdout-sys.stderr-in-unittest.testcase/
@contextmanager
def capture_stdout(command, *args, **kwargs):
# pycharm doesn't know six very well, so ignore the false warning
# noinspection PyCallingNonCallable
out, sys.stdout = sys.stdout, six.StringIO()
command(*args, **kwargs)
sys.stdout.seek(0)
yield sys.stdout.read()
sys.stdout = out
@contextmanager
def capture_stderr(command, *args, **kwargs):
# pycharm doesn't know six very well, so ignore the false warning
# noinspection PyCallingNonCallable
err, sys.stderr = sys.stderr, six.StringIO()
command(*args, **kwargs)
sys.stderr.seek(0)
yield sys.stderr.read()
sys.stderr = err
# Calculations #
def calc_kbt(temp_k):
"""
Returns the given temperature in Kelvin multiplied by Boltzmann's Constant.
@param temp_k: A temperature in Kelvin.
@return: The given temperature in Kelvin multiplied by Boltzmann's Constant.
"""
return BOLTZ_CONST * temp_k
def calc_k(temp, delta_gibbs):
"""
Returns the rate coefficient calculated from Transition State Theory in inverse seconds
@param temp: the temperature in Kelvin
@param delta_gibbs: the change in Gibbs free energy in kcal/mol
@return: rate coefficient in inverse seconds
"""
return BOLTZ_CONST * temp / PLANCK_CONST * math.exp(-delta_gibbs / (R * temp))
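# Illustrative sketch (not part of the original module): using calc_kbt and calc_k
# together. The temperature and barrier height below are arbitrary demo values.
def _example_rate_coefficient():
    temp_k = 300.0       # Kelvin
    delta_gibbs = 15.0   # kcal/mol, a hypothetical activation free energy
    print("kBT at {} K: {:.4f} kcal/mol".format(temp_k, calc_kbt(temp_k)))
    print("TST rate coefficient: {:.4e} 1/s".format(calc_k(temp_k, delta_gibbs)))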
def pbc_dist(a, b, box):
# TODO: make a test that ensures the distance calculated is <= sqrt(sqrt((a/2)^2+(b/2)^2) + (c/2)^2)) ?
return np.linalg.norm(pbc_calc_vector(a, b, box))
def pbc_calc_vector(a, b, box):
"""
Finds the minimum-image vector between two points
@param a: xyz coords 1
@param b: xyz coords 2
@param box: vector with PBC box dimensions
@return: returns the vector a - b
"""
vec = np.subtract(a, b)
return vec - np.multiply(box, np.asarray(list(map(round, vec / box))))
def first_pbc_image(xyz_coords, box):
"""
Moves xyz coords to the first PBC image, centered at the origin
@param xyz_coords: coordinates to center (move to first image)
@param box: PBC box dimensions
@return: xyz coords (np array) moved to the first image
"""
return pbc_calc_vector(xyz_coords, XYZ_ORIGIN, box)
def pbc_vector_avg(a, b, box):
diff = pbc_calc_vector(a, b, box)
mid_pt = np.add(b, np.divide(diff, 2.0))
# mid-point may not be in the first periodic image. Make it so by getting its difference from the origin
return pbc_calc_vector(mid_pt, np.zeros(len(mid_pt)), box)
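# Illustrative sketch (not part of the original module): minimum-image behavior of the
# PBC helpers above in a hypothetical 10 x 10 x 10 box. Points near opposite faces are
# closer through the periodic boundary than through the box interior.
def _example_pbc():
    box = np.asarray([10.0, 10.0, 10.0])
    a = np.asarray([9.5, 0.0, 0.0])
    b = np.asarray([0.5, 0.0, 0.0])
    print(pbc_calc_vector(a, b, box))  # [-1.  0.  0.], not [9.  0.  0.]
    print(pbc_dist(a, b, box))         # 1.0
    print(pbc_vector_avg(a, b, box))   # [0.  0.  0.]: the midpoint through the boundary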
def unit_vector(vector):
""" Returns the unit vector of the vector.
http://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python
"""
return vector / np.linalg.norm(vector)
def vec_angle(vec_1, vec_2):
"""
Calculates the angle between two vectors, e.g. (p2 - p1) and (p0 - p1) for the angle at p1
Note: assumes the vector calculation accounted for the PBC
@param vec_1: the first vector (xyz)
@param vec_2: the second vector (xyz)
@return: the angle between the vectors, in degrees
"""
unit_vec_1 = unit_vector(vec_1)
unit_vec_2 = unit_vector(vec_2)
return np.rad2deg(np.arccos(np.clip(np.dot(unit_vec_1, unit_vec_2), -1.0, 1.0)))
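# Illustrative sketch (not part of the original module): the angle between the x and y
# unit vectors is 90 degrees.
def _example_vec_angle():
    print(vec_angle(np.asarray([1.0, 0.0, 0.0]), np.asarray([0.0, 1.0, 0.0])))  # 90.0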
def vec_dihedral(vec_ba, vec_bc, vec_cd):
"""
calculates the dihedral angle from the vectors b --> a, b --> c, c --> d
where a, b, c, and d are the four points
From:
http://stackoverflow.com/questions/20305272/
dihedral-torsion-angle-from-four-points-in-cartesian-coordinates-in-python
Khouli formula
1 sqrt, 1 cross product
@param vec_ba: the vector connecting points b --> a, accounting for pbc
@param vec_bc: b --> c
@param vec_cd: c --> d
@return: dihedral angle in degrees
"""
# normalize b1 so that it does not influence magnitude of vector
# rejections that come next
vec_bc = unit_vector(vec_bc)
# vector rejections
# v = projection of b0 onto plane perpendicular to b1
# = b0 minus component that aligns with b1
# w = projection of b2 onto plane perpendicular to b1
# = b2 minus component that aligns with b1
v = vec_ba - np.dot(vec_ba, vec_bc) * vec_bc
w = vec_cd - np.dot(vec_cd, vec_bc) * vec_bc
# angle between v and w in a plane is the torsion angle
# v and w may not be normalized but that's fine since tan is y/x
x = np.dot(v, w)
y = np.dot(np.cross(vec_bc, v), w)
return np.degrees(np.arctan2(y, x))
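# Illustrative sketch (not part of the original module): a dihedral from four hypothetical
# points a, b, c, d. In real use the three vectors should come from pbc_calc_vector so the
# periodic box is honored; plain subtraction is used here for brevity.
def _example_vec_dihedral():
    a, b = np.asarray([1.0, 0.0, 0.0]), np.asarray([0.0, 0.0, 0.0])
    c, d = np.asarray([0.0, 1.0, 0.0]), np.asarray([0.0, 1.0, 1.0])
    print(vec_dihedral(a - b, c - b, d - c))  # -90.0 for this geometry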
# Other #
def chunk(seq, chunk_size, process=iter):
"""Yields items from an iterator in iterable chunks.
From https://gist.github.com/ksamuel/1275417
@param seq: The sequence to chunk.
@param chunk_size: The size of the returned chunks.
@param process: The function to use for creating the iterator. Useful for iterating over different
data structures.
@return: Chunks of the given size from the given sequence.
"""
it = iter(seq)
while True:
    try:
        first = six.next(it)
    except StopIteration:
        return  # PEP 479: end the generator explicitly once the source is exhausted
    yield process(chain([first], islice(it, chunk_size - 1)))
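# Illustrative sketch (not part of the original module): break a range into three-item
# chunks; process=list makes each lazily-built chunk concrete before the next is started.
def _example_chunk():
    for piece in chunk(range(7), 3, process=list):
        print(piece)  # [0, 1, 2] then [3, 4, 5] then [6]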
# I/O #
def read_tpl(tpl_loc):
"""Attempts to read the given template location and throws A
TemplateNotReadableError if it can't read the given location.
:param tpl_loc: The template location to read.
:raise TemplateNotReadableError: If there is an IOError reading the location.
"""
try:
return file_to_str(tpl_loc)
except IOError:
raise TemplateNotReadableError(TPL_IO_ERR_MSG.format(tpl_loc))
def make_dir(tgt_dir):
"""
Creates the given directory and its parent directories if it
does not already exist.
Keyword arguments:
tgt_dir -- The directory to create
"""
if not os.path.exists(tgt_dir):
os.makedirs(tgt_dir)
elif not os.path.isdir(tgt_dir):
raise NotFoundError("Resource {} exists and is not a dir".format(tgt_dir))
def file_to_str(fname):
"""
Reads and returns the contents of the given file.
@param fname: The location of the file to read.
@return: The contents of the given file.
:raises: IOError if the file can't be opened for reading.
"""
with open(fname) as f:
return f.read()
def file_rows_to_list(c_file):
"""
Given the name of a file, returns a list of its rows, after filtering out empty rows
@param c_file: file location
@return: list of non-empty rows
"""
with open(c_file) as f:
row_list = [row.strip() for row in f.readlines()]
return list(filter(None, row_list))
def str_to_file(str_val, fname, mode='w', print_info=False):
"""
Writes the string to the given file.
@param str_val: The string to write.
@param fname: The location of the file to write
@param mode: default mode is to overwrite file
@param print_info: boolean to specify whether to print action to stdout
"""
with open(fname, mode) as f:
f.write(str_val)
if print_info:
print("Wrote file: {}".format(fname))
def round_to_print(val):
"""
To remove floating point digits that are imprecise due to machine precision
@param val: a float
@return: a float without insignificant digits
"""
return round(val, SIG_DECIMALS)
def np_float_array_from_file(data_file, delimiter=" ", header=False, gather_hist=False):
"""
Adds to the basic np.genfromtxt by performing data checks.
@param data_file: file expected to have space-separated values, with the same number of entries per row
@param delimiter: default is a space-separated file
@param header: default is no header; alternately, specify number of header lines
@param gather_hist: default is false; gather data to make histogram of non-numerical data
@return: a numpy array (or InvalidDataError if unsuccessful), the header_row (None if none specified),
    and a dict of histogram data (empty unless gather_hist is specified)
"""
header_row = None
hist_data = {}
with open(data_file) as csv_file:
csv_list = list(csv.reader(csv_file, delimiter=delimiter))
if header:
header_row = csv_list[0]
try:
data_array = np.genfromtxt(data_file, dtype=np.float64, delimiter=delimiter, skip_header=header)
except ValueError:
data_array = None
line_len = None
if header:
first_line = 1
else:
first_line = 0
for row in csv_list[first_line:]:
if len(row) == 0:
continue
s_len = len(row)
if line_len is None:
line_len = s_len
elif s_len != line_len:
raise InvalidDataError('File could not be read as an array of floats: {}\n Expected '
'values separated by "{}" with an equal number of columns per row.\n'
' However, found {} values on the first data row'
' and {} values on the later row: "{}")'
''.format(data_file, delimiter, line_len, s_len, row))
data_vector = np.empty([line_len], dtype=np.float64)
for col in range(line_len):
try:
data_vector[col] = float(row[col])
except ValueError:
data_vector[col] = np.nan
if gather_hist:
col_key = str(row[col])
if col in hist_data:
if col_key in hist_data[col]:
hist_data[col][col_key] += 1
else:
hist_data[col][col_key] = 1
else:
hist_data[col] = {col_key: 1}
if data_array is None:
data_array = np.copy(data_vector)
else:
data_array = np.vstack((data_array, data_vector))
if len(data_array.shape) == 1:
raise InvalidDataError("File contains a vector, not an array of floats: {}\n".format(data_file))
if np.isnan(data_array).any():
warning("Encountered entry (or entries) which could not be converted to a float. "
"'nan' will be returned for the stats for that column.")
return data_array, header_row, hist_data
def read_csv_to_list(data_file, delimiter=',', header=False):
"""
Reads a file of values; np.loadtxt is not used because rows can mix floats and strings
@param data_file: name of delimiter-separated file with the same number of entries per row
@param delimiter: string: delimiter between column values
@param header: boolean to denote if file contains a header
@return: a list containing the data (removing header row, if one is specified) and a list containing the
header row (empty if no header row specified)
"""
with open(data_file) as csv_file:
csv_list = list(csv.reader(csv_file, delimiter=delimiter, quoting=csv.QUOTE_NONNUMERIC))
header_row = []
if header:
first_line = 1
header_row = csv_list[0]
else:
first_line = 0
return csv_list[first_line:], header_row
def create_backup_filename(orig):
base, ext = os.path.splitext(orig)
now = datetime.now()
return "".join((base, now.strftime(BACKUP_TS_FMT), ext))
def find_backup_filenames(orig):
base, ext = os.path.splitext(orig)
found = glob.glob(base + "*" + ext)
try:
found.remove(orig)
except ValueError:
# Original not present; ignore.
pass
return found
def silent_remove(filename, disable=False):
"""
Removes the target file name, catching and ignoring errors that indicate that the
file does not exist.
@param filename: The file to remove.
@param disable: boolean to flag if want to disable removal
"""
if not disable:
try:
os.remove(filename)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def allow_write(f_loc, overwrite=False):
"""
Returns whether to allow writing to the given location.
@param f_loc: The location to check.
@param overwrite: Whether to allow overwriting an existing location.
@return: Whether to allow writing to the given location.
"""
if os.path.exists(f_loc) and not overwrite:
warning("Not overwriting existing file '{}'".format(f_loc))
return False
return True
def move_existing_file(f_loc):
"""
Renames an existing file using a timestamp based on the move time.
@param f_loc: The location to check.
"""
if os.path.exists(f_loc):
shutil.move(f_loc, create_backup_filename(f_loc))
def get_fname_root(src_file):
"""
@param src_file:
@return: the file root name (no directory, no extension)
"""
return os.path.splitext(os.path.basename(src_file))[0]
def create_out_fname(src_file, prefix='', suffix='', remove_prefix=None, base_dir=None, ext=None):
"""Creates an outfile name for the given source file.
@param remove_prefix: string to remove at the beginning of file name
@param src_file: The file to process.
@param prefix: The file prefix to add, if specified.
@param suffix: The file suffix to append, if specified.
@param base_dir: The base directory to use; defaults to `src_file`'s directory.
@param ext: The extension to use instead of the source file's extension;
defaults to the `scr_file`'s extension.
@return: The output file name.
"""
if base_dir is None:
base_dir = os.path.dirname(src_file)
file_name = os.path.basename(src_file)
if remove_prefix is not None and file_name.startswith(remove_prefix):
    # strip both the prefix and the extension, so the extension is not duplicated below
    base_name = os.path.splitext(file_name[len(remove_prefix):])[0]
else:
base_name = os.path.splitext(file_name)[0]
if ext is None:
ext = os.path.splitext(file_name)[1]
return os.path.abspath(os.path.join(base_dir, prefix + base_name + suffix + ext))
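# Illustrative sketch (not part of the original module): deriving output names from a
# hypothetical input path; nothing is written to disk.
def _example_create_out_fname():
    # '/tmp/data/run1_stats.csv' on a POSIX system
    print(create_out_fname('/tmp/data/run1.csv', suffix='_stats'))
    # '/tmp/out/run1.txt': redirect the directory and swap the extension
    print(create_out_fname('/tmp/data/run1.csv', base_dir='/tmp/out', ext='.txt'))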
def find_files_by_dir(tgt_dir, pat):
"""Recursively searches the target directory tree for files matching the given pattern.
The results are returned as a dict with a list of found files keyed by the absolute
directory name.
@param tgt_dir: The target base directory.
@param pat: The file pattern to search for.
@return: A dict where absolute directory names are keys for lists of found file names
that match the given pattern.
"""
match_dirs = {}
for root, dirs, files in os.walk(tgt_dir):
matches = [match for match in files if fnmatch.fnmatch(match, pat)]
if matches:
match_dirs[os.path.abspath(root)] = matches
return match_dirs
def copytree(src, dst, symlinks=False, ignore=None):
"""This is a copy of the standard Python shutil.copytree, but it
allows for an existing destination directory.
Recursively copy a directory tree using copy2().
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
XXX Consider this example code rather than the ultimate tool.
@param src: The source directory.
@param dst: The destination directory.
@param symlinks: Whether to recreate symbolic links in the destination rather than copying their targets.
@param ignore: A callable for items to ignore at a given level.
"""
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
if not os.path.exists(dst):
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
src_name = os.path.join(src, name)
dst_name = os.path.join(dst, name)
try:
if symlinks and os.path.islink(src_name):
link_to = os.readlink(src_name)
os.symlink(link_to, dst_name)
elif os.path.isdir(src_name):
copytree(src_name, dst_name, symlinks, ignore)
else:
# Will raise a SpecialFileError for unsupported file types
copy2(src_name, dst_name)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error as err:
errors.extend(err.args[0])
except EnvironmentError as why:
errors.append((src_name, dst_name, str(why)))
try:
copystat(src, dst)
except OSError as why:
# can't copy file access times on Windows
# noinspection PyUnresolvedReferences
if getattr(why, 'winerror', None) is None:
    errors.append((src, dst, str(why)))
if errors:
raise Error(errors)
# CSV #
def read_csv_header(src_file):
"""Returns a list containing the values from the first row of the given CSV
file or None if the file is empty.
@param src_file: The CSV file to read.
@return: The first row or None if empty.
"""
with open(src_file) as csv_file:
for row in csv.reader(csv_file):
return list(row)
def convert_dict_line(all_conv, data_conv, line):
s_dict = {}
for s_key, s_val in line.items():
if data_conv and s_key in data_conv:
try:
s_dict[s_key] = data_conv[s_key](s_val)
except ValueError as e:
warning("Could not convert value '{}' from column '{}': '{}'. Leaving as str".format(s_val, s_key, e))
s_dict[s_key] = s_val
elif all_conv:
try:
s_dict[s_key] = all_conv(s_val)
except ValueError as e:
warning("Could not convert value '{}' from column '{}': '{}'. Leaving as str".format(s_val, s_key, e))
s_dict[s_key] = s_val
else:
s_dict[s_key] = s_val
return s_dict
def read_csv(src_file, data_conv=None, all_conv=None, quote_style=csv.QUOTE_MINIMAL):
"""
Reads the given CSV (comma-separated with a first-line header row) and returns a list of
dicts where each dict contains a row's data keyed by the header row.
@param src_file: The CSV to read.
@param data_conv: A map of header keys to conversion functions. Note that values
that throw a TypeError from an attempted conversion are left as strings in the result.
@param all_conv: A function to apply to all values in the CSV. A specified data_conv value
takes precedence.
@param quote_style: the csv quoting style to use when reading
@return: A list of dicts containing the file's data.
"""
result = []
with open(src_file) as csv_file:
csv_reader = csv.DictReader(csv_file, quoting=quote_style)
for line in csv_reader:
result.append(convert_dict_line(all_conv, data_conv, line))
return result
def read_csv_to_dict(src_file, col_name, data_conv=None, all_conv=None):
"""
Reads the given CSV (comma-separated with a first-line header row) and returns a
dict of dicts indexed on the given col_name. Each dict contains a row's data keyed by the header row.
@param src_file: The CSV to read.
@param col_name: the name of the column to index on
@param data_conv: A map of header keys to conversion functions. Note that values
that throw a TypeError from an attempted conversion are left as strings in the result.
@param all_conv: A function to apply to all values in the CSV. A specified data_conv value
takes precedence.
@return: A list of dicts containing the file's data.
"""
result = {}
with open(src_file) as csv_file:
try:
    csv_reader = csv.DictReader(csv_file, quoting=csv.QUOTE_NONNUMERIC)
    create_dict(all_conv, col_name, csv_reader, data_conv, result, src_file)
except ValueError:
    # fall back to reading all values as strings; rewind past any rows already consumed
    csv_file.seek(0)
    result.clear()
    csv_reader = csv.DictReader(csv_file)
    create_dict(all_conv, col_name, csv_reader, data_conv, result, src_file)
return result
def create_dict(all_conv, col_name, csv_reader, data_conv, result, src_file):
for line in csv_reader:
val = convert_dict_line(all_conv, data_conv, line)
if col_name in val:
try:
col_val = int(val[col_name])
except ValueError:
col_val = val[col_name]
if col_val in result:
warning("Duplicate values found for {}. Value for key will be overwritten.".format(col_val))
result[col_val] = val  # reuse the already-converted line rather than converting it twice
else:
raise InvalidDataError("Could not find value for {} in file {} on line {}."
"".format(col_name, src_file, line))
def write_csv(data, out_fname, fieldnames, extrasaction="raise", mode='w', quote_style=csv.QUOTE_NONNUMERIC,
print_message=True, round_digits=False):
"""
Writes the given data to the given file location.
@param round_digits: if desired, provide decimal number for rounding
@param data: The data to write (list of dicts).
@param out_fname: The name of the file to write to.
@param fieldnames: The sequence of field names to use for the header.
@param extrasaction: What to do when there are extra keys. Acceptable
values are "raise" or "ignore".
@param mode: default mode is to overwrite file
@param print_message: boolean to flag whether to note that file written or appended
@param quote_style: dictates csv output style
"""
with open(out_fname, mode) as csv_file:
writer = csv.DictWriter(csv_file, fieldnames, extrasaction=extrasaction, quoting=quote_style)
if mode == 'w':
writer.writeheader()
if round_digits:
for row_id in range(len(data)):
new_dict = {}
for key, val in data[row_id].items():
if isinstance(val, float):
new_dict[key] = round(val, round_digits)
else:
new_dict[key] = val
data[row_id] = new_dict
writer.writerows(data)
if print_message:
if mode == 'a':
print(" Appended: {}".format(out_fname))
elif mode == 'w':
print("Wrote file: {}".format(out_fname))
def list_to_csv(data, out_fname, delimiter=',', mode='w', quote_style=csv.QUOTE_NONNUMERIC,
print_message=True, round_digits=False):
"""
Writes the given data to the given file location.
@param data: The data to write (list of lists).
@param out_fname: The name of the file to write to.
@param delimiter: string
@param mode: default mode is to overwrite file
@param quote_style: csv quoting style
@param print_message: boolean to allow update
@param round_digits: False, or an integer number of decimals to round float output to
"""
with open(out_fname, mode) as csv_file:
writer = csv.writer(csv_file, delimiter=delimiter, quoting=quote_style)
if round_digits:
for row_id in range(len(data)):
new_row = []
for val in data[row_id]:
if isinstance(val, float):
new_row.append(round(val, round_digits))
else:
new_row.append(val)
data[row_id] = new_row
writer.writerows(data)
if print_message:
print("Wrote file: {}".format(out_fname))
# Other input/output files
def read_csv_dict(d_file, ints=True, one_to_one=True, pdb_dict=False, str_float=False):
"""
If a dictionary file is given, read it and return the dict[old]=new.
Checks that all keys are unique.
If one_to_one=True, checks that there is a 1:1 mapping of keys and values.
@param d_file: the file with csv of old_id,new_id
@param ints: boolean to indicate if the values are to be read as integers
@param one_to_one: flag to check for one-to-one mapping in the dict
@param pdb_dict: flag to format as required for the PDB output
@param str_float: indicates dictionary is a string followed by a float
@return: new_dict
"""
new_dict = {}
if pdb_dict:
ints = False
one_to_one = False
elif str_float:
ints = False
one_to_one = False
pdb_dict = False
# If d_file is None, return the empty dictionary, as no dictionary file was specified
if d_file is not None:
with open(d_file) as csv_file:
reader = csv.reader(csv_file)
key_count = 0
for row in reader:
if len(row) == 0:
continue
if len(row) == 2:
if pdb_dict:
atom_type = row[0].strip()
type_len = len(atom_type)
element_type = row[1].strip()
if len(element_type) > 2 or type_len > 4:
raise InvalidDataError("Error reading line '{}' in file: {}\n "
"Expected to read atom_type,element_type, with atom type no more "
"than 4 characters and element_type no more than 2."
"".format(row, d_file))
if type_len == 4:
atom_type = ' {:s} '.format(atom_type)
else:
atom_type = ' {:4s}'.format(atom_type)
new_dict[atom_type] = '{:>2s}'.format(element_type)
elif ints:
new_dict[int(row[0])] = int(row[1])
elif str_float:
new_dict[row[0]] = float(row[1])
else:
new_dict[row[0]] = row[1]
key_count += 1
else:
raise InvalidDataError("Error reading line '{}' in file: {}\n"
" Expected exactly two comma-separated values per row."
"".format(row, d_file))
if key_count == len(new_dict):
if one_to_one:
for key in new_dict:
if not (key in new_dict.values()):
raise InvalidDataError('Did not find a 1:1 mapping of key,val ids in {}'.format(d_file))
else:
raise InvalidDataError('A non-unique key value (first column) found in file: {}\n'.format(d_file))
return new_dict
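# Illustrative sketch (not part of the original module): write a small two-column mapping
# to a temporary file and read it back as an int-to-int dict; the values are demo data.
def _example_read_csv_dict():
    import tempfile  # not in the module imports; needed only for this demo
    tmp_fd, tmp_name = tempfile.mkstemp(suffix='.csv')
    os.close(tmp_fd)
    str_to_file("1,11\n2,22\n11,1\n22,2\n", tmp_name)
    try:
        # the default one_to_one check passes because keys and values form the same set
        print(read_csv_dict(tmp_name))  # {1: 11, 2: 22, 11: 1, 22: 2}
    finally:
        silent_remove(tmp_name)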
def create_element_dict(dict_file, pdb_dict=True, one_to_one=False):
# This is used when need to add atom types to PDB file
element_dict = {}
if dict_file is not None:
return read_csv_dict(dict_file, pdb_dict=pdb_dict, ints=False, one_to_one=one_to_one)
return element_dict
def list_to_file(list_to_print, fname, list_format=None, delimiter=' ', mode='w', print_message=True):
"""
Writes the list of sequences to the given file in the specified format for a PDB.
@param list_to_print: A list of lines to print. The list may be a list of lists, list of strings, or a mixture.
@param fname: The location of the file to write.
@param list_format: Specified formatting for the line if the line is list.
@param delimiter: If no format is given and the list contains lists, the delimiter will join items in the list.
@param print_message: boolean to determine whether to write to output if the file is printed or appended
@param mode: write by default; can be changed to allow appending to file.
"""
with open(fname, mode) as w_file:
for line in list_to_print:
if isinstance(line, six.string_types):
w_file.write(line + '\n')
elif isinstance(line, Iterable):
if list_format is None:
w_file.write(delimiter.join(map(str, line)) + "\n")
else:
w_file.write(list_format.format(*line) + '\n')
if print_message:
if mode == 'w':
print("Wrote file: {}".format(fname))
elif mode == 'a':
print(" Appended: {}".format(fname))
def print_qm_kind(int_list, element_name, fname, mode='w'):
"""
Writes the list to the given file, formatted for CP2K to read as qm atom indices.
@param int_list: The list to write.
@param element_name: element type to designate
@param fname: The location of the file to write.
@param mode: default is to write to a new file. Use option to designate to append to existing file.
"""
with open(fname, mode) as m_file:
m_file.write(' &QM_KIND {}\n'.format(element_name))
m_file.write(' MM_INDEX {}\n'.format(' '.join(map(str, int_list))))
m_file.write(' &END QM_KIND\n')
if mode == 'w':
print("Wrote file: {}".format(fname))
def print_mm_kind(atom_type, radius, fname, mode='w'):
"""
Writes the list to the given file, formatted for CP2K to read as qm atom indices.
@param atom_type: (str) MM atom type
@param radius: radius to list for covalent radius (smoothing point charge)
@param fname: The location of the file to write.
@param mode: default is to write to a new file. Use option to designate to append to existing file.
"""
with open(fname, mode) as m_file:
m_file.write(' &MM_KIND {}\n'.format(atom_type))
m_file.write(' RADIUS {}\n'.format(radius))
m_file.write(' &END MM_KIND\n')
if mode == 'w':
print("Wrote file: {}".format(fname))
def print_qm_links(c_alpha_dict, c_beta_dict, f_name, mode="w"):
"""
Note: this needs to be tested. Only ran once to get the protein residues set up correctly.
@param c_alpha_dict: dict of protein residue to be broken to c_alpha atom id
@param c_beta_dict: as above, but for c_beta
@param f_name: The location of the file to write.
@param mode: default is to write to a new file. Use option to designate to append to existing file.
"""
with open(f_name, mode) as m_file:
for resid in c_beta_dict:
m_file.write(' !! Break resid {} between CA and CB, and cap CB with hydrogen\n'
' &LINK\n MM_INDEX {} !! CA\n QM_INDEX {} !! CB\n'
' LINK_TYPE IMOMM\n ALPHA_IMOMM 1.5\n'
' &END LINK\n'.format(resid, c_alpha_dict[resid], c_beta_dict[resid]))
if mode == 'w':
print("Wrote file: {}".format(f_name))
# Conversions #
def to_int_list(raw_val):
return_vals = []
for val in raw_val.split(','):
return_vals.append(int(val.strip()))
return return_vals
def to_list(raw_val):
return_vals = []
for val in raw_val.split(','):
return_vals.append(val.strip())
return return_vals
def str_to_bool(s):
"""
Basic converter for Python boolean values written as a str.
@param s: The value to convert.
@return: The boolean value of the given string.
@raises: ValueError if the string value cannot be converted.
"""
if s == 'True':
return True
elif s == 'False':
return False
else:
raise ValueError("Cannot covert {} to a bool".format(s))
def fmt_row_data(raw_data, fmt_str):
""" Formats the values in the dicts in the given list of raw data using
the given format string.
*This may not be needed at all*
Now that I'm using csv.QUOTE_NONNUMERIC, generally don't want to format floats to strings
@param raw_data: The list of dicts to format.
@param fmt_str: The format string to use when formatting.
@return: The formatted list of dicts.
"""
fmt_rows = []
for row in raw_data:
fmt_row = {}
for key, raw_val in row.items():
fmt_row[key] = fmt_str.format(raw_val)
fmt_rows.append(fmt_row)
return fmt_rows
def conv_raw_val(param, def_val, int_list=True):
"""
Converts the given parameter into the given type (default returns the raw value). Returns the default value
if the param is None.
@param param: The value to convert.
@param def_val: The value that determines the type to target.
@param int_list: flag to specify if lists should be converted to a list of integers
@return: The converted parameter value.
"""
if param is None:
return def_val
if isinstance(def_val, bool):
if param in ['T', 't', 'true', 'TRUE', 'True']:
return True
else:
return False
if isinstance(def_val, int):
return int(param)
if isinstance(def_val, float):
return float(param)
if isinstance(def_val, list):
if int_list:
return to_int_list(param)
else:
return to_list(param)
return param
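# Illustrative sketch (not part of the original module): the default value's type drives
# the conversion of the raw (string) parameter.
def _example_conv_raw_val():
    print(conv_raw_val('3.5', 0.0))      # 3.5, a float, because the default is a float
    print(conv_raw_val('true', False))   # True
    print(conv_raw_val('1, 2, 3', []))   # [1, 2, 3], ints because int_list defaults to True
    print(conv_raw_val(None, 42))        # 42: None falls back to the default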
def process_cfg(raw_cfg, def_cfg_vals=None, req_keys=None, int_list=True):
"""
Converts the given raw configuration, filling in defaults and converting the specified value (if any) to the
default value's type.
@param raw_cfg: The configuration map.
@param def_cfg_vals: dictionary of default values
@param req_keys: dictionary of required types
@param int_list: flag to specify if lists should be converted to a list of integers
@return: The processed configuration.
"""
if def_cfg_vals is None:
    def_cfg_vals = {}
if req_keys is None:
    req_keys = {}
proc_cfg = {}
for key in raw_cfg:
    if not (key in def_cfg_vals or key in req_keys):
        raise InvalidDataError("Unexpected key '{}' in configuration ('ini') file.".format(key))
key = None
try:
for key, def_val in def_cfg_vals.items():
proc_cfg[key] = conv_raw_val(raw_cfg.get(key), def_val, int_list)
for key, type_func in req_keys.items():
proc_cfg[key] = type_func(raw_cfg[key])
except KeyError as e:
raise KeyError("Missing config val for key '{}'".format(key, e))
except Exception as e:
raise InvalidDataError('Problem with config vals on key {}: {}'.format(key, e))
return proc_cfg
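# Illustrative sketch (not part of the original module): filling defaults and converting
# required keys for a hypothetical raw config as read from an ini file.
def _example_process_cfg():
    raw = {'out_dir': '/tmp/out', 'num_steps': '100'}
    defaults = {'out_dir': './', 'verbose': False}
    required = {'num_steps': int}
    print(process_cfg(raw, def_cfg_vals=defaults, req_keys=required))
    # {'out_dir': '/tmp/out', 'verbose': False, 'num_steps': 100}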
def dequote(s):
"""
from: http://stackoverflow.com/questions/3085382/python-how-can-i-strip-first-and-last-double-quotes
If a string has single or double quotes around it, remove them.
Make sure the pair of quotes match.
If a matching pair of quotes is not found, return the string unchanged.
"""
if isinstance(s, str) and len(s) > 0:
if (s[0] == s[-1]) and s.startswith(("'", '"')):
return s[1:-1]
return s
def quote(s):
"""
Converts a variable into a quoted string
"""
if (s[0] == s[-1]) and s.startswith(("'", '"')):
return str(s)
return '"' + str(s) + '"'
def single_quote(s):
"""
Converts a variable into a quoted string
"""
if s[0] == s[-1]:
if s.startswith("'"):
return str(s)
elif s.startswith('"'):
s = dequote(s)
return "'" + str(s) + "'"
# Comparisons #
def conv_num(s):
try:
return int(s)
except ValueError:
try:
return float(s)
except ValueError:
return s
def diff_lines(floc1, floc2, delimiter=","):
"""
Determines whether all the lines in two files are equal.
This function became complicated because of edge cases:
we do not want to flag files as different if the only difference is due to machine-precision diffs of floats.
Thus, if the files are not immediately found to be the same,
test whether each differing line is a csv that has floats whose difference is due to machine precision.
Be careful if one value is a np.nan, but not the other (the diff evaluates to zero).
If the differences are real, return all lines with differences.
@param floc1: file location 1
@param floc2: file location 2
@param delimiter: defaults to CSV
@return: a list of the lines with differences
"""
diff_lines_list = []
# Save diffs to strings to be converted to use csv parser
output_plus = ""
output_neg = ""
with open(floc1, 'r') as file1:
with open(floc2, 'r') as file2:
diff = list(difflib.ndiff(file1.read().splitlines(), file2.read().splitlines()))
for line in diff:
if line.startswith('-') or line.startswith('+'):
diff_lines_list.append(line)
if line.startswith('-'):
output_neg += line[2:]+'\n'
elif line.startswith('+'):
output_plus += line[2:]+'\n'
if len(diff_lines_list) == 0:
return diff_lines_list
warning("Checking for differences between files {} {}".format(floc1, floc2))
try:
# take care of parentheses
for char in ('(', ')', '[', ']'):
output_plus = output_plus.replace(char, delimiter)
output_neg = output_neg.replace(char, delimiter)
# pycharm doesn't know six very well
# noinspection PyCallingNonCallable
diff_plus_lines = list(csv.reader(six.StringIO(output_plus), delimiter=delimiter, quoting=csv.QUOTE_NONNUMERIC))
# noinspection PyCallingNonCallable
diff_neg_lines = list(csv.reader(six.StringIO(output_neg), delimiter=delimiter, quoting=csv.QUOTE_NONNUMERIC))
except ValueError:
diff_plus_lines = output_plus.split('\n')
diff_neg_lines = output_neg.split('\n')
for diff_list in [diff_plus_lines, diff_neg_lines]:
for line_id in range(len(diff_list)):
diff_list[line_id] = [x.strip() for x in diff_list[line_id].split(delimiter)]
if len(diff_plus_lines) == len(diff_neg_lines):
# if the same number of lines, there is a chance that the difference is only due to difference in
# floating point precision. Check each value of the line, split on whitespace or comma
diff_lines_list = []
for line_plus, line_neg in zip(diff_plus_lines, diff_neg_lines):
if len(line_plus) == len(line_neg):
# print("Checking for differences between: ", line_neg, line_plus)
for item_plus, item_neg in zip(line_plus, line_neg):
try:
item_plus = float(item_plus)
item_neg = float(item_neg)
# if difference greater than the tolerance, the difference is not just precision
# Note: if only one value is nan, the float diff is zero!
# Thus, check for diffs only if neither are nan; show different if only one is nan
diff_vals = False
if np.isnan(item_neg) != np.isnan(item_plus):
diff_vals = True
warning("Comparing '{}' to '{}'.".format(item_plus, item_neg))
elif not (np.isnan(item_neg) and np.isnan(item_plus)):
# noinspection PyTypeChecker
if not np.isclose(item_neg, item_plus, TOL):
diff_vals = True
warning("Values {} and {} differ.".format(item_plus, item_neg))
if diff_vals:
diff_lines_list.append("- " + " ".join(map(str, line_neg)))
diff_lines_list.append("+ " + " ".join(map(str, line_plus)))
break
except ValueError:
# not floats, so the difference is not just precision
if item_plus != item_neg:
diff_lines_list.append("- " + " ".join(map(str, line_neg)))
diff_lines_list.append("+ " + " ".join(map(str, line_plus)))
break
# Not the same number of items in the lines
else:
diff_lines_list.append("- " + " ".join(map(str, line_neg)))
diff_lines_list.append("+ " + " ".join(map(str, line_plus)))
return diff_lines_list
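# Editor's note: a hedged sketch of diff_lines on two hypothetical csv files;
# the module-level TOL (defined elsewhere in this file) governs whether float
# differences count as real. A near-identical pair like "1.00000001,2.0" vs
# "1.00000002,2.0" comes back as an empty diff list when within TOL.
# >>> diff_lines('run1.csv', 'run2.csv')   # doctest: +SKIP
# []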
# Data Structures #
def unique_list(a_list):
""" Creates an ordered list from a list of tuples or other hashable items.
From https://code.activestate.com/recipes/576694/#c6
"""
m_map = {}
o_set = []
for item in a_list:
if item not in m_map:
m_map[item] = 1
o_set.append(item)
return o_set
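# Editor's note: small illustration of unique_list's order-preserving dedup.
# >>> unique_list([('a', 1), ('b', 2), ('a', 1)])
# [('a', 1), ('b', 2)]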
def conv_str_to_func(func_name):
"""
Convert a name of a function into a function, if possible
@param func_name: string to be converted (if possible)
@return: either the function or error
"""
name_func_dict = {"None": None,
"str": str,
"int": int,
"float": float,
"bool": bool,
}
if func_name is None:
return func_name
elif func_name in name_func_dict:
return name_func_dict[func_name]
else:
        raise InvalidDataError("Invalid type entry '{}'. Valid options are: "
                               "{}".format(func_name, ", ".join(name_func_dict.keys())))
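# Editor's note: illustration of conv_str_to_func; valid names are exactly the
# keys of name_func_dict above.
# >>> conv_str_to_func('int') is int
# True
# >>> conv_str_to_func(None) is None
# True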
# Processing LAMMPS files #
def find_dump_section_state(line, sec_timestep=SEC_TIMESTEP, sec_num_atoms=SEC_NUM_ATOMS, sec_box_size=SEC_BOX_SIZE,
sec_atoms=SEC_ATOMS):
atoms_pat = re.compile(r"^ITEM: ATOMS id mol type q x y z.*")
if line == 'ITEM: TIMESTEP':
return sec_timestep
elif line == 'ITEM: NUMBER OF ATOMS':
return sec_num_atoms
elif line == 'ITEM: BOX BOUNDS pp pp pp':
return sec_box_size
elif atoms_pat.match(line):
return sec_atoms
def process_pdb_tpl(tpl_loc):
tpl_data = {NUM_ATOMS: 0, HEAD_CONTENT: [], ATOMS_CONTENT: [], TAIL_CONTENT: []}
atom_id = 0
with open(tpl_loc) as f:
for line in f:
line = line.strip()
if len(line) == 0:
continue
line_head = line[:PDB_LINE_TYPE_LAST_CHAR]
            # head_content to contain everything before the 'Atoms' section
# also capture the number of atoms
# match 5 letters so don't need to set up regex for the ones that have numbers following the letters
# noinspection SpellCheckingInspection
if line_head[:-1] in ['HEADE', 'TITLE', 'REMAR', 'CRYST', 'MODEL', 'COMPN',
'NUMMD', 'ORIGX', 'SCALE', 'SOURC', 'AUTHO', 'CAVEA',
'EXPDT', 'MDLTY', 'KEYWD', 'OBSLT', 'SPLIT', 'SPRSD',
'REVDA', 'JRNL ', 'DBREF', 'SEQRE', 'HET ', 'HETNA',
'HETSY', 'FORMU', 'HELIX', 'SHEET', 'SSBON', 'LINK ',
'CISPE', 'SITE ', ]:
# noinspection PyTypeChecker
tpl_data[HEAD_CONTENT].append(line)
# atoms_content to contain everything but the xyz
elif line_head == 'ATOM ':
# By renumbering, handles the case when a PDB template has ***** after atom_id 99999.
# For renumbering, making sure prints in the correct format, including num of characters:
atom_id += 1
if atom_id > 99999:
atom_num = format(atom_id, 'x')
else:
atom_num = '{:5d}'.format(atom_id)
# Alternately, use this:
# atom_num = line[cfg[PDB_LINE_TYPE_LAST_CHAR]:cfg[PDB_ATOM_NUM_LAST_CHAR]]
atom_type = line[PDB_ATOM_NUM_LAST_CHAR:PDB_ATOM_TYPE_LAST_CHAR]
res_type = line[PDB_ATOM_TYPE_LAST_CHAR:PDB_RES_TYPE_LAST_CHAR]
mol_num = int(line[PDB_RES_TYPE_LAST_CHAR:PDB_MOL_NUM_LAST_CHAR])
pdb_x = float(line[PDB_MOL_NUM_LAST_CHAR:PDB_X_LAST_CHAR])
pdb_y = float(line[PDB_X_LAST_CHAR:PDB_Y_LAST_CHAR])
pdb_z = float(line[PDB_Y_LAST_CHAR:PDB_Z_LAST_CHAR])
last_cols = line[PDB_Z_LAST_CHAR:]
line_struct = [line_head, atom_num, atom_type, res_type, mol_num, pdb_x, pdb_y, pdb_z, last_cols]
# noinspection PyTypeChecker
tpl_data[ATOMS_CONTENT].append(line_struct)
# tail_content to contain everything after the 'Atoms' section
else:
# noinspection PyTypeChecker
tpl_data[TAIL_CONTENT].append(line)
tpl_data[NUM_ATOMS] = len(tpl_data[ATOMS_CONTENT])
return tpl_data
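# Editor's note: a hedged sketch of process_pdb_tpl on a hypothetical PDB
# template; the PDB_*_LAST_CHAR column constants used above are defined
# elsewhere in this module (standard fixed-width PDB columns are assumed).
# >>> tpl = process_pdb_tpl('protein_tpl.pdb')        # doctest: +SKIP
# >>> tpl[NUM_ATOMS] == len(tpl[ATOMS_CONTENT])
# True
# >>> head, a_num, a_type, res, mol, x, y, z, rest = tpl[ATOMS_CONTENT][0]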
def longest_common_substring(s1, s2):
"""
From https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Longest_common_substring#Python
@param s1: string 1
@param s2: string 2
@return: string: the longest common string!
"""
# noinspection PyUnusedLocal
m = [[0] * (1 + len(s2)) for i in range(1 + len(s1))]
longest, x_longest = 0, 0
for x in range(1, 1 + len(s1)):
for y in range(1, 1 + len(s2)):
if s1[x - 1] == s2[y - 1]:
m[x][y] = m[x - 1][y - 1] + 1
if m[x][y] > longest:
longest = m[x][y]
x_longest = x
else:
m[x][y] = 0
return s1[x_longest - longest: x_longest]
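# Editor's note: a quick illustration of longest_common_substring.
# >>> longest_common_substring('water_520K.gro', 'water_550K.gro')
# 'water_5'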
|
team-mayes/md_utils
|
md_utils/md_common.py
|
Python
|
bsd-3-clause
| 48,535
|
[
"CP2K",
"LAMMPS"
] |
c9747256ccb8bbcb566d7f8ad3aba92e339ecacd43fa83437aab3afb2c5465e4
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
""" Location of data files for Pytim examples and tests
====================================================
Real MD simulation data are stored in the ``data/`` subdirectory.
Example: load an example trajectory
>>> import MDAnalysis as mda
>>> import pytim
>>> import numpy as np
>>> from pytim.datafiles import *
>>> u = mda.Universe(WATER_GRO,WATER_XTC)
>>> print(u)
<Universe with 12000 atoms>
Example: list all configurations
>>> for config in sorted(pytim_data.config):
... print("{:20s} {:s}".format(config,pytim_data.description[config]))
CCL4_WATER_GRO Carbon tetrachloride/TIP4p water interface
DPPC_GRO DPPC bilayer
FULLERENE_PDB fullerene
GLUCOSE_PDB solvated beta-d-glucose
LJ_GRO Lennard-Jones liquid/vapour interface
MICELLE_PDB DPC micelle
WATERSMALL_GRO small SPC water/vapour interface
WATER_520K_GRO SPC/E water/vapour interface, 520K
WATER_550K_GRO SPC/E water/vapour interface, 550K
WATER_GRO SPC water/vapour interface
WATER_PDB SPC water/vapour interface
WATER_XYZ SPC water/vapour interface
Example: list all topologies
>>> print(np.sort(pytim_data.topol))
['AMBER03_TOP' 'CHARMM27_TOP' 'G43A1_TOP' 'WATER_LMP_DATA']
Example: list all trajectories
>>> print (np.sort(pytim_data.traj))
['LJ_SHORT_XTC' 'WATER_LMP_XTC' 'WATER_XTC']
Example: list all files, file type, file format and description
>>> for label in pytim_data.label:
... type = pytim_data.type[label]
... format = pytim_data.format[label]
... description = pytim_data.description[label]
"""
from __future__ import print_function
__all__ = [
"CCL4_WATER_GRO", # GROMACS single frame, carbon tetrachloride / water interface
"WATER_GRO", # GROMACS single frame, water/vapour interface
"WATER_LMP_DATA", # LAMMPS topology for WATER_LAMMPS, water/vapour interface
"WATER_LMP_XTC", # LAMMPS trajectory, water/vapour interface
"WATER_PDB", # PDB single frame, water/vapour interface, same as WATER_GRO
"WATER_XYZ", # XYZ single frame, water/vapour interface, same as WATER_GRO
"WATERSMALL_GRO", # GROMACS single frame, SPC water/vapour interface
"WATER_520K_GRO", # GROMACS single frame, SPC/E water/vapour interface,520 K
"WATER_550K_GRO", # GROMACS single frame, SPC/E water/vapour interface,550 K
"METHANOL_GRO", # methanol/vapour interface with molecules in the vapour phase
"ILBENZENE_GRO", # Ionic liquid/benzene, partial miscibility
"ANTAGONISTIC_GRO", # 3-Methylpyridine, Sodium Tetraphenylborate and water
"LJ_GRO", # Lennard-Jones liquid/vapour interface
"LJ_SHORT_XTC", # Lennard-Jones liquid/vapour interface trajectory
"MICELLE_PDB", # PDB of dodecylphosphocholine micelle in water
"FULLERENE_PDB", # PDB of C60
"DPPC_GRO", # GROMACS single frame of a dppc bilayer
"GLUCOSE_PDB", # PDB of solvated beta-d-glucose
"WATER_XTC", # GROMACS trajectory, 100 frames, water/vapour interface
"G43A1_TOP", # GROMOS 43a1 nonbonded parameters, from the gromacs distribution
"AMBER03_TOP", # AMBER03 nonbonded parameters, from the gromacs distribution
"CHARMM27_TOP", # CHARM27 nonbonded parameters, from the gromacs distribution
"pytim_data", # class to access the data
"_TEST_BCC_GRO", # test file
"_TEST_ORIENTATION_GRO", # test file
"_TEST_PROFILE_GRO", # test file
]
from pkg_resources import resource_filename
import tempfile
import hashlib
import re as re
import urllib
try:
    from urllib import urlopen as urlopen
except ImportError:
    from urllib.request import urlopen as urlopen
class Data(object):
"""" a class for storing/accessing configurations, trajectories, topologies
"""
@staticmethod
def sigeps(data, input_type):
nm2angs = 10.0
a, b = float(data[5]), float(data[6])
sigma = 0
if input_type == 'c6c12':
c6, c12 = a, b
if (c6 > 0.0):
sigma = (c12 / c6)**(1. / 6.)
else:
sigma = a
return sigma * nm2angs
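    # Editor's note: a hedged sketch of the 'c6c12' branch above with made-up
    # numbers: sigma = (c12/c6)**(1/6), converted from nm to Angstrom; the
    # first five fields of `data` are ignored by sigeps.
    # >>> Data.sigeps([0, 0, 0, 0, 0, 2.6e-3, 2.6e-6], 'c6c12')  # doctest: +SKIP
    # 3.162...   (i.e. 0.3162 nm expressed in Angstrom)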
@staticmethod
def fetch(name, tmpdir=None):
""" Fetch a sample trajectory from the github repository.
Have a look at https://github.com/Marcello-Sega/pytim/raw/extended_datafiles/files/
for the available files
Example:
>>> import MDAnalysis as mda
>>> import pytim
>>> from pytim.datafiles import WATERSMALL_GRO
>>> # tmpdir here is specified only for travis
>>> import os
>>> WATERSMALL_TRR = pytim.datafiles.pytim_data.fetch('WATERSMALL_TRR',tmpdir='./')
checking presence of a cached copy... not found. Fetching remote file... done.
>>> u = mda.Universe(WATERSMALL_GRO,WATERSMALL_TRR)
>>> os.unlink('./'+WATERSMALL_TRR)
>>> print(u)
<Universe with 648 atoms>
"""
filename = name.replace("_", ".")
if tmpdir is None:
dirname = tempfile.gettempdir()
else:
dirname = tmpdir
urlbase_md5 = 'https://raw.githubusercontent.com/Marcello-Sega/pytim/extended_datafiles/files/'
urlbase = 'https://github.com/Marcello-Sega/pytim/raw/extended_datafiles/files/'
print("checking presence of a cached copy...", end=' ')
try:
with urlopen(urlbase_md5 + filename + '.MD5') as handle:
md5 = handle.read()
md5_local = hashlib.md5(open(dirname + filename,
'rb').read()).hexdigest()
if md5_local in md5:
print("found")
return dirname + filename
except BaseException:
pass
print("not found. Fetching remote file...", end=' ')
newfile = urlopen(urlbase + filename + '?raw=true')
with open(dirname + filename, 'wb') as output:
output.write(newfile.read())
print("done.")
return dirname + filename
def _generate_data_property(self, name):
labels = []
for label in self.type.keys():
if self.type[label] == name:
labels.append(label)
#labels = [label for label, val in self.type.iteritems() if val == name]
return list(set(labels) & set(self.label))
@property
def config(self):
return self._generate_data_property('config')
@property
def topol(self):
return self._generate_data_property('topol')
@property
def traj(self):
return self._generate_data_property('traj')
def __init__(self):
self._label = list()
self.label = list()
self.file = dict()
self.type = dict()
self.format = dict()
self.description = dict()
def add(self, label, filetype, fileformat, desc):
self._label.append(label)
if label[0] != '_':
self.label.append(label)
self.file[label] = globals()[label]
file = self.file[label]
self.type[file] = filetype
self.type[label] = filetype
self.format[file] = fileformat
self.format[label] = fileformat
self.description[file] = desc
self.description[label] = desc
def vdwradii(self, filename):
if self.type[filename] == 'topol' and self.format[filename] == 'GMX':
return self._vdwradii_gmx(filename)
def _vdwradii_gmx(self, filename):
with open(filename) as f:
input_type = 'sigeps'
content = f.read()
if re.match('.*name.*c6 *c12.*', content.replace('\n', ' ')):
input_type = 'c6c12'
f.seek(0)
scan = False
radii = dict()
for line in f:
                if (scan and re.match(r'^ *\[', line)):
return radii
if (scan):
try:
data = (line.split(";")[0]).split()
atom = data[0]
radii[atom] = 0.5 * self.sigeps(data, input_type)
except IndexError:
pass
                if (re.match(r'^ *\[ *atomtypes *\]', line)):
scan = True
return radii
pytim_data = Data()
# NOTE: to add a new datafile, make sure it is listed in setup.py (in the root directory)
# in the package_data option (a glob like 'data/*' is usually enough)
CCL4_WATER_GRO = resource_filename('pytim', 'data/CCL4.H2O.GRO')
pytim_data.add('CCL4_WATER_GRO', 'config', 'GRO',
'Carbon tetrachloride/TIP4p water interface')
WATER_GRO = resource_filename('pytim', 'data/water.gro')
pytim_data.add('WATER_GRO', 'config', 'GRO', 'SPC water/vapour interface')
WATER_LMP_XTC = resource_filename('pytim', 'data/water_lmp.xtc')
pytim_data.add('WATER_LMP_XTC', 'traj', 'LAMMPS', 'SPC water/vapour interface')
WATER_PDB = resource_filename('pytim', 'data/water.pdb')
pytim_data.add('WATER_PDB', 'config', 'PDB', 'SPC water/vapour interface')
WATER_XYZ = resource_filename('pytim', 'data/water.xyz')
pytim_data.add('WATER_XYZ', 'config', 'XYZ', 'SPC water/vapour interface')
MICELLE_PDB = resource_filename('pytim', 'data/micelle.pdb')
pytim_data.add('MICELLE_PDB', 'config', 'GRO', 'DPC micelle')
FULLERENE_PDB = resource_filename('pytim', 'data/fullerene.pdb')
pytim_data.add('FULLERENE_PDB', 'config', 'PDB', 'fullerene')
DPPC_GRO = resource_filename('pytim', 'data/dppc.gro')
pytim_data.add('DPPC_GRO', 'config', 'GRO', 'DPPC bilayer')
GLUCOSE_PDB = resource_filename('pytim', 'data/glucose.pdb')
pytim_data.add('GLUCOSE_PDB', 'config', 'PDB', 'solvated beta-d-glucose')
LJ_GRO = resource_filename('pytim', 'data/LJ.gro')
pytim_data.add('LJ_GRO', 'config', 'GRO',
'Lennard-Jones liquid/vapour interface')
LJ_SHORT_XTC = resource_filename('pytim', 'data/LJ.short.xtc')
pytim_data.add('LJ_SHORT_XTC', 'traj', 'XTC', 'LJ liquid/vapour interface')
WATERSMALL_GRO = resource_filename('pytim', 'data/water-small.gro')
pytim_data.add('WATERSMALL_GRO', 'config', 'GRO',
'small SPC water/vapour interface')
WATER_520K_GRO = resource_filename('pytim', 'data/water_520K.gro')
pytim_data.add('WATER_520K_GRO', 'config', 'GRO',
'SPC/E water/vapour interface, 520K')
WATER_550K_GRO = resource_filename('pytim', 'data/water_550K.gro')
pytim_data.add('WATER_550K_GRO', 'config', 'GRO',
'SPC/E water/vapour interface, 550K')
METHANOL_GRO = resource_filename('pytim', 'data/methanol.gro')
pytim_data.add('METHANOL_GRO', 'conf', 'GRO', 'methanol/vapour interface')
ILBENZENE_GRO = resource_filename('pytim', 'data/ilbenzene.gro')
pytim_data.add('ILBENZENE_GRO', 'conf', 'GRO', 'BMIM PF4 / benzene interface')
ANTAGONISTIC_GRO = resource_filename('pytim', 'data/antagonistic.gro')
pytim_data.add('ANTAGONISTIC_GRO', 'conf', 'GRO', '3-Methylpyridine, Sodium Tetraphenylborate and water')
WATER_XTC = resource_filename('pytim', 'data/water.xtc')
pytim_data.add('WATER_XTC', 'traj', 'XTC',
'SPC water/vapour interface trajectory')
_TEST_BCC_GRO = resource_filename('pytim', 'data/_test_bcc.gro')
pytim_data.add('_TEST_BCC_GRO', 'config', 'GRO', 'test file')
_TEST_ORIENTATION_GRO = resource_filename('pytim',
'data/_test_orientation.gro')
pytim_data.add('_TEST_ORIENTATION_GRO', 'config', 'GRO', 'test file')
_TEST_PROFILE_GRO = resource_filename('pytim', 'data/_test_profile.gro')
pytim_data.add('_TEST_PROFILE_GRO', 'config', 'GRO', 'test file')
WATER_LMP_DATA = resource_filename('pytim', 'data/water_lmp.data')
pytim_data.add('WATER_LMP_DATA', 'topol', 'DATA',
'LAMMPS topology for WATER_LAMMPS')
G43A1_TOP = resource_filename('pytim', 'data/ffg43a1.nonbonded.itp')
pytim_data.add('G43A1_TOP', 'topol', 'GMX', 'GROMOS 43A1 topology for GROMACS')
AMBER03_TOP = resource_filename('pytim', 'data/ffamber03.nonbonded.itp')
pytim_data.add('AMBER03_TOP', 'topol', 'GMX', 'AMBER 03 topology for GROMACS')
CHARMM27_TOP = resource_filename('pytim', 'data/ffcharmm27.nonbonded.itp')
pytim_data.add('CHARMM27_TOP', 'topol', 'GMX',
'CHARMM 27 topology for GROMACS')
# This should be the last line: clean up namespace
del resource_filename
|
Marcello-Sega/pytim
|
pytim/datafiles/__init__.py
|
Python
|
gpl-3.0
| 12,583
|
[
"Amber",
"CHARMM",
"GROMOS",
"Gromacs",
"LAMMPS",
"MDAnalysis"
] |
f4a11b9f60b737fd8becffe1f9cf1ee77110dcf6865335048a8bc7a7f4f122c5
|
# -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from html5lib import treebuilders, inputstream
from xhtml2pdf.default import TAGS, STRING, INT, BOOL, SIZE, COLOR, FILE
from xhtml2pdf.default import BOX, POS, MUST, FONT
from xhtml2pdf.util import getSize, getBool, toList, getColor, getAlign
from xhtml2pdf.util import getBox, getPos, pisaTempFile
from reportlab.platypus.doctemplate import NextPageTemplate, FrameBreak
from reportlab.platypus.flowables import PageBreak, KeepInFrame
from xhtml2pdf.xhtml2pdf_reportlab import PmlRightPageBreak, PmlLeftPageBreak
from xhtml2pdf.tags import * # TODO: Kill wild import!
from xhtml2pdf.tables import * # TODO: Kill wild import!
from xhtml2pdf.util import * # TODO: Kill wild import!
from xml.dom import Node
import copy
import html5lib
import logging
import re
import types
import xhtml2pdf.w3c.cssDOMElementInterface as cssDOMElementInterface
import xml.dom.minidom
CSSAttrCache={}
log = logging.getLogger("xhtml2pdf")
rxhttpstrip = re.compile("https?://[^/]+(.*)", re.M | re.I)
class AttrContainer(dict):
def __getattr__(self, name):
try:
return dict.__getattr__(self, name)
        except AttributeError:
return self[name]
def pisaGetAttributes(c, tag, attributes):
global TAGS
attrs = {}
if attributes:
for k, v in attributes.items():
try:
attrs[str(k)] = str(v) # XXX no Unicode! Reportlab fails with template names
except:
attrs[k] = v
nattrs = {}
if TAGS.has_key(tag):
block, adef = TAGS[tag]
adef["id"] = STRING
# print block, adef
for k, v in adef.items():
nattrs[k] = None
# print k, v
            # defaults, if available
if type(v) == types.TupleType:
if v[1] == MUST:
if not attrs.has_key(k):
log.warn(c.warning("Attribute '%s' must be set!", k))
nattrs[k] = None
continue
nv = attrs.get(k, v[1])
dfl = v[1]
v = v[0]
else:
nv = attrs.get(k, None)
dfl = None
if nv is not None:
if type(v) == types.ListType:
nv = nv.strip().lower()
if nv not in v:
#~ raise PML_EXCEPTION, "attribute '%s' of wrong value, allowed is one of: %s" % (k, repr(v))
log.warn(c.warning("Attribute '%s' of wrong value, allowed is one of: %s", k, repr(v)))
nv = dfl
elif v == BOOL:
nv = nv.strip().lower()
nv = nv in ("1", "y", "yes", "true", str(k))
elif v == SIZE:
try:
nv = getSize(nv)
except:
log.warn(c.warning("Attribute '%s' expects a size value", k))
elif v == BOX:
nv = getBox(nv, c.pageSize)
elif v == POS:
nv = getPos(nv, c.pageSize)
elif v == INT:
nv = int(nv)
elif v == COLOR:
nv = getColor(nv)
elif v == FILE:
nv = c.getFile(nv)
elif v == FONT:
nv = c.getFontName(nv)
nattrs[k] = nv
#for k in attrs.keys():
# if not nattrs.has_key(k):
# c.warning("attribute '%s' for tag <%s> not supported" % (k, tag))
#else:
# c.warning("tag <%s> is not supported" % tag)
return AttrContainer(nattrs)
attrNames = '''
color
font-family
font-size
font-weight
font-style
text-decoration
line-height
background-color
display
margin-left
margin-right
margin-top
margin-bottom
padding-left
padding-right
padding-top
padding-bottom
border-top-color
border-top-style
border-top-width
border-bottom-color
border-bottom-style
border-bottom-width
border-left-color
border-left-style
border-left-width
border-right-color
border-right-style
border-right-width
text-align
vertical-align
width
height
zoom
page-break-after
page-break-before
list-style-type
list-style-image
white-space
text-indent
-pdf-page-break
-pdf-frame-break
-pdf-next-page
-pdf-keep-with-next
-pdf-outline
-pdf-outline-level
-pdf-outline-open
-pdf-line-spacing
-pdf-keep-in-frame-mode
-pdf-word-wrap
'''.strip().split()
def getCSSAttr(self, cssCascade, attrName, default=NotImplemented):
if attrName in self.cssAttrs:
return self.cssAttrs[attrName]
try:
result = cssCascade.findStyleFor(self.cssElement, attrName, default)
except LookupError:
result = None
# XXX Workaround for inline styles
try:
style = self.cssStyle
except:
style = self.cssStyle = cssCascade.parser.parseInline(self.cssElement.getStyleAttr() or '')[0]
if style.has_key(attrName):
result = style[attrName]
if result == 'inherit':
if hasattr(self.parentNode, 'getCSSAttr'):
result = self.parentNode.getCSSAttr(cssCascade, attrName, default)
elif default is not NotImplemented:
return default
else:
raise LookupError("Could not find inherited CSS attribute value for '%s'" % (attrName,))
if result is not None:
self.cssAttrs[attrName] = result
return result
#TODO: Monkeypatching standard lib should go away.
xml.dom.minidom.Element.getCSSAttr = getCSSAttr
def getCSSAttrCacheKey(node):
_cl = _id = _st = ''
for i in node.attributes.items():
if i[0] == 'class':
_cl = i[1]
elif i[0] == 'id':
_id = i[1]
elif i[0] == 'style':
_st = i[1]
return "%s#%s#%s#%s" % (id(node.parentNode), _cl, _id, _st)
def CSSCollect(node, c):
#node.cssAttrs = {}
#return node.cssAttrs
if c.css:
_key = getCSSAttrCacheKey(node)
if hasattr(node.parentNode, "tagName"):
if node.parentNode.tagName.lower() != "html":
CachedCSSAttr = CSSAttrCache.get(_key, None)
if CachedCSSAttr is not None:
return CachedCSSAttr
node.cssElement = cssDOMElementInterface.CSSDOMElementInterface(node)
node.cssAttrs = {}
# node.cssElement.onCSSParserVisit(c.cssCascade.parser)
cssAttrMap = {}
for cssAttrName in attrNames:
try:
cssAttrMap[cssAttrName] = node.getCSSAttr(c.cssCascade, cssAttrName)
#except LookupError:
# pass
except Exception: # TODO: Kill this catch-all!
log.debug("CSS error '%s'", cssAttrName, exc_info=1)
CSSAttrCache[_key] = node.cssAttrs
return node.cssAttrs
def CSS2Frag(c, kw, isBlock):
# COLORS
if c.cssAttr.has_key("color"):
c.frag.textColor = getColor(c.cssAttr["color"])
if c.cssAttr.has_key("background-color"):
c.frag.backColor = getColor(c.cssAttr["background-color"])
# FONT SIZE, STYLE, WEIGHT
if c.cssAttr.has_key("font-family"):
c.frag.fontName = c.getFontName(c.cssAttr["font-family"])
if c.cssAttr.has_key("font-size"):
# XXX inherit
c.frag.fontSize = max(getSize("".join(c.cssAttr["font-size"]), c.frag.fontSize, c.baseFontSize), 1.0)
if c.cssAttr.has_key("line-height"):
leading = "".join(c.cssAttr["line-height"])
c.frag.leading = getSize(leading, c.frag.fontSize)
c.frag.leadingSource = leading
else:
c.frag.leading = getSize(c.frag.leadingSource, c.frag.fontSize)
if c.cssAttr.has_key("-pdf-line-spacing"):
c.frag.leadingSpace = getSize("".join(c.cssAttr["-pdf-line-spacing"]))
# print "line-spacing", c.cssAttr["-pdf-line-spacing"], c.frag.leading
if c.cssAttr.has_key("font-weight"):
value = c.cssAttr["font-weight"].lower()
if value in ("bold", "bolder", "500", "600", "700", "800", "900"):
c.frag.bold = 1
else:
c.frag.bold = 0
for value in toList(c.cssAttr.get("text-decoration", "")):
if "underline" in value:
c.frag.underline = 1
if "line-through" in value:
c.frag.strike = 1
if "none" in value:
c.frag.underline = 0
c.frag.strike = 0
if c.cssAttr.has_key("font-style"):
value = c.cssAttr["font-style"].lower()
if value in ("italic", "oblique"):
c.frag.italic = 1
else:
c.frag.italic = 0
if c.cssAttr.has_key("white-space"):
# normal | pre | nowrap
c.frag.whiteSpace = str(c.cssAttr["white-space"]).lower()
# ALIGN & VALIGN
if c.cssAttr.has_key("text-align"):
c.frag.alignment = getAlign(c.cssAttr["text-align"])
if c.cssAttr.has_key("vertical-align"):
c.frag.vAlign = c.cssAttr["vertical-align"]
# HEIGHT & WIDTH
if c.cssAttr.has_key("height"):
c.frag.height = "".join(toList(c.cssAttr["height"])) # XXX Relative is not correct!
if c.frag.height in ("auto",):
c.frag.height = None
if c.cssAttr.has_key("width"):
# print c.cssAttr["width"]
c.frag.width = "".join(toList(c.cssAttr["width"])) # XXX Relative is not correct!
if c.frag.width in ("auto",):
c.frag.width = None
# ZOOM
if c.cssAttr.has_key("zoom"):
# print c.cssAttr["width"]
zoom = "".join(toList(c.cssAttr["zoom"])) # XXX Relative is not correct!
if zoom.endswith("%"):
zoom = float(zoom[: - 1]) / 100.0
c.frag.zoom = float(zoom)
# MARGINS & LIST INDENT, STYLE
if isBlock:
if c.cssAttr.has_key("margin-top"):
c.frag.spaceBefore = getSize(c.cssAttr["margin-top"], c.frag.fontSize)
if c.cssAttr.has_key("margin-bottom"):
c.frag.spaceAfter = getSize(c.cssAttr["margin-bottom"], c.frag.fontSize)
if c.cssAttr.has_key("margin-left"):
c.frag.bulletIndent = kw["margin-left"] # For lists
kw["margin-left"] += getSize(c.cssAttr["margin-left"], c.frag.fontSize)
c.frag.leftIndent = kw["margin-left"]
# print "MARGIN LEFT", kw["margin-left"], c.frag.bulletIndent
if c.cssAttr.has_key("margin-right"):
kw["margin-right"] += getSize(c.cssAttr["margin-right"], c.frag.fontSize)
c.frag.rightIndent = kw["margin-right"]
# print c.frag.rightIndent
if c.cssAttr.has_key("text-indent"):
c.frag.firstLineIndent = getSize(c.cssAttr["text-indent"], c.frag.fontSize)
if c.cssAttr.has_key("list-style-type"):
c.frag.listStyleType = str(c.cssAttr["list-style-type"]).lower()
if c.cssAttr.has_key("list-style-image"):
c.frag.listStyleImage = c.getFile(c.cssAttr["list-style-image"])
# PADDINGS
if isBlock:
if c.cssAttr.has_key("padding-top"):
c.frag.paddingTop = getSize(c.cssAttr["padding-top"], c.frag.fontSize)
if c.cssAttr.has_key("padding-bottom"):
c.frag.paddingBottom = getSize(c.cssAttr["padding-bottom"], c.frag.fontSize)
if c.cssAttr.has_key("padding-left"):
c.frag.paddingLeft = getSize(c.cssAttr["padding-left"], c.frag.fontSize)
if c.cssAttr.has_key("padding-right"):
c.frag.paddingRight = getSize(c.cssAttr["padding-right"], c.frag.fontSize)
# BORDERS
if isBlock:
if c.cssAttr.has_key("border-top-width"):
# log.debug(c.cssAttr["border-top-width"])
c.frag.borderTopWidth = getSize(c.cssAttr["border-top-width"], c.frag.fontSize)
if c.cssAttr.has_key("border-bottom-width"):
c.frag.borderBottomWidth = getSize(c.cssAttr["border-bottom-width"], c.frag.fontSize)
if c.cssAttr.has_key("border-left-width"):
c.frag.borderLeftWidth = getSize(c.cssAttr["border-left-width"], c.frag.fontSize)
if c.cssAttr.has_key("border-right-width"):
c.frag.borderRightWidth = getSize(c.cssAttr["border-right-width"], c.frag.fontSize)
if c.cssAttr.has_key("border-top-style"):
c.frag.borderTopStyle = c.cssAttr["border-top-style"]
if c.cssAttr.has_key("border-bottom-style"):
c.frag.borderBottomStyle = c.cssAttr["border-bottom-style"]
if c.cssAttr.has_key("border-left-style"):
c.frag.borderLeftStyle = c.cssAttr["border-left-style"]
if c.cssAttr.has_key("border-right-style"):
c.frag.borderRightStyle = c.cssAttr["border-right-style"]
if c.cssAttr.has_key("border-top-color"):
c.frag.borderTopColor = getColor(c.cssAttr["border-top-color"])
if c.cssAttr.has_key("border-bottom-color"):
c.frag.borderBottomColor = getColor(c.cssAttr["border-bottom-color"])
if c.cssAttr.has_key("border-left-color"):
c.frag.borderLeftColor = getColor(c.cssAttr["border-left-color"])
if c.cssAttr.has_key("border-right-color"):
c.frag.borderRightColor = getColor(c.cssAttr["border-right-color"])
def pisaPreLoop(node, context, collect=False):
"""
Collect all CSS definitions
"""
data = u""
if node.nodeType == Node.TEXT_NODE and collect:
data = node.data
elif node.nodeType == Node.ELEMENT_NODE:
name = node.tagName.lower()
# print name, node.attributes.items()
if name in ("style", "link"):
attr = pisaGetAttributes(context, name, node.attributes)
# print " ", attr
media = [x.strip() for x in attr.media.lower().split(",") if x.strip()]
# print repr(media)
if (attr.get("type", "").lower() in ("", "text/css") and (
not media or
"all" in media or
"print" in media or
"pdf" in media)):
if name == "style":
for node in node.childNodes:
data += pisaPreLoop(node, context, collect=True)
context.addCSS(data)
return u""
#collect = True
if name == "link" and attr.href and attr.rel.lower() == "stylesheet":
# print "CSS LINK", attr
context.addCSS('\n@import "%s" %s;' % (attr.href, ",".join(media)))
# context.addCSS(unicode(file(attr.href, "rb").read(), attr.charset))
#else:
# print node.nodeType
for node in node.childNodes:
result = pisaPreLoop(node, context, collect=collect)
if collect:
data += result
return data
def pisaLoop(node, context, path=[], **kw):
# Initialize KW
if not kw:
kw = {
"margin-top": 0,
"margin-bottom": 0,
"margin-left": 0,
"margin-right": 0,
}
else:
kw = copy.copy(kw)
# indent = len(path) * " " # only used for debug print statements
# TEXT
if node.nodeType == Node.TEXT_NODE:
# print indent, "#", repr(node.data) #, context.frag
context.addFrag(node.data)
# context.text.append(node.value)
# ELEMENT
elif node.nodeType == Node.ELEMENT_NODE:
node.tagName = node.tagName.replace(":", "").lower()
if node.tagName in ("style", "script"):
return
path = copy.copy(path) + [node.tagName]
# Prepare attributes
attr = pisaGetAttributes(context, node.tagName, node.attributes)
# log.debug(indent + "<%s %s>" % (node.tagName, attr) + repr(node.attributes.items())) #, path
# Calculate styles
context.cssAttr = CSSCollect(node, context)
context.node = node
# Block?
PAGE_BREAK = 1
PAGE_BREAK_RIGHT = 2
PAGE_BREAK_LEFT = 3
pageBreakAfter = False
frameBreakAfter = False
display = context.cssAttr.get("display", "inline").lower()
# print indent, node.tagName, display, context.cssAttr.get("background-color", None), attr
isBlock = (display == "block")
if isBlock:
context.addPara()
# Page break by CSS
if context.cssAttr.has_key("-pdf-next-page"):
context.addStory(NextPageTemplate(str(context.cssAttr["-pdf-next-page"])))
if context.cssAttr.has_key("-pdf-page-break"):
if str(context.cssAttr["-pdf-page-break"]).lower() == "before":
context.addStory(PageBreak())
if context.cssAttr.has_key("-pdf-frame-break"):
if str(context.cssAttr["-pdf-frame-break"]).lower() == "before":
context.addStory(FrameBreak())
if str(context.cssAttr["-pdf-frame-break"]).lower() == "after":
frameBreakAfter = True
if context.cssAttr.has_key("page-break-before"):
if str(context.cssAttr["page-break-before"]).lower() == "always":
context.addStory(PageBreak())
if str(context.cssAttr["page-break-before"]).lower() == "right":
context.addStory(PageBreak())
context.addStory(PmlRightPageBreak())
if str(context.cssAttr["page-break-before"]).lower() == "left":
context.addStory(PageBreak())
context.addStory(PmlLeftPageBreak())
if context.cssAttr.has_key("page-break-after"):
if str(context.cssAttr["page-break-after"]).lower() == "always":
pageBreakAfter = PAGE_BREAK
if str(context.cssAttr["page-break-after"]).lower() == "right":
pageBreakAfter = PAGE_BREAK_RIGHT
if str(context.cssAttr["page-break-after"]).lower() == "left":
pageBreakAfter = PAGE_BREAK_LEFT
if display == "none":
# print "none!"
return
# Translate CSS to frags
# Save previous frag styles
context.pushFrag()
# Map styles to Reportlab fragment properties
CSS2Frag(context, kw, isBlock)
# EXTRAS
if context.cssAttr.has_key("-pdf-keep-with-next"):
context.frag.keepWithNext = getBool(context.cssAttr["-pdf-keep-with-next"])
if context.cssAttr.has_key("-pdf-outline"):
context.frag.outline = getBool(context.cssAttr["-pdf-outline"])
if context.cssAttr.has_key("-pdf-outline-level"):
context.frag.outlineLevel = int(context.cssAttr["-pdf-outline-level"])
if context.cssAttr.has_key("-pdf-outline-open"):
context.frag.outlineOpen = getBool(context.cssAttr["-pdf-outline-open"])
if context.cssAttr.has_key("-pdf-word-wrap"):
context.frag.wordWrap = context.cssAttr["-pdf-word-wrap"]
# handle keep-in-frame
keepInFrameMode = None
keepInFrameMaxWidth = 0
keepInFrameMaxHeight = 0
if context.cssAttr.has_key("-pdf-keep-in-frame-mode"):
value = str(context.cssAttr["-pdf-keep-in-frame-mode"]).strip().lower()
if value in ("shrink", "error", "overflow", "truncate"):
keepInFrameMode = value
if context.cssAttr.has_key("-pdf-keep-in-frame-max-width"):
keepInFrameMaxWidth = getSize("".join(context.cssAttr["-pdf-keep-in-frame-max-width"]))
if context.cssAttr.has_key("-pdf-keep-in-frame-max-height"):
keepInFrameMaxHeight = getSize("".join(context.cssAttr["-pdf-keep-in-frame-max-height"]))
# ignore nested keep-in-frames, tables have their own KIF handling
keepInFrame = keepInFrameMode is not None and context.keepInFrameIndex is None
if keepInFrame:
            # keep track of current story index, so we can wrap everything
# added after this point in a KeepInFrame
context.keepInFrameIndex = len(context.story)
# BEGIN tag
klass = globals().get("pisaTag%s" % node.tagName.replace(":", "").upper(), None)
obj = None
# Static block
elementId = attr.get("id", None)
staticFrame = context.frameStatic.get(elementId, None)
if staticFrame:
context.frag.insideStaticFrame += 1
oldStory = context.swapStory()
# Tag specific operations
if klass is not None:
obj = klass(node, attr)
obj.start(context)
# Visit child nodes
context.fragBlock = fragBlock = copy.copy(context.frag)
for nnode in node.childNodes:
pisaLoop(nnode, context, path, **kw)
context.fragBlock = fragBlock
# END tag
if obj:
obj.end(context)
# Block?
if isBlock:
context.addPara()
# XXX Buggy!
# Page break by CSS
if pageBreakAfter:
context.addStory(PageBreak())
if pageBreakAfter == PAGE_BREAK_RIGHT:
context.addStory(PmlRightPageBreak())
if pageBreakAfter == PAGE_BREAK_LEFT:
context.addStory(PmlLeftPageBreak())
if frameBreakAfter:
context.addStory(FrameBreak())
if keepInFrame:
# get all content added after start of -pdf-keep-in-frame and wrap
# it in a KeepInFrame
substory = context.story[context.keepInFrameIndex:]
context.story = context.story[:context.keepInFrameIndex]
context.story.append(
KeepInFrame(
content=substory,
maxWidth=keepInFrameMaxWidth,
maxHeight=keepInFrameMaxHeight))
context.keepInFrameIndex = None
# Static block, END
if staticFrame:
context.addPara()
for frame in staticFrame:
frame.pisaStaticStory = context.story
context.swapStory(oldStory)
context.frag.insideStaticFrame -= 1
# context.debug(1, indent, "</%s>" % (node.tagName))
# Reset frag style
context.pullFrag()
# Unknown or not handled
else:
# context.debug(1, indent, "???", node, node.nodeType, repr(node))
# Loop over children
for node in node.childNodes:
pisaLoop(node, context, path, **kw)
def pisaParser(src, context, default_css="", xhtml=False, encoding=None, xml_output=None):
"""
- Parse HTML and get miniDOM
- Extract CSS informations, add default CSS, parse CSS
- Handle the document DOM itself and build reportlab story
- Return Context object
"""
    global CSSAttrCache
    CSSAttrCache = {}
    if xhtml:
        #TODO: XHTMLParser doesn't seem to exist...
parser = html5lib.XHTMLParser(tree=treebuilders.getTreeBuilder("dom"))
else:
parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
if type(src) in types.StringTypes:
if type(src) is types.UnicodeType:
encoding = "utf8"
src = src.encode(encoding)
src = pisaTempFile(src, capacity=context.capacity)
# Test for the restrictions of html5lib
if encoding:
# Workaround for html5lib<0.11.1
if hasattr(inputstream, "isValidEncoding"):
if encoding.strip().lower() == "utf8":
encoding = "utf-8"
if not inputstream.isValidEncoding(encoding):
log.error("%r is not a valid encoding e.g. 'utf8' is not valid but 'utf-8' is!", encoding)
else:
if inputstream.codecName(encoding) is None:
log.error("%r is not a valid encoding", encoding)
document = parser.parse(
src,
encoding=encoding)
if xml_output:
xml_output.write(document.toprettyxml(encoding="utf8"))
if default_css:
context.addCSS(default_css)
pisaPreLoop(document, context)
#try:
context.parseCSS()
#except:
# context.cssText = DEFAULT_CSS
# context.parseCSS()
# context.debug(9, pprint.pformat(context.css))
pisaLoop(document, context)
return context
# Shortcuts
HTML2PDF = pisaParser
def XHTML2PDF(*a, **kw):
kw["xhtml"] = True
return HTML2PDF(*a, **kw)
XML2PDF = XHTML2PDF
|
ibyer/xhtml2pdf
|
xhtml2pdf/parser.py
|
Python
|
apache-2.0
| 25,074
|
[
"VisIt"
] |
cee610398c21d65e33eae0d2173b2de2d4d5356738b9113d531fa12f1675ce89
|
# Copyright 2008-2009 Brian Boyer, Ryan Mark, Angela Nitzke, Joshua Pollock,
# Stuart Tiffen, Kayla Webley and the Medill School of Journalism, Northwestern
# University.
#
# This file is part of django-facebookconnect.
#
# django-facebookconnect is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# django-facebookconnect is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with django-facebookconnect. If not, see <http://www.gnu.org/licenses/>.
from django import template
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.contrib.sites.models import Site
from facebook.djangofb import get_facebook_client
from facebookconnect.models import FacebookTemplate,FacebookProfile
register = template.Library()
@register.inclusion_tag('facebook/js.html')
def initialize_facebook_connect():
return {'facebook_api_key': settings.FACEBOOK_API_KEY}
@register.inclusion_tag('facebook/show_string.html',takes_context=True)
def show_facebook_name(context,user):
if isinstance(user,FacebookProfile):
p = user
else:
p = user.facebook_profile
if getattr(settings,'WIDGET_MODE',None):
#if we're rendering widgets, link direct to facebook
return {'string':u'<fb:name uid="%s" />' % (p.facebook_id)}
else:
return {'string':u'<a href="%s">%s</a>' % (p.get_absolute_url(),p.full_name)}
@register.inclusion_tag('facebook/show_string.html',takes_context=True)
def show_facebook_first_name(context,user):
if isinstance(user,FacebookProfile):
p = user
else:
p = user.facebook_profile
if getattr(settings,'WIDGET_MODE',None):
#if we're rendering widgets, link direct to facebook
return {'string':u'<fb:name uid="%s" firstnameonly="true" />' % (p.facebook_id)}
else:
return {'string':u'<a href="%s">%s</a>' % (p.get_absolute_url(),p.first_name)}
@register.inclusion_tag('facebook/show_string.html',takes_context=True)
def show_facebook_possesive(context,user):
if isinstance(user,FacebookProfile):
p = user
else:
p = user.facebook_profile
return {'string':u'<fb:name uid="%i" possessive="true" linked="false"></fb:name>' % p.facebook_id}
@register.inclusion_tag('facebook/show_string.html',takes_context=True)
def show_facebook_greeting(context,user):
if isinstance(user,FacebookProfile):
p = user
else:
p = user.facebook_profile
if getattr(settings,'WIDGET_MODE',None):
#if we're rendering widgets, link direct to facebook
return {'string':u'Hello, <fb:name uid="%s" useyou="false" firstnameonly="true" />' % (p.facebook_id)}
else:
return {'string':u'Hello, <a href="%s">%s</a>!' % (p.get_absolute_url(),p.first_name)}
@register.inclusion_tag('facebook/show_string.html',takes_context=True)
def show_facebook_status(context,user):
if isinstance(user,FacebookProfile):
p = user
else:
p = user.facebook_profile
return {'string':p.status}
@register.inclusion_tag('facebook/show_string.html',takes_context=True)
def show_facebook_photo(context,user):
if isinstance(user,FacebookProfile):
p = user
else:
p = user.facebook_profile
if getattr(settings,'WIDGET_MODE',None):
#if we're rendering widgets, link direct to facebook
return {'string':u'<fb:profile_pic uid="%s" facebook-logo="true" />' % (p.facebook_id)}
else:
return {'string':u'<a href="%s"><img src="%s" alt="%s"/></a>' % (p.get_absolute_url(), p.picture_url, p.full_name)}
@register.inclusion_tag('facebook/display.html',takes_context=True)
def show_facebook_info(context,user):
if isinstance(user,FacebookProfile):
p = user
else:
p = user.facebook_profile
return {'profile_url':p.get_absolute_url(), 'picture_url':p.picture_url, 'full_name':p.full_name,'networks':p.networks}
@register.inclusion_tag('facebook/mosaic.html')
def show_profile_mosaic(profiles):
return {'profiles':profiles}
@register.inclusion_tag('facebook/connect_button.html',takes_context=True)
def show_connect_button(context,javascript_friendly=False):
if 'next' in context:
next = context['next']
else:
next = ''
return {'next':next,'javascript_friendly':javascript_friendly}
@register.simple_tag
def facebook_js():
return '<script src="http://static.ak.connect.facebook.com/js/api_lib/v0.4/FeatureLoader.js.php" type="text/javascript"></script>'
@register.simple_tag
def show_logout():
o = reverse('facebook_logout')
return '<a href="%s" onclick="FB.Connect.logoutAndRedirect(\'%s\');return false;">logout</a>' % (o,o) #hoot!
@register.filter()
def js_string(value):
import re
return re.sub(r'[\r\n]+','',value)
@register.inclusion_tag('facebook/invite.html')
def show_invite_link(invitation_template="facebook/invitation.fbml",show_link=True):
"""display an invite friends link"""
fb = get_facebook_client()
current_site = Site.objects.get_current()
content = render_to_string(invitation_template,
{ 'inviter': fb.uid,
'url': fb.get_add_url(),
'site': current_site })
from cgi import escape
content = escape(content, True)
facebook_uid = fb.uid
fql = "SELECT uid FROM user WHERE uid IN (SELECT uid2 FROM friend WHERE uid1='%s') AND has_added_app = 1" % fb.uid
result = fb.fql.query(fql)
# Extract the user ID's returned in the FQL request into a new array.
if result and isinstance(result, list):
friends_list = map(lambda x: str(x['uid']), result)
else: friends_list = []
# Convert the array of friends into a comma-delimeted string.
exclude_ids = ','.join(friends_list)
return {
'exclude_ids':exclude_ids,
'content':content,
'action_url':'',
'site':current_site,
'show_link':show_link,
}
|
ryszard/django-facebookconnect
|
facebookconnect/templatetags/facebook_tags.py
|
Python
|
gpl-3.0
| 6,440
|
[
"Brian"
] |
89d3805116da19e0b39b9bc52c6c5945879a78c2513b819b061f1ffec7312c09
|
import sys
import os
from io import StringIO
import numpy as np
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.gridspec as gridspec
from matplotlib import pylab
import matplotlib.pyplot as plt
import scipy as sp
from scipy.interpolate import interp1d
def plotData(x = np.r_[0:50], y = np.cos(np.r_[0:50]/6*np.pi), error = np.random.rand(50) * 0.5, numOfIter = 1,
y_median = np.sin(np.r_[0:50]/6*np.pi), y_max = np.cos(np.r_[0:50]/6*np.pi)+1.5, y_min = np.cos(np.r_[0:50]/6*np.pi)-1.5,
out_dir = '/home/yugin/VirtualboxShare/FEFF/out', window_title = 'test', case = '33'):
pylab.ion() # Force interactive
plt.close('all')
### for 'Qt4Agg' backend maximize figure
plt.switch_backend('QT5Agg', )
# plt.switch_backend('QT4Agg', )
fig = plt.figure( )
# gs1 = gridspec.GridSpec(1, 2)
# fig.show()
# fig.set_tight_layout(True)
figManager = plt.get_current_fig_manager()
DPI = fig.get_dpi()
fig.set_size_inches(1920.0 / DPI, 1080.0 / DPI)
gs = gridspec.GridSpec(1,1)
ax = fig.add_subplot(gs[0,0])
    txt = 'GaMnAs case %s, ' % case + r'$\chi(k)$ when the Number of the treated file is: {0}'.format(numOfIter)
fig.suptitle(txt, fontsize=22, fontweight='normal')
# Change the axes border width
for axis in ['top','bottom','left','right']:
ax.spines[axis].set_linewidth(2)
# plt.subplots_adjust(top=0.85)
# gs1.tight_layout(fig, rect=[0, 0.03, 1, 0.95])
fig.tight_layout(rect=[0.03, 0.03, 1, 0.95], w_pad=1.1)
# put window to the second monitor
# figManager.window.setGeometry(1923, 23, 640, 529)
figManager.window.setGeometry(1920, 20, 1920, 1180)
figManager.window.setWindowTitle(window_title)
figManager.window.showMinimized()
# plt.show()
    ax.plot( x, y, label = r'<$\chi(k)$>' )
    ax.plot( x, y_median, label = r'$\chi(k)$ median', color = 'darkcyan')
    ax.plot( x, y_max, label = r'$\chi(k)$ max', color = 'skyblue' )
    ax.plot( x, y_min, label = r'$\chi(k)$ min', color = 'lightblue' )
fig.tight_layout(rect=[0.03, 0.03, 1, 0.95], w_pad=1.1)
ax.plot(x, y, 'k', color='#1B2ACC')
ax.fill_between(x, y-error, y+error,
alpha=0.2, edgecolor='#1B2ACC', facecolor='#089FFF',
                    linewidth=4, linestyle='dashdot', antialiased=True, label = r'$\chi(k)$')
ax.grid(True)
plt.legend()
    ax.set_ylabel(r'$\chi(k)$', fontsize=20, fontweight='bold')
    ax.set_xlabel(r'$k$', fontsize=20, fontweight='bold')
ax.set_ylim(ymin = -0.3, ymax= 0.5)
figManager.window.showMinimized()
# plt.draw()
# save to the PNG file:
out_file_name = '%s_' % (case) + "%05d.png" %(numOfIter)
fig.savefig( os.path.join(out_dir, out_file_name) )
if __name__ == "__main__":
plotData()
print ('plot the data')
|
yuginboy/from_GULP_to_FEFF
|
feff/libs/plot_data.py
|
Python
|
gpl-3.0
| 2,856
|
[
"FEFF"
] |
d866b9f1bae23540a1c776b348fcdb6b7e7ef78d8e412f4080f4abd6d8ceab3f
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement
from acq4.devices.DAQGeneric import DAQGeneric, DAQGenericTask, DAQGenericTaskGui
from acq4.util.Mutex import Mutex, MutexLocker
#from acq4.devices.Device import *
from PyQt4 import QtCore, QtGui
import time
import numpy as np
from acq4.pyqtgraph.WidgetGroup import WidgetGroup
from collections import OrderedDict
from acq4.util.debug import printExc
from devTemplate import *
import subprocess, pickle, os
import acq4.pyqtgraph.multiprocess as mp
ivModes = {'i=0':'ic', 'vc':'vc', 'ic':'ic'}
modeNames = ['vc', 'i=0', 'ic']
class MockClamp(DAQGeneric):
sigModeChanged = QtCore.Signal(object)
def __init__(self, dm, config, name):
# Generate config to use for DAQ
daqConfig = {}
self.devLock = Mutex(Mutex.Recursive)
daqConfig = {
'command': config['Command'],
'primary': config['ScaledSignal'],
}
self.holding = {
'vc': config.get('vcHolding', -0.05),
'ic': config.get('icHolding', 0.0)
}
self.mode = 'i=0'
self.config = config
DAQGeneric.__init__(self, dm, daqConfig, name)
try:
self.setHolding()
except:
printExc("Error while setting holding value:")
# Start a remote process to run the simulation.
self.process = mp.Process()
rsys = self.process._import('sys')
rsys._setProxyOptions(returnType='proxy') # need to access remote path by proxy, not by value
rsys.path.append(os.path.abspath(os.path.dirname(__file__)))
if config['simulator'] == 'builtin':
self.simulator = self.process._import('hhSim')
elif config['simulator'] == 'neuron':
            self.simulator = self.process._import('neuronSim')
dm.declareInterface(name, ['clamp'], self)
def createTask(self, cmd, parentTask):
return MockClampTask(self, cmd, parentTask)
def taskInterface(self, taskRunner):
return MockClampTaskGui(self, taskRunner)
def deviceInterface(self, win):
return MockClampDevGui(self)
def setHolding(self, mode=None, value=None, force=False):
global ivModes
with self.devLock:
currentMode = self.getMode()
if mode is None:
mode = currentMode
ivMode = ivModes[mode] ## determine vc/ic
if value is None:
value = self.holding[ivMode]
else:
self.holding[ivMode] = value
if ivMode == ivModes[currentMode] or force:
#gain = self.getCmdGain(mode)
## override the scale since getChanScale won't necessarily give the correct value
## (we may be about to switch modes)
#DAQGeneric.setChanHolding(self, 'command', value, scale=gain)
pass
self.sigHoldingChanged.emit('primary', self.holding.copy())
def setChanHolding(self, chan, value=None):
if chan == 'command':
self.setHolding(value=value)
else:
DAQGeneric.setChanHolding(self, chan, value)
def getChanHolding(self, chan):
if chan == 'command':
return self.getHolding()
else:
return DAQGeneric.getChanHolding(self, chan)
def getHolding(self, mode=None):
global ivModes
with self.devLock:
if mode is None:
mode = self.getMode()
ivMode = ivModes[mode] ## determine vc/ic
return self.holding[ivMode]
def listModes(self):
global modeNames
return modeNames
def setMode(self, mode):
"""Set the mode of the AxoPatch (by requesting user intervention). Takes care of switching holding levels in I=0 mode if needed."""
startMode = self.getMode()
if startMode == mode:
return
startIvMode = ivModes[startMode]
ivMode = ivModes[mode]
if (startIvMode == 'vc' and ivMode == 'ic') or (startIvMode == 'ic' and ivMode == 'vc'):
## switch to I=0 first
#self.requestModeSwitch('I=0')
self.mode = 'i=0'
self.setHolding(ivMode, force=True) ## we're in I=0 mode now, so it's ok to force the holding value.
### TODO:
### If mode switches back the wrong direction, we need to reset the holding value and cancel.
self.mode = ivMode
self.sigModeChanged.emit(self.mode)
def getMode(self):
return self.mode
def getChanUnits(self, chan):
global ivModes
iv = ivModes[self.getMode()]
if iv == 'vc':
units = ['V', 'A']
else:
units = ['A', 'V']
if chan == 'command':
return units[0]
elif chan == 'secondary':
return units[0]
elif chan == 'primary':
return units[1]
def readChannel(self, ch):
pass
def quit(self):
#self.process.send(None)
self.process.close()
DAQGeneric.quit(self)
class MockClampTask(DAQGenericTask):
def __init__(self, dev, cmd, parentTask):
## make a few changes for compatibility with multiclamp
if 'daqProtocol' not in cmd:
cmd['daqProtocol'] = {}
daqP = cmd['daqProtocol']
if 'command' in cmd:
if 'holding' in cmd:
daqP['command'] = {'command': cmd['command'], 'holding': cmd['holding']}
else:
daqP['command'] = {'command': cmd['command']}
daqP['command']['lowLevelConf'] = {'mockFunc': self.write}
cmd['daqProtocol']['primary'] = {'record': True, 'lowLevelConf': {'mockFunc': self.read}}
DAQGenericTask.__init__(self, dev, cmd['daqProtocol'], parentTask)
self.cmd = cmd
modPath = os.path.abspath(os.path.split(__file__)[0])
def configure(self):
### Record initial state or set initial value
##if 'holding' in self.cmd:
## self.dev.setHolding(self.cmd['mode'], self.cmd['holding'])
if 'mode' in self.cmd:
self.dev.setMode(self.cmd['mode'])
self.ampState = {'mode': self.dev.getMode()}
### Do not configure daq until mode is set. Otherwise, holding values may be incorrect.
DAQGenericTask.configure(self)
def read(self):
## Called by DAQGeneric to simulate a read-from-DAQ
res = self.job.result(timeout=30)._getValue()
return res
def write(self, data, dt):
## Called by DAQGeneric to simulate a write-to-DAQ
self.job = self.dev.simulator.run({'data': data, 'dt': dt, 'mode': self.cmd['mode']}, _callSync='async')
def isDone(self):
## check on neuron process
#return self.process.poll() is not None
return True
def stop(self, abort=False):
DAQGenericTask.stop(self, abort)
class MockClampTaskGui(DAQGenericTaskGui):
def __init__(self, dev, taskRunner):
DAQGenericTaskGui.__init__(self, dev, taskRunner, ownUi=False)
self.layout = QtGui.QGridLayout()
self.layout.setContentsMargins(0,0,0,0)
self.setLayout(self.layout)
self.splitter1 = QtGui.QSplitter()
self.splitter1.setOrientation(QtCore.Qt.Horizontal)
self.layout.addWidget(self.splitter1)
self.splitter2 = QtGui.QSplitter()
self.splitter2.setOrientation(QtCore.Qt.Vertical)
self.modeCombo = QtGui.QComboBox()
self.splitter2.addWidget(self.modeCombo)
self.modeCombo.addItems(self.dev.listModes())
self.splitter3 = QtGui.QSplitter()
self.splitter3.setOrientation(QtCore.Qt.Vertical)
(w1, p1) = self.createChannelWidget('primary')
(w2, p2) = self.createChannelWidget('command')
self.cmdWidget = w2
self.inputWidget = w1
self.cmdPlot = p2
self.inputPlot = p1
self.cmdWidget.setMeta('x', siPrefix=True, suffix='s', dec=True)
self.cmdWidget.setMeta('y', siPrefix=True, dec=True)
self.splitter1.addWidget(self.splitter2)
self.splitter1.addWidget(self.splitter3)
self.splitter2.addWidget(w1)
self.splitter2.addWidget(w2)
self.splitter3.addWidget(p1)
self.splitter3.addWidget(p2)
self.splitter1.setSizes([100, 500])
self.stateGroup = WidgetGroup([
(self.splitter1, 'splitter1'),
(self.splitter2, 'splitter2'),
(self.splitter3, 'splitter3'),
])
self.modeCombo.currentIndexChanged.connect(self.modeChanged)
self.modeChanged()
def saveState(self):
"""Return a dictionary representing the current state of the widget."""
state = {}
state['daqState'] = DAQGenericTaskGui.saveState(self)
state['mode'] = self.getMode()
#state['holdingEnabled'] = self.ctrl.holdingCheck.isChecked()
#state['holding'] = self.ctrl.holdingSpin.value()
return state
def restoreState(self, state):
"""Restore the state of the widget from a dictionary previously generated using saveState"""
#print 'state: ', state
#print 'DaqGeneric : ', dir(DAQGenericTaskGui)
if 'mode' in state:
self.modeCombo.setCurrentIndex(self.modeCombo.findText(state['mode']))
#self.ctrl.holdingCheck.setChecked(state['holdingEnabled'])
#if state['holdingEnabled']:
# self.ctrl.holdingSpin.setValue(state['holding'])
if 'daqState' in state:
return DAQGenericTaskGui.restoreState(self, state['daqState'])
else:
return None
def generateTask(self, params=None):
daqTask = DAQGenericTaskGui.generateTask(self, params)
task = {
'mode': self.getMode(),
'daqProtocol': daqTask
}
return task
def modeChanged(self):
global ivModes
ivm = ivModes[self.getMode()]
w = self.cmdWidget
if ivm == 'vc':
scale = 1e-3
cmdUnits = 'V'
inpUnits = 'A'
else:
scale = 1e-12
cmdUnits = 'A'
inpUnits = 'V'
self.inputWidget.setUnits(inpUnits)
self.cmdWidget.setUnits(cmdUnits)
self.cmdWidget.setMeta('y', minStep=scale, step=scale*10, value=0.)
self.inputPlot.setLabel('left', units=inpUnits)
self.cmdPlot.setLabel('left', units=cmdUnits)
#w.setScale(scale)
#for s in w.getSpins():
#s.setOpts(minStep=scale)
self.cmdWidget.updateHolding()
def getMode(self):
return str(self.modeCombo.currentText())
def getChanHolding(self, chan):
if chan == 'command':
return self.dev.getHolding(self.getMode())
else:
raise Exception("Can't get holding value for channel %s" % chan)
class MockClampDevGui(QtGui.QWidget):
def __init__(self, dev):
QtGui.QWidget.__init__(self)
self.dev = dev
self.ui = Ui_MockClampDevGui()
self.ui.setupUi(self)
self.ui.vcHoldingSpin.setOpts(step=1, minStep=1e-3, dec=True, suffix='V', siPrefix=True)
self.ui.icHoldingSpin.setOpts(step=1, minStep=1e-12, dec=True, suffix='A', siPrefix=True)
#self.ui.modeCombo.currentIndexChanged.connect(self.modeComboChanged)
self.modeRadios = {
'vc': self.ui.vcModeRadio,
'ic': self.ui.icModeRadio,
'i=0': self.ui.i0ModeRadio,
}
self.updateStatus()
for v in self.modeRadios.itervalues():
v.toggled.connect(self.modeRadioChanged)
self.ui.vcHoldingSpin.valueChanged.connect(self.vcHoldingChanged)
self.ui.icHoldingSpin.valueChanged.connect(self.icHoldingChanged)
self.dev.sigHoldingChanged.connect(self.devHoldingChanged)
self.dev.sigModeChanged.connect(self.devModeChanged)
def updateStatus(self):
global modeNames
mode = self.dev.getMode()
if mode is None:
return
vcHold = self.dev.getHolding('vc')
icHold = self.dev.getHolding('ic')
self.modeRadios[mode].setChecked(True)
#self.ui.modeCombo.setCurrentIndex(self.ui.modeCombo.findText(mode))
self.ui.vcHoldingSpin.setValue(vcHold)
self.ui.icHoldingSpin.setValue(icHold)
def devHoldingChanged(self, chan, hval):
if isinstance(hval, dict):
self.ui.vcHoldingSpin.blockSignals(True)
self.ui.icHoldingSpin.blockSignals(True)
self.ui.vcHoldingSpin.setValue(hval['vc'])
self.ui.icHoldingSpin.setValue(hval['ic'])
self.ui.vcHoldingSpin.blockSignals(False)
self.ui.icHoldingSpin.blockSignals(False)
def devModeChanged(self, mode):
for r in self.modeRadios.itervalues():
r.blockSignals(True)
#self.ui.modeCombo.blockSignals(True)
#self.ui.modeCombo.setCurrentIndex(self.ui.modeCombo.findText(mode))
self.modeRadios[mode].setChecked(True)
#self.ui.modeCombo.blockSignals(False)
for r in self.modeRadios.itervalues():
r.blockSignals(False)
def vcHoldingChanged(self):
self.dev.setHolding('vc', self.ui.vcHoldingSpin.value())
def icHoldingChanged(self):
self.dev.setHolding('ic', self.ui.icHoldingSpin.value())
def modeRadioChanged(self, m):
try:
if not m:
return
for mode, r in self.modeRadios.iteritems():
if r.isChecked():
self.dev.setMode(mode)
except CancelException:
self.updateStatus()
|
hiuwo/acq4
|
acq4/devices/MockClamp/MockClamp.py
|
Python
|
mit
| 14,370
|
[
"NEURON"
] |
7edae6590b8e9a109d2c629f09397331fd09445393533e82db041a7e1c2ff8f3
|
import numpy as np
import resource, multiprocessing
import os, sys, glob, time, shutil
from itertools import izip
from collections import defaultdict, Counter
from ete2 import Tree
from Bio import Phylo, SeqIO, AlignIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Align import MultipleSeqAlignment
from sf_miscellaneous import times, read_fasta, \
load_pickle, write_pickle, write_in_fa, write_json, check_dependency, multips
sys.setrecursionlimit(50000)
nuc_alpha = 'ACGT-N'
aa_alpha = 'ACDEFGHIKLMNPQRSTVWY*-X'
def make_dir(dname):
if not os.path.isdir(dname):
try:
os.makedirs(dname)
except OSError as e:
print "Cannot create run_dir", e
def remove_dir(dname):
if os.path.isdir(dname):
shutil.rmtree(dname)
def tree_to_json(node, extra_attr = []):
tree_json = {}
str_attr = ['country','region','clade','strain', 'date', 'muts']
num_attr = ['xvalue', 'yvalue', 'tvalue', 'num_date']
#if hasattr(node, 'name'):
# tree_json['strain'] = node.name
for prop in str_attr:
if hasattr(node, prop):
tree_json[prop] = node.__getattribute__(prop)
for prop in num_attr:
if hasattr(node, prop):
try:
tree_json[prop] = round(node.__getattribute__(prop),7)
except:
print "cannot round:", node.__getattribute__(prop), "assigned as is"
tree_json[prop] = node.__getattribute__(prop)
for prop in extra_attr:
if len(prop)==2 and callable(prop[1]):
if hasattr(node, prop[0]):
tree_json[prop] = prop[1](node.__getattribute__(prop[0]))
else:
if hasattr(node, prop):
tree_json[prop] = node.__getattribute__(prop)
if node.clades:
tree_json["children"] = []
for ch in node.clades:
tree_json["children"].append(tree_to_json(ch, extra_attr))
return tree_json
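# A minimal usage sketch for tree_to_json (the file names below are hypothetical;
# any Bio.Phylo clade carrying the listed attributes works):
#
# from Bio import Phylo
# tree = Phylo.read('GC_00001.nwk', 'newick')
# tree_json = tree_to_json(tree.root, extra_attr=['aa_muts', 'annotation'])
# write_json(tree_json, 'GC_00001_tree.json', indent=None)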
def make_strains_unique(aln):
name_translation = {}
for si,seq in enumerate(aln):
name_translation[seq.id] = 'tax_'+str(si+1)
seq.id = name_translation[seq.id]
seq.name = seq.id
return name_translation
def restore_strain_name(name_translation, seqs):
reverse_translation = {v:k for k,v in name_translation.iteritems()}
for seq in seqs:
if seq.name not in reverse_translation:
print("item not found",seq.name)
continue
if hasattr(seq, 'id'):
seq.id = reverse_translation[seq.name]
#seq.id = seq.id.replace('|','-',1)
seq.name = reverse_translation[seq.name]
def calc_af(aln, alpha):
aln_array = np.array(aln)
af = np.zeros((len(alpha), aln_array.shape[1]))
for ai, state in enumerate(alpha):
af[ai] += (aln_array==state).mean(axis=0)
af[-1] = 1.0 - af[:-1].sum(axis=0)
return af
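# Example (hypothetical toy alignment): for the two aligned sequences 'AC' and
# 'AT' with alpha=nuc_alpha ('ACGT-N'), calc_af returns a 6 x 2 matrix whose
# column 0 is [1, 0, 0, 0, 0, 0] (both sequences carry 'A') and whose column 1
# is [0, 0.5, 0, 0.5, 0, 0]; the last row is overwritten with the residual
# 1 - sum(others), which absorbs any character not listed in alpha.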
def resolve_polytomies(tree):
for node in tree.get_nonterminals('preorder'):
node.confidence = None
if len(node.clades)>2:
n = len(node.clades)
children = list(node.clades)
node.clades = []
node.split(branch_length=1e-5)
if n>3:
node.clades[0].clades = children[:len(children)//2]
node.clades[1].clades = children[len(children)//2:]
for c in node.clades:
c.name=''
c.confidence = None
else:
node.clades[0] = children[0]
node.clades[1].clades = children[1:]
node.clades[1].confidence = None
node.clades[1].name = None
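# Illustration of the splitting rule above: a node with four children
# [A, B, C, D] is split into two new clades carrying [A, B] and [C, D]
# (branch length 1e-5), while a node with three children keeps the first
# child in place and groups the remaining two under a new clade.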
def pad_nucleotide_sequences(aln_aa, seq_nuc):
'''
introduce gaps of 3 ('---') into nucleotide sequences so that they match the
gaps in the corresponding aligned amino acid sequences.
Parameters:
- aln_aa: amino acid alignment
- seq_nuc: unaligned nucleotide sequences.
Returns:
- aligned nucleotide sequences in which every gap length is a multiple of 3
'''
aln_nuc = MultipleSeqAlignment([])
for aa_seq in aln_aa:
try:
tmp_nuc_seq = str(seq_nuc[aa_seq.id].seq)
except KeyError as e:
print aa_seq.id
print 'Key not found, continue with next sequence'
continue
tmpseq = ''
nuc_pos = 0
for aa in aa_seq:
if aa=='-':
tmpseq='%s---'%tmpseq
else:
tmpseq='%s%s'%(tmpseq,tmp_nuc_seq[nuc_pos:(nuc_pos+3)])
nuc_pos+=3
aln_nuc.append(SeqRecord(seq=Seq(tmpseq),id=aa_seq.id))
return aln_nuc
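# Illustration (hypothetical sequences): if the aligned amino acid sequence is
# 'M-K' and the matching unaligned nucleotide sequence is 'ATGAAA', the padded
# result is 'ATG---AAA': every amino acid gap becomes a codon-sized '---' gap.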
def polytomies_midpointRooting(infileName, outfileName, clusterID):
# use ete2 to resolve polytomies and apply midpoint rooting
newickString=open(infileName, 'rb').readline().rstrip()
tree = Tree(newickString,format=1)
tree.resolve_polytomy(recursive=True)
try:
tree.set_outgroup( tree.get_midpoint_outgroup() )
except:
pass
#print clusterID, ' can not conduct midpoint rooting'
tree.ladderize()
## adding the missing node.name
#for ind, node in enumerate(tree.traverse("postorder")):
for ind, node in enumerate(tree.iter_descendants("postorder")):
if node.name=='': node.name='%s%s'%('NODE_0',ind);
with open('./%s'%outfileName, 'wb') as outfile:
outfile.write(tree.write(format=1))
class mpm_tree(object):
'''
class that aligns a set of sequences and infers a tree
'''
def __init__(self, cluster_seq_filepath, **kwarks):
self.clusterID= cluster_seq_filepath.split('/')[-1].split('.fna')[0]
if 'speciesID' in kwarks:
folderID=kwarks['speciesID']
else:
folderID= cluster_seq_filepath.split('/')[-3]
self.seqs = {x.id:x for x in SeqIO.parse(cluster_seq_filepath, 'fasta')}
if 'run_dir' not in kwarks:
import random
#self.run_dir = '_'.join(['tmp', self.clusterID])
self.run_dir = 'tmp/'
self.run_dir += '_'.join([folderID, 'tmp', time.strftime('%H%M%S',time.gmtime()), str(random.randint(0,100000000))])
else:
self.run_dir = kwarks['run_dir']
self.nuc=True
def codon_align(self, alignment_tool="mafft", prune=True, discard_premature_stops=False):
'''
takes a nucleotide alignment, translates it, aligns the amino acids, pads the gaps
note that this suppresses any compensated frameshift mutations
Parameters:
- alignment_tool: ['mafft', 'muscle'] the commandline tool to use
'''
cwd = os.getcwd()
make_dir(self.run_dir)
os.chdir(self.run_dir)
# translate
aa_seqs = {}
for seq in self.seqs.values():
tempseq = seq.seq.translate(table="Bacterial")
# use only sequences that translate without trouble
if not discard_premature_stops or '*' not in str(tempseq)[:-1] or prune==False:
aa_seqs[seq.id]=SeqRecord(tempseq,id=seq.id)
else:
print(seq.id,"has premature stops, discarding")
tmpfname = 'temp_in.fasta'
SeqIO.write(aa_seqs.values(), tmpfname,'fasta')
if alignment_tool=='mafft':
os.system('mafft --reorder --amino temp_in.fasta 1> temp_out.fasta')
aln_aa = AlignIO.read('temp_out.fasta', "fasta")
elif alignment_tool=='muscle':
from Bio.Align.Applications import MuscleCommandline
cline = MuscleCommandline(input=tmpfname, out=tmpfname[:-5]+'aligned.fasta')
cline()
aln_aa = AlignIO.read(tmpfname[:-5]+'aligned.fasta', "fasta")
else:
print 'Alignment tool not supported: '+alignment_tool
#return
#generate nucleotide alignment
self.aln = pad_nucleotide_sequences(aln_aa, self.seqs)
os.chdir(cwd)
remove_dir(self.run_dir)
def align(self):
'''
align sequences in self.seqs using mafft
'''
cwd = os.getcwd()
make_dir(self.run_dir)
os.chdir(self.run_dir)
SeqIO.write(self.seqs.values(), "temp_in.fasta", "fasta")
os.system('mafft --reorder --anysymbol temp_in.fasta 1> temp_out.fasta 2> mafft.log')
self.aln = AlignIO.read('temp_out.fasta', 'fasta')
os.chdir(cwd)
remove_dir(self.run_dir)
def build(self, root='midpoint', raxml=True, fasttree_program='fasttree', raxml_time_limit=0.5, treetime_used=True):
'''
build a phylogenetic tree using fasttree and raxML (optional)
based on nextflu tree building pipeline
'''
import subprocess
cwd = os.getcwd()
make_dir(self.run_dir)
os.chdir(self.run_dir)
AlignIO.write(self.aln, 'origin.fasta', 'fasta')
name_translation = make_strains_unique(self.aln)
AlignIO.write(self.aln, 'temp.fasta', 'fasta')
tree_cmd = [fasttree_program]
if self.nuc: tree_cmd.append("-nt")
tree_cmd.append("temp.fasta 1> initial_tree.newick 2> fasttree.log")
os.system(" ".join(tree_cmd))
out_fname = "tree_infer.newick"
if raxml==False:
#shutil.copy('initial_tree.newick', out_fname)
polytomies_midpointRooting('initial_tree.newick',out_fname, self.clusterID)
elif len(set([x.id for x in SeqIO.parse('temp.fasta', 'fasta')]))>3:
## only for tree with >3 strains
if raxml_time_limit>0:
tmp_tree = Phylo.read('initial_tree.newick','newick')
resolve_iter = 0
resolve_polytomies(tmp_tree)
while (not tmp_tree.is_bifurcating()) and (resolve_iter<10):
resolve_iter+=1
resolve_polytomies(tmp_tree)
Phylo.write(tmp_tree,'initial_tree.newick', 'newick')
AlignIO.write(self.aln,"temp.phyx", "phylip-relaxed")
print( "RAxML tree optimization with time limit", raxml_time_limit, "hours")
# using exec to be able to kill process
end_time = time.time() + int(raxml_time_limit*3600)
process = subprocess.Popen("exec raxml -f d -j -s temp.phyx -n topology -c 25 -m GTRCAT -p 344312987 -t initial_tree.newick", shell=True)
while (time.time() < end_time):
if os.path.isfile('RAxML_result.topology'):
break
time.sleep(10)
process.terminate()
checkpoint_files = glob.glob("RAxML_checkpoint*")
if os.path.isfile('RAxML_result.topology'):
checkpoint_files.append('RAxML_result.topology')
if len(checkpoint_files) > 0:
last_tree_file = checkpoint_files[-1]
shutil.copy(last_tree_file, 'raxml_tree.newick')
else:
shutil.copy("initial_tree.newick", 'raxml_tree.newick')
else:
shutil.copy("initial_tree.newick", 'raxml_tree.newick')
try:
print("RAxML branch length optimization")
os.system("raxml -f e -s temp.phyx -n branches -c 25 -m GTRGAMMA -p 344312987 -t raxml_tree.newick")
shutil.copy('RAxML_result.branches', out_fname)
except:
print("RAxML branch length optimization failed")
shutil.copy('raxml_tree.newick', out_fname)
if treetime_used:
# load the resulting tree as a treetime instance
from treetime import TreeAnc
self.tt = TreeAnc(tree=out_fname, aln=self.aln, gtr='Jukes-Cantor', verbose=0)
# provide short cut to tree and revert names that conflicted with newick format
self.tree = self.tt.tree
else:
self.tree = Phylo.read(out_fname,'newick')
self.tree.root.branch_length=0.0001
restore_strain_name(name_translation, self.aln)
restore_strain_name(name_translation, self.tree.get_terminals())
for node in self.tree.find_clades():
if node.name is not None:
if node.name.startswith('NODE_')==False:
node.ann=node.name
else:
node.name='NODE_0'
os.chdir(cwd)
remove_dir(self.run_dir)
self.is_timetree=False
def ancestral(self, translate_tree = False):
'''
infer ancestral nucleotide sequences using maximum likelihood
and translate the resulting sequences (+ terminals) to amino acids
'''
try:
self.tt.reconstruct_anc(method='ml')
except:
print "trouble at self.tt.reconstruct_anc(method='ml')"
if translate_tree:
for node in self.tree.find_clades():
node.aa_sequence = np.fromstring(str(self.translate_seq("".join(node.sequence))), dtype='S1')
def refine(self, CDS = True):
'''
determine mutations on each branch and attach as string to the branches
'''
for node in self.tree.find_clades():
if node.up is not None:
node.muts = ",".join(["".join(map(str, x)) for x in node.mutations if '-' not in x])
if CDS == True:
node.aa_muts = ",".join([anc+str(pos+1)+der for pos, (anc, der)
in enumerate(zip(node.up.aa_sequence, node.aa_sequence))
if anc!=der and '-' not in anc and '-' not in der])
def translate_seq(self, seq):
'''
custom translation sequence that handles gaps
'''
if type(seq) not in [str, unicode]:
str_seq = str(seq.seq)
else:
str_seq = seq
try:
# soon not needed as future biopython version will translate --- into -
tmp_seq = Seq(str(Seq(str_seq.replace('---', 'NNN')).translate(table="Bacterial")).replace('X','-'))
except:
tmp_seq = Seq(str(Seq(str_seq.replace('-', 'N')).translate(table="Bacterial")).replace('X','-'))
return tmp_seq
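# Illustration (hypothetical input): translate_seq('ATG---AAA') yields
# Seq('M-K'): the codon-sized gap '---' is replaced by 'NNN', which
# translates to 'X' and is then mapped back to '-'.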
def translate(self):
'''
translate the nucleotide alignment to an amino acid alignment
'''
aa_seqs = []
for seq in self.aln:
aa_seqs.append(SeqRecord(seq=self.translate_seq(seq), id=seq.id,
name=seq.name, description=seq.description))
self.aa_aln = MultipleSeqAlignment(aa_seqs)
def mean_std_seqLen(self):
""" returen mean and standard deviation of sequence lengths """
seqLen_arr = np.array([ len(seq) for seq in self.seqs.values()])
return np.mean(seqLen_arr, axis=0), np.std(seqLen_arr, axis=0)
def paralogy_statistics(self):
best_split = find_best_split(self.tree)
return len(best_split.para_nodes), best_split.branch_length
def diversity_statistics_nuc(self):
''' calculate alignment entropy of nucleotide alignments '''
TINY = 1e-10
if not hasattr(self, "aln"):
print("calculate alignment first")
return
self.af_nuc = calc_af(self.aln, nuc_alpha)
is_valid = self.af_nuc[:-2].sum(axis=0)>0.5
tmp_af = self.af_nuc[:-2,is_valid]/self.af_nuc[:-2,is_valid].sum(axis=0)
#self.entropy_nuc = np.mean(-(tmp_af*np.log(tmp_af+TINY)).sum(axis=0))
self.diversity_nuc = np.mean(1.0-(tmp_af**2).sum(axis=0))
def diversity_statistics_aa(self):
''' calculate alignment entropy of amino acid alignments '''
TINY = 1e-10
if not hasattr(self, "aln"):
print("calculate alignment first")
return
self.af_aa = calc_af(self.aa_aln, aa_alpha)
is_valid = self.af_aa[:-2].sum(axis=0)>0.5
tmp_af = self.af_aa[:-2,is_valid]/self.af_aa[:-2,is_valid].sum(axis=0)
#self.entropy_aa = np.mean(-(tmp_af*np.log(tmp_af+TINY)).sum(axis=0))
self.diversity_aa = np.mean(1.0-(tmp_af**2).sum(axis=0))
def mutations_to_branch(self):
self.mut_to_branch = defaultdict(list)
for node in self.tree.find_clades():
if node.up is not None:
for mut in node.mutations:
self.mut_to_branch[mut].append(node)
def reduce_alignments(self,RNA_specific=False):
if RNA_specific:
self.aa_aln=None
self.af_aa =None
else:
self.af_aa= calc_af(self.aa_aln, aa_alpha)
for attr, aln, alpha, freq in [["aln_reduced", self.aln, nuc_alpha, self.af_nuc],
["aa_aln_reduced", self.aa_aln, aa_alpha, self.af_aa]]:
try:
if RNA_specific and attr=="aa_aln_reduced":
pass #** no reduced amino alignment for RNA
else:
consensus = np.array(list(alpha))[freq.argmax(axis=0)]
aln_array = np.array(aln)
aln_array[aln_array==consensus]='.'
new_seqs = [SeqRecord(seq=Seq("".join(consensus)), name="consensus", id="consensus")]
for si, seq in enumerate(aln):
new_seqs.append(SeqRecord(seq=Seq("".join(aln_array[si])), name=seq.name,
id=seq.id, description=seq.description))
self.__setattr__(attr, MultipleSeqAlignment(new_seqs))
except:
print("sf_geneCluster_align_MakeTree: aligment reduction failed")
#def export(self, path = '', extra_attr = ['aa_muts','ann','branch_length','name','longName'], RNA_specific=False):
def export(self, path = '', extra_attr = ['aa_muts','annotation','branch_length','name','accession'], RNA_specific=False):
## write tree
Phylo.write(self.tree, path+self.clusterID+'.nwk', 'newick')
## processing node name
for node in self.tree.get_terminals():
#node.name = node.ann.split('|')[0]
node.accession = node.ann.split('|')[0]
#node.longName = node.ann.split('-')[0]
node.name = node.ann.split('-')[0]
#NZ_CP008870|HV97_RS21955-1-fabG_3-ketoacyl-ACP_reductase
annotation=node.ann.split('-',2)
if len(annotation)==3:
node.annotation= annotation[2]
else:
node.annotation= annotation[0]
## write tree json
for n in self.tree.root.find_clades():
if n.branch_length<1e-6:
n.branch_length = 1e-6
timetree_fname = path+self.clusterID+'_tree.json'
tree_json = tree_to_json(self.tree.root, extra_attr=extra_attr)
write_json(tree_json, timetree_fname, indent=None)
self.reduce_alignments(RNA_specific)
## msa compatible
for i_aln in self.aln:
i_aln.id=i_aln.id.replace('|','-',1)
for i_alnr in self.aln_reduced:
i_alnr.id=i_alnr.id.replace('|','-',1)
AlignIO.write(self.aln, path+self.clusterID+'_na_aln.fa', 'fasta')
AlignIO.write(self.aln_reduced, path+self.clusterID+'_na_aln_reduced.fa', 'fasta')
if RNA_specific==False:
for i_aa_aln in self.aa_aln:
i_aa_aln.id=i_aa_aln.id.replace('|','-',1)
for i_aa_alnr in self.aa_aln_reduced:
i_aa_alnr.id=i_aa_alnr.id.replace('|','-',1)
AlignIO.write(self.aa_aln, path+self.clusterID+'_aa_aln.fa', 'fasta')
AlignIO.write(self.aa_aln_reduced, path+self.clusterID+'_aa_aln_reduced.fa', 'fasta')
## write seq json
write_seq_json=0
if write_seq_json:
elems = {}
for node in self.tree.find_clades():
if hasattr(node, "sequence"):
if hasattr(node, "longName")==False:
node.longName=node.name
elems[node.longName] = {}
nuc_dt= {pos:state for pos, (state, ancstate) in
enumerate(izip(node.sequence.tostring(), self.tree.root.sequence.tostring())) if state!=ancstate}
nodeseq=node.sequence.tostring();nodeseq_len=len(nodeseq)
elems[node.longName]['nuc']=nuc_dt
elems['root'] = {}
elems['root']['nuc'] = self.tree.root.sequence.tostring()
self.sequences_fname=path+self.clusterID+'_seq.json'
write_json(elems, self.sequences_fname, indent=None)
################################################################################
### functions to run the tree building and alignment routines
################################################################################
def align_and_makeTree( fna_file_list, alignFile_path, simple_tree):
fasttree_name= 'fasttree' if check_dependency('fasttree') else 'FastTree'
for gene_cluster_nu_filename in fna_file_list:
try:
# extract GC_00002 from path/GC_00002.aln
clusterID = gene_cluster_nu_filename.split('/')[-1].split('.')[0]
start = time.time();
geneDiversity_file = open(alignFile_path+'gene_diversity.txt', 'a')
if len( read_fasta(gene_cluster_nu_filename) )==1: # nothing to do for singletons
## na_aln.fa
gene_cluster_nu_aln_filename= gene_cluster_nu_filename.replace('.fna','_na_aln.fa')
## geneSeqID separator '|' is replaced by '-' for msa viewer compatibility
with open(gene_cluster_nu_aln_filename,'wb') as write_file:
for SeqID, Sequence in read_fasta(gene_cluster_nu_filename).iteritems():
write_in_fa(write_file, SeqID.replace('|','-'), Sequence)
os.system( ' '.join(['cp',gene_cluster_nu_aln_filename,gene_cluster_nu_aln_filename.replace('_aln','_aln_reduced')]) )
## aa_aln.fa
gene_cluster_aa_filename= gene_cluster_nu_filename.replace('.fna','.faa')
gene_cluster_aa_aln_filename= gene_cluster_nu_filename.replace('.fna','_aa_aln.fa')
## geneSeqID separator '|' is replaced by '-' for msa viewer compatibility
with open(gene_cluster_aa_aln_filename,'wb') as write_file:
for SeqID, Sequence in read_fasta(gene_cluster_aa_filename).iteritems():
write_in_fa(write_file, SeqID.replace('|','-'), Sequence)
os.system( ' '.join(['cp',gene_cluster_aa_aln_filename,gene_cluster_aa_aln_filename.replace('_aln','_aln_reduced')]) )
geneDiversity_file.write('%s\t%s\n'%(clusterID,'0.0'))
else: # align and build tree
#print gene_cluster_nu_filename
myTree = mpm_tree(gene_cluster_nu_filename)
myTree.codon_align()
myTree.translate()
if simple_tree==False:
myTree.build(raxml=False,fasttree_program=fasttree_name,treetime_used=True)
myTree.ancestral(translate_tree=True)
myTree.refine()
else:
myTree.build(raxml=False,fasttree_program=fasttree_name,treetime_used=False)
myTree.diversity_statistics_nuc()
myTree.export(path=alignFile_path)
#myTree.diversity_statistics_aa()
#random_alnID=myTree.seqs.keys()[0].split('-')[0]
diversity_nuc= round(myTree.diversity_nuc,3)#diversity_aa=round(myTree.diversity_aa,3)
#bestSplit_paraNodes,bestSplit_branchLen = myTree.paralogy_statistics()
#mean_seqLen, std_seqLen= myTree.mean_std_seqLen()
#mean_seqLen, std_seqLen= [ round(i,3) for i in mean_seqLen, std_seqLen ]
geneDiversity_file.write('%s\t%s\n'%(clusterID,diversity_nuc))
if 0:
cluster_correl_stats_file = open(alignFile_path+'cluster_correl_stats.txt', 'a')
cluster_correl_stats_file.write('%s\n'%'\t'.join([
str(i) for i in [clusterID, random_alnID, diversity_nuc, \
mean_seqLen, std_seqLen, bestSplit_paraNodes, bestSplit_branchLen ] ]))
except:
print("Aligning and tree building of %s failed"%gene_cluster_nu_filename)
def mem_check(flag):
print(flag, ' memory usage: %.2f GB' % round(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/(1024.0**2),1))
def create_geneCluster_fa(path,folders_dict):
""" dict storing amino_acid Id/Seq from '.faa' files
input: '.faa', '_gene_nuc_dict.cpk', 'allclusters.cpk'
output:
"""
## make sure the geneCluster folder is empty
os.system('rm -rf %s'%(path+'geneCluster/'))
clustering_path= folders_dict['clustering_path']
geneCluster_dt= load_pickle(clustering_path+'allclusters.cpk')
protein_path= folders_dict['protein_path']
nucleotide_path= folders_dict['nucleotide_path']
geneID_to_geneSeqID_dict=load_pickle(path+'geneID_to_geneSeqID.cpk')
gene_aa_dict= load_pickle('%s%s'%(protein_path,'all_protein_seq.cpk'))
gene_na_dict= load_pickle('%s%s'%(nucleotide_path,'all_nucleotide_seq.cpk'))
## create cluster-genes fasta files
cluster_seqs_path=path+'geneCluster/'
os.system('mkdir '+cluster_seqs_path)
## write nuc/aa sequences for each cluster
for clusterID, gene in geneCluster_dt.iteritems():
## geneCluster file name
gene_cluster_nu_filename="%s%s"%(clusterID,'.fna')
gene_cluster_aa_filename="%s%s"%(clusterID,'.faa')
with open( cluster_seqs_path+gene_cluster_nu_filename, 'wb') as gene_cluster_nu_write, \
open( cluster_seqs_path+gene_cluster_aa_filename, 'wb') as gene_cluster_aa_write:
## write nucleotide/amino_acid sequences into geneCluster files
for gene_memb in gene[1]:
## gene_name format: strain_1|locusTag
strain_name= gene_memb.split('|')[0]
geneSeqID=geneID_to_geneSeqID_dict[gene_memb]
write_in_fa(gene_cluster_nu_write, geneSeqID, gene_na_dict[strain_name][gene_memb] )
write_in_fa(gene_cluster_aa_write, geneSeqID, gene_aa_dict[strain_name][gene_memb])
def cluster_align_makeTree( path, folders_dict, parallel, disable_cluster_postprocessing, simple_tree):
"""
create gene clusters as nucleotide/ amino_acid fasta files
and build individual gene trees based on fna files
"""
proc= multiprocessing.Process(target=create_geneCluster_fa, args=(path, folders_dict))
proc.start(); proc.join()
## align, build_tree, make_geneTree_json
cluster_seqs_path = path+'geneCluster/'
if os.path.exists(cluster_seqs_path+'gene_diversity.txt'):
os.system('rm '+cluster_seqs_path+'gene_diversity.txt')
if 0:
with open(cluster_seqs_path+'cluster_correl_stats.txt', 'wb') as cluster_correl_stats_file:
cluster_correl_stats_file.write('%s\n'%'\t'.join(
['clusterID', 'random_alnID', 'diversity_nuc', \
'mean_seqLen', 'std_seqLen', 'bestSplit_paraNodes', 'bestSplit_branchLen'
]))
fna_file_list=glob.glob(cluster_seqs_path+"*.fna")
multips(align_and_makeTree, parallel, fna_file_list,
cluster_seqs_path, simple_tree)
## if cluster_postprocessing skipped, rename allclusters.tsv and allclusters.cpk as the final cluster file
if disable_cluster_postprocessing:
update_diversity_cpk(path)
clustering_path= '%s%s'%(path,'protein_faa/diamond_matches/')
os.system('cp %sallclusters.tsv %sallclusters_final.tsv'%(clustering_path,clustering_path))
os.system('cp %sallclusters.cpk %sallclusters_postprocessed.cpk'%(clustering_path,clustering_path))
def find_best_split(tree):
'''
iterate over all branches in the tree and find the branch with the largest
intersection of node names up- and down-stream.
'''
#make sets of unique child node names for each node
for node in tree.find_clades(order='postorder'):
if node.is_terminal():
node.leafs = set([node.name.split('|')[0]])
else:
node.leafs = set.union(*[c.leafs for c in node])
# make sets of unique names of tips *NOT* in the clade defined by child
best_split = [None, 0, 0]
tree.root.not_leafs=set()
for node in tree.get_nonterminals(order='preorder'):
for child in node:
child.not_leafs = set(node.not_leafs).union(*[c.leafs for c in node if c!=child])
child.para_nodes = set.intersection(child.not_leafs, child.leafs)
## calculate split branch length
## if the parent of the best split is the root, the branch_length is the sum of the two children of the root.
child.split_bl = child.branch_length+ ([c.branch_length for c in node if c!=child][0] if node==tree.root else 0)
more_para_nodes = len(child.para_nodes)>best_split[1]
longer_branch = len(child.para_nodes)==best_split[1] and child.split_bl>best_split[2]
if more_para_nodes or longer_branch:
best_split = [child, len(child.para_nodes), child.split_bl]
return best_split[0]
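# A usage sketch (the tree file name is hypothetical); the returned clade
# carries the attributes consumed by mpm_tree.paralogy_statistics above:
#
# from Bio import Phylo
# best = find_best_split(Phylo.read('GC_00001.nwk', 'newick'))
# print len(best.para_nodes), best.split_bl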
def update_diversity_cpk(path):
## write gene_diversity_Dt cpk file
output_path = path+'geneCluster/'
with open(output_path+'gene_diversity.txt', 'rb') as infile:
write_pickle(output_path+'gene_diversity.cpk',{ i.rstrip().split('\t')[0]:i.rstrip().split('\t')[1] for i in infile})
def update_geneCluster_cpk(path, geneCluster_dt):
## update gene cluster pickled file
cluster_path = path+'protein_faa/diamond_matches/'
write_pickle(cluster_path+'allclusters_postprocessed.cpk',geneCluster_dt)
#write_final_cluster(path, geneCluster_dt)
def write_final_cluster(path):
geneCluster_dt=load_sorted_clusters(path)
outfileName='allclusters_final.tsv'
with open(path+'protein_faa/diamond_matches/'+outfileName, 'wb') as outfile:
for clusterID, cluster_stat in geneCluster_dt:
outfile.write('\t'.join([gene for gene in cluster_stat[1]]))
outfile.write('\n')
def load_sorted_clusters(path):
'''
load gene clusters and sort first by decreasing abundance, then by increasing strain count
'''
geneClusterPath='%s%s'%(path,'protein_faa/diamond_matches/')
geneCluster_dt=load_pickle(geneClusterPath+'allclusters_postprocessed.cpk')
from operator import itemgetter
# sort by decreasing abundance (-v[0], minus to achieve decreasing)
# followed by increasing strain count
return sorted(geneCluster_dt.iteritems(),
key=lambda (k,v): (-itemgetter(0)(v),itemgetter(2)(v)), reverse=False)
#return sorted(geneCluster_dt.iteritems(),
# key=lambda (k,v): (-itemgetter(0)(v),itemgetter(2)(v)), reverse=False)
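# Example (hypothetical entries): with
# geneCluster_dt = {'GC1': [3, ['g1','g2','g3'], 3], 'GC2': [3, ['g4','g5','g6'], 2]}
# the result is [('GC2', ...), ('GC1', ...)]: both clusters tie on abundance 3,
# so the one present in fewer strains (2 < 3) sorts first.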
|
neherlab/pan-genome-analysis
|
scripts/sf_geneCluster_align_makeTree.py
|
Python
|
gpl-3.0
| 31,005
|
[
"Biopython"
] |
7f2182a11512c8c12d579daa137f0bae280cfb6f8cd155733a39d428e8078ea6
|
#################################################################
# $HeadURL$
#################################################################
"""
.. module:: Pfn
:synopsis: ProcessPool and related classes
ProcessPool
ProcessPool creates a pool of worker subprocesses to handle a queue of tasks
much like the producers/consumers paradigm. Users just need to fill the queue
with tasks to be executed and worker tasks will execute them.
To construct ProcessPool one first should call its constructor::
pool = ProcessPool( minSize, maxSize, maxQueuedRequests )
where parameters are:
:param int minSize: at least <minSize> workers will be alive all the time
:param int maxSize: no more than <maxSize> workers will be alive all the time
:param int maxQueuedRequests: maximum number of requests waiting in the queue to be executed
In case another request is added to a full queue, the execution will
block until another request is taken out. The ProcessPool will automatically increase and
decrease the pool of workers as needed, of course not exceeding the above limits.
To add a task to the queue one should execute:::
pool.createAndQueueTask( funcDef,
args = ( arg1, arg2, ... ),
kwargs = { "kwarg1" : value1, "kwarg2" : value2 },
callback = callbackDef,
exceptionCallback = exceptionCallBackDef )
or alternatively by using ProcessTask instance:::
task = ProcessTask( funcDef,
args = ( arg1, arg2, ... )
kwargs = { "kwarg1" : value1, .. },
callback = callbackDef,
exceptionCallback = exceptionCallbackDef )
pool.queueTask( task )
where parameters are:
:param funcDef: callable object definition (function, lambda, class with __call__ slot defined)
:param list args: argument list
:param dict kwargs: keyword arguments dictionary
:param callback: callback function definition
:param exceptionCallback: exception callback function definition
The callback, exceptionCallback and the parameters are all optional. Once a task has been added to the pool,
it will be executed as soon as possible. Worker subprocesses automatically return the return value of the task.
To obtain those results one has to execute::
pool.processResults()
This method will process the existing return values of the task, even if the task does not return
anything. This method has to be called to clean the result queues. To wait until all the requests are finished
and process their result call::
pool.processAllResults()
This function will block until all requests are finished and their result values have been processed.
It is also possible to set the ProcessPool in daemon mode, in which all results are automatically
processed as soon as they are available, just after finalization of task execution. To enable this mode one
has to call::
pool.daemonize()
Callback functions
There are two types of callbacks that can be executed for each task: an exception callback function and
a results callback function. The first one is executed when an unhandled exception has been raised during
task processing, and hence no task results are available; otherwise the second callback type
is executed.
The callbacks could be attached in two places:
- directly in ProcessTask, in that case those have to be shelvable/picklable, so they should be defined as
global functions with the signature :callback( task, taskResult ): where :task: is a :ProcessTask:
reference and :taskResult: is whatever the task callable returns, and
:exceptionCallback( task, exc_info ): where exc_info is an S_ERROR dict of the form
{ "OK": False, "Message": "Exception", "Value": exceptionValue, "Exc_info": exceptionInfo }
- in ProcessPool, in that case there is no limitation on the function type, except the signature, which
should follow :callback( task ): or :exceptionCallback( task ):, as those callbacks definitions
are not put into the queues
The first types of callbacks could be used in case various callable objects are put into the ProcessPool,
so you probably want to handle them differently depending on their results, while the second types are for
executing same type of callables in subprocesses and hence you are expecting the same type of results
everywhere.
"""
__RCSID__ = "$Id$"
import multiprocessing
import sys
import time
import threading
import os
import signal
import Queue
import errno
from types import FunctionType, TypeType, ClassType
try:
from DIRAC.FrameworkSystem.Client.Logger import gLogger
except ImportError:
gLogger = None
try:
from DIRAC.Core.Utilities.LockRing import LockRing
except ImportError:
LockRing = None
try:
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
except ImportError:
def S_OK(val=""):
""" dummy S_OK """
return {'OK': True, 'Value': val}
def S_ERROR(mess):
""" dummy S_ERROR """
return {'OK': False, 'Message': mess}
sLog = gLogger.getSubLogger(__name__)
class WorkingProcess(multiprocessing.Process):
"""
.. class:: WorkingProcess
WorkingProcess is a class that represents activity that runs in a separate process.
It is running main thread (process) in daemon mode, reading tasks from :pendingQueue:, executing
them and pushing back tasks with results to the :resultsQueue:. If task has got a timeout value
defined a separate threading.Timer thread is started killing execution (and destroying worker)
after :ProcessTask.__timeOut: seconds.
Main execution could also terminate in a few different ways:
* on every failed read attempt (from empty :pendingQueue:), the idle loop counter is increased,
worker is terminated when counter is reaching a value of 10;
* when stopEvent is set (so ProcessPool is in draining mode),
* when parent process PID is set to 1 (init process, parent process with ProcessPool is dead).
"""
def __init__(self, pendingQueue, resultsQueue, stopEvent, keepRunning):
""" c'tor
:param self: self reference
:param pendingQueue: queue storing ProcessTask before execution
:type pendingQueue: multiprocessing.Queue
:param resultsQueue: queue storing callbacks and exceptionCallbacks
:type resultsQueue: multiprocessing.Queue
:param stopEvent: event to stop processing
:type stopEvent: multiprocessing.Event
"""
multiprocessing.Process.__init__(self)
# # daemonize
self.daemon = True
# # flag to see if task is being treated
self.__working = multiprocessing.Value('i', 0)
# # task counter
self.__taskCounter = multiprocessing.Value('i', 0)
# # task queue
self.__pendingQueue = pendingQueue
# # results queue
self.__resultsQueue = resultsQueue
# # stop event
self.__stopEvent = stopEvent
# # keep process running until stop event
self.__keepRunning = keepRunning
# # placeholder for watchdog thread
self.__watchdogThread = None
# # placeholder for process thread
self.__processThread = None
# # placeholder for current task
self.task = None
# # start yourself at least
self.start()
def __watchdog(self):
"""
Watchdog thread target
Terminating/killing WorkingProcess when parent process is dead
:param self: self reference
"""
while True:
# # parent is dead, commit suicide
if os.getppid() == 1:
os.kill(self.pid, signal.SIGTERM)
# # wait for half a minute and if worker is still alive use REAL silencer
time.sleep(30)
# # now you're dead
os.kill(self.pid, signal.SIGKILL)
# # wake me up in 5 seconds
time.sleep(5)
def isWorking(self):
"""
Check if process is being executed
:param self: self reference
"""
return self.__working.value == 1
def taskProcessed(self):
"""
Tell how many tasks have been processed so far
:param self: self reference
"""
return self.__taskCounter
def __processTask(self):
"""
processThread target
:param self: self reference
"""
if self.task:
self.task.process()
def run(self):
"""
Task execution
Reads and executes ProcessTask :task: out of pending queue and then pushes it
to the results queue for callback execution.
:param self: self reference
"""
# # start watchdog thread
self.__watchdogThread = threading.Thread(target=self.__watchdog)
self.__watchdogThread.daemon = True
self.__watchdogThread.start()
# # http://cdn.memegenerator.net/instances/400x/19450565.jpg
if LockRing:
# Reset all locks
lr = LockRing()
lr._openAll()
lr._setAllEvents()
# # zero processed task counter
taskCounter = 0
# # zero idle loop counter
idleLoopCount = 0
# # main loop
while True:
# # draining, stopEvent is set, exiting
if self.__stopEvent.is_set():
return
# # clear task
self.task = None
# # read from queue
try:
task = self.__pendingQueue.get(block=True, timeout=10)
except Queue.Empty:
# # idle loop?
idleLoopCount += 1
# # 10th idle loop - exit, nothing to do
if idleLoopCount == 10 and not self.__keepRunning:
return
continue
# # toggle __working flag
self.__working.value = 1
# # save task
self.task = task
# # reset idle loop counter
idleLoopCount = 0
# # process task in a separate thread
self.__processThread = threading.Thread(target=self.__processTask)
self.__processThread.start()
timeout = False
noResults = False
# # join processThread with or without timeout
if self.task.getTimeOut():
self.__processThread.join(self.task.getTimeOut() + 10)
else:
self.__processThread.join()
# # processThread is still alive? stop it!
if self.__processThread.is_alive():
self.__processThread._Thread__stop()
self.task.setResult(S_ERROR(errno.ETIME, "Timed out"))
timeout = True
# if the task finished with no results, something bad happened, e.g.
# undetected timeout
if not self.task.taskResults() and not self.task.taskException():
self.task.setResult(S_ERROR("Task produced no results"))
noResults = True
# # check results and callbacks presence, put task to results queue
if self.task.hasCallback() or self.task.hasPoolCallback():
self.__resultsQueue.put(task)
if timeout or noResults:
# The task execution timed out, stop the process to prevent it from running
# in the background
time.sleep(1)
os.kill(self.pid, signal.SIGKILL)
return
# # increase task counter
taskCounter += 1
self.__taskCounter = taskCounter
# # toggle __working flag
self.__working.value = 0
class ProcessTask(object):
""" Defines task to be executed in WorkingProcess together with its callbacks.
"""
# # taskID
taskID = 0
def __init__(self,
taskFunction,
args=None,
kwargs=None,
taskID=None,
callback=None,
exceptionCallback=None,
usePoolCallbacks=False,
timeOut=0):
""" c'tor
:warning: taskFunction has to be callable: it could be a function, lambda OR a class with
__call__ operator defined. But be careful with the interpretation of args and kwargs, as they
are passed to different places in the above cases:
1. for functions or lambdas args and kwargs are just treated as function parameters
2. for callable classes (say MyTask) args and kwargs are passed to the class constructor
(MyTask.__init__) and MyTask.__call__ should be a method without parameters, i.e.
MyTask definition should be::
class MyTask:
def __init__( self, *args, **kwargs ):
...
def __call__( self ):
...
:warning: depending on the :timeOut: value, taskFunction execution can be forcefully terminated
after :timeOut: seconds spent; a :timeOut: equal to zero means there is no
time out at all, except those applied during :ProcessPool: finalization
:param self: self reference
:param mixed taskFunction: definition of callable object to be executed in this task
:param tuple args: non-keyword arguments
:param dict kwargs: keyword arguments
:param int taskID: task id (optional)
:param int timeOut: estimated time to execute taskFunction in seconds (default = 0, no timeOut at all)
:param mixed callback: result callback function
:param mixed exceptionCallback: callback function to be fired upon exception in taskFunction
"""
self.__taskFunction = taskFunction
self.__taskArgs = args or []
self.__taskKwArgs = kwargs or {}
self.__taskID = taskID
self.__resultCallback = callback
self.__exceptionCallback = exceptionCallback
self.__timeOut = 0
# # set time out
self.setTimeOut(timeOut)
self.__done = False
self.__exceptionRaised = False
self.__taskException = None
self.__taskResult = None
self.__usePoolCallbacks = usePoolCallbacks
def taskResults(self):
"""
Get task results
:param self: self reference
"""
return self.__taskResult
def taskException(self):
"""
Get task exception
:param self: self reference
"""
return self.__taskException
def enablePoolCallbacks(self):
"""
(re)enable use of ProcessPool callbacks
"""
self.__usePoolCallbacks = True
def disablePoolCallbacks(self):
"""
Disable execution of ProcessPool callbacks
"""
self.__usePoolCallbacks = False
def usePoolCallbacks(self):
"""
Check if results should be processed by callbacks defined in the :ProcessPool:
:param self: self reference
"""
return self.__usePoolCallbacks
def hasPoolCallback(self):
"""
Check if asked to execute :ProcessPool: callbacks
:param self: self reference
"""
return self.__usePoolCallbacks
def setTimeOut(self, timeOut):
"""
Set time out (in seconds)
:param self: self reference
:param int timeOut: new time out value
"""
try:
self.__timeOut = int(timeOut)
return S_OK(self.__timeOut)
except (TypeError, ValueError) as error:
return S_ERROR(str(error))
def getTimeOut(self):
"""
Get timeOut value
:param self: self reference
"""
return self.__timeOut
def hasTimeOutSet(self):
"""
Check if timeout is set
:param self: self reference
"""
return bool(self.__timeOut != 0)
def getTaskID(self):
"""
TaskID getter
:param self: self reference
"""
return self.__taskID
def hasCallback(self):
"""
Callback existence checking
:param self: self reference
:return: True if callback or exceptionCallback has been defined, False otherwise
"""
return self.__resultCallback or self.__exceptionCallback or self.__usePoolCallbacks
def exceptionRaised(self):
"""
Flag to determine exception in process
:param self: self reference
"""
return self.__exceptionRaised
def doExceptionCallback(self):
"""
Execute exceptionCallback
:param self: self reference
"""
if self.__done and self.__exceptionRaised and self.__exceptionCallback:
self.__exceptionCallback(self, self.__taskException)
def doCallback(self):
"""
Execute result callback function
:param self: self reference
"""
if self.__done and not self.__exceptionRaised and self.__resultCallback:
self.__resultCallback(self, self.__taskResult)
def setResult(self, result):
"""
Set taskResult to result
"""
self.__taskResult = result
def process(self):
"""
Execute task
:param self: self reference
"""
self.__done = True
try:
# # it's a function?
if isinstance(self.__taskFunction, FunctionType):
self.__taskResult = self.__taskFunction(*self.__taskArgs, **self.__taskKwArgs)
# # or a class?
elif type(self.__taskFunction) in (TypeType, ClassType):
# # create new instance
taskObj = self.__taskFunction(*self.__taskArgs, **self.__taskKwArgs)
# ## check if it is callable, raise TypeError if not
if not callable(taskObj):
raise TypeError("__call__ operator not defined not in %s class" % taskObj.__class__.__name__)
# ## call it at least
self.__taskResult = taskObj()
except Exception as x:
self.__exceptionRaised = True
if gLogger:
gLogger.exception("Exception in process of pool")
if self.__exceptionCallback or self.usePoolCallbacks():
retDict = S_ERROR('Exception')
retDict['Value'] = str(x)
retDict['Exc_info'] = sys.exc_info()[1]
self.__taskException = retDict
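# Sketch of the callable-class task form described in the c'tor docstring
# (the class name is illustrative): args/kwargs are passed to __init__,
# __call__ takes no parameters and its return value becomes the task result.
#
# class MyTask(object):
#     def __init__(self, x):
#         self.x = x
#     def __call__(self):
#         return self.x * 2
#
# task = ProcessTask(MyTask, args=(21,), taskID=1)
# task.process()            # would normally happen inside a WorkingProcess
# print task.taskResults()  # -> 42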
class ProcessPool(object):
"""
.. class:: ProcessPool
ProcessPool
This class is managing multiprocessing execution of tasks (:ProcessTask: instances) in a separate
sub-processes (:WorkingProcess:).
Pool depth
The :ProcessPool: keeps the required number of active workers alive the whole time: new workers are only created
when the pendingQueue is being filled with tasks, never exceeding the defined min and max limits. When the pendingQueue is
empty, idle workers clean themselves up, as each worker has a built-in
self-destroy mechanism after 10 idle loops.
Processing and communication
The communication between :ProcessPool: instance and slaves is performed using two :multiprocessing.Queues:
* pendingQueue, used to push tasks to the workers,
* resultsQueue for revert direction;
and one :multiprocessing.Event: instance (stopEvent), which is working as a fuse to destroy idle workers
in a clean manner.
Processing of task begins with pushing it into :pendingQueue: using :ProcessPool.queueTask: or
:ProcessPool.createAndQueueTask:. Every time a new task is queued, :ProcessPool: checks the existence of
active and idle workers and spawns new ones when required. The task is then read and processed on the worker
side. If results are ready and callback functions are defined, task is put back to the resultsQueue and it is
ready to be picked up by ProcessPool again. To perform this last step one has to call :ProcessPool.processResults:,
or alternatively ask for daemon mode processing, when this function is called again and again in
separate background thread.
Finalisation
Finalization for task processing is done in several steps:
* if pool is working in daemon mode, background result processing thread is joined and stopped
* :pendingQueue: is emptied by :ProcessPool.processAllResults: function, all enqueued tasks are executed
* :stopEvent: is set, so all idle workers are exiting immediately
* non-hanging workers are joined and terminated politely
* the rest of the workers, if any, are forcefully terminated by signals: first by SIGTERM, and if that doesn't work
by SIGKILL
:warn: Be careful and choose the :timeout: argument to :ProcessPool.finalize: wisely. Too short a time period can
cause all workers to be killed.
"""
def __init__(self, minSize=2, maxSize=0, maxQueuedRequests=10,
strictLimits=True, poolCallback=None, poolExceptionCallback=None,
keepProcessesRunning=True):
""" c'tor
:param self: self reference
:param int minSize: minimal number of simultaneously executed tasks
:param int maxSize: maximal number of simultaneously executed tasks
:param int maxQueuedRequests: size of the pending tasks queue
:param bool strictLimits: flag to disallow worker overcommitment
:param callable poolCallback: results callback
:param callable poolExceptionCallback: exception callback
"""
# # min workers
self.__minSize = max(1, minSize)
# # max workers
self.__maxSize = max(self.__minSize, maxSize)
# # queue size
self.__maxQueuedRequests = maxQueuedRequests
# # flag to worker overcommit
self.__strictLimits = strictLimits
# # pool results callback
self.__poolCallback = poolCallback
# # pool exception callback
self.__poolExceptionCallback = poolExceptionCallback
# # pending queue
self.__pendingQueue = multiprocessing.Queue(self.__maxQueuedRequests)
# # results queue
self.__resultsQueue = multiprocessing.Queue(0)
# # stop event
self.__stopEvent = multiprocessing.Event()
# # keep processes running flag
self.__keepRunning = keepProcessesRunning
# # lock
self.__prListLock = threading.Lock()
# # workers dict
self.__workersDict = {}
# # flag to trigger workers draining
self.__draining = False
# # placeholder for daemon results processing
self.__daemonProcess = False
# # create initial workers
self.__spawnNeededWorkingProcesses()
def stopProcessing(self, timeout=10):
"""
Cease fire: drain the pool and stop processing
:param self: self reference
"""
self.finalize(timeout)
def startProcessing(self):
"""
Restart processing again
:param self: self reference
"""
self.__draining = False
self.__stopEvent.clear()
self.daemonize()
def setPoolCallback(self, callback):
"""
Set ProcessPool callback function
:param self: self reference
:param callable callback: callback function
"""
if callable(callback):
self.__poolCallback = callback
def setPoolExceptionCallback(self, exceptionCallback):
"""
Set ProcessPool exception callback function
:param self: self reference
:param callable exceptionCallback: exception callback function
"""
if callable(exceptionCallback):
self.__poolExceptionCallback = exceptionCallback
def getMaxSize(self):
"""
MaxSize getter
:param self: self reference
"""
return self.__maxSize
def getMinSize(self):
"""
MinSize getter
:param self: self reference
"""
return self.__minSize
def getNumWorkingProcesses(self):
"""
Count processes currently being executed
:param self: self reference
"""
counter = 0
self.__prListLock.acquire()
try:
counter = len([pid for pid, worker in self.__workersDict.iteritems() if worker.isWorking()])
finally:
self.__prListLock.release()
return counter
def getNumIdleProcesses(self):
"""
Count processes being idle
:param self: self reference
"""
counter = 0
self.__prListLock.acquire()
try:
counter = len([pid for pid, worker in self.__workersDict.iteritems() if not worker.isWorking()])
finally:
self.__prListLock.release()
return counter
def getFreeSlots(self):
""" get number of free slots available for workers
:param self: self reference
"""
return max(0, self.__maxSize - self.getNumWorkingProcesses())
def __spawnWorkingProcess(self):
"""
Create new process
:param self: self reference
"""
self.__prListLock.acquire()
try:
worker = WorkingProcess(self.__pendingQueue, self.__resultsQueue, self.__stopEvent, self.__keepRunning)
while worker.pid is None:
time.sleep(0.1)
self.__workersDict[worker.pid] = worker
finally:
self.__prListLock.release()
def __cleanDeadProcesses(self):
"""
Delete references to dead workers from ProcessPool.__workersDict
"""
# # check wounded processes
self.__prListLock.acquire()
try:
for pid, worker in self.__workersDict.items():
if not worker.is_alive():
del self.__workersDict[pid]
finally:
self.__prListLock.release()
def __spawnNeededWorkingProcesses(self):
"""
Create N working processes (at least self.__minSize, but no more
than self.__maxSize)
:param self: self reference
"""
self.__cleanDeadProcesses()
# # if we're draining do not spawn new workers
if self.__draining or self.__stopEvent.is_set():
return
while len(self.__workersDict) < self.__minSize:
if self.__draining or self.__stopEvent.is_set():
return
self.__spawnWorkingProcess()
while self.hasPendingTasks() and \
self.getNumIdleProcesses() == 0 and \
len(self.__workersDict) < self.__maxSize:
if self.__draining or self.__stopEvent.is_set():
return
self.__spawnWorkingProcess()
time.sleep(0.1)
def queueTask(self, task, blocking=True, usePoolCallbacks=False):
"""
Enqueue new task into pending queue
:param self: self reference
:param ProcessTask task: new task to execute
:param bool blocking: flag to block if necessary until a free slot is available (default = block)
:param bool usePoolCallbacks: flag to trigger execution of pool callbacks (default = don't execute)
"""
if not isinstance(task, ProcessTask):
raise TypeError("Tasks added to the process pool must be ProcessTask instances")
if usePoolCallbacks and (self.__poolCallback or self.__poolExceptionCallback):
task.enablePoolCallbacks()
self.__prListLock.acquire()
try:
self.__pendingQueue.put(task, block=blocking)
except Queue.Full:
self.__prListLock.release()
return S_ERROR("Queue is full")
finally:
self.__prListLock.release()
self.__spawnNeededWorkingProcesses()
# # throttle a bit to allow task state propagation
time.sleep(0.1)
return S_OK()
def createAndQueueTask(self,
taskFunction,
args=None,
kwargs=None,
taskID=None,
callback=None,
exceptionCallback=None,
blocking=True,
usePoolCallbacks=False,
timeOut=0):
"""
Create new processTask and enqueue it in pending task queue
:param self: self reference
:param mixed taskFunction: callable object definition (FunctionType, LambdaType, callable class)
:param tuple args: non-keyword arguments passed to taskFunction c'tor
:param dict kwargs: keyword arguments passed to taskFunction c'tor
:param int taskID: task Id
:param mixed callback: callback handler, callable object executed after task's execution
:param mixed exceptionCallback: callback handler executed if taskFunction has raised an exception
:param bool blocking: flag to block queue if necessary until free slot is available
:param bool usePoolCallbacks: fire execution of pool defined callbacks after task callbacks
:param int timeOut: time you want to spend executing :taskFunction:
"""
task = ProcessTask(taskFunction, args, kwargs, taskID, callback, exceptionCallback, usePoolCallbacks, timeOut)
return self.queueTask(task, blocking)
def hasPendingTasks(self):
"""
Check if tasks are present in the pending queue
:param self: self reference
:warning: results may be misleading if elements put into the queue are big
"""
return not self.__pendingQueue.empty()
def isFull(self):
"""
Check if the pending queue is full
:param self: self reference
:warning: results may be misleading if elements put into the queue are big
"""
return self.__pendingQueue.full()
def isWorking(self):
"""
Check existence of working subprocesses
:param self: self reference
"""
return not self.__pendingQueue.empty() or self.getNumWorkingProcesses()
def processResults(self):
"""
Execute tasks' callbacks removing them from results queue
:param self: self reference
"""
processed = 0
log = sLog.getSubLogger('WorkingProcess')
while True:
if (
not log.debug(
"Start loop (t=0) queue size = %d, processed = %d" %
(self.__resultsQueue.qsize(),
processed)) and processed == 0 and self.__resultsQueue.qsize()):
log.debug("Process results, queue size = %d" % self.__resultsQueue.qsize())
start = time.time()
self.__cleanDeadProcesses()
log.debug("__cleanDeadProcesses", 't=%.2f' % (time.time() - start))
if not self.__pendingQueue.empty():
self.__spawnNeededWorkingProcesses()
log.debug("__spawnNeededWorkingProcesses", 't=%.2f' % (time.time() - start))
time.sleep(0.1)
if self.__resultsQueue.empty():
if self.__resultsQueue.qsize():
log.warn("Results queue is empty but has non zero size", "%d" % self.__resultsQueue.qsize())
# We only commit suicide if we reach a backlog greater than the maximum number of workers
if self.__resultsQueue.qsize() > self.__maxSize:
return -1
else:
return 0
if processed == 0:
log.debug("Process results, but queue is empty...")
break
# # get task
task = self.__resultsQueue.get()
log.debug("__resultsQueue.get", 't=%.2f' % (time.time() - start))
# # execute callbacks
try:
task.doExceptionCallback()
task.doCallback()
log.debug("doCallback", 't=%.2f' % (time.time() - start))
if task.usePoolCallbacks():
if self.__poolExceptionCallback and task.exceptionRaised():
self.__poolExceptionCallback(task.getTaskID(), task.taskException())
if self.__poolCallback and task.taskResults():
self.__poolCallback(task.getTaskID(), task.taskResults())
log.debug("__poolCallback", 't=%.2f' % (time.time() - start))
except Exception as error:
log.exception("Exception in callback", lException=error)
pass
processed += 1
if processed:
log.debug("Processed %d results" % processed)
else:
log.debug("No results processed")
return processed
def processAllResults(self, timeout=10):
"""
Process all enqueued tasks at once
:param self: self reference
"""
start = time.time()
while self.getNumWorkingProcesses() or not self.__pendingQueue.empty():
self.processResults()
time.sleep(1)
if time.time() - start > timeout:
break
self.processResults()
def finalize(self, timeout=60):
"""
Drain pool, shutdown processing in more or less clean way
:param self: self reference
:param timeout: seconds to wait before killing
"""
# # start draining
self.__draining = True
# # join daemon process
if self.__daemonProcess:
self.__daemonProcess.join(timeout)
# # process all tasks
self.processAllResults(timeout)
# # set stop event, all idle workers should be terminated
self.__stopEvent.set()
# # join idle workers
start = time.time()
log = sLog.getSubLogger("finalize")
nWorkers = 9999999
while self.__workersDict:
self.__cleanDeadProcesses()
if len(self.__workersDict) != nWorkers:
nWorkers = len(self.__workersDict)
log.debug("%d workers still active, timeout = %d" % (nWorkers, timeout))
if timeout <= 0 or time.time() - start >= timeout:
break
time.sleep(0.1)
# # second clean up - join and terminate workers
if self.__workersDict:
log.debug("After cleaning dead processes, %d workers still active, timeout = %d" %
(len(self.__workersDict), timeout))
for worker in self.__workersDict.values():
if worker.is_alive():
worker.terminate()
worker.join(5)
self.__cleanDeadProcesses()
# third clean up - kill'em all!!!
if self.__workersDict:
log.debug("After terminating processes, %d workers still active, timeout = %d, kill them" %
(len(self.__workersDict), timeout))
self.__filicide()
def __filicide(self):
"""
Kill all workers, kill'em all!
:param self: self reference
"""
while self.__workersDict:
pid = self.__workersDict.keys().pop(0)
worker = self.__workersDict[pid]
if worker.is_alive():
os.kill(pid, signal.SIGKILL)
del self.__workersDict[pid]
def daemonize(self):
"""
Put the ProcessPool into daemon mode: result processing runs in a
separate background thread for the lifetime of the pool, so task
results are handled as soon as they become available.
:param self: self reference
"""
if self.__daemonProcess:
return
self.__daemonProcess = threading.Thread(target=self.__backgroundProcess)
self.__daemonProcess.setDaemon(1)
self.__daemonProcess.start()
def __backgroundProcess(self):
"""
Daemon thread target
:param self: self reference
"""
while True:
if self.__draining:
return
self.processResults()
time.sleep(1)
def __del__(self):
"""
Destructor: finalize the pool
:param self: self reference
"""
self.finalize(timeout=10)
|
fstagni/DIRAC
|
Core/Utilities/ProcessPool.py
|
Python
|
gpl-3.0
| 33,005
|
[
"DIRAC"
] |
6c7169b9bfcbe7f5f42e6fdb531ce01f5359411185b050c1cd97e4d400f688d4
|
"""
Tests for discussion pages
"""
import datetime
from pytz import UTC
from uuid import uuid4
from nose.plugins.attrib import attr
from .helpers import UniqueCourseTest
from ..pages.lms.auto_auth import AutoAuthPage
from ..pages.lms.courseware import CoursewarePage
from ..pages.lms.discussion import (
DiscussionTabSingleThreadPage,
InlineDiscussionPage,
InlineDiscussionThreadPage,
DiscussionUserProfilePage,
DiscussionTabHomePage,
DiscussionSortPreferencePage,
)
from ..fixtures.course import CourseFixture, XBlockFixtureDesc
from ..fixtures.discussion import (
SingleThreadViewFixture,
UserProfileViewFixture,
SearchResultFixture,
Thread,
Response,
Comment,
SearchResult,
)
class DiscussionResponsePaginationTestMixin(object):
"""
A mixin containing tests for response pagination for use by both inline
discussion and the discussion tab
"""
def setup_thread(self, num_responses, **thread_kwargs):
"""
Create a test thread with the given number of responses, passing all
keyword arguments through to the Thread fixture, then invoke
setup_thread_page.
"""
thread_id = "test_thread_{}".format(uuid4().hex)
thread_fixture = SingleThreadViewFixture(
Thread(id=thread_id, commentable_id=self.discussion_id, **thread_kwargs)
)
for i in range(num_responses):
thread_fixture.addResponse(Response(id=str(i), body=str(i)))
thread_fixture.push()
self.setup_thread_page(thread_id)
def assert_response_display_correct(self, response_total, displayed_responses):
"""
Assert that various aspects of the display of responses are all correct:
* Text indicating total number of responses
* Presence of "Add a response" button
* Number of responses actually displayed
* Presence and text of indicator of how many responses are shown
* Presence and text of button to load more responses
"""
self.assertEqual(
self.thread_page.get_response_total_text(),
str(response_total) + " responses"
)
self.assertEqual(self.thread_page.has_add_response_button(), response_total != 0)
self.assertEqual(self.thread_page.get_num_displayed_responses(), displayed_responses)
self.assertEqual(
self.thread_page.get_shown_responses_text(),
(
None if response_total == 0 else
"Showing all responses" if response_total == displayed_responses else
"Showing first {} responses".format(displayed_responses)
)
)
self.assertEqual(
self.thread_page.get_load_responses_button_text(),
(
None if response_total == displayed_responses else
"Load all responses" if response_total - displayed_responses < 100 else
"Load next 100 responses"
)
)
def test_pagination_no_responses(self):
self.setup_thread(0)
self.assert_response_display_correct(0, 0)
def test_pagination_few_responses(self):
self.setup_thread(5)
self.assert_response_display_correct(5, 5)
def test_pagination_two_response_pages(self):
self.setup_thread(50)
self.assert_response_display_correct(50, 25)
self.thread_page.load_more_responses()
self.assert_response_display_correct(50, 50)
def test_pagination_exactly_two_response_pages(self):
self.setup_thread(125)
self.assert_response_display_correct(125, 25)
self.thread_page.load_more_responses()
self.assert_response_display_correct(125, 125)
def test_pagination_three_response_pages(self):
self.setup_thread(150)
self.assert_response_display_correct(150, 25)
self.thread_page.load_more_responses()
self.assert_response_display_correct(150, 125)
self.thread_page.load_more_responses()
self.assert_response_display_correct(150, 150)
def test_add_response_button(self):
self.setup_thread(5)
self.assertTrue(self.thread_page.has_add_response_button())
self.thread_page.click_add_response_button()
def test_add_response_button_closed_thread(self):
self.setup_thread(5, closed=True)
self.assertFalse(self.thread_page.has_add_response_button())
@attr('shard_1')
class DiscussionTabSingleThreadTest(UniqueCourseTest, DiscussionResponsePaginationTestMixin):
"""
Tests for the discussion page displaying a single thread
"""
def setUp(self):
super(DiscussionTabSingleThreadTest, self).setUp()
self.discussion_id = "test_discussion_{}".format(uuid4().hex)
# Create a course to register for
CourseFixture(**self.course_info).install()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def setup_thread_page(self, thread_id):
self.thread_page = DiscussionTabSingleThreadPage(self.browser, self.course_id, thread_id) # pylint:disable=W0201
self.thread_page.visit()
@attr('shard_1')
class DiscussionCommentDeletionTest(UniqueCourseTest):
"""
Tests for deleting comments displayed beneath responses in the single thread view.
"""
def setUp(self):
super(DiscussionCommentDeletionTest, self).setUp()
# Create a course to register for
CourseFixture(**self.course_info).install()
def setup_user(self, roles=None):
roles_str = ','.join(roles or [])
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()
def setup_view(self):
view = SingleThreadViewFixture(Thread(id="comment_deletion_test_thread"))
view.addResponse(
Response(id="response1"),
[Comment(id="comment_other_author", user_id="other"), Comment(id="comment_self_author", user_id=self.user_id)])
view.push()
def test_comment_deletion_as_student(self):
self.setup_user()
self.setup_view()
page = DiscussionTabSingleThreadPage(self.browser, self.course_id, "comment_deletion_test_thread")
page.visit()
self.assertTrue(page.is_comment_deletable("comment_self_author"))
self.assertTrue(page.is_comment_visible("comment_other_author"))
self.assertFalse(page.is_comment_deletable("comment_other_author"))
page.delete_comment("comment_self_author")
def test_comment_deletion_as_moderator(self):
self.setup_user(roles=['Moderator'])
self.setup_view()
page = DiscussionTabSingleThreadPage(self.browser, self.course_id, "comment_deletion_test_thread")
page.visit()
self.assertTrue(page.is_comment_deletable("comment_self_author"))
self.assertTrue(page.is_comment_deletable("comment_other_author"))
page.delete_comment("comment_self_author")
page.delete_comment("comment_other_author")
@attr('shard_1')
class DiscussionCommentEditTest(UniqueCourseTest):
"""
Tests for editing comments displayed beneath responses in the single thread view.
"""
def setUp(self):
super(DiscussionCommentEditTest, self).setUp()
# Create a course to register for
CourseFixture(**self.course_info).install()
def setup_user(self, roles=None):
roles_str = ','.join(roles or [])
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id, roles=roles_str).visit().get_user_id()
def setup_view(self):
view = SingleThreadViewFixture(Thread(id="comment_edit_test_thread"))
view.addResponse(
Response(id="response1"),
[Comment(id="comment_other_author", user_id="other"), Comment(id="comment_self_author", user_id=self.user_id)])
view.push()
def edit_comment(self, page, comment_id):
page.start_comment_edit(comment_id)
new_comment = "edited body"
page.set_comment_editor_value(comment_id, new_comment)
page.submit_comment_edit(comment_id, new_comment)
def test_edit_comment_as_student(self):
self.setup_user()
self.setup_view()
page = DiscussionTabSingleThreadPage(self.browser, self.course_id, "comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
self.assertTrue(page.is_comment_visible("comment_other_author"))
self.assertFalse(page.is_comment_editable("comment_other_author"))
self.edit_comment(page, "comment_self_author")
def test_edit_comment_as_moderator(self):
self.setup_user(roles=["Moderator"])
self.setup_view()
page = DiscussionTabSingleThreadPage(self.browser, self.course_id, "comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
self.assertTrue(page.is_comment_editable("comment_other_author"))
self.edit_comment(page, "comment_self_author")
self.edit_comment(page, "comment_other_author")
def test_cancel_comment_edit(self):
self.setup_user()
self.setup_view()
page = DiscussionTabSingleThreadPage(self.browser, self.course_id, "comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
original_body = page.get_comment_body("comment_self_author")
page.start_comment_edit("comment_self_author")
page.set_comment_editor_value("comment_self_author", "edited body")
page.cancel_comment_edit("comment_self_author", original_body)
def test_editor_visibility(self):
"""Only one editor should be visible at a time within a single response"""
self.setup_user(roles=["Moderator"])
self.setup_view()
page = DiscussionTabSingleThreadPage(self.browser, self.course_id, "comment_edit_test_thread")
page.visit()
self.assertTrue(page.is_comment_editable("comment_self_author"))
self.assertTrue(page.is_comment_editable("comment_other_author"))
self.assertTrue(page.is_add_comment_visible("response1"))
original_body = page.get_comment_body("comment_self_author")
page.start_comment_edit("comment_self_author")
self.assertFalse(page.is_add_comment_visible("response1"))
self.assertTrue(page.is_comment_editor_visible("comment_self_author"))
page.set_comment_editor_value("comment_self_author", "edited body")
page.start_comment_edit("comment_other_author")
self.assertFalse(page.is_comment_editor_visible("comment_self_author"))
self.assertTrue(page.is_comment_editor_visible("comment_other_author"))
self.assertEqual(page.get_comment_body("comment_self_author"), original_body)
page.start_response_edit("response1")
self.assertFalse(page.is_comment_editor_visible("comment_other_author"))
self.assertTrue(page.is_response_editor_visible("response1"))
original_body = page.get_comment_body("comment_self_author")
page.start_comment_edit("comment_self_author")
self.assertFalse(page.is_response_editor_visible("response1"))
self.assertTrue(page.is_comment_editor_visible("comment_self_author"))
page.cancel_comment_edit("comment_self_author", original_body)
self.assertFalse(page.is_comment_editor_visible("comment_self_author"))
self.assertTrue(page.is_add_comment_visible("response1"))
@attr('shard_1')
class InlineDiscussionTest(UniqueCourseTest, DiscussionResponsePaginationTestMixin):
"""
Tests for inline discussions
"""
def setUp(self):
super(InlineDiscussionTest, self).setUp()
self.discussion_id = "test_discussion_{}".format(uuid4().hex)
self.course_fix = CourseFixture(**self.course_info).add_children(
XBlockFixtureDesc("chapter", "Test Section").add_children(
XBlockFixtureDesc("sequential", "Test Subsection").add_children(
XBlockFixtureDesc("vertical", "Test Unit").add_children(
XBlockFixtureDesc(
"discussion",
"Test Discussion",
metadata={"discussion_id": self.discussion_id}
)
)
)
)
).install()
self.user_id = AutoAuthPage(self.browser, course_id=self.course_id).visit().get_user_id()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.courseware_page.visit()
self.discussion_page = InlineDiscussionPage(self.browser, self.discussion_id)
def setup_thread_page(self, thread_id):
self.discussion_page.expand_discussion()
self.assertEqual(self.discussion_page.get_num_displayed_threads(), 1)
self.thread_page = InlineDiscussionThreadPage(self.browser, thread_id) # pylint:disable=W0201
self.thread_page.expand()
def test_initial_render(self):
self.assertFalse(self.discussion_page.is_discussion_expanded())
def test_expand_discussion_empty(self):
self.discussion_page.expand_discussion()
self.assertEqual(self.discussion_page.get_num_displayed_threads(), 0)
def check_anonymous_to_peers(self, is_staff):
thread = Thread(id=uuid4().hex, anonymous_to_peers=True, commentable_id=self.discussion_id)
thread_fixture = SingleThreadViewFixture(thread)
thread_fixture.push()
self.setup_thread_page(thread.get("id"))
self.assertEqual(self.thread_page.is_thread_anonymous(), not is_staff)
def test_anonymous_to_peers_threads_as_staff(self):
AutoAuthPage(self.browser, course_id=self.course_id, roles="Administrator").visit()
self.courseware_page.visit()
self.check_anonymous_to_peers(True)
def test_anonymous_to_peers_threads_as_peer(self):
self.check_anonymous_to_peers(False)
def test_discussion_blackout_period(self):
now = datetime.datetime.now(UTC)
self.course_fix.add_advanced_settings(
{
u"discussion_blackouts": {
"value": [
[
(now - datetime.timedelta(days=14)).isoformat(),
(now + datetime.timedelta(days=2)).isoformat()
]
]
}
}
)
self.course_fix._add_advanced_settings()
self.browser.refresh()
thread = Thread(id=uuid4().hex, commentable_id=self.discussion_id)
thread_fixture = SingleThreadViewFixture(thread)
thread_fixture.addResponse(
Response(id="response1"),
[Comment(id="comment1", user_id="other"), Comment(id="comment2", user_id=self.user_id)])
thread_fixture.push()
self.setup_thread_page(thread.get("id"))
self.assertFalse(self.discussion_page.element_exists(".new-post-btn"))
self.assertFalse(self.thread_page.has_add_response_button())
self.assertFalse(self.thread_page.is_response_editable("response1"))
self.assertFalse(self.thread_page.is_add_comment_visible("response1"))
self.assertFalse(self.thread_page.is_comment_editable("comment1"))
self.assertFalse(self.thread_page.is_comment_editable("comment2"))
self.assertFalse(self.thread_page.is_comment_deletable("comment1"))
self.assertFalse(self.thread_page.is_comment_deletable("comment2"))
@attr('shard_1')
class DiscussionUserProfileTest(UniqueCourseTest):
"""
Tests for user profile page in discussion tab.
"""
PAGE_SIZE = 20 # django_comment_client.forum.views.THREADS_PER_PAGE
PROFILED_USERNAME = "profiled-user"
def setUp(self):
super(DiscussionUserProfileTest, self).setUp()
CourseFixture(**self.course_info).install()
# The following line creates a user enrolled in our course, whose
# threads will be viewed, but not the one who will view the page.
# It isn't necessary to log them in, but using the AutoAuthPage
# saves a lot of code.
self.profiled_user_id = AutoAuthPage(
self.browser,
username=self.PROFILED_USERNAME,
course_id=self.course_id
).visit().get_user_id()
# now create a second user who will view the profile.
self.user_id = AutoAuthPage(
self.browser,
course_id=self.course_id
).visit().get_user_id()
def check_pages(self, num_threads):
# set up the stub server to return the desired amount of thread results
threads = [Thread(id=uuid4().hex) for _ in range(num_threads)]
UserProfileViewFixture(threads).push()
# navigate to default view (page 1)
page = DiscussionUserProfilePage(
self.browser,
self.course_id,
self.profiled_user_id,
self.PROFILED_USERNAME
)
page.visit()
current_page = 1
total_pages = max(num_threads - 1, 1) / self.PAGE_SIZE + 1
all_pages = range(1, total_pages + 1)
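# With PAGE_SIZE = 20 this evaluates to: 0 or 1 threads -> 1 page,
# 21 threads -> 2 pages, 151 threads -> 8 pages (Python 2 integer division).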
def _check_page():
# ensure the page being displayed as "current" is the expected one
self.assertEqual(page.get_current_page(), current_page)
# ensure the expected threads are being shown in the right order
threads_expected = threads[(current_page - 1) * self.PAGE_SIZE:current_page * self.PAGE_SIZE]
self.assertEqual(page.get_shown_thread_ids(), [t["id"] for t in threads_expected])
# ensure the clickable page numbers are the expected ones
self.assertEqual(page.get_clickable_pages(), [
p for p in all_pages
if p != current_page
and p - 2 <= current_page <= p + 2
or (current_page > 2 and p == 1)
or (current_page < total_pages and p == total_pages)
])
# ensure the previous button is shown, but only if it should be.
# when it is shown, make sure it works.
if current_page > 1:
self.assertTrue(page.is_prev_button_shown(current_page - 1))
page.click_prev_page()
self.assertEqual(page.get_current_page(), current_page - 1)
page.click_next_page()
self.assertEqual(page.get_current_page(), current_page)
else:
self.assertFalse(page.is_prev_button_shown())
# ensure the next button is shown, but only if it should be.
if current_page < total_pages:
self.assertTrue(page.is_next_button_shown(current_page + 1))
else:
self.assertFalse(page.is_next_button_shown())
# click all the way up through each page
for i in range(current_page, total_pages):
_check_page()
if current_page < total_pages:
page.click_on_page(current_page + 1)
current_page += 1
# click all the way back down
for i in range(current_page, 0, -1):
_check_page()
if current_page > 1:
page.click_on_page(current_page - 1)
current_page -= 1
def test_0_threads(self):
self.check_pages(0)
def test_1_thread(self):
self.check_pages(1)
def test_20_threads(self):
self.check_pages(20)
def test_21_threads(self):
self.check_pages(21)
def test_151_threads(self):
self.check_pages(151)
@attr('shard_1')
class DiscussionSearchAlertTest(UniqueCourseTest):
"""
Tests for spawning and dismissing alerts related to user search actions and their results.
"""
SEARCHED_USERNAME = "gizmo"
def setUp(self):
super(DiscussionSearchAlertTest, self).setUp()
CourseFixture(**self.course_info).install()
# first auto auth call sets up a user that we will search for in some tests
self.searched_user_id = AutoAuthPage(
self.browser,
username=self.SEARCHED_USERNAME,
course_id=self.course_id
).visit().get_user_id()
# this auto auth call creates the actual session user
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.page = DiscussionTabHomePage(self.browser, self.course_id)
self.page.visit()
def setup_corrected_text(self, text):
SearchResultFixture(SearchResult(corrected_text=text)).push()
def check_search_alert_messages(self, expected):
actual = self.page.get_search_alert_messages()
self.assertEqual(len(actual), len(expected))
self.assertTrue(all(sub.lower() in msg.lower() for msg, sub in zip(actual, expected)))
def test_no_rewrite(self):
self.setup_corrected_text(None)
self.page.perform_search()
self.check_search_alert_messages(["no threads"])
def test_rewrite_dismiss(self):
self.setup_corrected_text("foo")
self.page.perform_search()
self.check_search_alert_messages(["foo"])
self.page.dismiss_alert_message("foo")
self.check_search_alert_messages([])
def test_new_search(self):
self.setup_corrected_text("foo")
self.page.perform_search()
self.check_search_alert_messages(["foo"])
self.setup_corrected_text("bar")
self.page.perform_search()
self.check_search_alert_messages(["bar"])
self.setup_corrected_text(None)
self.page.perform_search()
self.check_search_alert_messages(["no threads"])
def test_rewrite_and_user(self):
self.setup_corrected_text("foo")
self.page.perform_search(self.SEARCHED_USERNAME)
self.check_search_alert_messages(["foo", self.SEARCHED_USERNAME])
def test_user_only(self):
self.setup_corrected_text(None)
self.page.perform_search(self.SEARCHED_USERNAME)
self.check_search_alert_messages(["no threads", self.SEARCHED_USERNAME])
# make sure clicking the link leads to the user profile page
UserProfileViewFixture([]).push()
self.page.get_search_alert_links().first.click()
DiscussionUserProfilePage(
self.browser,
self.course_id,
self.searched_user_id,
self.SEARCHED_USERNAME
).wait_for_page()
@attr('shard_1')
class DiscussionSortPreferenceTest(UniqueCourseTest):
"""
Tests for the discussion page displaying a single thread.
"""
def setUp(self):
super(DiscussionSortPreferenceTest, self).setUp()
# Create a course to register for.
CourseFixture(**self.course_info).install()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self.sort_page = DiscussionSortPreferencePage(self.browser, self.course_id)
self.sort_page.visit()
def test_default_sort_preference(self):
"""
Test the default sort preference of a user (default = "date").
"""
selected_sort = self.sort_page.get_selected_sort_preference()
self.assertEqual(selected_sort, "date")
def test_change_sort_preference(self):
"""
Test that the user's sort preference changes properly.
"""
selected_sort = ""
for sort_type in ["votes", "comments", "date"]:
self.assertNotEqual(selected_sort, sort_type)
self.sort_page.change_sort_preference(sort_type)
selected_sort = self.sort_page.get_selected_sort_preference()
self.assertEqual(selected_sort, sort_type)
def test_last_preference_saved(self):
"""
Test that the user's last sort preference is saved.
"""
selected_sort = ""
for sort_type in ["votes", "comments", "date"]:
self.assertNotEqual(selected_sort, sort_type)
self.sort_page.change_sort_preference(sort_type)
selected_sort = self.sort_page.get_selected_sort_preference()
self.assertEqual(selected_sort, sort_type)
self.sort_page.refresh_page()
selected_sort = self.sort_page.get_selected_sort_preference()
self.assertEqual(selected_sort, sort_type)
|
xiandiancloud/edxplaltfom-xusong
|
common/test/acceptance/tests/test_discussion.py
|
Python
|
agpl-3.0
| 24,307
|
[
"VisIt"
] |
3c2daafd3d608be90129d5ce746d50d2aba2383a2c9e37d9c74f9984dcc74434
|
#!/usr/bin/env python
import sys
import numpy as np
from astropy.io import fits
class Up_parse:
"""
This class parses a UVES_popler output file.
It provides wavelength, flux, sigma_error and mask (valid pixels) arrays.
"""
def __init__(self, path_to_fits,off_set=0.0):
shift = 1. + off_set/299792.458 # (1+v/c)
fits_opened = fits.open(path_to_fits)
self.flux = fits_opened[0].data[0]
self.length = self.flux.size
self.error = fits_opened[0].data[1]
#self.error = error[error == np.inf] = -0.5 # replace the pixels with zero ivar
self.wave = self.pix2wave(np.arange(self.length),fits_opened)*shift
#self.wave = np.array([self.pix2wave(pix,fits_opened)*shift for pix in range(fits_opened[0].data[0].size)])
fits_opened.close()
#self.weight = np.array([1.0 if mask else 1E-10 for mask in self.mask]) # not working in spline?
def pix2wave(self,pixel,fits_opened):
"""
Convert a pixel number into a wavelength.
It is based on the info from the header.
"""
log_lin = fits_opened[0].header['DC-FLAG']
if log_lin == 1:
wl_0 = 10**fits_opened[0].header['CRVAL1']
CRPIX1 = fits_opened[0].header['CRPIX1']
log_disp = fits_opened[0].header['CD1_1']
w_i=wl_0*10**(((pixel+1)-CRPIX1)*log_disp)
else:
sys.exit('wavelength scale should be log-linear!')
return w_i
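# Worked example (header values are assumptions): with CRVAL1 = log10(3000.0),
# CRPIX1 = 1 and CD1_1 = 2e-6, pixel 0 maps to 3000.0 A and pixel 1 to
# 3000.0 * 10**2e-6 ~ 3000.014 A.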
def convolve(self, res, file_name):
"""
Convolves the spectrum using the Barak package.
res is a path to the file with Gaussian kernel specified with '--fwhm'.
"""
from barak.convolve import convolve_constant_dv
if res is None:
self.flux = convolve_constant_dv(self.wave,self.flux,wa_dv=self.wave,npix=4)
else:
self.flux = convolve_constant_dv(self.wave,self.flux,vfwhm=self.spec_fwhm_file(res)[file_name])
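# Hedged usage sketch (file name and velocity offset are assumptions):
#   sp = Up_parse('combined.fits', off_set=5.0)  # shift wavelengths by +5 km/s
#   print(sp.wave[:3], sp.flux[:3], sp.error[:3])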
|
ezavarygin/specplot
|
specplot_tools/up_parse.py
|
Python
|
mit
| 2,000
|
[
"Gaussian"
] |
badc6e2e0ef79582d59260b2edb57ffd6e2a77523171102558a79bc986818681
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.fitting.reinitialization Contains the FittingReinitializer class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import the relevant PTS classes and modules
from .component import FittingComponent
from ..component.galaxy import GalaxyModelingComponent
from ...core.basics.log import log
from ..misc.select import select_from_model_suite, select_from_fitting_context, select_from_analysis_context
# -----------------------------------------------------------------
class FittingReinitializer(FittingComponent, GalaxyModelingComponent):
"""
This class ...
"""
def __init__(self, *args, **kwargs):
"""
This function ...
:param args:
:param kwargs:
"""
# Call the constructor of the base classes
#super(FittingReinitializer, self).__init__(*args, **kwargs)
FittingComponent.__init__(self, no_config=True)
GalaxyModelingComponent.__init__(self, *args, **kwargs)
# -----------------------------------------------------------------
def _run(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Load
self.load_model()
# -----------------------------------------------------------------
def setup(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Call the setup function of the base class
#super(FittingReinitializer, self).setup(**kwargs)
FittingComponent.setup(self, **kwargs)
GalaxyModelingComponent.setup(self, **kwargs)
# -----------------------------------------------------------------
@property
def from_model(self):
"""
This function ...
:return:
"""
return self.config.origin == "model"
# -----------------------------------------------------------------
@property
def from_fitting_run(self):
"""
This function ...
:return:
"""
return self.config.origin == "fitting_run"
# -----------------------------------------------------------------
@property
def from_analysis_run(self):
"""
This function ...
:return:
"""
return self.config.origin == "analysis_run"
# -----------------------------------------------------------------
def load_model(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Getting the model ...")
# Load from model
if self.from_model:
# Get the model
model_name = self.prompt_model()
# Update the analysis info
#self.analysis_run_info.model_name = model_name
#self.analysis_run_info.parameter_values = self.parameter_values
# Prompt for a fitting run
elif self.from_fitting_run:
# Get the model
run_id, generation_name, simulation_name, chi_squared = self.prompt_fitting()
# Update the analysis info
#self.analysis_run_info.fitting_run = run_id
#self.analysis_run_info.generation_name = generation_name
#self.analysis_run_info.simulation_name = simulation_name
#self.analysis_run_info.chi_squared = chi_squared
#self.analysis_run_info.parameter_values = self.parameter_values
# Set the name of the corresponding model of the model suite
#self.analysis_run_info.model_name = self.definition.name
# From an analysis run
elif self.from_analysis_run:
# Get the model
self.prompt_analysis()
# Invalid
else: raise ValueError("Invalid value for 'origin'")
# -----------------------------------------------------------------
def prompt_model(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Prompting for the model from the model suite ...")
# Select the model
model_name, ski, definition, input_paths, parameter_values = select_from_model_suite(self.model_suite, adapt=self.config.adapt, name=self.config.model_name)
# Set attributes
self.ski = ski
self.definition = definition
self.parameter_values = parameter_values
self.input_paths = input_paths
# Set the 'distance' parameter value, since instruments are still not adapted from the default template.
# Instruments will only be added to the ski file later, so the parameter value obtained from the 'distance'
# label in 'select_from_model_suite' is still incorrect
# Other instrument properties should have been fixed (with the 'fix_labels' function)
self.parameter_values["distance"] = self.galaxy_distance
# Return the model name
return model_name
# -----------------------------------------------------------------
def prompt_fitting(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Prompting for the model from the fitting context ...")
# Select the model
run_id, generation_name, simulation_name, fitting_run, chi_squared, ski, input_paths, parameter_values = select_from_fitting_context(self.fitting_context)
# Load the model definition
definition = fitting_run.model_definition
# Set attributes
self.ski = ski
self.definition = definition
self.parameter_values = parameter_values
self.input_paths = input_paths
# Return the fitting run ID, generation name, simulation name, and chi_squared
return run_id, generation_name, simulation_name, chi_squared
# -----------------------------------------------------------------
def prompt_analysis(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Prompting for the model from an analysis run ...")
# Select
run_name, analysis_run = select_from_analysis_context(self.analysis_context)
# TODO: further implement ...
# -----------------------------------------------------------------
|
SKIRT/PTS
|
modeling/fitting/reinitialization.py
|
Python
|
agpl-3.0
| 6,747
|
[
"Galaxy"
] |
72bf6a5089b0eabd4c15f7b464505989a881fb9f65af45fb8a9e0fcdedc0d9b9
|
from ANNarchy.core.Projection import Projection
from ANNarchy.core.Synapse import Synapse
import ANNarchy.core.Global as Global
import numpy as np
from ANNarchy.generator.Utils import tabify # _generate_bank_code, _generate_convolve_code
# Indices used for each dimension
indices = ['i', 'j', 'k', 'l', 'm', 'n']
###############################
### Shared synapse for report()
###############################
class SharedSynapse(Synapse):
# For reporting
_instantiated = []
def __init__(self, psp, operation):
Synapse.__init__(self,
psp=psp, operation=operation,
name="Shared Weight",
description="Weight shared over all synapses of the projection."
)
# For reporting
self._instantiated.append(True)
###############################
### Shared projection
###############################
class SharedProjection(Projection):
"""
"""
def __init__(self, pre, post, target, psp="w * pre.r", operation="sum", name=None, copied=False):
"""
Projection based on shared weights: each post-synaptic neuron uses the same weights, so they need to be instantiated only once to save memory.
Learning is not possible for now. The ``synapse`` argument is removed and replaced by a single ``psp`` argument to modify what is summed, and an ``operation`` argument to replace the summation by max-pooling or similar.
:param pre: pre-synaptic population (either its name or a ``Population`` object).
:param post: post-synaptic population (either its name or a ``Population`` object).
:param target: type of the connection.
:param psp: function to be summed. By default: ``w * pre.r``
:param operation: function applied on ``psp`` ("sum", "max", "min", "mean"). "sum" is the default.
"""
self.psp_init = psp
self.operation = operation
# Create the description, but it will not be used for generation
Projection.__init__(
self,
pre=pre,
post=post,
target=target,
synapse = SharedSynapse(psp=psp, operation=operation),
name=name,
copied=copied
)
self._omp_config['psp_schedule'] = 'schedule(dynamic)'
if not Global.config["paradigm"] == "openmp":
Global._error('SharedProjection: Weight sharing is only implemented for the OpenMP paradigm.')
if not pre.neuron_type.type == 'rate':
Global._error('SharedProjection: Weight sharing is only implemented for rate-coded populations.')
def _copy(self, pre, post):
"Returns a copy of the projection when creating networks. Internal use only."
return SharedProjection(pre=self.pre, post=self.post, target=self.target, psp=self.psp_init, operation=self.operation, name=self.name, copied=True)
def _create(self):
# create fake LIL object, just for compilation.
try:
from ANNarchy.core.cython_ext.Connector import LILConnectivity
except Exception as e:
Global._print(e)
Global._error('ANNarchy was not successfully installed.')
lil = LILConnectivity()
lil.max_delay = self.delays
lil.uniform_delay = self.delays
self.connector_name = "Shared weights"
self.connector_description = "Shared weights"
self._store_connectivity(self._load_from_lil, (lil, ), self.delays)
def _connect(self, module):
"""
Builds up dendrites either from list or dictionary. Called by instantiate().
"""
if not self._connection_method:
Global._error('SharedProjection: The projection between ' + self.pre.name + ' and ' + self.post.name + ' is declared but not connected.')
# Create the Cython instance
proj = getattr(module, 'proj'+str(self.id)+'_wrapper')
self.cyInstance = proj(self.weights, self.pre_coordinates)
# Define the list of postsynaptic neurons
self.post_ranks = list(range(self.post.size))
# Set delays after instantiation
if self.delays > 0.0:
self.cyInstance.set_delay(self.delays/Global.config['dt'])
def center(self, *args, **kwds):
"""
Returns the coordinates in the pre-synaptic population of the center of the kernel corresponding to the post-synaptic neuron with the given rank or coordinates.
:param rank: rank or coordinates of the post-synaptic neuron. If only one argument is given, it is a rank. If it is a tuple, it is coordinates.
"""
if len(args) == 1:
rank = args[0]
else:
rank = self.post.rank_from_coordinates(args)
if self.initialized:
return tuple(self.cyInstance.pre_rank(rank))
else:
return tuple(self.pre_coordinates[rank])
def _data(self):
"Disable saving."
desc = {}
desc['post_ranks'] = self.post_ranks
desc['attributes'] = self.attributes
desc['parameters'] = self.parameters
desc['variables'] = self.variables
desc['dendrites'] = []
desc['number_of_synapses'] = 0
return desc
################################
### Connection methods
################################
def convolve(self, weights, delays=0.0, method='convolution', keep_last_dimension=False, multiple=False, padding=0.0, subsampling=None):
"""
Builds the shared connection pattern that will perform a convolution of the weights kernel on the pre-synaptic population.
Depending on the number of dimensions of the pre- and post-synaptic populations, as well as the kernel, the convolution can be implemented differently.
* If the pre- and post-populations have the same dimension as the kernel, the convolution is regular.
* If the post-population has one dimension less than the pre-synaptic one, the last dimension of the kernel must match the last one of the pre-synaptic population. For example, filtering a N*M*3 image with a 3D filter (3 elements in the third dimension) results into a 2D population.
* If the kernel has less dimensions than the two populations, the number of neurons in the last dimension of the populations must be the same. The convolution will be calculated for each position in the last dimension (parallel convolution, useful if the pre-synaptic population is a stack of feature maps, for example). In this case, you must set ``keep_last_dimension`` to True.
* If the kernel has more dimensions than the pre-synaptic population, this means a bank of different filters will be applied on the pre-synaptic population. Attention: the first index of ``weights`` corresponds to the different filters, while the result will be accessible in the last dimension of the post-synaptic population. You must set the ``multiple`` argument to True.
Sub-sampling will be automatically performed according to the populations' geometry. If these geometries do not match, an error will be thrown. You can force sub-sampling by providing a list ``subsampling`` as argument, defining for each post-synaptic neuron the coordinates of the pre-synaptic neuron which will be the center of the filter/kernel.
:param weights: Numpy array or list of lists representing the matrix of weights for the filter/kernel.
:param delays: delay in synaptic transmission (default: dt). Can only be the same value for all neurons.
:param method: defines if the given weights are filter-based (dot-product between the filter and sub-region: 'filter') or kernel-based (regular convolution: 'convolution'). Default: 'convolution'.
:param keep_last_dimension: defines if the last dimension of the pre- and post-synaptic will be convolved in parallel. The weights matrix must have one dimension less than the pre-synaptic population, and the number of neurons in the last dimension of the pre- and post-synaptic populations must match. Default: False.
:param multiple: defines if the weights matrix describes a bank of filters which have to applied in parallel. The weights matrix must have one dimension more than the pre-synaptic populations, and the number of neurons in the last dimension of the post-synaptic population must be equal to the number of filters.
:param padding: value to be used for the rates outside the pre-synaptic population. If it is a floating value, the pre-synaptic population is virtually extended with this value above its boundaries. If it is equal to 'border', the values on the boundaries are repeated. Default: 0.0.
:param subsampling: list for each post-synaptic neuron of coordinates in the pre-synaptic population defining the center of the kernel/filter. Default: None.
"""
self._operation_type = 'convolve'
self.method = method
self.keep_last_dimension = keep_last_dimension
self.multiple = multiple
self.padding = padding
self.subsampling = subsampling
# Process the weights
if isinstance(weights, list):
self.weights = np.array(weights)
else:
self.weights = weights
# Process the delays
self.delays = delays
if not isinstance(delays, (int, float)):
Global._error('Shared projections can only have uniform delays.')
# Check dimensions of populations and weight matrix
self.dim_kernel = self.weights.ndim
self.dim_pre = self.pre.dimension
self.dim_post = self.post.dimension
if self.dim_post > 4:
Global._error('SharedProjection: Too many dimensions for the post-synaptic population (maximum 4).')
if self.dim_pre > 4:
Global._error('SharedProjection: Too many dimensions for the pre-synaptic population (maximum 4).')
if self.dim_kernel > 5 or (not self.multiple and self.dim_kernel > 4):
Global._error('SharedProjection: Too many dimensions for the kernel (maximum 4).')
# Check if the last axes match for parallel convolution (e.g. 3-2-3)
if self.dim_kernel < self.dim_pre:
if not self.keep_last_dimension:
Global._error('SharedProjection: If the kernel has less dimensions than the pre-synaptic population, you need to set the flag keep_last_dimension to True.')
if self.pre.geometry[-1] != self.post.geometry[-1]:
Global._error('SharedProjection: If the kernel has fewer dimensions than the two populations (keep_last_dimension=True), these must have the same number of neurons in the last dimension.')
# If the last dim of the kernel matches the last dim of the pre-pop, the last pop can have one dimension less.
if self.dim_post < self.dim_pre: # OK, but check the last dimension of the kernel has the same size as the post-population
if self.weights.shape[-1] != self.pre.geometry[-1]:
Global._error('SharedProjection: If the post-synaptic population has less dimensions than the pre-synaptic one, the last dimension of the filter must be equal to the last of the pre-synaptic population.')
# Check if it is a bank of filters
if self.dim_kernel > self.dim_pre:
if not self.multiple:
Global._error('SharedProjection: If the kernel has more dimensions than the pre-synaptic population, you need to set the flag multiple to True.')
# if self.dim_kernel > self.dim_post:
# if not self.keep_last_dimension:
# Global._error('If the kernel has more dimensions than the post-synaptic population, you need to set the flag keep_last_dimension to True.')
#
if self.weights.shape[0] != self.post.geometry[-1]:
Global._error('SharedProjection: For multiple filters, the last dimension of the post-synaptic population must have as many neurons as there are filters.')
# Generate the pre-synaptic coordinates
if not self.multiple:
self._generate_pre_coordinates()
else:
self._generate_pre_coordinates_bank()
# Finish building the synapses
self._create()
return self
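# Hedged usage sketch -- the populations below are assumptions, not part of
# this file. A bank of four 5x5 filters applied to a 100x100 input, with the
# four resulting feature maps stored in the last dimension of the post population:
#   inp = Population(geometry=(100, 100), neuron=some_rate_neuron)
#   feat = Population(geometry=(50, 50, 4), neuron=some_rate_neuron)
#   proj = SharedProjection(pre=inp, post=feat, target='exc')
#   proj.convolve(weights=np.random.uniform(-1, 1, (4, 5, 5)), multiple=True)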
def pooling(self, delays=0.0, extent=None, overlap=None):
"""
Builds the shared connection pattern that will perform a pooling operation over the pre-synaptic population.
Each post-synaptic neuron is associated to a region of the pre-synaptic one, over which the result of the operation on firing rates will be assigned to sum(target).
If the SharedProjection does not define an operation, the default is "sum". If you want max-pooling, you should set it to "max".
:param delays: delays (in ms) in synaptic transmission. Must be a single value for all neurons.
:param extent: Extent of the pooling area expressed in the geometry of the pre-synaptic population. In each dimension, the product of this extent with the number of neurons in the post-synaptic population must be equal to the number of pre-synaptic neurons.
:param overlap: TODO, not implemented yet.
"""
self._operation_type = 'pooling'
self.weights = []
if extent is None: # compute the extent automatically
if self.pre.dimension != self.post.dimension:
Global._error('SharedProjection: If you do not provide the extent parameter, the two populations must have the same dimensions.')
extent = list(self.pre.geometry)
for dim in range(self.pre.dimension):
extent[dim] /= self.post.geometry[dim]
if self.pre.geometry[dim] != extent[dim] * self.post.geometry[dim] :
Global._error('SharedProjection: Unable to compute the extent of the pooling area: the number of neurons do not match.')
elif not isinstance(extent, tuple):
Global._error('SharedProjection: You must provide a tuple for the extent of the pooling operation.')
self.extent = list(extent)
if len(self.extent) < self.pre.dimension:
Global._error('SharedProjection: You must provide a tuple for the extent of the pooling operation.')
# Process the delays
self.delays = delays
# Change the psp by default
if self.synapse_type.description['raw_psp'] == "w * pre.r":
self.synapse_type.description['psp']['cpp'] = "%(pre_prefix)sr%(pre_index)s"
# Check dimensions of populations
self.dim_pre = self.pre.dimension
self.dim_post = self.post.dimension
if self.dim_post > 4:
Global._error('SharedProjection: Too many dimensions for the post-synaptic population (maximum 4).')
if self.dim_pre > 4:
Global._error('SharedProjection: Too many dimensions for the pre-synaptic population (maximum 4).')
# Generate the pre-synaptic coordinates
self._generate_extent_coordinates()
# Finish building the synapses
self._create()
return self
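# Hedged usage sketch -- populations are assumptions. 2x2 max-pooling from a
# 100x100 map down to 50x50:
#   pool = SharedProjection(pre=feat, post=down, target='exc', operation='max')
#   pool.pooling(extent=(2, 2))  # extent can also be inferred when geometries divide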
def copy(self, projection):
"""
Creates a virtual connection pattern reusing the weights and delays of an already-defined projection.
Although the original projection can be learnable, this one cannot. Changes in the original weights will be reflected in this projection. The only possible modifications are ``psp`` and ``operation``.
The pre- and post-synaptic populations of each projection must have the same geometry.
:param projection: the projection to reuse.
"""
self._operation_type = 'copy'
self.projection = projection
if not isinstance(self.projection, Projection):
Global._error('SharedProjection: You must provide an existing projection to copy().')
if isinstance(self.projection, SharedProjection):
Global._error('SharedProjection: You can only copy regular projections, not shared projections.')
if not self.pre.geometry == self.projection.pre.geometry or not self.post.geometry == self.projection.post.geometry:
Global._error('SharedProjection: When copying a projection, the geometries must be the same.')
# Dummy weights
self.weights = None
self.pre_coordinates = []
# Finish building the synapses
self._create()
return self
################################
### Generate centers
################################
def _generate_extent_coordinates(self):
" Returns a list for each post neuron of the corresponding top-left coordinates."
# Generates coordinates TODO: Find a more robust way!
coords = [[] for i in range(self.post.size)]
if self.dim_pre == 1 :
rk = 0
for i in range(self.post.geometry[0]):
coords[rk] = [i * self.extent[0]]
rk += 1
elif self.dim_pre == 2 :
rk = 0
for i in range(self.post.geometry[0]):
if self.dim_post > 1:
for j in range(self.post.geometry[1]):
coords[rk] = [i * self.extent[0], j * self.extent[1]]
rk += 1
else: # over the whole second axis
coords[rk] = [i * self.extent[0], 0]
rk += 1
elif self.dim_pre == 3 :
rk = 0
for i in range(self.post.geometry[0]):
for j in range(self.post.geometry[1]):
if self.dim_post > 2:
for k in range(self.post.geometry[2]):
coords[rk] = [i * self.extent[0], j * self.extent[1], k * self.extent[2]]
rk += 1
else: # over the whole third axis
coords[rk] = [i * self.extent[0], j * self.extent[1], 0]
rk += 1
elif self.dim_pre == 4 : # TODO: post has less than 4 dimensions
rk = 0
for i in range(self.post.geometry[0]):
for j in range(self.post.geometry[1]):
for k in range(self.post.geometry[2]):
for l in range(self.post.geometry[3]):
coords[rk] = [i * self.extent[0], j * self.extent[1], k * self.extent[2], l * self.extent[3]]
rk += 1
# Save the result
self.pre_coordinates = coords
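# Example: pre geometry (4, 4), post geometry (2, 2) and extent (2, 2) yield
# the top-left corners [[0, 0], [0, 2], [2, 0], [2, 2]].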
def _generate_pre_coordinates(self):
" Returns a list for each post neuron of the corresponding center coordinates."
# Check if the list is already defined:
if self.subsampling:
try:
shape = np.array(self.subsampling).shape
except:
Global._error('SharedProjection: The sub-sampling list must have', self.post.size, 'elements of size', self.pre.dimension)
return
if shape != (self.post.size, self.pre.dimension):
Global._error('SharedProjection: The sub-sampling list must have', self.post.size, 'elements of size', self.pre.dimension)
return
self.pre_coordinates = self.subsampling
return
# Otherwise create it, possibly with sub-sampling
coords = [[] for i in range(self.post.size)]
# Compute pre-indices
idx_range= []
for dim in range(self.dim_pre):
if dim < self.dim_post:
pre_size = int(self.pre.geometry[dim])
post_size = int(self.post.geometry[dim])
sample = int(pre_size/post_size)
if post_size * sample != pre_size:
Global._error('SharedProjection: The pre-synaptic dimensions must be a multiple of the post-synaptic ones for down-sampling to work.')
idx_range.append([int((sample-1)/2) + sample * i for i in range(post_size)])
else: # extra dimension
if self.keep_last_dimension:
idx_range.append(range(self.post.geometry[dim]))
else:
idx_range.append([self._center_filter(self.weights.shape[dim])])
# Generates coordinates TODO: Find a more robust way!
if self.dim_pre == 1 :
rk = 0
for i in idx_range[0]:
coords[rk] = [i]
rk += 1
elif self.dim_pre == 2 :
rk = 0
for i in idx_range[0]:
for j in idx_range[1]:
coords[rk] = [i, j]
rk += 1
elif self.dim_pre == 3 :
rk = 0
for i in idx_range[0]:
for j in idx_range[1]:
for k in idx_range[2]:
coords[rk] = [i, j, k]
rk += 1
elif self.dim_pre == 4 :
rk = 0
for i in idx_range[0]:
for j in idx_range[1]:
for k in idx_range[2]:
for l in idx_range[3]:
coords[rk] = [i, j, k, l]
rk += 1
# Save the result
self.pre_coordinates = coords
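# Example: pre geometry (6,) and post geometry (3,) give sample = 2 and the
# centers [[0], [2], [4]] (one pre-synaptic center per post-synaptic neuron).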
def _generate_pre_coordinates_bank(self):
" Returns a list for each post neuron of the corresponding center coordinates, when the filter is a bank."
self.nb_filters = self.weights.shape[0]
self.dim_single_filter = self.weights.shape[1:]
# Check if the list is already defined:
if self.subsampling:
try:
shape = np.array(self.subsampling).shape
except:
Global._error('SharedProjection: The sub-sampling list must have', self.post.size / self.post.geometry[-1], 'elements of size', self.pre.dimension)
return
if shape != (self.post.size/ self.post.geometry[-1], self.pre.dimension):
Global._error('SharedProjection: The sub-sampling list must have', self.post.size/ self.post.geometry[-1], 'elements of size', self.pre.dimension)
return
self.pre_coordinates = [c + [d] for c in self.subsampling for d in range(self.nb_filters)]
return
# Otherwise create it, possibly with sub-sampling
coords = [[] for i in range(self.post.size)]
# Compute pre-indices
idx_range= []
for dim in range(self.dim_pre):
if dim < self.dim_post -1:
pre_size = self.pre.geometry[dim]
post_size = self.post.geometry[dim]
sample = int(pre_size/post_size)
if post_size * sample != pre_size:
Global._error('SharedProjection: The pre-synaptic dimensions must be a multiple of the post-synaptic ones for down-sampling to work.')
idx_range.append([int((sample-1)/2) + sample * i for i in range(post_size)])
else: # extra dimension
if self.keep_last_dimension:
idx_range.append(range(self.post.geometry[dim]))
else:
idx_range.append([self._center_filter(self.weights.shape[dim+1])])
# Generates coordinates TODO: Find a more robust way!
if self.dim_pre == 1 :
rk = 0
for i in idx_range[0]:
for d in range(self.nb_filters):
coords[rk] = [i, d]
rk += 1
elif self.dim_pre == 2 :
rk = 0
for i in idx_range[0]:
for j in idx_range[1]:
for d in range(self.nb_filters):
coords[rk] = [i, j, d ]
rk += 1
elif self.dim_pre == 3 :
rk = 0
for i in idx_range[0]:
for j in idx_range[1]:
for k in idx_range[2]:
for d in range(self.nb_filters):
coords[rk] = [i, j, k, d]
rk += 1
elif self.dim_pre == 4 :
rk = 0
for i in idx_range[0]:
for j in idx_range[1]:
for k in idx_range[2]:
for l in idx_range[3]:
for d in range(self.nb_filters):
coords[rk] = [i, j, k, l, d]
rk += 1
# Save the result
self.pre_coordinates = coords
################################
### Utilities
################################
def _center_filter(self, i):
return int(i/2) if i%2==1 else int(i/2)-1
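# e.g. _center_filter(5) == 2 (exact center for odd sizes), _center_filter(4) == 1.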
def _filter_definition(self):
dim = self.dim_kernel
cpp = Global.config['precision']
pyx = Global.config['precision']
for d in range(dim):
cpp = 'std::vector< ' + cpp + ' >'
pyx = 'vector[' + pyx + ']'
cpp += ' w;'
pyx += ' w'
return cpp, pyx
def _coordinates_to_rank(self, name, geometry):
dim = len(geometry)
txt = ""
for d in range(dim):
if txt == "" : # first coordinate is special
txt = indices[0] + "_" + name
else:
txt = str(geometry[d]) + '*(' + txt + ') + ' + indices[d] + '_' + name
return txt
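# e.g. for name='pre' and geometry (H, W) this builds "W*(i_pre) + j_pre"
# (with the literal value of W), i.e. the row-major flattening of the
# 2-D coordinates.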
def _generate_convolve_code(self):
# Operation to be performed: sum, max, min, mean
operation = self.synapse_type.operation
# Main code
code = tabify("sum = 0.0;", 3)
# Generate for loops
for dim in range(self.dim_kernel):
if dim == self.dim_kernel-1:
inner_idx = ""
for i in range(self.dim_kernel-1):
inner_idx += "["+indices[i]+"_w]"
code += "auto inner_line = w"+inner_idx+".data();\n"
code += tabify("""
for(int %(index)s_w = 0; %(index)s_w < %(size)s;%(index)s_w++) {
""" % { 'index': indices[dim], 'size': self.weights.shape[dim]}, dim)
# Compute indices
if dim < self.dim_kernel:
code += tabify("""int %(index)s_pre = coord[%(dim)s] %(operator)s (%(index)s_w - %(center)s);""" % { 'id_proj': self.id, 'index': indices[dim], 'dim': dim, 'operator': '-' if self.method=='convolution' else '+', 'center': self._center_filter(self.weights.shape[dim])}, 1)
else:
code += tabify("""int %(index)s_pre = coord[%(dim)s];""" % { 'id_proj': self.id, 'index': indices[dim], 'dim': dim}, 1)
# Check indices
if operation in ['sum', 'mean']:
if isinstance(self.padding, str): # 'border'
code += tabify("""
if (%(index)s_pre < 0) %(index)s_pre = 0 ;
if (%(index)s_pre > %(max_size)s) %(index)s_pre = %(max_size)s ;
""" % { 'index': indices[dim], 'dim': dim, 'max_size': self.pre.geometry[dim] -1}, dim)
else:
code += tabify("""
if ((%(index)s_pre < 0) || (%(index)s_pre > %(max_size)s)){
sum += %(padding)s;
continue;
}
""" % { 'index': indices[dim], 'padding': self.padding, 'max_size': self.pre.geometry[dim] -1}, dim)
else: # min, max
code += """
if ((%(index)s_pre < 0) || (%(index)s_pre > %(max_size)s)) {
continue;
}
""" % { 'index': indices[dim], 'max_size': self.pre.geometry[dim] -1}
# Compute pre-synaptic rank
code += tabify("""
rk_pre = %(value)s;""" % {'value': self._coordinates_to_rank('pre', self.pre.geometry)}, dim)
# Compute the increment
index = ""
for dim in range(self.dim_kernel):
index += '[' + indices[dim] + '_w]'
increment = self.synapse_type.description['psp']['cpp'] % {
'id_pre': self.pre.id,
'id_post': self.post.id,
'local_index': index,
'global_index': '[i]',
'pre_index': '[rk_pre]',
'post_index': '[rk_post]',
'pre_prefix': 'pop'+str(self.pre.id)+'.',
'post_prefix': 'pop'+str(self.post.id)+'.'
}
# Delays
if self.delays > Global.config['dt']:
increment = increment.replace(
'pop%(id_pre)s.r[rk_pre]' % {'id_pre': self.pre.id},
'delayed_r[rk_pre]'
)
# Apply the operation
if operation == "sum":
code += tabify("""
sum += %(increment)s""" % {'increment': increment.replace('w'+inner_idx, 'inner_line')}, dim)
elif operation == "max":
code += tabify("""
%(float_prec)s _psp = %(increment)s
if(_psp > sum) sum = _psp;""" % {'increment': increment, 'float_prec': Global.config['precision']}, dim)
elif operation == "min":
code += tabify("""
%(float_prec)s _psp = %(increment)s
if(_psp < sum) sum = _psp;""" % {'increment': increment, 'float_prec': Global.config['precision']}, dim)
elif operation == "mean":
code += tabify("""
sum += %(increment)s""" % {'increment': increment}, dim)
else:
Global._error('SharedProjection: Operation', operation, 'is not implemented yet for shared projections.')
# Close for loops
for dim in range(self.dim_kernel):
code += tabify("""
}""", self.dim_kernel-1-dim)
impl_code = code % {'id_proj': self.id,
'target': self.target,
'id_pre': self.pre.id,
'name_pre': self.pre.name,
'size_pre': self.pre.size,
'id_post': self.post.id,
'name_post': self.post.name,
'size_post': self.post.size
}
# sum code
if operation == "mean":
sum_code = """sum/%(filter_size)s""" % {'filter_size': self.weights.size}
else:
sum_code = "sum"
return impl_code, sum_code
def _generate_bank_code(self):
# Operation to be performed: sum, max, min, mean
operation = self.synapse_type.operation
# Main code
code = tabify("sum = 0.0;", 3)
# Generate for loops
for dim in range(self.dim_kernel-1):
code += tabify("""
for(int %(index)s_w = 0; %(index)s_w < %(size)s;%(index)s_w++) {
""" % { 'index': indices[dim], 'size': self.weights.shape[dim+1]}, dim)
# Compute indices
if dim < self.dim_kernel:
code += tabify("""int %(index)s_pre = coord[%(dim)s] %(operator)s (%(index)s_w - %(center)s);""" % { 'id_proj': self.id, 'index': indices[dim], 'dim': dim, 'operator': '-' if self.method=='convolution' else '+', 'center': self._center_filter(self.weights.shape[dim+1])}, 1)
else:
code += tabify("""int %(index)s_pre = coord[%(dim)s];""" % { 'id_proj': self.id, 'index': indices[dim], 'dim': dim}, 1)
# Check indices
if operation in ['sum', 'mean']:
if isinstance(self.padding, str): # 'border'
code += tabify("""
if (%(index)s_pre < 0) %(index)s_pre = 0 ;
if (%(index)s_pre > %(max_size)s) %(index)s_pre = %(max_size)s ;
""" % { 'index': indices[dim], 'dim': dim, 'max_size': self.pre.geometry[dim] -1}, 1+dim)
else:
code += tabify("""
if ((%(index)s_pre < 0) || (%(index)s_pre > %(max_size)s)) {
sum += %(padding)s;
continue;
}
""" % { 'index': indices[dim], 'padding': self.padding, 'max_size': self.pre.geometry[dim] -1}, 1+dim)
else: # min, max
code += tabify("""
if ((%(index)s_pre < 0) || (%(index)s_pre > %(max_size)s)){
continue;
}
""" % { 'index': indices[dim], 'max_size': self.pre.geometry[dim] -1}, 1+dim)
# Compute pre-synaptic rank
code +=tabify("""
rk_pre = %(value)s;""" % {'value': self._coordinates_to_rank('pre', self.pre.geometry)}, 1+dim)
# Compute the increment
index = "[coord["+str(self.dim_pre)+"]]"
for dim in range(self.dim_kernel-1):
index += '[' + indices[dim] + '_w]'
increment = self.synapse_type.description['psp']['cpp'] % {
'id_pre': self.pre.id,
'id_post': self.post.id,
'local_index': index,
'global_index': '[i]',
'pre_index': '[rk_pre]',
'post_index': '[rk_post]',
'pre_prefix': 'pop'+str(self.pre.id)+'.',
'post_prefix': 'pop'+str(self.post.id)+'.'}
# Delays
if self.delays > Global.config['dt']:
increment = increment.replace(
'pop%(id_pre)s.r[rk_pre]' % {'id_pre': self.pre.id},
'delayed_r[rk_pre]'
)
# Apply the operation
if operation == "sum":
code += tabify("""
sum += %(increment)s""" % {'increment': increment}, 1+dim)
elif operation == "max":
code += tabify("""
%(float_prec)s _psp = %(increment)s
if(_psp > sum) sum = _psp;""" % {'increment': increment, 'float_prec': Global.config['precision']}, 1+dim)
elif operation == "min":
code += tabify("""
%(float_prec)s _psp = %(increment)s
if(_psp < sum) sum = _psp;""" % {'increment': increment, 'float_prec': Global.config['precision']}, 1+dim)
elif operation == "mean":
code += tabify("""
sum += %(increment)s""" % {'increment': increment}, 1+dim)
else:
Global._error('SharedProjection: Operation', operation, 'is not implemented yet for shared projections.')
# Close for loops
for dim in range(self.dim_kernel-1):
code += tabify("""
}""", self.dim_kernel-1-dim)
impl_code = code % {'id_proj': self.id,
'target': self.target,
'id_pre': self.pre.id, 'name_pre': self.pre.name, 'size_pre': self.pre.size,
'id_post': self.post.id, 'name_post': self.post.name, 'size_post': self.post.size
}
# sum code
if operation == "mean":
sum_code = """sum/%(filter_size)s""" % {'filter_size': self.weights.size}
else:
sum_code = "sum"
return impl_code, sum_code
def _generate_pooling_code(self):
# Operation to be performed: sum, max, min, mean
operation = self.synapse_type.operation
# Main code
code = """
sum = 0.0;
"""
# Generate for loops
for dim in range(self.dim_pre):
if self.extent[dim] >1:
code += """
for(int %(index)s_w = 0; %(index)s_w < %(size)s; %(index)s_w++){
""" % { 'index': indices[dim], 'size': self.extent[dim]}
# Compute indices
for dim in range(self.dim_pre):
if self.extent[dim] >1:
code += """
int %(index)s_pre = coord[%(dim)s] + %(index)s_w;""" % { 'id_proj': self.id, 'index': indices[dim], 'dim': dim}
else:
code += """
int %(index)s_pre = coord[%(dim)s];""" % { 'id_proj': self.id, 'index': indices[dim], 'dim': dim}
# Check indices
for dim in range(self.dim_pre):
code += """
if ((%(index)s_pre < 0) ||(%(index)s_pre > %(max_size)s)){
continue;
}""" % { 'index': indices[dim], 'max_size': self.pre.geometry[dim] -1}
# Compute pre-synaptic rank
code += """
rk_pre = %(value)s;""" % {'value': self._coordinates_to_rank('pre', self.pre.geometry)}
# Compute the value to pool
psp = self.synapse_type.description['psp']['cpp'] % {
'id_pre': self.pre.id,
'id_post': self.post.id,
'local_index':'[i][j]',
'global_index': '[i]',
'pre_index': '[rk_pre]',
'post_index': '[rk_post]',
'pre_prefix': 'pop'+str(self.pre.id)+'.',
'post_prefix': 'pop'+str(self.post.id)+'.'
}
# Delays
if self.delays > Global.config['dt']:
psp = psp.replace(
'pop%(id_pre)s.r[rk_pre]' % {'id_pre': self.pre.id},
# TODO HD: wouldn't it be much better to reduce delay globally, instead of the subtraction here?
'pop%(id_pre)s._delayed_r[delay-1][rk_pre]' % {'id_pre': self.pre.id}
)
# Apply the operation
if operation == "sum":
code += """
sum += %(psp)s;"""
elif operation == "max":
code += """
%(float_prec)s _psp = %(psp)s;
if(_psp > sum) sum = _psp;"""
elif operation == "min":
code += """
%(float_prec)s _psp = %(psp)s;
if(_psp < sum) sum = _psp;"""
elif operation == "mean":
code += """
sum += %(psp)s;"""
else:
Global._error('SharedProjection: Operation', operation, 'is not implemented yet for shared projections with pooling.')
# Close for loops
for dim in range(self.dim_pre):
if self.extent[dim] >1:
code += """
}"""
impl_code = code % {'id_proj': self.id,
'target': self.target,
'id_pre': self.pre.id, 'name_pre': self.pre.name, 'size_pre': self.pre.size,
'id_post': self.post.id, 'name_post': self.post.name, 'size_post': self.post.size,
'psp': psp,
'float_prec': Global.config['precision']
}
if operation == "mean":
size = 1
for dim in range(self.pre.dimension):
size *= self.extent[dim]
sum_code = "sum/"+ str(size)
else:
sum_code = "sum"
return impl_code, sum_code
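# For illustration only -- a rough sketch of the code generated for 1-D
# max-pooling with extent=(2,) (not verbatim output):
#   sum = 0.0;
#   for(int i_w = 0; i_w < 2; i_w++){
#       int i_pre = coord[0] + i_w;
#       if ((i_pre < 0) || (i_pre > N-1)){ continue; }
#       rk_pre = i_pre;
#       double _psp = popX.r[rk_pre];
#       if(_psp > sum) sum = _psp;
#   }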
################################
### Code generation
################################
def _generate(self):
if self._operation_type == 'convolve':
# Filter definition
filter_definition, filter_pyx_definition = self._filter_definition()
# Convolve_code
if not self.multiple:
convolve_code, sum_code = self._generate_convolve_code()
else:
convolve_code, sum_code = self._generate_bank_code()
# Generate the code
self._generate_omp(filter_definition, filter_pyx_definition, convolve_code, sum_code)
#self._generate_cuda(filter_definition, filter_pyx_definition, convolve_code, sum_code)
elif self._operation_type == 'pooling':
# Filter definition
filter_definition, filter_pyx_definition = "",""
# Convolve_code
convolve_code, sum_code = self._generate_pooling_code()
# Generate the code
self._generate_omp(filter_definition, filter_pyx_definition, convolve_code, sum_code, kernel=False)
elif self._operation_type == 'copy':
# Generate the code
self._generate_copy()
def _generate_omp(self, filter_definition, filter_pyx_definition, convolve_code, sum_code, kernel=True):
# Specific template for generation
self._specific_template = {
# Declare the connectivity matrix
'declare_connectivity_matrix': """
std::vector<int> post_rank;
std::vector< std::vector<int> > pre_rank;
""" + filter_definition.strip(),
# Accessors for the connectivity matrix
'access_connectivity_matrix': """
// Accessor to connectivity data
std::vector<int> get_post_rank() { return post_rank; }
void set_post_rank(std::vector<int> ranks) { post_rank = ranks; }
std::vector< std::vector<int> > get_pre_rank() { return pre_rank; }
void set_pre_rank(std::vector< std::vector<int> > ranks) { pre_rank = ranks; }
int dendrite_size(int n) { return pre_rank[n].size(); }
""" ,
# Export the connectivity matrix
'export_connectivity': """
# Connectivity
vector[int] get_post_rank()
vector[vector[int]] get_pre_rank()
void set_post_rank(vector[int])
void set_pre_rank(vector[vector[int]])
""",
# Arguments to the wrapper constructor
'wrapper_args': "weights, coords",
# Delays
'wrapper_init_delay': "",
# Initialize the wrapper connectivity matrix
'wrapper_init_connectivity': """
proj%(id_proj)s.set_post_rank(list(range(%(size_post)s)))
proj%(id_proj)s.set_pre_rank(coords)
""" % {'id_proj': self.id, 'size_post': self.post.size},
# Wrapper access to connectivity matrix
'wrapper_access_connectivity': """
# Connectivity
def post_rank(self):
return proj%(id_proj)s.get_post_rank()
def pre_rank(self, int n):
return proj%(id_proj)s.get_pre_rank()
""" % {'id_proj': self.id},
# Wrapper access to variables
'wrapper_access_parameters_variables' : "",
# Variables for the psp code
'psp_prefix': """
int rk_pre;
%(float_prec)s sum=0.0;""" % {'float_prec': Global.config['precision']}
}
# Kernel-based method: specify w with the correct dimension
if kernel:
self._specific_template['access_connectivity_matrix'] += """
// Local parameter w
%(type_w)s get_w() { return w; }
void set_w(%(type_w)s value) { w = value; }
""" % {'type_w': filter_definition.replace(' w;', '')}
self._specific_template['export_connectivity'] += """
# Local variable w
%(type_w)s get_w()
void set_w(%(type_w)s)
""" % {'type_w': filter_pyx_definition.replace(' w', '')}
self._specific_template['wrapper_init_connectivity'] += """
proj%(id_proj)s.set_w(weights)
""" % {'id_proj': self.id}
self._specific_template['wrapper_access_connectivity'] += """
# Local variable w
def get_w(self):
return proj%(id_proj)s.get_w()
def set_w(self, value):
proj%(id_proj)s.set_w( value )
def get_dendrite_w(self, int rank):
return proj%(id_proj)s.get_w()
def set_dendrite_w(self, int rank, value):
proj%(id_proj)s.set_w(value)
def get_synapse_w(self, int rank_post, int rank_pre):
return 0.0
def set_synapse_w(self, int rank_post, int rank_pre, %(float_prec)s value):
pass
""" % {'id_proj': self.id, 'float_prec': Global.config['precision']}
# Override the monitor to avoid recording the weights
self._specific_template['monitor_class'] = ""
self._specific_template['monitor_export'] = ""
self._specific_template['monitor_wrapper'] = ""
# OMP code
omp_code = ""
if Global.config['num_threads'] > 1:
omp_code = """
#pragma omp for private(sum, rk_pre, coord) %(psp_schedule)s""" % {'psp_schedule': "" if not 'psp_schedule' in self._omp_config.keys() else self._omp_config['psp_schedule']}
# HD ( 16.10.2015 ):
# pre-load delayed firing rate in a local array, so we
# prevent multiple accesses to pop%(id_pre)s._delayed_r[delay-1]
# whereas delay is made available as a variable
# TODO HD: wouldn't it be much better to reduce the delay globally, instead of the subtraction here???
if self.delays > Global.config['dt']:
pre_load_r = """
// pre-load delayed firing rate
auto delayed_r = pop%(id_pre)s._delayed_r[delay-1];
"""% {'id_pre': self.pre.id}
else:
pre_load_r = ""
# Compute sum
wsum = """
if ( _transmission && pop%(id_pre)s._active ) {
std::vector<int> coord;
""" + pre_load_r + """
%(omp_code)s
for(int i = 0; i < %(size_post)s; i++){
coord = pre_rank[i];
""" + convolve_code + """
pop%(id_post)s._sum_%(target)s[i] += """ + sum_code + """;
} // for
} // if
"""
self._specific_template['psp_code'] = wsum % \
{ 'id_proj': self.id,
'target': self.target,
'id_pre': self.pre.id, 'name_pre': self.pre.name, 'size_pre': self.pre.size,
'id_post': self.post.id, 'name_post': self.post.name, 'size_post': self.post.size,
'omp_code': omp_code,
'convolve_code': convolve_code
}
# override size in bytes calculation
self._specific_template['size_in_bytes'] = "//TODO:\n"
def _generate_cuda(self, filter_definition, filter_pyx_definition, convolve_code, sum_code, kernel=True):
"TODO"
# Template
self._specific_template = {}
def _generate_copy(self):
# Specific template for generation
self._specific_template = {
# Declare the connectivity matrix
'declare_connectivity_matrix': "",
# Accessors for the connectivity matrix
'access_connectivity_matrix': "",
# No initialization of the connectivity matrix
'init_connectivity_matrix': "",
# Export the connectivity matrix
'export_connectivity': "",
# Initialize the wrapper connectivity matrix
'wrapper_init_connectivity': "",
# Wrapper access to connectivity matrix
'wrapper_access_connectivity': """
# Connectivity
def post_rank(self):
return proj%(id_copy)s.get_post_rank()
def pre_rank(self, int n):
return proj%(id_copy)s.get_pre_rank()[n]
# Local variable w
def get_w(self):
return proj%(id_copy)s.get_w()
def set_w(self, value):
print('Cannot modify weights of a copied projection.')
def get_dendrite_w(self, int rank):
return proj%(id_copy)s.get_dendrite_w(rank)
def set_dendrite_w(self, int rank, value):
print('Cannot modify weights of a copied projection.')
def get_synapse_w(self, int rank_post, int rank_pre):
return proj%(id_copy)s.get_synapse_w(rank_post, rank_pre)
def set_synapse_w(self, int rank_post, int rank_pre, %(float_prec)s value):
print('Cannot modify weights of a copied projection.')
""" % {'id_proj': self.id, 'id_copy': self.projection.id, 'float_prec': Global.config['precision']},
# Wrapper access to variables
'wrapper_access_parameters_variables' : "",
# Variables for the psp code
'psp_prefix': """
int rk_pre;
%(float_prec)s sum=0.0;"""
} % {'float_prec': Global.config['precision']}
# OMP code
if Global.config['num_threads'] > 1:
omp_code = '#pragma omp for private(sum)' if self.post.size > Global.OMP_MIN_NB_NEURONS else ''
else:
omp_code = ""
# PSP
psp = self.synapse_type.description['psp']['cpp'] % {
'id_pre': self.pre.id,
'id_post': self.post.id,
'local_index':'[i][j]',
'global_index': '[i]',
'pre_index': '[pre_rank[i][j]]',
'post_index': '[post_rank[i]]',
'pre_prefix': 'pop'+str(self.pre.id)+'.',
'post_prefix': 'pop'+str(self.post.id)+'.'}
psp = psp.replace('rk_pre', 'pre_rank[i][j]').replace(';', '')
# Take delays into account if any
if self.delays > Global.config['dt']:
psp = psp.replace(
'pop%(id_pre)s.r[rk_pre]' % {'id_pre': self.pre.id},
'pop%(id_pre)s._delayed_r[delay-1][rk_pre]' % {'id_pre': self.pre.id}
# TODO HD: wouldn't it be much better to reduce the delay globally, instead of the subtraction here???
)
# Operation to be performed: sum, max, min, mean
operation = self.synapse_type.operation
if operation == 'sum':
sum_code = """
// proj%(id_proj)s: %(name_pre)s -> %(name_post)s with target %(target)s, copied from proj%(id)s
if(pop%(id_post)s._active){
%(omp_code)s
for(int i = 0; i < post_rank.size(); i++){
sum = 0.0;
for(int j = 0; j < pre_rank[i].size(); j++){
sum += %(psp)s ;
}
pop%(id_post)s._sum_%(target)s[post_rank[i]] += sum;
}
}
"""
elif operation == 'max':
sum_code = """
// proj%(id_proj)s: %(name_pre)s -> %(name_post)s with target %(target)s, copied from proj%(id)s
if(pop%(id_post)s._active){
%(omp_code)s
for(int i = 0; i < post_rank.size(); i++){
sum = %(psp)s;
for(int j = 0; j < pre_rank[i].size(); j++){
if(%(psp)s > sum){
sum = %(psp)s ;
}
}
pop%(id_post)s._sum_%(target)s[post_rank[i]] += sum;
}
}
"""
elif operation == 'min':
sum_code = """
// proj%(id_proj)s: %(name_pre)s -> %(name_post)s with target %(target)s, copied from proj%(id)s
if(pop%(id_post)s._active){
%(omp_code)s
for(int i = 0; i < post_rank.size(); i++){
sum = %(psp)s;
for(int j = 0; j < pre_rank[i].size(); j++){
if(%(psp)s < sum){
sum = %(psp)s ;
}
}
pop%(id_post)s._sum_%(target)s[post_rank[i]] += sum;
}
}
"""
elif operation == 'mean':
sum_code = """
// proj%(id_proj)s: %(name_pre)s -> %(name_post)s with target %(target)s, copied from proj%(id)s
if(pop%(id_post)s._active){
%(omp_code)s
for(int i = 0; i < post_rank.size(); i++){
sum = 0.0;
for(int j = 0; j < pre_rank[i].size(); j++){
sum += %(psp)s ;
}
pop%(id_post)s._sum_%(target)s[post_rank[i]] += sum/ (%(float_prec)s)(pre_rank[i].size());
}
}
"""
else:
sum_code = ""
self.generator['omp']['body_compute_psp'] = sum_code % {
'id_proj': self.id, 'target': self.target,
'id_pre': self.pre.id, 'name_pre': self.pre.name,
'id_post': self.post.id, 'name_post': self.post.name,
'id': self.projection.id,
'float_prec': Global.config['precision'],
'omp_code': omp_code,
'psp': psp
}
##############################
## Override useless methods
##############################
def save_connectivity(self, filename):
Global._warning('Shared projections can not be saved.')
def save(self, filename):
Global._warning('Shared projections can not be saved.')
def load(self, filename):
Global._warning('Shared projections can not be loaded.')
def receptive_fields(self, variable = 'w', in_post_geometry = True):
Global._warning('Shared projections can not display receptive fields.')
def connectivity_matrix(self, fill=0.0):
Global._warning('Shared projections can not display connectivity matrices.')
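# Hypothetical usage sketch. The method names below are inferred from the
# _operation_type values handled in _generate() ('convolve', 'pooling',
# 'copy'); the exact public API of the weightsharing extension may differ:
#   proj = SharedProjection(pre=pop1, post=pop2, target='exc')
#   proj.pooling(extent=(2, 2))   # assumption: pooling() configures the extent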
|
ANNarchy/ANNarchy
|
ANNarchy/extensions/weightsharing/SharedProjection.py
|
Python
|
gpl-2.0
| 51,558
|
[
"NEURON"
] |
2322917244c94ccbd49f7170b2e08af171ac9a11665d644fb51f67c5bf442a5e
|
# numkit --- data fitting
# Copyright (c) 2010 Oliver Beckstein <orbeckst@gmail.com>
# Released under the "Modified BSD Licence" (see COPYING).
"""
:mod:`numkit.fitting` --- Fitting data
======================================
The module contains functions to do least square fits of functions of
one variable f(x) to data points (x,y).
Example
-------
For example, to fit an un-normalized Gaussian with :class:`FitGauss` to
data distributed with mean 5.0 and standard deviation 3.0::
from numkit.fitting import FitGauss
import numpy, numpy.random
# generate suitably noisy data
mu, sigma = 5.0, 3.0
Y,edges = numpy.histogram(sigma*numpy.random.randn(10000), bins=100, density=True)
X = 0.5*(edges[1:]+edges[:-1]) + mu
g = FitGauss(X, Y)
print(g.parameters)
# [ 4.98084541 3.00044102 1.00069061]
print(numpy.array([mu, sigma, 1]) - g.parameters)
# [ 0.01915459 -0.00044102 -0.00069061]
import matplotlib.pyplot as plt
plt.plot(X, Y, 'ko', label="data")
plt.plot(X, g.fit(X), 'r-', label="fit")
.. figure:: /numkit/FitGauss.png
:scale: 40 %
:alt: Gaussian fit with data points
A Gaussian (red) was fit to the data points (black circles) with
the :class:`numkit.fitting.FitGauss` class.
If the initial parameters for the least square optimization do not
lead to a solution then one can provide customized starting values in
the *parameters* keyword argument::
g = FitGauss(X, Y, parameters=[10, 1, 1])
The *parameters* have different meaning for the different fit
functions; the documentation for each function shows them in the
context of the fit function.
Creating new fit functions
--------------------------
New fit function classes can be derived from :class:`FitFunc`. The
documentation and the methods :meth:`FitFunc.f_factory` and
:meth:`FitFunc.initial_values` must be overridden. For example, the
class :class:`FitGauss` is implemented as ::
class FitGauss(FitFunc):
'''y = f(x) = p[2] * 1/sqrt(2*pi*p[1]**2) * exp(-(x-p[0])**2/(2*p[1]**2))'''
def f_factory(self):
def fitfunc(p,x):
return p[2] * 1.0/(p[1]*numpy.sqrt(2*numpy.pi)) * numpy.exp(-(x-p[0])**2/(2*p[1]**2))
return fitfunc
def initial_values(self):
return [0.0,1.0,0.0]
The function to be fitted is defined in :func:`fitfunc`. The
parameters are accessed as ``p[0]``, ``p[1]``, ... For each parameter,
a suitable initial value must be provided.
Functions and classes
---------------------
.. autofunction:: Pearson_r
.. autofunction:: linfit
.. autoclass:: FitFunc
:members:
.. autoclass:: FitLin
.. autoclass:: FitExp
.. autoclass:: FitExp2
.. autoclass:: FitGauss
"""
import numpy
import logging
logger = logging.getLogger("numkit.fitting")
def Pearson_r(x,y):
"""Pearson's r (correlation coefficient).
Pearson(x,y) --> correlation coefficient
*x* and *y* are arrays of same length.
Historical note -- Naive implementation of Pearson's r ::
Ex = scipy.stats.mean(x)
Ey = scipy.stats.mean(y)
covxy = numpy.sum((x-Ex)*(y-Ey))
r = covxy/math.sqrt(numpy.sum((x-Ex)**2)*numpy.sum((y-Ey)**2))
"""
return numpy.corrcoef(x,y)[1,0]
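# Minimal usage sketch (values are illustrative):
#   >>> x = numpy.array([1.0, 2.0, 3.0, 4.0])
#   >>> y = numpy.array([1.1, 1.9, 3.2, 3.9])
#   >>> Pearson_r(x, y)        # close to 1.0 for nearly linear data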
def linfit(x, y, dy=None):
"""Fit a straight line y = a + bx to the data in *x* and *y*.
Errors on y should be provided in dy in order to assess the
goodness of the fit and derive errors on the parameters.
linfit(x,y[,dy]) --> result_dict
Fit y = a + bx to the data in x and y by analytically minimizing
chi^2. dy holds the standard deviations of the individual y_i. If
dy is not given, they are assumed to be constant (note that in
this case Q is set to 1 and is meaningless, and chi2 is
normalised to unit standard deviation on all points!).
Returns the parameters a and b, their uncertainties sigma_a and
sigma_b, and their correlation coefficient r_ab; it also returns
the chi-squared statistic and the goodness-of-fit probability Q
(that the fit would have chi^2 this large or larger; Q < 10^-2
indicates that the model is bad --- Q is the probability that a
value of chi-square as _poor_ as the calculated statistic chi2
should occur by chance.)
:Returns: result_dict with components
intercept, sigma_intercept
a +/- sigma_a
slope, sigma_slope
b +/- sigma_b
parameter_correlation
correlation coefficient r_ab between a and b
chi_square
chi^2 test statistic
Q_fit
goodness-of-fit probability
Based on 'Numerical Recipes in C', Ch 15.2.
"""
import scipy.stats
n = len(x)
m = len(y)
if n != m:
raise ValueError("lengths of x and y must match: %s != %s" % (n, m))
try:
have_dy = (len(dy) > 0)
except TypeError:
have_dy = False
if not have_dy:
dy = numpy.ones(n, dtype=numpy.float64)
x = numpy.asarray(x)
y = numpy.asarray(y)
dy = numpy.asarray(dy)
s2 = dy*dy
S = numpy.add.reduce(1/s2)
Sx = numpy.add.reduce(x/s2)
Sy = numpy.add.reduce(y/s2)
Sxx = numpy.add.reduce(x*x/s2)
Sxy = numpy.add.reduce(x*y/s2)
t = (x - Sx/S)/dy
Stt = numpy.add.reduce(t*t)
b = numpy.add.reduce(t*y/dy)/Stt
a = (Sy - Sx*b)/S
sa = numpy.sqrt((1 + (Sx*Sx)/(S*Stt))/S)
sb = numpy.sqrt(1/Stt)
covab = -Sx/(S*Stt)
r = covab/(sa*sb)
chi2 = numpy.add.reduce(((y-a-b*x)/dy)**2)
if not have_dy:
# estimate error if none were provided
sigmadata = numpy.sqrt(chi2/(n-2))
sa *= sigmadata
sb *= sigmadata
Q = 1.0
else:
Q = scipy.stats.chi2.sf(chi2, n-2)  # upper tail probability of the chi^2 distribution
return {"intercept":a,"slope":b,
"sigma_intercept":sa,"sigma_slope":sb,
"parameter_correlation":r, "chi_square":chi2, "Q":Q}
class FitFunc(object):
"""Fit a function f to data (x,y) using the method of least squares.
The function is fitted when the object is created, using
:func:`scipy.optimize.leastsq`. One must derive from the base class
:class:`FitFunc` and override the :meth:`FitFunc.f_factory` (including
the definition of an appropriate local :func:`fitfunc` function) and
:meth:`FitFunc.initial_values` appropriately. See the examples for a
linear fit :class:`FitLin`, a 1-parameter exponential fit :class:`FitExp`,
or a 3-parameter double exponential fit :class:`FitExp2`.
The object provides two attributes
:attr:`FitFunc.parameters`
list of parameters of the fit
:attr:`FitFunc.message`
message from :func:`scipy.optimize.leastsq`
After a successful fit, the fitted function can be applied to any data (a
1D-numpy array) with :meth:`FitFunc.fit`.
"""
def __init__(self,x,y,parameters=None):
import scipy.optimize
_x = numpy.asarray(x)
_y = numpy.asarray(y)
p0 = self._get_initial_values(parameters)
fitfunc = self.f_factory()
def errfunc(p,x,y):
return fitfunc(p,x) - y # residuals
p,msg = scipy.optimize.leastsq(errfunc,p0[:],args=(_x,_y))
try:
p[0]
self.parameters = p
except (TypeError,IndexError,):
# TypeError for int p, IndexError for numpy scalar (new scipy)
self.parameters = [p]
self.message = msg
def f_factory(self):
"""Stub for fit function factory, which returns the fit function.
Override for derived classes.
"""
def fitfunc(p,x):
# return f(p,x); should be a numpy ufunc
raise NotImplementedError("base class must be extended for each fit function")
return fitfunc
def _get_initial_values(self, parameters=None):
p0 = numpy.asarray(self.initial_values())
if parameters is not None:
try:
p0[:] = parameters
except ValueError:
raise ValueError("Wrong number of custom initital values %r, should be something like %r" % (parameters, p0))
return p0
def initial_values(self):
"""List of initital guesses for all parameters p[]"""
# return [1.0, 2.0, 0.5]
raise NotImplementedError("base class must be extended for each fit function")
def fit(self,x):
"""Applies the fit to all *x* values"""
fitfunc = self.f_factory()
return fitfunc(self.parameters,numpy.asarray(x))
class FitExp(FitFunc):
"""y = f(x) = exp(-p[0]*x)"""
def f_factory(self):
def fitfunc(p,x):
return numpy.exp(-p[0]*x) # exp(-B*x)
return fitfunc
def initial_values(self):
return [1.0]
def __repr__(self):
return "<FitExp "+str(self.parameters)+">"
class FitExp2(FitFunc):
"""y = f(x) = p[0]*exp(-p[1]*x) + (1-p[0])*exp(-p[2]*x)"""
def f_factory(self):
def fitfunc(p,x):
return p[0]*numpy.exp(-p[1]*x) + (1-p[0])*numpy.exp(-p[2]*x)
return fitfunc
def initial_values(self):
return [0.5,0.1,1e-4]
def __repr__(self):
return "<FitExp2"+str(self.parameters)+">"
class FitLin(FitFunc):
"""y = f(x) = p[0]*x + p[1]"""
def f_factory(self):
def fitfunc(p,x):
return p[0]*x + p[1]
return fitfunc
def initial_values(self):
return [1.0,0.0]
def __repr__(self):
return "<FitLin"+str(self.parameters)+">"
class FitGauss(FitFunc):
"""y = f(x) = p[2] * 1/sqrt(2*pi*p[1]**2) * exp(-(x-p[0])**2/(2*p[1]**2))
Fits an un-normalized gaussian (height scaled with parameter p[2]).
* p[0] == mean $\mu$
* p[1] == standard deviation $\sigma$
* p[2] == scale $a$
"""
def f_factory(self):
def fitfunc(p,x):
return p[2] * 1.0/(p[1]*numpy.sqrt(2*numpy.pi)) * numpy.exp(-(x-p[0])**2/(2*p[1]**2))
return fitfunc
def initial_values(self):
return [0.0,1.0,0.0]
def __repr__(self):
return "<FitGauss"+str(self.parameters)+">"
|
CTCNano/GromacsWrapper
|
numkit/fitting.py
|
Python
|
gpl-3.0
| 10,025
|
[
"Gaussian"
] |
ce78bc13fbbb5d75bb657fdb9d4f9443ef1b817473bf848c2be417999cf31a52
|
"""Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import hstack as sparse_hstack
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..exceptions import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount, parallel_helper
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_is_fitted
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score function."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
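# Illustration: with n_samples=5, a bootstrap draw such as [0, 3, 3, 1, 0]
# gives sample_counts == [2, 1, 0, 2, 0], so samples 2 and 4 are never drawn
# and _generate_unsampled_indices returns array([2, 4]) -- the out-of-bag
# set for that tree.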
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def decision_path(self, X):
"""Return the decision path in the forest
.. versionadded:: 0.18
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
Return a node indicator matrix where non-zero elements
indicate that the samples go through the nodes.
n_nodes_ptr : array of size (n_estimators + 1, )
The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
give the indicator values for the i-th estimator.
"""
X = self._validate_X_predict(X)
indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(tree, 'decision_path', X,
check_input=False)
for tree in self.estimators_)
n_nodes = [0]
n_nodes.extend([i.shape[1] for i in indicators])
n_nodes_ptr = np.array(n_nodes).cumsum()
return sparse_hstack(indicators).tocsr(), n_nodes_ptr
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, accept_sparse="csc", dtype=DTYPE)
y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None)
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
# reshape is necessary to preserve data contiguity, which
# [:, np.newaxis] does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start or not hasattr(self, "estimators_"):
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False,
random_state=random_state)
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
check_is_fitted(self, 'estimators_')
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
check_classification_targets(y)
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ('balanced', 'balanced_subsample')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight != 'balanced_subsample' or
not self.bootstrap):
if self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
check_is_fitted(self, 'estimators_')
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
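# Note on the reduction above: for a binary problem where three trees assign
# probabilities [1.0, 0.0], [0.6, 0.4] and [0.2, 0.8] to a sample,
# predict_proba returns their mean [0.6, 0.4], predict picks the class at
# index 0 of classes_, and predict_log_proba returns log([0.6, 0.4]).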
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
check_is_fitted(self, 'estimators_')
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
classifiers on various sub-samples of the dataset and uses averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced",
"balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that
weights are computed based on the bootstrap sample for every tree
grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
Notes
-----
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data,
``max_features=n_features`` and ``bootstrap=False``, if the improvement
of the criterion is identical for several splits enumerated during the
search of the best split. To obtain a deterministic behaviour during
fitting, ``random_state`` has to be fixed.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
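# Minimal usage sketch (toy data; results depend on the random seed):
#   >>> from sklearn.ensemble import RandomForestClassifier
#   >>> X = [[0, 0], [1, 1], [0, 1], [1, 0]]
#   >>> y = [0, 1, 0, 1]
#   >>> clf = RandomForestClassifier(n_estimators=10, random_state=0).fit(X, y)
#   >>> clf.predict([[1, 1]])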
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of classifying
decision trees on various sub-samples of the dataset and uses averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
Notes
-----
The features are always randomly permuted at each split. Therefore,
the best found split may vary, even with the same training data,
``max_features=n_features`` and ``bootstrap=False``, if the improvement
of the criterion is identical for several splits enumerated during the
search of the best split. To obtain a deterministic behaviour during
fitting, ``random_state`` has to be fixed.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
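# Minimal usage sketch (toy data; oob_score_ requires bootstrap=True, the default):
#   >>> from sklearn.ensemble import RandomForestRegressor
#   >>> X = [[float(i)] for i in range(20)]
#   >>> y = [2.0 * row[0] for row in X]
#   >>> reg = RandomForestRegressor(n_estimators=25, oob_score=True,
#   ...                             random_state=0).fit(X, y)
#   >>> reg.oob_score_        # R^2 estimated on out-of-bag samples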
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
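# Illustrative usage sketch for ExtraTreesClassifier (comment only; the
# dataset and parameter values below are assumptions, not module defaults):
#
#   >>> from sklearn.datasets import make_classification
#   >>> X, y = make_classification(n_samples=100, n_features=10, random_state=0)
#   >>> clf = ExtraTreesClassifier(n_estimators=50, random_state=0)
#   >>> clf.fit(X, y).predict(X[:5])
#
# With the default bootstrap=False every tree sees the full sample; the
# randomness comes from the split thresholds drawn per candidate feature.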
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and uses averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool, optional (default=False)
Whether to use out-of-bag samples to estimate the R^2 on unseen data.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
min_impurity_split=1e-7,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
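# Illustrative usage sketch for ExtraTreesRegressor (comment only; values are
# assumptions):
#
#   >>> from sklearn.datasets import make_regression
#   >>> X, y = make_regression(n_samples=100, n_features=8, random_state=0)
#   >>> reg = ExtraTreesRegressor(n_estimators=50, random_state=0)
#   >>> reg.fit(X, y).predict(X[:3])
#
# Note that max_features="auto" means max_features=n_features here, unlike
# the classifier, where "auto" maps to sqrt(n_features).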
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : integer, optional (default=10)
Number of trees in the forest.
max_depth : integer, optional (default=5)
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` is the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` is the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
min_impurity_split=1e-7,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "min_impurity_split",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
X = check_array(X, accept_sparse=['csc'])
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
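# ---------------------------------------------------------------------------
# Minimal demo of RandomTreesEmbedding (a sketch appended for illustration;
# the data and parameter values are arbitrary assumptions):
if __name__ == '__main__':
    import numpy as np
    rng = np.random.RandomState(0)
    X_demo = rng.uniform(size=(20, 4))
    embedder = RandomTreesEmbedding(n_estimators=5, max_depth=3, random_state=0)
    X_coded = embedder.fit_transform(X_demo)
    # One-hot coding of leaves: every row has exactly n_estimators ones.
    assert (X_coded.sum(axis=1) == 5).all()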
|
rishikksh20/scikit-learn
|
sklearn/ensemble/forest.py
|
Python
|
bsd-3-clause
| 67,993
|
[
"Brian"
] |
684509af9361c7e627de7cf7264a58768a132df0a66495d17f3d3ed28c81af65
|
"""Tests for LogFilters."""
import pytest
from DIRAC.FrameworkSystem.private.standardLogging.LogLevels import LogLevels
from DIRAC.Resources.LogFilters.PatternFilter import PatternFilter
from DIRAC.Resources.LogFilters.ModuleFilter import ModuleFilter, LEVEL
class Record(object):
def __init__(self, msg, varmessage='', name=None, level=10):
self.args = (msg,)
self.varmessage = varmessage
self.name = name
self.levelno = level
@pytest.fixture
def pf():
options = {'Accept': 'some,Words', 'Reject': 'Foo'}
pf = PatternFilter(options)
assert pf._accept == ['some', 'Words']
assert pf._reject == ['Foo']
return pf
@pytest.mark.parametrize('record, result', [
(('Some',), False),
(('some, Words',), True),
(('some, Words', 'Foo'), False),
])
def test_filter1(pf, record, result):
assert pf.filter(Record(*record)) == result
@pytest.fixture
def mf():
options = {'dirac': 'ERROR', 'l1': 'ERROR', 'l1.l2': 'INFO', 'll1.ll2.ll3': 'VERBOSE'}
mf = ModuleFilter(options)
assert mf._configDict == \
{'dirac': {LEVEL: LogLevels.ERROR},
'l1': {LEVEL: LogLevels.ERROR,
'l2': {LEVEL: LogLevels.INFO}},
'll1': {LEVEL: LogLevels.DEBUG,
'll2': {LEVEL: LogLevels.DEBUG,
'll3': {LEVEL: LogLevels.VERBOSE}}},
}
return mf
@pytest.mark.parametrize('name, level, result', [
('dirac', LogLevels.INFO, False),
('dirac', LogLevels.ERROR, True),
('l1', LogLevels.INFO, False),
('l1', LogLevels.ERROR, True),
('l1.ll2', LogLevels.INFO, False), # inherits from l1
('l1.ll2', LogLevels.ERROR, True), # inherits from l1
('l1.l2', LogLevels.INFO, True), # set to INFO
('l1.l2.l3', LogLevels.INFO, True), # inherits from l1.l2
('ll1.ll2', LogLevels.DEBUG, True), # base level is DEBUG by default
('ll1.ll2.ll3', LogLevels.DEBUG, False), # set to VERBOSE
('ll1.ll2.ll3', LogLevels.INFO, True), # set to VERBOSE
])
def test_mf(mf, name, level, result):
assert mf.filter(Record('blabla', name=name, level=level)) == result
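# ---------------------------------------------------------------------------
# Standalone sketch (not part of the test suite): exercising the filters
# outside pytest, using only the classes defined or imported above.
if __name__ == '__main__':
    patternFilter = PatternFilter({'Accept': 'some,Words', 'Reject': 'Foo'})
    print(patternFilter.filter(Record('some, Words')))         # True
    print(patternFilter.filter(Record('some, Words', 'Foo')))  # False
    moduleFilter = ModuleFilter({'l1': 'ERROR', 'l1.l2': 'INFO'})
    print(moduleFilter.filter(Record('msg', name='l1.l2', level=LogLevels.INFO)))  # True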
|
chaen/DIRAC
|
Resources/LogFilters/test/Test_PatternFilter.py
|
Python
|
gpl-3.0
| 2,081
|
[
"DIRAC"
] |
47af82493636a2d868e4cb341f21efa0175720d91594848833330f572f1e1533
|
""" DIRAC Transformation DB
Transformation database is used to collect and serve the necessary information
in order to automate the task of job preparation for high level transformations.
This class is typically used as a base class for more specific data processing
databases
"""
import re, time, threading, copy
from types import IntType, LongType
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Base.DB import DB
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.Core.Utilities.List import stringListToString, intListToString, breakListIntoChunks
from DIRAC.Core.Utilities.Shifter import setupShifterProxyInEnv
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Utilities.Subprocess import pythonCall
__RCSID__ = "$Id$"
MAX_ERROR_COUNT = 10
#############################################################################
class TransformationDB( DB ):
""" TransformationDB class
"""
def __init__( self, dbname = None, dbconfig = None, dbIn = None ):
""" The standard constructor takes the database name (dbname) and the name of the
configuration section (dbconfig)
"""
if not dbname:
dbname = 'TransformationDB'
if not dbconfig:
dbconfig = 'Transformation/TransformationDB'
if not dbIn:
DB.__init__( self, dbname, dbconfig )
self.lock = threading.Lock()
    self.filters = []
res = self.__updateFilters()
if not res['OK']:
gLogger.fatal( "Failed to create filters" )
self.allowedStatusForTasks = ( 'Unused', 'ProbInFC' )
self.TRANSPARAMS = [ 'TransformationID',
'TransformationName',
'Description',
'LongDescription',
'CreationDate',
'LastUpdate',
'AuthorDN',
'AuthorGroup',
'Type',
'Plugin',
'AgentType',
'Status',
'FileMask',
'TransformationGroup',
'GroupSize',
'InheritedFrom',
'Body',
'MaxNumberOfTasks',
'EventsPerTask',
'TransformationFamily']
self.mutable = [ 'TransformationName',
'Description',
'LongDescription',
'AgentType',
'Status',
'MaxNumberOfTasks',
'TransformationFamily',
'Body'] # for the moment include TransformationFamily
self.TRANSFILEPARAMS = ['TransformationID',
'FileID',
'Status',
'TaskID',
'TargetSE',
'UsedSE',
'ErrorCount',
'LastUpdate',
'InsertedTime']
self.TRANSFILETASKPARAMS = ['TransformationID',
'FileID',
'TaskID']
self.TASKSPARAMS = [ 'TaskID',
'TransformationID',
'ExternalStatus',
'ExternalID',
'TargetSE',
'CreationTime',
'LastUpdateTime']
self.ADDITIONALPARAMETERS = ['TransformationID',
'ParameterName',
'ParameterValue',
'ParameterType'
]
# This is here to ensure full compatibility between different versions of the MySQL DB schema
self.isTransformationTasksInnoDB = True
res = self._query( "SELECT Engine FROM INFORMATION_SCHEMA.TABLES WHERE table_name = 'TransformationTasks'" )
if not res['OK']:
raise RuntimeError( res['Message'] )
else:
engine = res['Value'][0][0]
if engine.lower() != 'innodb':
self.isTransformationTasksInnoDB = False
def getName( self ):
""" Get the database name
"""
return self.dbName
###########################################################################
#
# These methods manipulate the Transformations table
#
def addTransformation( self, transName, description, longDescription, authorDN, authorGroup, transType,
plugin, agentType, fileMask,
transformationGroup = 'General',
groupSize = 1,
inheritedFrom = 0,
body = '',
maxTasks = 0,
eventsPerTask = 0,
addFiles = True,
connection = False ):
""" Add new transformation definition including its input streams
"""
connection = self.__getConnection( connection )
res = self._getTransformationID( transName, connection = connection )
if res['OK']:
return S_ERROR( "Transformation with name %s already exists with TransformationID = %d" % ( transName,
res['Value'] ) )
elif res['Message'] != "Transformation does not exist":
return res
self.lock.acquire()
res = self._escapeString( body )
    if not res['OK']:
      self.lock.release()
      return S_ERROR( "Failed to parse the transformation body" )
body = res['Value']
req = "INSERT INTO Transformations (TransformationName,Description,LongDescription, \
CreationDate,LastUpdate,AuthorDN,AuthorGroup,Type,Plugin,AgentType,\
FileMask,Status,TransformationGroup,GroupSize,\
InheritedFrom,Body,MaxNumberOfTasks,EventsPerTask)\
VALUES ('%s','%s','%s',\
UTC_TIMESTAMP(),UTC_TIMESTAMP(),'%s','%s','%s','%s','%s',\
'%s','New','%s',%d,\
%d,%s,%d,%d);" % \
( transName, description, longDescription,
authorDN, authorGroup, transType, plugin, agentType,
fileMask, transformationGroup, groupSize,
inheritedFrom, body, maxTasks, eventsPerTask )
res = self._update( req, connection )
if not res['OK']:
self.lock.release()
return res
transID = res['lastRowId']
self.lock.release()
# If the transformation has an input data specification
if fileMask:
self.filters.append( ( transID, re.compile( fileMask ) ) )
if inheritedFrom:
res = self._getTransformationID( inheritedFrom, connection = connection )
if not res['OK']:
gLogger.error( "Failed to get ID for parent transformation, now deleting", res['Message'] )
return self.deleteTransformation( transID, connection = connection )
originalID = res['Value']
      # FIXME: this is not the right place to change status information, and in general this whole block should not be here
res = self.setTransformationParameter( originalID, 'Status', 'Completing',
author = authorDN, connection = connection )
if not res['OK']:
gLogger.error( "Failed to update parent transformation status: now deleting", res['Message'] )
return self.deleteTransformation( transID, connection = connection )
res = self.setTransformationParameter( originalID, 'AgentType', 'Automatic',
author = authorDN, connection = connection )
if not res['OK']:
gLogger.error( "Failed to update parent transformation agent type, now deleting", res['Message'] )
return self.deleteTransformation( transID, connection = connection )
message = 'Creation of the derived transformation (%d)' % transID
self.__updateTransformationLogging( originalID, message, authorDN, connection = connection )
res = self.getTransformationFiles( condDict = {'TransformationID':originalID}, connection = connection )
if not res['OK']:
gLogger.error( "Could not get transformation files, now deleting", res['Message'] )
return self.deleteTransformation( transID, connection = connection )
if res['Records']:
res = self.__insertExistingTransformationFiles( transID, res['Records'], connection = connection )
if not res['OK']:
gLogger.error( "Could not insert files, now deleting", res['Message'] )
return self.deleteTransformation( transID, connection = connection )
if addFiles and fileMask:
self.__addExistingFiles( transID, connection = connection )
message = "Created transformation %d" % transID
self.__updateTransformationLogging( transID, message, authorDN, connection = connection )
return S_OK( transID )
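  # Usage sketch (comment only; all argument values are illustrative):
  #   db = TransformationDB()
  #   res = db.addTransformation( 'MyTransformation', 'short description',
  #                               'long description', authorDN, authorGroup,
  #                               'MCSimulation', 'Standard', 'Manual',
  #                               '^/dirac/prod/.*' )
  #   if res['OK']:
  #     transID = res['Value']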
def getTransformations( self, condDict = {}, older = None, newer = None, timeStamp = 'LastUpdate',
orderAttribute = None, limit = None, extraParams = False, offset = None, connection = False ):
""" Get parameters of all the Transformations with support for the web standard structure """
connection = self.__getConnection( connection )
req = "SELECT %s FROM Transformations %s" % ( intListToString( self.TRANSPARAMS ),
self.buildCondition( condDict, older, newer, timeStamp,
orderAttribute, limit, offset = offset ) )
res = self._query( req, connection )
if not res['OK']:
return res
webList = []
resultList = []
for row in res['Value']:
# Prepare the structure for the web
rList = []
transDict = {}
count = 0
for item in row:
transDict[self.TRANSPARAMS[count]] = item
count += 1
if not isinstance( item, ( int, long ) ):
rList.append( str( item ) )
else:
rList.append( item )
webList.append( rList )
if extraParams:
res = self.__getAdditionalParameters( transDict['TransformationID'], connection = connection )
if not res['OK']:
return res
transDict.update( res['Value'] )
resultList.append( transDict )
result = S_OK( resultList )
result['Records'] = webList
result['ParameterNames'] = copy.copy( self.TRANSPARAMS )
return result
def getTransformation( self, transName, extraParams = False, connection = False ):
"""Get Transformation definition and parameters of Transformation identified by TransformationID
"""
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.getTransformations( condDict = {'TransformationID':transID}, extraParams = extraParams,
connection = connection )
if not res['OK']:
return res
if not res['Value']:
return S_ERROR( "Transformation %s did not exist" % transName )
return S_OK( res['Value'][0] )
def getTransformationParameters( self, transName, parameters, connection = False ):
""" Get the requested parameters for a supplied transformation """
if isinstance( parameters, basestring ):
parameters = [parameters]
extraParams = False
for param in parameters:
if not param in self.TRANSPARAMS:
extraParams = True
res = self.getTransformation( transName, extraParams = extraParams, connection = connection )
if not res['OK']:
return res
transParams = res['Value']
paramDict = {}
for reqParam in parameters:
if not reqParam in transParams.keys():
return S_ERROR( "Parameter %s not defined for transformation" % reqParam )
paramDict[reqParam] = transParams[reqParam]
if len( paramDict ) == 1:
return S_OK( paramDict[reqParam] )
return S_OK( paramDict )
def getTransformationWithStatus( self, status, connection = False ):
""" Gets a list of the transformations with the supplied status """
req = "SELECT TransformationID FROM Transformations WHERE Status = '%s';" % status
res = self._query( req, conn = connection )
if not res['OK']:
return res
transIDs = []
for tupleIn in res['Value']:
transIDs.append( tupleIn[0] )
return S_OK( transIDs )
def getTableDistinctAttributeValues( self, table, attributes, selectDict, older = None, newer = None,
timeStamp = None, connection = False ):
tableFields = { 'Transformations' : self.TRANSPARAMS,
'TransformationTasks' : self.TASKSPARAMS,
'TransformationFiles' : self.TRANSFILEPARAMS}
possibleFields = tableFields.get( table, [] )
return self.__getTableDistinctAttributeValues( table, possibleFields, attributes, selectDict, older, newer,
timeStamp, connection = connection )
def __getTableDistinctAttributeValues( self, table, possible, attributes, selectDict, older, newer,
timeStamp, connection = False ):
connection = self.__getConnection( connection )
attributeValues = {}
for attribute in attributes:
if possible and ( not attribute in possible ):
return S_ERROR( 'Requested attribute (%s) does not exist in table %s' % ( attribute, table ) )
res = self.getDistinctAttributeValues( table, attribute, condDict = selectDict, older = older, newer = newer,
timeStamp = timeStamp, connection = connection )
if not res['OK']:
return S_ERROR( 'Failed to serve values for attribute %s in table %s' % ( attribute, table ) )
attributeValues[attribute] = res['Value']
return S_OK( attributeValues )
def __updateTransformationParameter( self, transID, paramName, paramValue, connection = False ):
if not ( paramName in self.mutable ):
return S_ERROR( "Can not update the '%s' transformation parameter" % paramName )
if paramName == 'Body':
res = self._escapeString( paramValue )
if not res['OK']:
return S_ERROR( "Failed to parse parameter value" )
paramValue = res['Value']
req = "UPDATE Transformations SET %s=%s, LastUpdate=UTC_TIMESTAMP() WHERE TransformationID=%d" % ( paramName,
paramValue,
transID )
return self._update( req, connection )
req = "UPDATE Transformations SET %s='%s', LastUpdate=UTC_TIMESTAMP() WHERE TransformationID=%d" % ( paramName,
paramValue,
transID )
return self._update( req, connection )
def _getTransformationID( self, transName, connection = False ):
""" Method returns ID of transformation with the name=<name> """
try:
transName = long( transName )
cmd = "SELECT TransformationID from Transformations WHERE TransformationID=%d;" % transName
    except (ValueError, TypeError):
      if not isinstance( transName, basestring ):
        return S_ERROR( "Transformation should be ID or name" )
cmd = "SELECT TransformationID from Transformations WHERE TransformationName='%s';" % transName
res = self._query( cmd, connection )
if not res['OK']:
gLogger.error( "Failed to obtain transformation ID for transformation", "%s:%s" % ( transName, res['Message'] ) )
return res
elif not res['Value']:
gLogger.verbose( "Transformation %s does not exist" % ( transName ) )
return S_ERROR( "Transformation does not exist" )
return S_OK( res['Value'][0][0] )
def __deleteTransformation( self, transID, connection = False ):
req = "DELETE FROM Transformations WHERE TransformationID=%d;" % transID
return self._update( req, connection )
def __updateFilters( self, connection = False ):
""" Get filters for all defined input streams in all the transformations.
        The compiled regular expressions are cached in self.filters.
"""
resultList = []
# Define the general filter first
self.database_name = self.__class__.__name__
value = Operations().getValue( 'InputDataFilter/%sFilter' % self.database_name, '' )
if value:
refilter = re.compile( value )
resultList.append( ( 0, refilter ) )
# Per transformation filters
req = "SELECT TransformationID,FileMask FROM Transformations;"
res = self._query( req, connection )
if not res['OK']:
return res
for transID, mask in res['Value']:
if mask:
refilter = re.compile( mask )
resultList.append( ( transID, refilter ) )
self.filters = resultList
return S_OK( resultList )
def __filterFile( self, lfn, filters = None ):
"""Pass the input file through a supplied filter or those currently active """
result = []
if filters:
for transID, refilter in filters:
if refilter.search( lfn ):
result.append( transID )
else:
for transID, refilter in self.filters:
if refilter.search( lfn ):
result.append( transID )
return result
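  # Worked example (comment only, assumed values): with
  #   self.filters = [ ( 0, re.compile( r'\.raw$' ) ),
  #                    ( 42, re.compile( r'^/dirac/prod/' ) ) ]
  # the LFN '/dirac/prod/run1/file.raw' matches both patterns, so
  # __filterFile() returns [0, 42]; '/dirac/user/a.txt' matches neither
  # and returns [].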
###########################################################################
#
# These methods manipulate the AdditionalParameters tables
#
def setTransformationParameter( self, transName, paramName, paramValue, author = '', connection = False ):
""" Add a parameter for the supplied transformations """
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
message = ''
if paramName in self.TRANSPARAMS:
res = self.__updateTransformationParameter( transID, paramName, paramValue, connection = connection )
if res['OK']:
pv = self._escapeString( paramValue )
if not pv['OK']:
return S_ERROR( "Failed to parse parameter value" )
paramValue = pv['Value']
message = '%s updated to %s' % ( paramName, paramValue )
else:
res = self.__addAdditionalTransformationParameter( transID, paramName, paramValue, connection = connection )
if res['OK']:
message = 'Added additional parameter %s' % paramName
if message:
self.__updateTransformationLogging( transID, message, author, connection = connection )
return res
def getAdditionalParameters( self, transName, connection = False ):
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
return self.__getAdditionalParameters( transID, connection = connection )
def deleteTransformationParameter( self, transName, paramName, author = '', connection = False ):
""" Delete a parameter from the additional parameters table """
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
if paramName in self.TRANSPARAMS:
return S_ERROR( "Can not delete core transformation parameter" )
res = self.__deleteTransformationParameters( transID, parameters = [paramName], connection = connection )
if not res['OK']:
return res
self.__updateTransformationLogging( transID, 'Removed additional parameter %s' % paramName, author,
connection = connection )
return res
def __addAdditionalTransformationParameter( self, transID, paramName, paramValue, connection = False ):
req = "DELETE FROM AdditionalParameters WHERE TransformationID=%d AND ParameterName='%s'" % ( transID, paramName )
res = self._update( req, connection )
if not res['OK']:
return res
res = self._escapeString( paramValue )
if not res['OK']:
return S_ERROR( "Failed to parse parameter value" )
paramValue = res['Value']
paramType = 'StringType'
if isinstance( paramValue, ( int, long ) ):
paramType = 'IntType'
req = "INSERT INTO AdditionalParameters (%s) VALUES (%s,'%s',%s,'%s');" % ( ', '.join( self.ADDITIONALPARAMETERS ),
transID, paramName,
paramValue, paramType )
return self._update( req, connection )
def __getAdditionalParameters( self, transID, connection = False ):
req = "SELECT %s FROM AdditionalParameters WHERE TransformationID = %d" % ( ', '.join( self.ADDITIONALPARAMETERS ),
transID )
res = self._query( req, connection )
if not res['OK']:
return res
paramDict = {}
for transID, parameterName, parameterValue, parameterType in res['Value']:
parameterType = eval( parameterType )
if parameterType in [IntType, LongType]:
parameterValue = int( parameterValue )
paramDict[parameterName] = parameterValue
return S_OK( paramDict )
def __deleteTransformationParameters( self, transID, parameters = [], connection = False ):
""" Remove the parameters associated to a transformation """
req = "DELETE FROM AdditionalParameters WHERE TransformationID=%d" % transID
if parameters:
req = "%s AND ParameterName IN (%s);" % ( req, stringListToString( parameters ) )
return self._update( req, connection )
###########################################################################
#
# These methods manipulate the TransformationFiles table
#
def addFilesToTransformation( self, transName, lfns, connection = False ):
""" Add a list of LFNs to the transformation directly """
if not lfns:
return S_ERROR( 'Zero length LFN list' )
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.__getFileIDsForLfns( lfns, connection = connection )
if not res['OK']:
return res
fileIDs, _lfnFilesIDs = res['Value']
failed = {}
successful = {}
missing = []
fileIDsValues = set( fileIDs.values() )
for lfn in lfns:
if lfn not in fileIDsValues:
missing.append( lfn )
if missing:
res = self.__addDataFiles( missing, connection = connection )
if not res['OK']:
return res
for lfn, fileID in res['Value'].items():
fileIDs[fileID] = lfn
# must update the fileIDs
if fileIDs:
res = self.__addFilesToTransformation( transID, fileIDs.keys(), connection = connection )
if not res['OK']:
return res
for fileID in fileIDs.keys():
lfn = fileIDs[fileID]
successful[lfn] = "Present"
if fileID in res['Value']:
successful[lfn] = "Added"
resDict = {'Successful':successful, 'Failed':failed}
return S_OK( resDict )
def getTransformationFiles( self, condDict = {}, older = None, newer = None, timeStamp = 'LastUpdate',
orderAttribute = None, limit = None, offset = None, connection = False ):
""" Get files for the supplied transformations with support for the web standard structure """
connection = self.__getConnection( connection )
req = "SELECT %s FROM TransformationFiles" % ( intListToString( self.TRANSFILEPARAMS ) )
originalFileIDs = {}
if condDict or older or newer:
if condDict.has_key( 'LFN' ):
lfns = condDict.pop( 'LFN' )
if isinstance( lfns, basestring ):
lfns = [lfns]
res = self.__getFileIDsForLfns( lfns, connection = connection )
if not res['OK']:
return res
originalFileIDs, _ignore = res['Value']
condDict['FileID'] = originalFileIDs.keys()
for val in condDict.itervalues():
if not val:
return S_OK( [] )
req = "%s %s" % ( req, self.buildCondition( condDict, older, newer, timeStamp, orderAttribute, limit,
offset = offset ) )
res = self._query( req, connection )
if not res['OK']:
return res
transFiles = res['Value']
fileIDs = [int( row[1] ) for row in transFiles]
webList = []
resultList = []
if not fileIDs:
originalFileIDs = {}
else:
if not originalFileIDs:
res = self.__getLfnsForFileIDs( fileIDs, connection = connection )
if not res['OK']:
return res
originalFileIDs = res['Value'][1]
for row in transFiles:
lfn = originalFileIDs[row[1]]
# Prepare the structure for the web
rList = [lfn]
fDict = {}
fDict['LFN'] = lfn
count = 0
for item in row:
fDict[self.TRANSFILEPARAMS[count]] = item
count += 1
if not isinstance( item, ( int, long ) ):
rList.append( str( item ) )
else:
rList.append( item )
webList.append( rList )
resultList.append( fDict )
result = S_OK( resultList )
# result['LFNs'] = originalFileIDs.values()
result['Records'] = webList
result['ParameterNames'] = ['LFN'] + self.TRANSFILEPARAMS
return result
def getFileSummary( self, lfns, connection = False ):
""" Get file status summary in all the transformations """
connection = self.__getConnection( connection )
condDict = {'LFN':lfns}
res = self.getTransformationFiles( condDict = condDict, connection = connection )
if not res['OK']:
return res
resDict = {}
for fileDict in res['Value']:
lfn = fileDict['LFN']
transID = fileDict['TransformationID']
if not resDict.has_key( lfn ):
resDict[lfn] = {}
if not resDict[lfn].has_key( transID ):
resDict[lfn][transID] = {}
resDict[lfn][transID] = fileDict
failedDict = {}
for lfn in lfns:
if not resDict.has_key( lfn ):
failedDict[lfn] = 'Did not exist in the Transformation database'
return S_OK( {'Successful':resDict, 'Failed':failedDict} )
def setFileStatusForTransformation( self, transID, fileStatusDict = {}, connection = False ):
""" Set file status for the given transformation, based on
fileStatusDict {fileID_A: 'statusA', fileID_B: 'statusB', ...}
The ErrorCount is incremented automatically here
"""
if not fileStatusDict:
return S_OK()
# Building the request with "ON DUPLICATE KEY UPDATE"
req = "INSERT INTO TransformationFiles (TransformationID, FileID, Status, ErrorCount, LastUpdate) VALUES "
updatesList = []
for fileID, status in fileStatusDict.items():
updatesList.append( "(%d, %d, '%s', 0, UTC_TIMESTAMP())" % ( transID, fileID, status ) )
req += ','.join( updatesList )
req += " ON DUPLICATE KEY UPDATE Status=VALUES(Status),ErrorCount=ErrorCount+1,LastUpdate=VALUES(LastUpdate)"
return self._update( req, connection )
def getTransformationStats( self, transName, connection = False ):
""" Get number of files in Transformation Table for each status """
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.getCounters( 'TransformationFiles', ['TransformationID', 'Status'], {'TransformationID':transID} )
if not res['OK']:
return res
statusDict = {}
total = 0
for attrDict, count in res['Value']:
status = attrDict['Status']
if not re.search( '-', status ):
statusDict[status] = count
total += count
statusDict['Total'] = total
return S_OK( statusDict )
def getTransformationFilesCount( self, transName, field, selection = {}, connection = False ):
""" Get the number of files in the TransformationFiles table grouped by the supplied field """
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
selection['TransformationID'] = transID
if field not in self.TRANSFILEPARAMS:
return S_ERROR( "Supplied field not in TransformationFiles table" )
res = self.getCounters( 'TransformationFiles', ['TransformationID', field], selection )
if not res['OK']:
return res
countDict = {}
total = 0
for attrDict, count in res['Value']:
countDict[attrDict[field]] = count
total += count
countDict['Total'] = total
return S_OK( countDict )
def __addFilesToTransformation( self, transID, fileIDs, connection = False ):
req = "SELECT FileID from TransformationFiles"
req = req + " WHERE TransformationID = %d AND FileID IN (%s);" % ( transID, intListToString( fileIDs ) )
res = self._query( req, connection )
if not res['OK']:
return res
for tupleIn in res['Value']:
fileIDs.remove( tupleIn[0] )
if not fileIDs:
return S_OK( [] )
req = "INSERT INTO TransformationFiles (TransformationID,FileID,LastUpdate,InsertedTime) VALUES"
for fileID in fileIDs:
req = "%s (%d,%d,UTC_TIMESTAMP(),UTC_TIMESTAMP())," % ( req, transID, fileID )
req = req.rstrip( ',' )
res = self._update( req, connection )
if not res['OK']:
return res
return S_OK( fileIDs )
def __addExistingFiles( self, transID, connection = False ):
""" Add files that already exist in the DataFiles table to the transformation specified by the transID
"""
    filters = []
    for tID, _filter in self.filters:
      if tID == transID:
        filters = [( tID, _filter )]
        break
    if not filters:
return S_ERROR( 'No filters defined for transformation %d' % transID )
res = self.__getAllFileIDs( connection = connection )
if not res['OK']:
return res
fileIDs, _lfnFilesIDs = res['Value']
passFilter = []
for fileID, lfn in fileIDs.items():
if self.__filterFile( lfn, filters ):
passFilter.append( fileID )
return self.__addFilesToTransformation( transID, passFilter, connection = connection )
def __insertExistingTransformationFiles( self, transID, fileTuplesList, connection = False ):
""" Inserting already transformation files in TransformationFiles table (e.g. for deriving transformations)
"""
gLogger.info( "Inserting %d files in TransformationFiles" % len( fileTuplesList ) )
# splitting in various chunks, in case it is too big
for fileTuples in breakListIntoChunks( fileTuplesList, 10000 ):
gLogger.verbose( "Adding first %d files in TransformationFiles (out of %d)" % ( len( fileTuples ),
len( fileTuplesList ) ) )
req = "INSERT INTO TransformationFiles (TransformationID,Status,TaskID,FileID,TargetSE,UsedSE,LastUpdate) VALUES"
candidates = False
for ft in fileTuples:
_lfn, originalID, fileID, status, taskID, targetSE, usedSE, _errorCount, _lastUpdate, _insertTime = ft[:10]
if status not in ( 'Removed', ):
candidates = True
if not re.search( '-', status ):
status = "%s-inherited" % status
if taskID:
# Should be readable up to 999,999 tasks: that field is an int(11) in the DB, not a string
taskID = 1000000 * int( originalID ) + int( taskID )
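          # e.g. originalID=12, taskID=345 -> 12000345; both parts can be
          # read back: 12000345 // 1000000 == 12, 12000345 % 1000000 == 345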
req = "%s (%d,'%s','%d',%d,'%s','%s',UTC_TIMESTAMP())," % ( req, transID, status, taskID,
fileID, targetSE, usedSE )
if not candidates:
continue
req = req.rstrip( "," )
res = self._update( req, connection )
if not res['OK']:
return res
return S_OK()
def __assignTransformationFile( self, transID, taskID, se, fileIDs, connection = False ):
""" Make necessary updates to the TransformationFiles table for the newly created task
"""
req = "UPDATE TransformationFiles SET TaskID='%d',UsedSE='%s',Status='Assigned',LastUpdate=UTC_TIMESTAMP()"
req = ( req + " WHERE TransformationID = %d AND FileID IN (%s);" ) % ( taskID, se, transID, intListToString( fileIDs ) )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "Failed to assign file to task", res['Message'] )
fileTuples = []
for fileID in fileIDs:
fileTuples.append( ( "(%d,%d,%d)" % ( transID, fileID, taskID ) ) )
req = "INSERT INTO TransformationFileTasks (TransformationID,FileID,TaskID) VALUES %s" % ','.join( fileTuples )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "Failed to assign file to task", res['Message'] )
return res
def __setTransformationFileStatus( self, fileIDs, status, connection = False ):
req = "UPDATE TransformationFiles SET Status = '%s' WHERE FileID IN (%s);" % ( status, intListToString( fileIDs ) )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "Failed to update file status", res['Message'] )
return res
def __setTransformationFileUsedSE( self, fileIDs, usedSE, connection = False ):
req = "UPDATE TransformationFiles SET UsedSE = '%s' WHERE FileID IN (%s);" % ( usedSE, intListToString( fileIDs ) )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "Failed to update file usedSE", res['Message'] )
return res
def __resetTransformationFile( self, transID, taskID, connection = False ):
req = "UPDATE TransformationFiles SET TaskID=NULL, UsedSE='Unknown', Status='Unused'\
WHERE TransformationID = %d AND TaskID=%d;" % ( transID, taskID )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "Failed to reset transformation file", res['Message'] )
return res
def __deleteTransformationFiles( self, transID, connection = False ):
""" Remove the files associated to a transformation """
req = "DELETE FROM TransformationFiles WHERE TransformationID = %d;" % transID
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "Failed to delete transformation files", res['Message'] )
return res
###########################################################################
#
# These methods manipulate the TransformationFileTasks table
#
def __deleteTransformationFileTask( self, transID, taskID, connection = False ):
''' Delete the file associated to a given task of a given transformation
from the TransformationFileTasks table for transformation with TransformationID and TaskID
'''
req = "DELETE FROM TransformationFileTasks WHERE TransformationID=%d AND TaskID=%d" % ( transID, taskID )
return self._update( req, connection )
def __deleteTransformationFileTasks( self, transID, connection = False ):
''' Remove all associations between files, tasks and a transformation '''
req = "DELETE FROM TransformationFileTasks WHERE TransformationID = %d;" % transID
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "Failed to delete transformation files/task history", res['Message'] )
return res
###########################################################################
#
# These methods manipulate the TransformationTasks table
#
def getTransformationTasks( self, condDict = {}, older = None, newer = None, timeStamp = 'CreationTime',
orderAttribute = None, limit = None, inputVector = False,
offset = None, connection = False ):
connection = self.__getConnection( connection )
req = "SELECT %s FROM TransformationTasks %s" % ( intListToString( self.TASKSPARAMS ),
self.buildCondition( condDict, older, newer, timeStamp,
orderAttribute, limit, offset = offset ) )
res = self._query( req, connection )
if not res['OK']:
return res
webList = []
resultList = []
for row in res['Value']:
# Prepare the structure for the web
rList = []
taskDict = {}
count = 0
for item in row:
taskDict[self.TASKSPARAMS[count]] = item
count += 1
if not isinstance( item, ( int, long ) ):
rList.append( str( item ) )
else:
rList.append( item )
webList.append( rList )
if inputVector:
taskDict['InputVector'] = ''
taskID = taskDict['TaskID']
transID = taskDict['TransformationID']
res = self.getTaskInputVector( transID, taskID )
if res['OK']:
if res['Value'].has_key( taskID ):
taskDict['InputVector'] = res['Value'][taskID]
resultList.append( taskDict )
result = S_OK( resultList )
result['Records'] = webList
result['ParameterNames'] = self.TASKSPARAMS
return result
def getTasksForSubmission( self, transName, numTasks = 1, site = '', statusList = ['Created'],
older = None, newer = None, connection = False ):
""" Select tasks with the given status (and site) for submission """
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
condDict = {"TransformationID":transID}
if statusList:
condDict["ExternalStatus"] = statusList
if site:
numTasks = 0
res = self.getTransformationTasks( condDict = condDict, older = older, newer = newer,
timeStamp = 'CreationTime', orderAttribute = None, limit = numTasks,
inputVector = True, connection = connection )
if not res['OK']:
return res
tasks = res['Value']
# Now prepare the tasks
resultDict = {}
for taskDict in tasks:
if len( resultDict ) >= numTasks:
break
taskDict['Status'] = taskDict.pop( 'ExternalStatus' )
taskDict['InputData'] = taskDict.pop( 'InputVector' )
taskDict.pop( 'LastUpdateTime' )
taskDict.pop( 'CreationTime' )
taskDict.pop( 'ExternalID' )
taskID = taskDict['TaskID']
resultDict[taskID] = taskDict
if site:
resultDict[taskID]['Site'] = site
return S_OK( resultDict )
def deleteTasks( self, transName, taskIDbottom, taskIDtop, author = '', connection = False ):
""" Delete tasks with taskID range in transformation """
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
for taskID in range( taskIDbottom, taskIDtop + 1 ):
res = self.__removeTransformationTask( transID, taskID, connection = connection )
if not res['OK']:
return res
message = "Deleted tasks from %d to %d" % ( taskIDbottom, taskIDtop )
self.__updateTransformationLogging( transID, message, author, connection = connection )
return res
def reserveTask( self, transName, taskID, connection = False ):
""" Reserve the taskID from transformation for submission """
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.__checkUpdate( "TransformationTasks", "ExternalStatus", "Reserved", {"TransformationID":transID,
"TaskID":taskID},
connection = connection )
if not res['OK']:
return res
if not res['Value']:
return S_ERROR( 'Failed to set Reserved status for job %d - already Reserved' % int( taskID ) )
# The job is reserved, update the time stamp
res = self.setTaskStatus( transID, taskID, 'Reserved', connection = connection )
if not res['OK']:
return S_ERROR( 'Failed to set Reserved status for job %d - failed to update the time stamp' % int( taskID ) )
return S_OK()
def setTaskStatusAndWmsID( self, transName, taskID, status, taskWmsID, connection = False ):
""" Set status and ExternalID for job with taskID in production with transformationID
"""
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.__setTaskParameterValue( transID, taskID, 'ExternalStatus', status, connection = connection )
if not res['OK']:
return res
return self.__setTaskParameterValue( transID, taskID, 'ExternalID', taskWmsID, connection = connection )
def setTaskStatus( self, transName, taskID, status, connection = False ):
""" Set status for job with taskID in production with transformationID """
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
if not isinstance( taskID, list ):
taskIDList = [taskID]
else:
taskIDList = list( taskID )
for taskID in taskIDList:
res = self.__setTaskParameterValue( transID, taskID, 'ExternalStatus', status, connection = connection )
if not res['OK']:
return res
return S_OK()
def getTransformationTaskStats( self, transName = '', connection = False ):
""" Returns dictionary with number of jobs per status for the given production.
"""
connection = self.__getConnection( connection )
if transName:
res = self._getTransformationID( transName, connection = connection )
if not res['OK']:
gLogger.error( "Failed to get ID for transformation", res['Message'] )
return res
res = self.getCounters( 'TransformationTasks', ['ExternalStatus'], {'TransformationID':res['Value']},
connection = connection )
else:
res = self.getCounters( 'TransformationTasks', ['ExternalStatus', 'TransformationID'], {},
connection = connection )
if not res['OK']:
return res
statusDict = {}
total = 0
for attrDict, count in res['Value']:
status = attrDict['ExternalStatus']
statusDict[status] = count
total += count
statusDict['TotalCreated'] = total
return S_OK( statusDict )
def __setTaskParameterValue( self, transID, taskID, paramName, paramValue, connection = False ):
req = "UPDATE TransformationTasks SET %s='%s', LastUpdateTime=UTC_TIMESTAMP()" % ( paramName, paramValue )
req = req + " WHERE TransformationID=%d AND TaskID=%d;" % ( transID, taskID )
return self._update( req, connection )
def __deleteTransformationTasks( self, transID, connection = False ):
""" Delete all the tasks from the TransformationTasks table for transformation with TransformationID
"""
req = "DELETE FROM TransformationTasks WHERE TransformationID=%d" % transID
return self._update( req, connection )
def __deleteTransformationTask( self, transID, taskID, connection = False ):
""" Delete the task from the TransformationTasks table for transformation with TransformationID
"""
req = "DELETE FROM TransformationTasks WHERE TransformationID=%d AND TaskID=%d" % ( transID, taskID )
return self._update( req, connection )
####################################################################
#
# These methods manipulate the TransformationInputDataQuery table
#
def createTransformationInputDataQuery( self, transName, queryDict, author = '', connection = False ):
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
return self.__addInputDataQuery( transID, queryDict, author = author, connection = connection )
def __addInputDataQuery( self, transID, queryDict, author = '', connection = False ):
res = self.getTransformationInputDataQuery( transID, connection = connection )
if res['OK']:
return S_ERROR( "Input data query already exists for transformation" )
if res['Message'] != 'No InputDataQuery found for transformation':
return res
for parameterName in sorted( queryDict.keys() ):
parameterValue = queryDict[parameterName]
if not parameterValue:
continue
parameterType = 'String'
if isinstance( parameterValue, ( list, tuple ) ):
if isinstance( parameterValue[0], ( int, long ) ):
parameterType = 'Integer'
parameterValue = [str( x ) for x in parameterValue]
parameterValue = ';;;'.join( parameterValue )
else:
if isinstance( parameterValue, ( int, long ) ):
parameterType = 'Integer'
parameterValue = str( parameterValue )
if isinstance( parameterValue, dict ):
parameterType = 'Dict'
parameterValue = str( parameterValue )
res = self.insertFields( 'TransformationInputDataQuery', ['TransformationID', 'ParameterName',
'ParameterValue', 'ParameterType'],
[transID, parameterName, parameterValue, parameterType], conn = connection )
if not res['OK']:
message = 'Failed to add input data query'
self.deleteTransformationInputDataQuery( transID, connection = connection )
break
else:
message = 'Added input data query'
self.__updateTransformationLogging( transID, message, author, connection = connection )
return res
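  # Encoding sketch (assumed values): queryDict = { 'RunNumber': [101, 102] }
  # is stored as ParameterName='RunNumber', ParameterValue='101;;;102',
  # ParameterType='Integer', and decoded back into [101, 102] by
  # getTransformationInputDataQuery() via the ';;;' separator.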
def deleteTransformationInputDataQuery( self, transName, author = '', connection = False ):
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
req = "DELETE FROM TransformationInputDataQuery WHERE TransformationID=%d;" % transID
res = self._update( req, connection )
if not res['OK']:
return res
if res['Value']:
# Add information to the transformation logging
message = 'Deleted input data query'
self.__updateTransformationLogging( transID, message, author, connection = connection )
return res
def getTransformationInputDataQuery( self, transName, connection = False ):
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
req = "SELECT ParameterName,ParameterValue,ParameterType FROM TransformationInputDataQuery"
req = req + " WHERE TransformationID=%d;" % transID
res = self._query( req, connection )
if not res['OK']:
return res
queryDict = {}
for parameterName, parameterValue, parameterType in res['Value']:
if re.search( ';;;', str( parameterValue ) ):
parameterValue = parameterValue.split( ';;;' )
if parameterType == 'Integer':
parameterValue = [int( x ) for x in parameterValue]
elif parameterType == 'Integer':
parameterValue = int( parameterValue )
elif parameterType == 'Dict':
parameterValue = eval( parameterValue )
queryDict[parameterName] = parameterValue
if not queryDict:
return S_ERROR( "No InputDataQuery found for transformation" )
return S_OK( queryDict )
###########################################################################
#
# These methods manipulate the TaskInputs table
#
def getTaskInputVector( self, transName, taskID, connection = False ):
""" Get input vector for the given task """
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
if not isinstance( taskID, list ):
taskIDList = [taskID]
else:
taskIDList = list( taskID )
taskString = ','.join( ["'" + str( x ) + "'" for x in taskIDList] )
req = "SELECT TaskID,InputVector FROM TaskInputs WHERE TaskID in (%s) AND TransformationID='%d';" % ( taskString,
transID )
res = self._query( req )
inputVectorDict = {}
if res['OK'] and res['Value']:
for row in res['Value']:
inputVectorDict[row[0]] = row[1]
return S_OK( inputVectorDict )
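  # Illustrative usage (the task IDs and LFNs are hypothetical):
  #   res = db.getTaskInputVector( transID, [1, 2] )
  #   # res['Value'] == {1: '/lfn/a;/lfn/b', 2: '/lfn/c'}
  # Each task maps to its semicolon-joined LFN input vector, the same encoding
  # written by __insertTaskInputs below.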
def __insertTaskInputs( self, transID, taskID, lfns, connection = False ):
    vector = ';'.join( lfns )
fields = ['TransformationID', 'TaskID', 'InputVector']
values = [transID, taskID, vector]
res = self.insertFields( 'TaskInputs', fields, values, connection )
if not res['OK']:
gLogger.error( "Failed to add input vector to task %d" % taskID )
return res
def __deleteTransformationTaskInputs( self, transID, taskID = 0, connection = False ):
""" Delete all the tasks inputs from the TaskInputs table for transformation with TransformationID
"""
req = "DELETE FROM TaskInputs WHERE TransformationID=%d" % transID
if taskID:
req = "%s AND TaskID=%d" % ( req, int( taskID ) )
return self._update( req, connection )
###########################################################################
#
# These methods manipulate the TransformationLog table
#
def __updateTransformationLogging( self, transName, message, authorDN, connection = False ):
""" Update the Transformation log table with any modifications
"""
if not authorDN:
res = getProxyInfo( False, False )
if res['OK']:
authorDN = res['Value']['subject']
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
req = "INSERT INTO TransformationLog (TransformationID,Message,Author,MessageDate)"
req = req + " VALUES (%s,'%s','%s',UTC_TIMESTAMP());" % ( transID, message, authorDN )
return self._update( req, connection )
def getTransformationLogging( self, transName, connection = False ):
""" Get logging info from the TransformationLog table
"""
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
req = "SELECT TransformationID, Message, Author, MessageDate FROM TransformationLog"
req = req + " WHERE TransformationID=%s ORDER BY MessageDate;" % ( transID )
res = self._query( req )
if not res['OK']:
return res
transList = []
for transID, message, authorDN, messageDate in res['Value']:
transDict = {}
transDict['TransformationID'] = transID
transDict['Message'] = message
transDict['AuthorDN'] = authorDN
transDict['MessageDate'] = messageDate
transList.append( transDict )
return S_OK( transList )
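  # Sketch of one returned entry (field values are assumptions):
  #   {'TransformationID': 42, 'Message': 'Added input data query',
  #    'AuthorDN': '/DC=org/DC=example/CN=someuser',
  #    'MessageDate': datetime.datetime(...)}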
def __deleteTransformationLog( self, transID, connection = False ):
""" Remove the entries in the transformation log for a transformation
"""
req = "DELETE FROM TransformationLog WHERE TransformationID=%d;" % transID
return self._update( req, connection )
###########################################################################
#
# These methods manipulate the DataFiles table
#
def __getAllFileIDs( self, connection = False ):
""" Get all the fileIDs for the supplied list of lfns
"""
req = "SELECT LFN,FileID FROM DataFiles;"
res = self._query( req, connection )
if not res['OK']:
return res
fids = {}
lfns = {}
for lfn, fileID in res['Value']:
fids[fileID] = lfn
lfns[lfn] = fileID
return S_OK( ( fids, lfns ) )
def __getFileIDsForLfns( self, lfns, connection = False ):
""" Get file IDs for the given list of lfns
warning: if the file is not present, we'll see no errors
"""
req = "SELECT LFN,FileID FROM DataFiles WHERE LFN in (%s);" % ( stringListToString( lfns ) )
res = self._query( req, connection )
if not res['OK']:
return res
fids = {}
lfns = {}
for lfn, fileID in res['Value']:
fids[fileID] = lfn
lfns[lfn] = fileID
return S_OK( ( fids, lfns ) )
def __getLfnsForFileIDs( self, fileIDs, connection = False ):
""" Get lfns for the given list of fileIDs
"""
req = "SELECT LFN,FileID FROM DataFiles WHERE FileID in (%s);" % stringListToString( fileIDs )
res = self._query( req, connection )
if not res['OK']:
return res
fids = {}
lfns = {}
for lfn, fileID in res['Value']:
fids[lfn] = fileID
lfns[fileID] = lfn
return S_OK( ( fids, lfns ) )
def __addDataFiles( self, lfns, connection = False ):
""" Add a file to the DataFiles table and retrieve the FileIDs
"""
res = self.__getFileIDsForLfns( lfns, connection = connection )
if not res['OK']:
return res
_fileIDs, lfnFileIDs = res['Value']
for lfn in lfns:
      if lfn not in lfnFileIDs:
req = "INSERT INTO DataFiles (LFN,Status) VALUES ('%s','New');" % lfn
res = self._update( req, connection )
if not res['OK']:
return res
lfnFileIDs[lfn] = res['lastRowId']
return S_OK( lfnFileIDs )
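  # Hedged sketch: for lfns = ['/lfn/known', '/lfn/new'] where only the first
  # already exists in DataFiles, a single INSERT is issued for '/lfn/new' and
  # the returned mapping holds FileIDs for both, e.g.
  #   {'/lfn/known': 7, '/lfn/new': 8}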
def __setDataFileStatus( self, fileIDs, status, connection = False ):
""" Set the status of the supplied files
"""
req = "UPDATE DataFiles SET Status = '%s' WHERE FileID IN (%s);" % ( status, intListToString( fileIDs ) )
return self._update( req, connection )
###########################################################################
#
# These methods manipulate multiple tables
#
def addTaskForTransformation( self, transID, lfns = [], se = 'Unknown', connection = False ):
""" Create a new task with the supplied files for a transformation.
"""
res = self._getConnectionTransID( connection, transID )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
    # Be sure that all the supplied LFNs are known to the database for the supplied transformation
fileIDs = []
if lfns:
res = self.getTransformationFiles( condDict = {'TransformationID':transID, 'LFN':lfns}, connection = connection )
if not res['OK']:
return res
foundLfns = set()
for fileDict in res['Value']:
fileIDs.append( fileDict['FileID'] )
lfn = fileDict['LFN']
if fileDict['Status'] in self.allowedStatusForTasks:
foundLfns.add( lfn )
else:
gLogger.error( "Supplied file not in %s status but %s" % ( self.allowedStatusForTasks, fileDict['Status'] ), lfn )
unavailableLfns = set( lfns ) - foundLfns
if unavailableLfns:
gLogger.error( "Supplied files not found for transformation", sorted( unavailableLfns ) )
return S_ERROR( "Not all supplied files available in the transformation database" )
# Insert the task into the jobs table and retrieve the taskID
self.lock.acquire()
req = "INSERT INTO TransformationTasks(TransformationID, ExternalStatus, ExternalID, TargetSE,"
req = req + " CreationTime, LastUpdateTime)"
req = req + " VALUES (%s,'%s','%d','%s', UTC_TIMESTAMP(), UTC_TIMESTAMP());" % ( transID, 'Created', 0, se )
res = self._update( req, connection )
if not res['OK']:
self.lock.release()
gLogger.error( "Failed to publish task for transformation", res['Message'] )
return res
    # With InnoDB, TaskID is computed by a trigger, which sets the local variable @last (per connection).
    # @last is the TaskID of the last insert; with multi-row inserts it will be the first new TaskID inserted.
    # The trigger TaskID_Generator must be present with the InnoDB schema (defined in TransformationDB.sql).
if self.isTransformationTasksInnoDB:
res = self._query( "SELECT @last;", connection )
else:
res = self._query( "SELECT LAST_INSERT_ID();", connection )
self.lock.release()
if not res['OK']:
return res
taskID = int( res['Value'][0][0] )
gLogger.verbose( "Published task %d for transformation %d." % ( taskID, transID ) )
# If we have input data then update their status, and taskID in the transformation table
if lfns:
res = self.__insertTaskInputs( transID, taskID, lfns, connection = connection )
if not res['OK']:
self.__removeTransformationTask( transID, taskID, connection = connection )
return res
res = self.__assignTransformationFile( transID, taskID, se, fileIDs, connection = connection )
if not res['OK']:
self.__removeTransformationTask( transID, taskID, connection = connection )
return res
return S_OK( taskID )
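  # Illustrative call (a sketch assuming a live database connection; the
  # transformation ID, LFNs and SE name are hypothetical):
  #   res = db.addTaskForTransformation( 42, lfns = ['/lfn/a', '/lfn/b'], se = 'CERN-DST' )
  #   if res['OK']:
  #     taskID = res['Value']  # ID of the newly created task of transformation 42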
def extendTransformation( self, transName, nTasks, author = '', connection = False ):
""" Extend SIMULATION type transformation by nTasks number of tasks
"""
connection = self.__getConnection( connection )
res = self.getTransformation( transName, connection = connection )
if not res['OK']:
gLogger.error( "Failed to get transformation details", res['Message'] )
return res
transType = res['Value']['Type']
transID = res['Value']['TransformationID']
extendableProds = Operations().getValue( 'Transformations/ExtendableTransfTypes', ['Simulation', 'MCSimulation'] )
if transType.lower() not in [ep.lower() for ep in extendableProds]:
return S_ERROR( 'Can not extend non-SIMULATION type production' )
taskIDs = []
for _task in range( nTasks ):
res = self.addTaskForTransformation( transID, connection = connection )
if not res['OK']:
return res
taskIDs.append( res['Value'] )
# Add information to the transformation logging
message = 'Transformation extended by %d tasks' % nTasks
self.__updateTransformationLogging( transName, message, author, connection = connection )
return S_OK( taskIDs )
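  # Example (hypothetical values): extending a simulation-type transformation
  # by three tasks returns the list of new task IDs, e.g.
  #   res = db.extendTransformation( 'MyMCProduction', 3 )
  #   # res['Value'] == [101, 102, 103]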
def cleanTransformation( self, transName, author = '', connection = False ):
""" Clean the transformation specified by name or id """
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.__deleteTransformationFileTasks( transID, connection = connection )
if not res['OK']:
return res
res = self.__deleteTransformationFiles( transID, connection = connection )
if not res['OK']:
return res
res = self.__deleteTransformationTaskInputs( transID, connection = connection )
if not res['OK']:
return res
res = self.__deleteTransformationTasks( transID, connection = connection )
if not res['OK']:
return res
self.__updateTransformationLogging( transID, "Transformation Cleaned", author, connection = connection )
return S_OK( transID )
def deleteTransformation( self, transName, author = '', connection = False ):
""" Remove the transformation specified by name or id """
res = self._getConnectionTransID( connection, transName )
if not res['OK']:
return res
connection = res['Value']['Connection']
transID = res['Value']['TransformationID']
res = self.cleanTransformation( transID, author = author, connection = connection )
if not res['OK']:
return res
res = self.__deleteTransformationLog( transID, connection = connection )
if not res['OK']:
return res
res = self.__deleteTransformationParameters( transID, connection = connection )
if not res['OK']:
return res
res = self.__deleteTransformation( transID, connection = connection )
if not res['OK']:
return res
res = self.__updateFilters()
if not res['OK']:
return res
return S_OK()
def __removeTransformationTask( self, transID, taskID, connection = False ):
res = self.__deleteTransformationTaskInputs( transID, taskID, connection = connection )
if not res['OK']:
return res
res = self.__deleteTransformationFileTask( transID, taskID, connection = connection )
if not res['OK']:
return res
res = self.__resetTransformationFile( transID, taskID, connection = connection )
if not res['OK']:
return res
return self.__deleteTransformationTask( transID, taskID, connection = connection )
def __checkUpdate( self, table, param, paramValue, selectDict = {}, connection = False ):
""" Check whether the update will perform an update """
req = "UPDATE %s SET %s = '%s'" % ( table, param, paramValue )
if selectDict:
req = "%s %s" % ( req, self.buildCondition( selectDict ) )
return self._update( req, connection )
def __getConnection( self, connection ):
if connection:
return connection
res = self._getConnection()
if res['OK']:
return res['Value']
gLogger.warn( "Failed to get MySQL connection", res['Message'] )
return connection
def _getConnectionTransID( self, connection, transName ):
connection = self.__getConnection( connection )
res = self._getTransformationID( transName, connection = connection )
if not res['OK']:
gLogger.error( "Failed to get ID for transformation", res['Message'] )
return res
transID = res['Value']
resDict = {'Connection':connection, 'TransformationID':transID}
return S_OK( resDict )
####################################################################################
#
# This part should correspond to the DIRAC Standard File Catalog interface
#
####################################################################################
def exists( self, lfns, connection = False ):
""" Check the presence of the lfn in the TransformationDB DataFiles table
"""
gLogger.info( "TransformationDB.exists: Attempting to determine existence of %s files." % len( lfns ) )
res = self.__getFileIDsForLfns( lfns, connection = connection )
if not res['OK']:
return res
fileIDs, _lfnFilesIDs = res['Value']
failed = {}
successful = {}
fileIDsValues = set( fileIDs.values() )
    for lfn in lfns:
      successful[lfn] = lfn in fileIDsValues
resDict = {'Successful':successful, 'Failed':failed}
return S_OK( resDict )
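  # Minimal sketch of the returned structure (LFNs are hypothetical):
  #   res = db.exists( ['/lfn/present', '/lfn/absent'] )
  #   # res['Value'] == {'Successful': {'/lfn/present': True,
  #   #                                 '/lfn/absent': False},
  #   #                  'Failed': {}}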
def addFile( self, fileDicts, force = False, connection = False ):
""" Add a new file to the TransformationDB together with its first replica.
In the input dict, the only mandatory info are PFN and SE
"""
gLogger.info( "TransformationDB.addFile: Attempting to add %s files." % len( fileDicts.keys() ) )
successful = {}
failed = {}
# Determine which files pass the filters and are to be added to transformations
transFiles = {}
filesToAdd = []
for lfn in fileDicts.keys():
fileTrans = self.__filterFile( lfn )
if not ( fileTrans or force ):
successful[lfn] = True
else:
filesToAdd.append( lfn )
for trans in fileTrans:
if not transFiles.has_key( trans ):
transFiles[trans] = []
transFiles[trans].append( lfn )
# Add the files to the DataFiles and Replicas tables
if filesToAdd:
connection = self.__getConnection( connection )
res = self.__addDataFiles( filesToAdd, connection = connection )
if not res['OK']:
return res
lfnFileIDs = res['Value']
for lfn in filesToAdd:
if lfnFileIDs.has_key( lfn ):
successful[lfn] = True
else:
failed[lfn] = True
# Add the files to the transformations
# TODO: THIS SHOULD BE TESTED WITH A TRANSFORMATION WITH A FILTER
for transID, lfns in transFiles.items():
fileIDs = []
for lfn in lfns:
if lfnFileIDs.has_key( lfn ):
fileIDs.append( lfnFileIDs[lfn] )
        if fileIDs:
          res = self.__addFilesToTransformation( transID, fileIDs, connection = connection )
          if not res['OK']:
            gLogger.error( "Failed to add files to transformation", "%s %s" % ( transID, res['Message'] ) )
            # Flag every file of this transformation, not only the last LFN iterated
            for lfn in lfns:
              failed[lfn] = True
              successful.pop( lfn, None )
          else:
            for lfn in lfns:
              successful[lfn] = True
resDict = {'Successful':successful, 'Failed':failed}
return S_OK( resDict )
def removeFile( self, lfns, connection = False ):
""" Remove file specified by lfn from the ProcessingDB
"""
gLogger.info( "TransformationDB.removeFile: Attempting to remove %s files." % len( lfns ) )
failed = {}
successful = {}
connection = self.__getConnection( connection )
if not lfns:
return S_ERROR( "No LFNs supplied" )
res = self.__getFileIDsForLfns( lfns, connection = connection )
if not res['OK']:
return res
fileIDs, lfnFilesIDs = res['Value']
for lfn in lfns:
if not lfnFilesIDs.has_key( lfn ):
successful[lfn] = 'File did not exist'
if fileIDs:
res = self.__setTransformationFileStatus( fileIDs.keys(), 'Deleted', connection = connection )
if not res['OK']:
return res
res = self.__setDataFileStatus( fileIDs.keys(), 'Deleted', connection = connection )
if not res['OK']:
return S_ERROR( "TransformationDB.removeFile: Failed to remove files." )
for lfn in lfnFilesIDs.keys():
if not failed.has_key( lfn ):
successful[lfn] = True
resDict = {'Successful':successful, 'Failed':failed}
return S_OK( resDict )
def addDirectory( self, path, force = False ):
""" Adds all the files stored in a given directory in file catalog """
gLogger.info( "TransformationDB.addDirectory: Attempting to populate %s." % path )
res = pythonCall( 30, self.__addDirectory, path, force )
if not res['OK']:
gLogger.error( "Failed to invoke addDirectory with shifter proxy" )
return res
return res['Value']
def __addDirectory( self, path, force ):
res = setupShifterProxyInEnv( "ProductionManager" )
if not res['OK']:
return S_OK( "Failed to setup shifter proxy" )
catalog = FileCatalog()
start = time.time()
res = catalog.listDirectory( path )
if not res['OK']:
gLogger.error( "TransformationDB.addDirectory: Failed to get files. %s" % res['Message'] )
return res
if not path in res['Value']['Successful']:
gLogger.error( "TransformationDB.addDirectory: Failed to get files." )
return res
gLogger.info( "TransformationDB.addDirectory: Obtained %s files in %s seconds." % ( path, time.time() - start ) )
successful = []
failed = []
for lfn in res['Value']['Successful'][path]["Files"].keys():
res = self.addFile( {lfn:{}}, force = force )
if not res['OK']:
failed.append( lfn )
continue
if not lfn in res['Value']['Successful']:
failed.append( lfn )
else:
successful.append( lfn )
return {"OK":True, "Value": len( res['Value']['Successful'] ), "Successful":successful, "Failed": failed }
|
marcelovilaca/DIRAC
|
TransformationSystem/DB/TransformationDB.py
|
Python
|
gpl-3.0
| 68,496
|
[
"DIRAC"
] |
0232987408b9e22ebe2b00e1d1fa02c4fbdc344599319a64c44a20554a205fbc
|
# $Id: nodes.py 7788 2015-02-16 22:10:52Z milde $
# Author: David Goodger <goodger@python.org>
# Maintainer: docutils-develop@lists.sourceforge.net
# Copyright: This module has been placed in the public domain.
"""
Docutils document tree element class library.
Classes in CamelCase are abstract base classes or auxiliary classes. The one
exception is `Text`, for a text (PCDATA) node; uppercase is used to
differentiate from element classes. Classes in lower_case_with_underscores
are element classes, matching the XML element generic identifiers in the DTD_.
The position of each node (the level at which it can occur) is significant and
is represented by abstract base classes (`Root`, `Structural`, `Body`,
`Inline`, etc.). Certain transformations will be easier because we can use
``isinstance(node, base_class)`` to determine the position of the node in the
hierarchy.
.. _DTD: http://docutils.sourceforge.net/docs/ref/docutils.dtd
"""
__docformat__ = 'reStructuredText'
import sys
import os
import re
import warnings
import types
import unicodedata
# ==============================
# Functional Node Base Classes
# ==============================
class Node(object):
"""Abstract base class of nodes in a document tree."""
parent = None
"""Back-reference to the Node immediately containing this Node."""
document = None
"""The `document` node at the root of the tree containing this Node."""
source = None
"""Path or description of the input source which generated this Node."""
line = None
"""The line number (1-based) of the beginning of this Node in `source`."""
def __nonzero__(self):
"""
Node instances are always true, even if they're empty. A node is more
than a simple container. Its boolean "truth" does not depend on
having one or more subnodes in the doctree.
Use `len()` to check node length. Use `None` to represent a boolean
false value.
"""
return True
if sys.version_info < (3,):
# on 2.x, str(node) will be a byte string with Unicode
# characters > 255 escaped; on 3.x this is no longer necessary
def __str__(self):
return unicode(self).encode('raw_unicode_escape')
def asdom(self, dom=None):
"""Return a DOM **fragment** representation of this Node."""
if dom is None:
import xml.dom.minidom as dom
domroot = dom.Document()
return self._dom_node(domroot)
def pformat(self, indent=' ', level=0):
"""
Return an indented pseudo-XML representation, for test purposes.
Override in subclasses.
"""
raise NotImplementedError
def copy(self):
"""Return a copy of self."""
raise NotImplementedError
def deepcopy(self):
"""Return a deep copy of self (also copying children)."""
raise NotImplementedError
def setup_child(self, child):
child.parent = self
if self.document:
child.document = self.document
if child.source is None:
child.source = self.document.current_source
if child.line is None:
child.line = self.document.current_line
def walk(self, visitor):
"""
Traverse a tree of `Node` objects, calling the
`dispatch_visit()` method of `visitor` when entering each
node. (The `walkabout()` method is similar, except it also
calls the `dispatch_departure()` method before exiting each
node.)
This tree traversal supports limited in-place tree
modifications. Replacing one node with one or more nodes is
OK, as is removing an element. However, if the node removed
or replaced occurs after the current node, the old node will
still be traversed, and any new nodes will not.
Within ``visit`` methods (and ``depart`` methods for
`walkabout()`), `TreePruningException` subclasses may be raised
(`SkipChildren`, `SkipSiblings`, `SkipNode`, `SkipDeparture`).
Parameter `visitor`: A `NodeVisitor` object, containing a
``visit`` implementation for each `Node` subclass encountered.
Return true if we should stop the traversal.
"""
stop = False
visitor.document.reporter.debug(
'docutils.nodes.Node.walk calling dispatch_visit for %s'
% self.__class__.__name__)
try:
try:
visitor.dispatch_visit(self)
except (SkipChildren, SkipNode):
return stop
except SkipDeparture: # not applicable; ignore
pass
children = self.children
try:
for child in children[:]:
if child.walk(visitor):
stop = True
break
except SkipSiblings:
pass
except StopTraversal:
stop = True
return stop
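    # Hedged usage sketch (``NodeVisitor`` and ``SparseNodeVisitor`` are
    # defined later in this module; the collector class is an assumption for
    # illustration only):
    #
    #   class TextCollector(SparseNodeVisitor):
    #       def __init__(self, document):
    #           NodeVisitor.__init__(self, document)
    #           self.texts = []
    #       def visit_Text(self, node):
    #           self.texts.append(node.astext())
    #
    #   collector = TextCollector(document)
    #   document.walk(collector)
    #   # collector.texts now holds the text of every Text node visited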
def walkabout(self, visitor):
"""
Perform a tree traversal similarly to `Node.walk()` (which
see), except also call the `dispatch_departure()` method
before exiting each node.
Parameter `visitor`: A `NodeVisitor` object, containing a
``visit`` and ``depart`` implementation for each `Node`
subclass encountered.
Return true if we should stop the traversal.
"""
call_depart = True
stop = False
visitor.document.reporter.debug(
'docutils.nodes.Node.walkabout calling dispatch_visit for %s'
% self.__class__.__name__)
try:
try:
visitor.dispatch_visit(self)
except SkipNode:
return stop
except SkipDeparture:
call_depart = False
children = self.children
try:
for child in children[:]:
if child.walkabout(visitor):
stop = True
break
except SkipSiblings:
pass
except SkipChildren:
pass
except StopTraversal:
stop = True
if call_depart:
visitor.document.reporter.debug(
'docutils.nodes.Node.walkabout calling dispatch_departure '
'for %s' % self.__class__.__name__)
visitor.dispatch_departure(self)
return stop
def _fast_traverse(self, cls):
"""Specialized traverse() that only supports instance checks."""
result = []
if isinstance(self, cls):
result.append(self)
for child in self.children:
result.extend(child._fast_traverse(cls))
return result
def _all_traverse(self):
"""Specialized traverse() that doesn't check for a condition."""
result = []
result.append(self)
for child in self.children:
result.extend(child._all_traverse())
return result
def traverse(self, condition=None, include_self=True, descend=True,
siblings=False, ascend=False):
"""
Return an iterable containing
* self (if include_self is true)
* all descendants in tree traversal order (if descend is true)
* all siblings (if siblings is true) and their descendants (if
also descend is true)
* the siblings of the parent (if ascend is true) and their
descendants (if also descend is true), and so on
If `condition` is not None, the iterable contains only nodes
for which ``condition(node)`` is true. If `condition` is a
node class ``cls``, it is equivalent to a function consisting
of ``return isinstance(node, cls)``.
If ascend is true, assume siblings to be true as well.
For example, given the following tree::
<paragraph>
<emphasis> <--- emphasis.traverse() and
<strong> <--- strong.traverse() are called.
Foo
Bar
<reference name="Baz" refid="baz">
Baz
Then list(emphasis.traverse()) equals ::
[<emphasis>, <strong>, <#text: Foo>, <#text: Bar>]
and list(strong.traverse(ascend=True)) equals ::
[<strong>, <#text: Foo>, <#text: Bar>, <reference>, <#text: Baz>]
"""
if ascend:
siblings=True
# Check for special argument combinations that allow using an
# optimized version of traverse()
if include_self and descend and not siblings:
if condition is None:
return self._all_traverse()
elif isinstance(condition, (types.ClassType, type)):
return self._fast_traverse(condition)
# Check if `condition` is a class (check for TypeType for Python
# implementations that use only new-style classes, like PyPy).
if isinstance(condition, (types.ClassType, type)):
node_class = condition
def condition(node, node_class=node_class):
return isinstance(node, node_class)
r = []
if include_self and (condition is None or condition(self)):
r.append(self)
if descend and len(self.children):
for child in self:
r.extend(child.traverse(include_self=True, descend=True,
siblings=False, ascend=False,
condition=condition))
if siblings or ascend:
node = self
while node.parent:
index = node.parent.index(node)
for sibling in node.parent[index+1:]:
r.extend(sibling.traverse(include_self=True,
descend=descend,
siblings=False, ascend=False,
condition=condition))
if not ascend:
break
else:
node = node.parent
return r
def next_node(self, condition=None, include_self=False, descend=True,
siblings=False, ascend=False):
"""
Return the first node in the iterable returned by traverse(),
or None if the iterable is empty.
Parameter list is the same as of traverse. Note that
include_self defaults to 0, though.
"""
iterable = self.traverse(condition=condition,
include_self=include_self, descend=descend,
siblings=siblings, ascend=ascend)
try:
return iterable[0]
except IndexError:
return None
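    # Sketch (assuming the element classes defined later in this module):
    #   first_para = document.next_node(condition=paragraph)
    # returns the first paragraph node in document order, or None.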
if sys.version_info < (3,):
class reprunicode(unicode):
"""
A unicode sub-class that removes the initial u from unicode's repr.
"""
def __repr__(self):
return unicode.__repr__(self)[1:]
else:
reprunicode = str
def ensure_str(s):
"""
    Failsafe conversion of `unicode` to `str`.
"""
if sys.version_info < (3,) and isinstance(s, unicode):
return s.encode('ascii', 'backslashreplace')
return s
class Text(Node, reprunicode):
"""
Instances are terminal nodes (leaves) containing text only; no child
nodes or attributes. Initialize by passing a string to the constructor.
Access the text itself with the `astext` method.
"""
tagname = '#text'
children = ()
"""Text nodes have no children, and cannot have children."""
if sys.version_info > (3,):
def __new__(cls, data, rawsource=None):
"""Prevent the rawsource argument from propagating to str."""
if isinstance(data, bytes):
raise TypeError('expecting str data, not bytes')
return reprunicode.__new__(cls, data)
else:
def __new__(cls, data, rawsource=None):
"""Prevent the rawsource argument from propagating to str."""
return reprunicode.__new__(cls, data)
def __init__(self, data, rawsource=''):
self.rawsource = rawsource
"""The raw text from which this element was constructed."""
def shortrepr(self, maxlen=18):
data = self
if len(data) > maxlen:
data = data[:maxlen-4] + ' ...'
return '<%s: %r>' % (self.tagname, reprunicode(data))
def __repr__(self):
return self.shortrepr(maxlen=68)
def _dom_node(self, domroot):
return domroot.createTextNode(unicode(self))
def astext(self):
return reprunicode(self)
# Note about __unicode__: The implementation of __unicode__ here,
# and the one raising NotImplemented in the superclass Node had
# to be removed when changing Text to a subclass of unicode instead
# of UserString, since there is no way to delegate the __unicode__
# call to the superclass unicode:
# unicode itself does not have __unicode__ method to delegate to
# and calling unicode(self) or unicode.__new__ directly creates
# an infinite loop
def copy(self):
return self.__class__(reprunicode(self), rawsource=self.rawsource)
def deepcopy(self):
return self.copy()
def pformat(self, indent=' ', level=0):
result = []
indent = indent * level
for line in self.splitlines():
result.append(indent + line + '\n')
return ''.join(result)
# rstrip and lstrip are used by substitution definitions where
# they are expected to return a Text instance, this was formerly
# taken care of by UserString. Note that then and now the
# rawsource member is lost.
def rstrip(self, chars=None):
return self.__class__(reprunicode.rstrip(self, chars))
def lstrip(self, chars=None):
return self.__class__(reprunicode.lstrip(self, chars))
class Element(Node):
"""
`Element` is the superclass to all specific elements.
Elements contain attributes and child nodes. Elements emulate
dictionaries for attributes, indexing by attribute name (a string). To
set the attribute 'att' to 'value', do::
element['att'] = 'value'
There are two special attributes: 'ids' and 'names'. Both are
lists of unique identifiers, and names serve as human interfaces
to IDs. Names are case- and whitespace-normalized (see the
fully_normalize_name() function), and IDs conform to the regular
expression ``[a-z](-?[a-z0-9]+)*`` (see the make_id() function).
Elements also emulate lists for child nodes (element nodes and/or text
nodes), indexing by integer. To get the first child node, use::
element[0]
Elements may be constructed using the ``+=`` operator. To add one new
child node to element, do::
element += node
This is equivalent to ``element.append(node)``.
To add a list of multiple child nodes at once, use the same ``+=``
operator::
element += [node1, node2]
This is equivalent to ``element.extend([node1, node2])``.
"""
basic_attributes = ('ids', 'classes', 'names', 'dupnames')
"""List attributes which are defined for every Element-derived class
instance and can be safely transferred to a different node."""
local_attributes = ('backrefs',)
"""A list of class-specific attributes that should not be copied with the
standard attributes when replacing a node.
NOTE: Derived classes should override this value to prevent any of its
attributes being copied by adding to the value in its parent class."""
list_attributes = basic_attributes + local_attributes
"""List attributes, automatically initialized to empty lists for
all nodes."""
known_attributes = list_attributes + ('source',)
"""List attributes that are known to the Element base class."""
tagname = None
"""The element generic identifier. If None, it is set as an instance
attribute to the name of the class."""
child_text_separator = '\n\n'
"""Separator for child nodes, used by `astext()` method."""
def __init__(self, rawsource='', *children, **attributes):
self.rawsource = rawsource
"""The raw text from which this element was constructed."""
self.children = []
"""List of child nodes (elements and/or `Text`)."""
self.extend(children) # maintain parent info
self.attributes = {}
"""Dictionary of attribute {name: value}."""
# Initialize list attributes.
for att in self.list_attributes:
self.attributes[att] = []
for att, value in attributes.items():
att = att.lower()
if att in self.list_attributes:
# mutable list; make a copy for this node
self.attributes[att] = value[:]
else:
self.attributes[att] = value
if self.tagname is None:
self.tagname = self.__class__.__name__
def _dom_node(self, domroot):
element = domroot.createElement(self.tagname)
for attribute, value in self.attlist():
if isinstance(value, list):
value = ' '.join([serial_escape('%s' % (v,)) for v in value])
element.setAttribute(attribute, '%s' % value)
for child in self.children:
element.appendChild(child._dom_node(domroot))
return element
def __repr__(self):
data = ''
for c in self.children:
data += c.shortrepr()
if len(data) > 60:
data = data[:56] + ' ...'
break
if self['names']:
return '<%s "%s": %s>' % (self.__class__.__name__,
'; '.join([ensure_str(n) for n in self['names']]), data)
else:
return '<%s: %s>' % (self.__class__.__name__, data)
def shortrepr(self):
if self['names']:
return '<%s "%s"...>' % (self.__class__.__name__,
'; '.join([ensure_str(n) for n in self['names']]))
else:
return '<%s...>' % self.tagname
def __unicode__(self):
if self.children:
return u'%s%s%s' % (self.starttag(),
''.join([unicode(c) for c in self.children]),
self.endtag())
else:
return self.emptytag()
if sys.version_info > (3,):
# 2to3 doesn't convert __unicode__ to __str__
__str__ = __unicode__
def starttag(self, quoteattr=None):
# the optional arg is used by the docutils_xml writer
if quoteattr is None:
quoteattr = pseudo_quoteattr
parts = [self.tagname]
for name, value in self.attlist():
if value is None: # boolean attribute
parts.append('%s="True"' % name)
continue
if isinstance(value, list):
values = [serial_escape('%s' % (v,)) for v in value]
value = ' '.join(values)
else:
value = unicode(value)
value = quoteattr(value)
parts.append(u'%s=%s' % (name, value))
return u'<%s>' % u' '.join(parts)
def endtag(self):
return '</%s>' % self.tagname
def emptytag(self):
return u'<%s/>' % u' '.join([self.tagname] +
['%s="%s"' % (n, v)
for n, v in self.attlist()])
def __len__(self):
return len(self.children)
def __contains__(self, key):
# support both membership test for children and attributes
# (has_key is translated to "in" by 2to3)
if isinstance(key, basestring):
return key in self.attributes
return key in self.children
def __getitem__(self, key):
if isinstance(key, basestring):
return self.attributes[key]
elif isinstance(key, int):
return self.children[key]
elif isinstance(key, types.SliceType):
assert key.step in (None, 1), 'cannot handle slice with stride'
return self.children[key.start:key.stop]
else:
raise TypeError('element index must be an integer, a slice, or '
'an attribute name string')
def __setitem__(self, key, item):
if isinstance(key, basestring):
self.attributes[str(key)] = item
elif isinstance(key, int):
self.setup_child(item)
self.children[key] = item
elif isinstance(key, types.SliceType):
assert key.step in (None, 1), 'cannot handle slice with stride'
for node in item:
self.setup_child(node)
self.children[key.start:key.stop] = item
else:
raise TypeError('element index must be an integer, a slice, or '
'an attribute name string')
def __delitem__(self, key):
if isinstance(key, basestring):
del self.attributes[key]
elif isinstance(key, int):
del self.children[key]
elif isinstance(key, types.SliceType):
assert key.step in (None, 1), 'cannot handle slice with stride'
del self.children[key.start:key.stop]
else:
raise TypeError('element index must be an integer, a simple '
'slice, or an attribute name string')
def __add__(self, other):
return self.children + other
def __radd__(self, other):
return other + self.children
def __iadd__(self, other):
"""Append a node or a list of nodes to `self.children`."""
if isinstance(other, Node):
self.append(other)
elif other is not None:
self.extend(other)
return self
def astext(self):
return self.child_text_separator.join(
[child.astext() for child in self.children])
def non_default_attributes(self):
atts = {}
for key, value in self.attributes.items():
if self.is_not_default(key):
atts[key] = value
return atts
def attlist(self):
attlist = self.non_default_attributes().items()
attlist.sort()
return attlist
def get(self, key, failobj=None):
return self.attributes.get(key, failobj)
def hasattr(self, attr):
return attr in self.attributes
def delattr(self, attr):
if attr in self.attributes:
del self.attributes[attr]
def setdefault(self, key, failobj=None):
return self.attributes.setdefault(key, failobj)
has_key = hasattr
# support operator ``in``
__contains__ = hasattr
def get_language_code(self, fallback=''):
"""Return node's language tag.
Look iteratively in self and parents for a class argument
starting with ``language-`` and return the remainder of it
        (which should be a `BCP 47` language tag) or the `fallback`.
"""
for cls in self.get('classes', []):
if cls.startswith('language-'):
return cls[9:]
try:
            return self.parent.get_language_code(fallback)
except AttributeError:
return fallback
def append(self, item):
self.setup_child(item)
self.children.append(item)
def extend(self, item):
for node in item:
self.append(node)
def insert(self, index, item):
if isinstance(item, Node):
self.setup_child(item)
self.children.insert(index, item)
elif item is not None:
self[index:index] = item
def pop(self, i=-1):
return self.children.pop(i)
def remove(self, item):
self.children.remove(item)
def index(self, item):
return self.children.index(item)
def is_not_default(self, key):
if self[key] == [] and key in self.list_attributes:
return 0
else:
return 1
def update_basic_atts(self, dict_):
"""
Update basic attributes ('ids', 'names', 'classes',
'dupnames', but not 'source') from node or dictionary `dict_`.
"""
if isinstance(dict_, Node):
dict_ = dict_.attributes
for att in self.basic_attributes:
self.append_attr_list(att, dict_.get(att, []))
def append_attr_list(self, attr, values):
"""
For each element in values, if it does not exist in self[attr], append
it.
NOTE: Requires self[attr] and values to be sequence type and the
former should specifically be a list.
"""
# List Concatenation
for value in values:
if not value in self[attr]:
self[attr].append(value)
def coerce_append_attr_list(self, attr, value):
"""
First, convert both self[attr] and value to a non-string sequence
type; if either is not already a sequence, convert it to a list of one
element. Then call append_attr_list.
NOTE: self[attr] and value both must not be None.
"""
# List Concatenation
if not isinstance(self.get(attr), list):
self[attr] = [self[attr]]
if not isinstance(value, list):
value = [value]
self.append_attr_list(attr, value)
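    # Hedged example: with self['classes'] == 'note' and value == ['tip'],
    # both operands are coerced to lists first, after which self['classes']
    # becomes ['note', 'tip'].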
def replace_attr(self, attr, value, force = True):
"""
If self[attr] does not exist or force is True or omitted, set
self[attr] to value, otherwise do nothing.
"""
# One or the other
if force or self.get(attr) is None:
self[attr] = value
def copy_attr_convert(self, attr, value, replace = True):
"""
If attr is an attribute of self, set self[attr] to
[self[attr], value], otherwise set self[attr] to value.
NOTE: replace is not used by this function and is kept only for
compatibility with the other copy functions.
"""
if self.get(attr) is not value:
self.coerce_append_attr_list(attr, value)
def copy_attr_coerce(self, attr, value, replace):
"""
If attr is an attribute of self and either self[attr] or value is a
list, convert all non-sequence values to a sequence of 1 element and
then concatenate the two sequence, setting the result to self[attr].
If both self[attr] and value are non-sequences and replace is True or
self[attr] is None, replace self[attr] with value. Otherwise, do
nothing.
"""
if self.get(attr) is not value:
if isinstance(self.get(attr), list) or \
isinstance(value, list):
self.coerce_append_attr_list(attr, value)
else:
self.replace_attr(attr, value, replace)
def copy_attr_concatenate(self, attr, value, replace):
"""
If attr is an attribute of self and both self[attr] and value are
lists, concatenate the two sequences, setting the result to
self[attr]. If either self[attr] or value are non-sequences and
replace is True or self[attr] is None, replace self[attr] with value.
Otherwise, do nothing.
"""
if self.get(attr) is not value:
if isinstance(self.get(attr), list) and \
isinstance(value, list):
self.append_attr_list(attr, value)
else:
self.replace_attr(attr, value, replace)
def copy_attr_consistent(self, attr, value, replace):
"""
        If replace is True or self[attr] is None, replace self[attr] with
value. Otherwise, do nothing.
"""
if self.get(attr) is not value:
self.replace_attr(attr, value, replace)
def update_all_atts(self, dict_, update_fun = copy_attr_consistent,
replace = True, and_source = False):
"""
Updates all attributes from node or dictionary `dict_`.
Appends the basic attributes ('ids', 'names', 'classes',
'dupnames', but not 'source') and then, for all other attributes in
dict_, updates the same attribute in self. When attributes with the
same identifier appear in both self and dict_, the two values are
merged based on the value of update_fun. Generally, when replace is
True, the values in self are replaced or merged with the values in
dict_; otherwise, the values in self may be preserved or merged. When
and_source is True, the 'source' attribute is included in the copy.
NOTE: When replace is False, and self contains a 'source' attribute,
'source' is not replaced even when dict_ has a 'source'
attribute, though it may still be merged into a list depending
on the value of update_fun.
        NOTE: It is easier to call the update-specific methods than to pass
              the update_fun method to this function.
"""
if isinstance(dict_, Node):
dict_ = dict_.attributes
# Include the source attribute when copying?
if and_source:
filter_fun = self.is_not_list_attribute
else:
filter_fun = self.is_not_known_attribute
# Copy the basic attributes
self.update_basic_atts(dict_)
        # Copy the remaining attributes from dict_, skipping those excluded
        # by filter_fun (all basic attributes have been copied already)
for att in filter(filter_fun, dict_):
update_fun(self, att, dict_[att], replace)
def update_all_atts_consistantly(self, dict_, replace = True,
and_source = False):
"""
Updates all attributes from node or dictionary `dict_`.
Appends the basic attributes ('ids', 'names', 'classes',
'dupnames', but not 'source') and then, for all other attributes in
dict_, updates the same attribute in self. When attributes with the
same identifier appear in both self and dict_ and replace is True, the
values in self are replaced with the values in dict_; otherwise, the
values in self are preserved. When and_source is True, the 'source'
attribute is included in the copy.
NOTE: When replace is False, and self contains a 'source' attribute,
'source' is not replaced even when dict_ has a 'source'
attribute, though it may still be merged into a list depending
on the value of update_fun.
"""
self.update_all_atts(dict_, Element.copy_attr_consistent, replace,
and_source)
def update_all_atts_concatenating(self, dict_, replace = True,
and_source = False):
"""
Updates all attributes from node or dictionary `dict_`.
Appends the basic attributes ('ids', 'names', 'classes',
'dupnames', but not 'source') and then, for all other attributes in
dict_, updates the same attribute in self. When attributes with the
same identifier appear in both self and dict_ whose values aren't each
lists and replace is True, the values in self are replaced with the
values in dict_; if the values from self and dict_ for the given
identifier are both of list type, then the two lists are concatenated
and the result stored in self; otherwise, the values in self are
preserved. When and_source is True, the 'source' attribute is
included in the copy.
NOTE: When replace is False, and self contains a 'source' attribute,
'source' is not replaced even when dict_ has a 'source'
attribute, though it may still be merged into a list depending
on the value of update_fun.
"""
self.update_all_atts(dict_, Element.copy_attr_concatenate, replace,
and_source)
def update_all_atts_coercion(self, dict_, replace = True,
and_source = False):
"""
Updates all attributes from node or dictionary `dict_`.
Appends the basic attributes ('ids', 'names', 'classes',
'dupnames', but not 'source') and then, for all other attributes in
dict_, updates the same attribute in self. When attributes with the
same identifier appear in both self and dict_ whose values are both
not lists and replace is True, the values in self are replaced with
the values in dict_; if either of the values from self and dict_ for
the given identifier are of list type, then first any non-lists are
converted to 1-element lists and then the two lists are concatenated
and the result stored in self; otherwise, the values in self are
preserved. When and_source is True, the 'source' attribute is
included in the copy.
NOTE: When replace is False, and self contains a 'source' attribute,
'source' is not replaced even when dict_ has a 'source'
attribute, though it may still be merged into a list depending
on the value of update_fun.
"""
self.update_all_atts(dict_, Element.copy_attr_coerce, replace,
and_source)
def update_all_atts_convert(self, dict_, and_source = False):
"""
Updates all attributes from node or dictionary `dict_`.
Appends the basic attributes ('ids', 'names', 'classes',
'dupnames', but not 'source') and then, for all other attributes in
dict_, updates the same attribute in self. When attributes with the
same identifier appear in both self and dict_ then first any non-lists
are converted to 1-element lists and then the two lists are
concatenated and the result stored in self; otherwise, the values in
self are preserved. When and_source is True, the 'source' attribute
is included in the copy.
NOTE: When replace is False, and self contains a 'source' attribute,
'source' is not replaced even when dict_ has a 'source'
attribute, though it may still be merged into a list depending
on the value of update_fun.
"""
self.update_all_atts(dict_, Element.copy_attr_convert,
and_source = and_source)
def clear(self):
self.children = []
def replace(self, old, new):
"""Replace one child `Node` with another child or children."""
index = self.index(old)
if isinstance(new, Node):
self.setup_child(new)
self[index] = new
elif new is not None:
self[index:index+1] = new
def replace_self(self, new):
"""
Replace `self` node with `new`, where `new` is a node or a
list of nodes.
"""
update = new
if not isinstance(new, Node):
# `new` is a list; update first child.
try:
update = new[0]
except IndexError:
update = None
if isinstance(update, Element):
update.update_basic_atts(self)
else:
# `update` is a Text node or `new` is an empty list.
# Assert that we aren't losing any attributes.
for att in self.basic_attributes:
assert not self[att], \
'Losing "%s" attribute: %s' % (att, self[att])
self.parent.replace(self, new)
def first_child_matching_class(self, childclass, start=0, end=sys.maxsize):
"""
Return the index of the first child whose class exactly matches.
Parameters:
- `childclass`: A `Node` subclass to search for, or a tuple of `Node`
classes. If a tuple, any of the classes may match.
- `start`: Initial index to check.
- `end`: Initial index to *not* check.
"""
if not isinstance(childclass, tuple):
childclass = (childclass,)
for index in range(start, min(len(self), end)):
for c in childclass:
if isinstance(self[index], c):
return index
return None
def first_child_not_matching_class(self, childclass, start=0,
end=sys.maxsize):
"""
Return the index of the first child whose class does *not* match.
Parameters:
- `childclass`: A `Node` subclass to skip, or a tuple of `Node`
classes. If a tuple, none of the classes may match.
- `start`: Initial index to check.
- `end`: Initial index to *not* check.
"""
if not isinstance(childclass, tuple):
childclass = (childclass,)
for index in range(start, min(len(self), end)):
for c in childclass:
if isinstance(self.children[index], c):
break
else:
return index
return None
def pformat(self, indent=' ', level=0):
return ''.join(['%s%s\n' % (indent * level, self.starttag())] +
[child.pformat(indent, level+1)
for child in self.children])
def copy(self):
return self.__class__(rawsource=self.rawsource, **self.attributes)
def deepcopy(self):
copy = self.copy()
copy.extend([child.deepcopy() for child in self.children])
return copy
def set_class(self, name):
"""Add a new class to the "classes" attribute."""
warnings.warn('docutils.nodes.Element.set_class deprecated; '
"append to Element['classes'] list attribute directly",
DeprecationWarning, stacklevel=2)
assert ' ' not in name
self['classes'].append(name.lower())
def note_referenced_by(self, name=None, id=None):
"""Note that this Element has been referenced by its name
`name` or id `id`."""
self.referenced = 1
# Element.expect_referenced_by_* dictionaries map names or ids
# to nodes whose ``referenced`` attribute is set to true as
# soon as this node is referenced by the given name or id.
# Needed for target propagation.
by_name = getattr(self, 'expect_referenced_by_name', {}).get(name)
by_id = getattr(self, 'expect_referenced_by_id', {}).get(id)
if by_name:
assert name is not None
by_name.referenced = 1
if by_id:
assert id is not None
by_id.referenced = 1
@classmethod
def is_not_list_attribute(cls, attr):
"""
Returns True if and only if the given attribute is NOT one of the
basic list attributes defined for all Elements.
"""
return attr not in cls.list_attributes
@classmethod
def is_not_known_attribute(cls, attr):
"""
Returns True if and only if the given attribute is NOT recognized by
this class.
"""
return attr not in cls.known_attributes
class TextElement(Element):
"""
An element which directly contains text.
Its children are all `Text` or `Inline` subclass nodes. You can
check whether an element's context is inline simply by checking whether
its immediate parent is a `TextElement` instance (including subclasses).
This is handy for nodes like `image` that can appear both inline and as
standalone body elements.
If passing children to `__init__()`, make sure to set `text` to
``''`` or some other suitable value.
"""
child_text_separator = ''
"""Separator for child nodes, used by `astext()` method."""
def __init__(self, rawsource='', text='', *children, **attributes):
if text != '':
textnode = Text(text)
Element.__init__(self, rawsource, textnode, *children,
**attributes)
else:
Element.__init__(self, rawsource, *children, **attributes)
class FixedTextElement(TextElement):
"""An element which directly contains preformatted text."""
def __init__(self, rawsource='', text='', *children, **attributes):
TextElement.__init__(self, rawsource, text, *children, **attributes)
self.attributes['xml:space'] = 'preserve'
# ========
# Mixins
# ========
class Resolvable:
resolved = 0
class BackLinkable:
def add_backref(self, refid):
self['backrefs'].append(refid)
# ====================
# Element Categories
# ====================
class Root: pass
class Titular: pass
class PreBibliographic:
"""Category of Node which may occur before Bibliographic Nodes."""
class Bibliographic: pass
class Decorative(PreBibliographic): pass
class Structural: pass
class Body: pass
class General(Body): pass
class Sequential(Body):
"""List-like elements."""
class Admonition(Body): pass
class Special(Body):
"""Special internal body elements."""
class Invisible(PreBibliographic):
"""Internal elements that don't appear in output."""
class Part: pass
class Inline: pass
class Referential(Resolvable): pass
class Targetable(Resolvable):
referenced = 0
indirect_reference_name = None
"""Holds the whitespace_normalized_name (contains mixed case) of a target.
Required for MoinMoin/reST compatibility."""
class Labeled:
"""Contains a `label` as its first element."""
# ==============
# Root Element
# ==============
class document(Root, Structural, Element):
"""
The document root element.
Do not instantiate this class directly; use
`docutils.utils.new_document()` instead.
"""
def __init__(self, settings, reporter, *args, **kwargs):
Element.__init__(self, *args, **kwargs)
self.current_source = None
"""Path to or description of the input source being processed."""
self.current_line = None
"""Line number (1-based) of `current_source`."""
self.settings = settings
"""Runtime settings data record."""
self.reporter = reporter
"""System message generator."""
self.indirect_targets = []
"""List of indirect target nodes."""
self.substitution_defs = {}
"""Mapping of substitution names to substitution_definition nodes."""
self.substitution_names = {}
"""Mapping of case-normalized substitution names to case-sensitive
names."""
self.refnames = {}
"""Mapping of names to lists of referencing nodes."""
self.refids = {}
"""Mapping of ids to lists of referencing nodes."""
self.nameids = {}
"""Mapping of names to unique id's."""
self.nametypes = {}
"""Mapping of names to hyperlink type (boolean: True => explicit,
        False => implicit)."""
self.ids = {}
"""Mapping of ids to nodes."""
self.footnote_refs = {}
"""Mapping of footnote labels to lists of footnote_reference nodes."""
self.citation_refs = {}
"""Mapping of citation labels to lists of citation_reference nodes."""
self.autofootnotes = []
"""List of auto-numbered footnote nodes."""
self.autofootnote_refs = []
"""List of auto-numbered footnote_reference nodes."""
self.symbol_footnotes = []
"""List of symbol footnote nodes."""
self.symbol_footnote_refs = []
"""List of symbol footnote_reference nodes."""
self.footnotes = []
"""List of manually-numbered footnote nodes."""
self.citations = []
"""List of citation nodes."""
self.autofootnote_start = 1
"""Initial auto-numbered footnote number."""
self.symbol_footnote_start = 0
"""Initial symbol footnote symbol index."""
self.id_start = 1
"""Initial ID number."""
self.parse_messages = []
"""System messages generated while parsing."""
self.transform_messages = []
"""System messages generated while applying transforms."""
import docutils.transforms
self.transformer = docutils.transforms.Transformer(self)
"""Storage for transforms to be applied to this document."""
self.decoration = None
"""Document's `decoration` node."""
self.document = self
def __getstate__(self):
"""
Return dict with unpicklable references removed.
"""
state = self.__dict__.copy()
state['reporter'] = None
state['transformer'] = None
return state
def asdom(self, dom=None):
"""Return a DOM representation of this document."""
if dom is None:
import xml.dom.minidom as dom
domroot = dom.Document()
domroot.appendChild(self._dom_node(domroot))
return domroot
def set_id(self, node, msgnode=None):
for id in node['ids']:
if id in self.ids and self.ids[id] is not node:
msg = self.reporter.severe('Duplicate ID: "%s".' % id)
if msgnode != None:
msgnode += msg
if not node['ids']:
for name in node['names']:
id = self.settings.id_prefix + make_id(name)
if id and id not in self.ids:
break
else:
id = ''
while not id or id in self.ids:
id = (self.settings.id_prefix +
self.settings.auto_id_prefix + str(self.id_start))
self.id_start += 1
node['ids'].append(id)
self.ids[id] = node
return id
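    # Illustrative sketch (assuming default settings, i.e. an empty id_prefix
    # and auto_id_prefix 'id'): a node with name 'My Title' receives the id
    # 'my-title' via make_id(); a node without usable names falls back to the
    # generated sequence 'id1', 'id2', ...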
def set_name_id_map(self, node, id, msgnode=None, explicit=None):
"""
`self.nameids` maps names to IDs, while `self.nametypes` maps names to
booleans representing hyperlink type (True==explicit,
False==implicit). This method updates the mappings.
The following state transition table shows how `self.nameids` ("ids")
and `self.nametypes` ("types") change with new input (a call to this
method), and what actions are performed ("implicit"-type system
messages are INFO/1, and "explicit"-type system messages are ERROR/3):
==== ===== ======== ======== ======= ==== ===== =====
Old State Input Action New State Notes
----------- -------- ----------------- ----------- -----
ids types new type sys.msg. dupname ids types
==== ===== ======== ======== ======= ==== ===== =====
- - explicit - - new True
- - implicit - - new False
None False explicit - - new True
old False explicit implicit old new True
None True explicit explicit new None True
old True explicit explicit new,old None True [#]_
None False implicit implicit new None False
old False implicit implicit new,old None False
None True implicit implicit new None True
old True implicit implicit new old True
==== ===== ======== ======== ======= ==== ===== =====
.. [#] Do not clear the name-to-id map or invalidate the old target if
both old and new targets are external and refer to identical URIs.
The new target is invalidated regardless.
"""
for name in node['names']:
if name in self.nameids:
self.set_duplicate_name_id(node, id, name, msgnode, explicit)
else:
self.nameids[name] = id
self.nametypes[name] = explicit
def set_duplicate_name_id(self, node, id, name, msgnode, explicit):
old_id = self.nameids[name]
old_explicit = self.nametypes[name]
self.nametypes[name] = old_explicit or explicit
if explicit:
if old_explicit:
level = 2
if old_id is not None:
old_node = self.ids[old_id]
if 'refuri' in node:
refuri = node['refuri']
if old_node['names'] \
and 'refuri' in old_node \
and old_node['refuri'] == refuri:
level = 1 # just inform if refuri's identical
if level > 1:
dupname(old_node, name)
self.nameids[name] = None
msg = self.reporter.system_message(
level, 'Duplicate explicit target name: "%s".' % name,
backrefs=[id], base_node=node)
                    if msgnode is not None:
msgnode += msg
dupname(node, name)
else:
self.nameids[name] = id
if old_id is not None:
old_node = self.ids[old_id]
dupname(old_node, name)
else:
if old_id is not None and not old_explicit:
self.nameids[name] = None
old_node = self.ids[old_id]
dupname(old_node, name)
dupname(node, name)
if not explicit or (not old_explicit and old_id is not None):
msg = self.reporter.info(
'Duplicate implicit target name: "%s".' % name,
backrefs=[id], base_node=node)
            if msgnode is not None:
msgnode += msg
def has_name(self, name):
return name in self.nameids
# "note" here is an imperative verb: "take note of".
def note_implicit_target(self, target, msgnode=None):
id = self.set_id(target, msgnode)
self.set_name_id_map(target, id, msgnode, explicit=None)
def note_explicit_target(self, target, msgnode=None):
id = self.set_id(target, msgnode)
self.set_name_id_map(target, id, msgnode, explicit=True)
def note_refname(self, node):
self.refnames.setdefault(node['refname'], []).append(node)
def note_refid(self, node):
self.refids.setdefault(node['refid'], []).append(node)
def note_indirect_target(self, target):
self.indirect_targets.append(target)
if target['names']:
self.note_refname(target)
def note_anonymous_target(self, target):
self.set_id(target)
def note_autofootnote(self, footnote):
self.set_id(footnote)
self.autofootnotes.append(footnote)
def note_autofootnote_ref(self, ref):
self.set_id(ref)
self.autofootnote_refs.append(ref)
def note_symbol_footnote(self, footnote):
self.set_id(footnote)
self.symbol_footnotes.append(footnote)
def note_symbol_footnote_ref(self, ref):
self.set_id(ref)
self.symbol_footnote_refs.append(ref)
def note_footnote(self, footnote):
self.set_id(footnote)
self.footnotes.append(footnote)
def note_footnote_ref(self, ref):
self.set_id(ref)
self.footnote_refs.setdefault(ref['refname'], []).append(ref)
self.note_refname(ref)
def note_citation(self, citation):
self.citations.append(citation)
def note_citation_ref(self, ref):
self.set_id(ref)
self.citation_refs.setdefault(ref['refname'], []).append(ref)
self.note_refname(ref)
def note_substitution_def(self, subdef, def_name, msgnode=None):
name = whitespace_normalize_name(def_name)
if name in self.substitution_defs:
msg = self.reporter.error(
'Duplicate substitution definition name: "%s".' % name,
base_node=subdef)
            if msgnode is not None:
msgnode += msg
oldnode = self.substitution_defs[name]
dupname(oldnode, name)
# keep only the last definition:
self.substitution_defs[name] = subdef
# case-insensitive mapping:
self.substitution_names[fully_normalize_name(name)] = name
def note_substitution_ref(self, subref, refname):
subref['refname'] = whitespace_normalize_name(refname)
def note_pending(self, pending, priority=None):
self.transformer.add_pending(pending, priority)
def note_parse_message(self, message):
self.parse_messages.append(message)
def note_transform_message(self, message):
self.transform_messages.append(message)
def note_source(self, source, offset):
self.current_source = source
if offset is None:
self.current_line = offset
else:
self.current_line = offset + 1
def copy(self):
return self.__class__(self.settings, self.reporter,
**self.attributes)
def get_decoration(self):
if not self.decoration:
self.decoration = decoration()
index = self.first_child_not_matching_class(Titular)
if index is None:
self.append(self.decoration)
else:
self.insert(index, self.decoration)
return self.decoration
# ================
# Title Elements
# ================
class title(Titular, PreBibliographic, TextElement): pass
class subtitle(Titular, PreBibliographic, TextElement): pass
class rubric(Titular, TextElement): pass
# ========================
# Bibliographic Elements
# ========================
class docinfo(Bibliographic, Element): pass
class author(Bibliographic, TextElement): pass
class authors(Bibliographic, Element): pass
class organization(Bibliographic, TextElement): pass
class address(Bibliographic, FixedTextElement): pass
class contact(Bibliographic, TextElement): pass
class version(Bibliographic, TextElement): pass
class revision(Bibliographic, TextElement): pass
class status(Bibliographic, TextElement): pass
class date(Bibliographic, TextElement): pass
class copyright(Bibliographic, TextElement): pass
# =====================
# Decorative Elements
# =====================
class decoration(Decorative, Element):
def get_header(self):
if not len(self.children) or not isinstance(self.children[0], header):
self.insert(0, header())
return self.children[0]
def get_footer(self):
if not len(self.children) or not isinstance(self.children[-1], footer):
self.append(footer())
return self.children[-1]
class header(Decorative, Element): pass
class footer(Decorative, Element): pass
# =====================
# Structural Elements
# =====================
class section(Structural, Element): pass
class topic(Structural, Element):
"""
Topics are terminal, "leaf" mini-sections, like block quotes with titles,
or textual figures. A topic is just like a section, except that it has no
subsections, and it doesn't have to conform to section placement rules.
Topics are allowed wherever body elements (list, table, etc.) are allowed,
but only at the top level of a section or document. Topics cannot nest
inside topics, sidebars, or body elements; you can't have a topic inside a
table, list, block quote, etc.
"""
class sidebar(Structural, Element):
"""
Sidebars are like miniature, parallel documents that occur inside other
documents, providing related or reference material. A sidebar is
typically offset by a border and "floats" to the side of the page; the
document's main text may flow around it. Sidebars can also be likened to
super-footnotes; their content is outside of the flow of the document's
main text.
Sidebars are allowed wherever body elements (list, table, etc.) are
allowed, but only at the top level of a section or document. Sidebars
cannot nest inside sidebars, topics, or body elements; you can't have a
sidebar inside a table, list, block quote, etc.
"""
class transition(Structural, Element): pass
# ===============
# Body Elements
# ===============
class paragraph(General, TextElement): pass
class compound(General, Element): pass
class container(General, Element): pass
class bullet_list(Sequential, Element): pass
class enumerated_list(Sequential, Element): pass
class list_item(Part, Element): pass
class definition_list(Sequential, Element): pass
class definition_list_item(Part, Element): pass
class term(Part, TextElement): pass
class classifier(Part, TextElement): pass
class definition(Part, Element): pass
class field_list(Sequential, Element): pass
class field(Part, Element): pass
class field_name(Part, TextElement): pass
class field_body(Part, Element): pass
class option(Part, Element):
child_text_separator = ''
class option_argument(Part, TextElement):
def astext(self):
return self.get('delimiter', ' ') + TextElement.astext(self)
class option_group(Part, Element):
child_text_separator = ', '
class option_list(Sequential, Element): pass
class option_list_item(Part, Element):
child_text_separator = ' '
class option_string(Part, TextElement): pass
class description(Part, Element): pass
class literal_block(General, FixedTextElement): pass
class doctest_block(General, FixedTextElement): pass
class math_block(General, FixedTextElement): pass
class line_block(General, Element): pass
class line(Part, TextElement):
indent = None
class block_quote(General, Element): pass
class attribution(Part, TextElement): pass
class attention(Admonition, Element): pass
class caution(Admonition, Element): pass
class danger(Admonition, Element): pass
class error(Admonition, Element): pass
class important(Admonition, Element): pass
class note(Admonition, Element): pass
class tip(Admonition, Element): pass
class hint(Admonition, Element): pass
class warning(Admonition, Element): pass
class admonition(Admonition, Element): pass
class comment(Special, Invisible, FixedTextElement): pass
class substitution_definition(Special, Invisible, TextElement): pass
class target(Special, Invisible, Inline, TextElement, Targetable): pass
class footnote(General, BackLinkable, Element, Labeled, Targetable): pass
class citation(General, BackLinkable, Element, Labeled, Targetable): pass
class label(Part, TextElement): pass
class figure(General, Element): pass
class caption(Part, TextElement): pass
class legend(Part, Element): pass
class table(General, Element): pass
class tgroup(Part, Element): pass
class colspec(Part, Element): pass
class thead(Part, Element): pass
class tbody(Part, Element): pass
class row(Part, Element): pass
class entry(Part, Element): pass
class system_message(Special, BackLinkable, PreBibliographic, Element):
"""
System message element.
Do not instantiate this class directly; use
``document.reporter.info/warning/error/severe()`` instead.
"""
def __init__(self, message=None, *children, **attributes):
if message:
p = paragraph('', message)
children = (p,) + children
try:
Element.__init__(self, '', *children, **attributes)
        except Exception:
print('system_message: children=%r' % (children,))
raise
def astext(self):
line = self.get('line', '')
return u'%s:%s: (%s/%s) %s' % (self['source'], line, self['type'],
self['level'], Element.astext(self))
class pending(Special, Invisible, Element):
"""
The "pending" element is used to encapsulate a pending operation: the
operation (transform), the point at which to apply it, and any data it
requires. Only the pending operation's location within the document is
stored in the public document tree (by the "pending" object itself); the
operation and its data are stored in the "pending" object's internal
instance attributes.
For example, say you want a table of contents in your reStructuredText
document. The easiest way to specify where to put it is from within the
document, with a directive::
.. contents::
But the "contents" directive can't do its work until the entire document
has been parsed and possibly transformed to some extent. So the directive
code leaves a placeholder behind that will trigger the second phase of its
processing, something like this::
<pending ...public attributes...> + internal attributes
Use `document.note_pending()` so that the
`docutils.transforms.Transformer` stage of processing can run all pending
transforms.
"""
def __init__(self, transform, details=None,
rawsource='', *children, **attributes):
Element.__init__(self, rawsource, *children, **attributes)
self.transform = transform
"""The `docutils.transforms.Transform` class implementing the pending
operation."""
self.details = details or {}
"""Detail data (dictionary) required by the pending operation."""
def pformat(self, indent=' ', level=0):
internals = [
'.. internal attributes:',
' .transform: %s.%s' % (self.transform.__module__,
self.transform.__name__),
' .details:']
        details = sorted(self.details.items())
for key, value in details:
if isinstance(value, Node):
internals.append('%7s%s:' % ('', key))
internals.extend(['%9s%s' % ('', line)
for line in value.pformat().splitlines()])
elif value and isinstance(value, list) \
and isinstance(value[0], Node):
internals.append('%7s%s:' % ('', key))
for v in value:
internals.extend(['%9s%s' % ('', line)
for line in v.pformat().splitlines()])
else:
internals.append('%7s%s: %r' % ('', key, value))
return (Element.pformat(self, indent, level)
+ ''.join([(' %s%s\n' % (indent * level, line))
for line in internals]))
def copy(self):
return self.__class__(self.transform, self.details, self.rawsource,
**self.attributes)
class raw(Special, Inline, PreBibliographic, FixedTextElement):
"""
Raw data that is to be passed untouched to the Writer.
"""
pass
# =================
# Inline Elements
# =================
class emphasis(Inline, TextElement): pass
class strong(Inline, TextElement): pass
class literal(Inline, TextElement): pass
class reference(General, Inline, Referential, TextElement): pass
class footnote_reference(Inline, Referential, TextElement): pass
class citation_reference(Inline, Referential, TextElement): pass
class substitution_reference(Inline, TextElement): pass
class title_reference(Inline, TextElement): pass
class abbreviation(Inline, TextElement): pass
class acronym(Inline, TextElement): pass
class superscript(Inline, TextElement): pass
class subscript(Inline, TextElement): pass
class math(Inline, TextElement): pass
class image(General, Inline, Element):
def astext(self):
return self.get('alt', '')
class inline(Inline, TextElement): pass
class problematic(Inline, TextElement): pass
class generated(Inline, TextElement): pass
# ========================================
# Auxiliary Classes, Functions, and Data
# ========================================
node_class_names = """
Text
abbreviation acronym address admonition attention attribution author
authors
block_quote bullet_list
caption caution citation citation_reference classifier colspec comment
compound contact container copyright
danger date decoration definition definition_list definition_list_item
description docinfo doctest_block document
emphasis entry enumerated_list error
field field_body field_list field_name figure footer
footnote footnote_reference
generated
header hint
image important inline
label legend line line_block list_item literal literal_block
math math_block
note
option option_argument option_group option_list option_list_item
option_string organization
paragraph pending problematic
raw reference revision row rubric
section sidebar status strong subscript substitution_definition
substitution_reference subtitle superscript system_message
table target tbody term tgroup thead tip title title_reference topic
transition
version
warning""".split()
"""A list of names of all concrete Node subclasses."""
class NodeVisitor:
"""
"Visitor" pattern [GoF95]_ abstract superclass implementation for
document tree traversals.
Each node class has corresponding methods, doing nothing by
default; override individual methods for specific and useful
behaviour. The `dispatch_visit()` method is called by
`Node.walk()` upon entering a node. `Node.walkabout()` also calls
the `dispatch_departure()` method before exiting a node.
The dispatch methods call "``visit_`` + node class name" or
"``depart_`` + node class name", resp.
This is a base class for visitors whose ``visit_...`` & ``depart_...``
methods should be implemented for *all* node types encountered (such as
for `docutils.writers.Writer` subclasses). Unimplemented methods will
raise exceptions.
For sparse traversals, where only certain node types are of interest,
subclass `SparseNodeVisitor` instead. When (mostly or entirely) uniform
processing is desired, subclass `GenericNodeVisitor`.
.. [GoF95] Gamma, Helm, Johnson, Vlissides. *Design Patterns: Elements of
Reusable Object-Oriented Software*. Addison-Wesley, Reading, MA, USA,
1995.
"""
optional = ()
"""
Tuple containing node class names (as strings).
No exception will be raised if writers do not implement visit
or departure functions for these node classes.
Used to ensure transitional compatibility with existing 3rd-party writers.
"""
def __init__(self, document):
self.document = document
def dispatch_visit(self, node):
"""
Call self."``visit_`` + node class name" with `node` as
parameter. If the ``visit_...`` method does not exist, call
self.unknown_visit.
"""
node_name = node.__class__.__name__
method = getattr(self, 'visit_' + node_name, self.unknown_visit)
self.document.reporter.debug(
'docutils.nodes.NodeVisitor.dispatch_visit calling %s for %s'
% (method.__name__, node_name))
return method(node)
def dispatch_departure(self, node):
"""
Call self."``depart_`` + node class name" with `node` as
parameter. If the ``depart_...`` method does not exist, call
self.unknown_departure.
"""
node_name = node.__class__.__name__
method = getattr(self, 'depart_' + node_name, self.unknown_departure)
self.document.reporter.debug(
'docutils.nodes.NodeVisitor.dispatch_departure calling %s for %s'
% (method.__name__, node_name))
return method(node)
def unknown_visit(self, node):
"""
Called when entering unknown `Node` types.
Raise an exception unless overridden.
"""
if (self.document.settings.strict_visitor
or node.__class__.__name__ not in self.optional):
raise NotImplementedError(
'%s visiting unknown node type: %s'
% (self.__class__, node.__class__.__name__))
def unknown_departure(self, node):
"""
Called before exiting unknown `Node` types.
Raise exception unless overridden.
"""
if (self.document.settings.strict_visitor
or node.__class__.__name__ not in self.optional):
raise NotImplementedError(
'%s departing unknown node type: %s'
% (self.__class__, node.__class__.__name__))
class SparseNodeVisitor(NodeVisitor):
"""
Base class for sparse traversals, where only certain node types are of
interest. When ``visit_...`` & ``depart_...`` methods should be
implemented for *all* node types (such as for `docutils.writers.Writer`
subclasses), subclass `NodeVisitor` instead.
"""
class GenericNodeVisitor(NodeVisitor):
"""
Generic "Visitor" abstract superclass, for simple traversals.
Unless overridden, each ``visit_...`` method calls `default_visit()`, and
each ``depart_...`` method (when using `Node.walkabout()`) calls
`default_departure()`. `default_visit()` (and `default_departure()`) must
be overridden in subclasses.
Define fully generic visitors by overriding `default_visit()` (and
`default_departure()`) only. Define semi-generic visitors by overriding
individual ``visit_...()`` (and ``depart_...()``) methods also.
`NodeVisitor.unknown_visit()` (`NodeVisitor.unknown_departure()`) should
be overridden for default behavior.
"""
def default_visit(self, node):
"""Override for generic, uniform traversals."""
raise NotImplementedError
def default_departure(self, node):
"""Override for generic, uniform traversals."""
raise NotImplementedError
def _call_default_visit(self, node):
self.default_visit(node)
def _call_default_departure(self, node):
self.default_departure(node)
def _nop(self, node):
pass
def _add_node_class_names(names):
"""Save typing with dynamic assignments:"""
for _name in names:
setattr(GenericNodeVisitor, "visit_" + _name, _call_default_visit)
setattr(GenericNodeVisitor, "depart_" + _name, _call_default_departure)
setattr(SparseNodeVisitor, 'visit_' + _name, _nop)
setattr(SparseNodeVisitor, 'depart_' + _name, _nop)
_add_node_class_names(node_class_names)
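# Illustrative sketch (not part of docutils): a minimal sparse visitor that
# counts paragraph nodes.  Usage, assuming `doc` is a parsed `document` node:
#
#     counter = _ExampleParagraphCounter(doc)
#     doc.walk(counter)
#     print(counter.count)
class _ExampleParagraphCounter(SparseNodeVisitor):
    """Hypothetical demo class; not part of the docutils API."""
    def __init__(self, document):
        SparseNodeVisitor.__init__(self, document)
        self.count = 0
    def visit_paragraph(self, node):
        self.count += 1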
class TreeCopyVisitor(GenericNodeVisitor):
"""
Make a complete copy of a tree or branch, including element attributes.
"""
def __init__(self, document):
GenericNodeVisitor.__init__(self, document)
self.parent_stack = []
self.parent = []
def get_tree_copy(self):
return self.parent[0]
def default_visit(self, node):
"""Copy the current node, and make it the new acting parent."""
newnode = node.copy()
self.parent.append(newnode)
self.parent_stack.append(self.parent)
self.parent = newnode
def default_departure(self, node):
"""Restore the previous acting parent."""
self.parent = self.parent_stack.pop()
class TreePruningException(Exception):
"""
Base class for `NodeVisitor`-related tree pruning exceptions.
Raise subclasses from within ``visit_...`` or ``depart_...`` methods
called from `Node.walk()` and `Node.walkabout()` tree traversals to prune
the tree traversed.
"""
pass
class SkipChildren(TreePruningException):
"""
Do not visit any children of the current node. The current node's
siblings and ``depart_...`` method are not affected.
"""
pass
class SkipSiblings(TreePruningException):
"""
Do not visit any more siblings (to the right) of the current node. The
current node's children and its ``depart_...`` method are not affected.
"""
pass
class SkipNode(TreePruningException):
"""
Do not visit the current node's children, and do not call the current
node's ``depart_...`` method.
"""
pass
class SkipDeparture(TreePruningException):
"""
Do not call the current node's ``depart_...`` method. The current node's
children and siblings are not affected.
"""
pass
class NodeFound(TreePruningException):
"""
Raise to indicate that the target of a search has been found. This
exception must be caught by the client; it is not caught by the traversal
code.
"""
pass
class StopTraversal(TreePruningException):
"""
    Stop the traversal altogether. The current node's ``depart_...`` method
    is not affected. The parent nodes' ``depart_...`` methods are also called
as usual. No other nodes are visited. This is an alternative to
NodeFound that does not cause exception handling to trickle up to the
caller.
"""
pass
def make_id(string):
"""
Convert `string` into an identifier and return it.
Docutils identifiers will conform to the regular expression
``[a-z](-?[a-z0-9]+)*``. For CSS compatibility, identifiers (the "class"
and "id" attributes) should have no underscores, colons, or periods.
Hyphens may be used.
- The `HTML 4.01 spec`_ defines identifiers based on SGML tokens:
ID and NAME tokens must begin with a letter ([A-Za-z]) and may be
followed by any number of letters, digits ([0-9]), hyphens ("-"),
underscores ("_"), colons (":"), and periods (".").
- However the `CSS1 spec`_ defines identifiers based on the "name" token,
a tighter interpretation ("flex" tokenizer notation; "latin1" and
"escape" 8-bit characters have been replaced with entities)::
unicode \\[0-9a-f]{1,4}
latin1 [¡-ÿ]
escape {unicode}|\\[ -~¡-ÿ]
nmchar [-a-z0-9]|{latin1}|{escape}
name {nmchar}+
The CSS1 "nmchar" rule does not include underscores ("_"), colons (":"),
or periods ("."), therefore "class" and "id" attributes should not contain
these characters. They should be replaced with hyphens ("-"). Combined
with HTML's requirements (the first character must be a letter; no
"unicode", "latin1", or "escape" characters), this results in the
``[a-z](-?[a-z0-9]+)*`` pattern.
.. _HTML 4.01 spec: http://www.w3.org/TR/html401
.. _CSS1 spec: http://www.w3.org/TR/REC-CSS1
"""
id = string.lower()
if not isinstance(id, unicode):
id = id.decode()
id = id.translate(_non_id_translate_digraphs)
id = id.translate(_non_id_translate)
# get rid of non-ascii characters.
# 'ascii' lowercase to prevent problems with turkish locale.
id = unicodedata.normalize('NFKD', id).\
encode('ascii', 'ignore').decode('ascii')
# shrink runs of whitespace and replace by hyphen
id = _non_id_chars.sub('-', ' '.join(id.split()))
id = _non_id_at_ends.sub('', id)
return str(id)
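# Illustrative examples (not part of docutils) of `make_id` output, following
# the rules documented above:
#
#     make_id('A Title: Subtitle!')      -> 'a-title-subtitle'
#     make_id(u'\xd8resund Bridge')      -> 'oresund-bridge'
#     make_id('123 starts with digits')  -> 'starts-with-digits'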
_non_id_chars = re.compile('[^a-z0-9]+')
_non_id_at_ends = re.compile('^[-0-9]+|-+$')
_non_id_translate = {
0x00f8: u'o', # o with stroke
0x0111: u'd', # d with stroke
0x0127: u'h', # h with stroke
0x0131: u'i', # dotless i
0x0142: u'l', # l with stroke
0x0167: u't', # t with stroke
0x0180: u'b', # b with stroke
0x0183: u'b', # b with topbar
0x0188: u'c', # c with hook
0x018c: u'd', # d with topbar
0x0192: u'f', # f with hook
0x0199: u'k', # k with hook
0x019a: u'l', # l with bar
0x019e: u'n', # n with long right leg
0x01a5: u'p', # p with hook
0x01ab: u't', # t with palatal hook
0x01ad: u't', # t with hook
0x01b4: u'y', # y with hook
0x01b6: u'z', # z with stroke
0x01e5: u'g', # g with stroke
0x0225: u'z', # z with hook
0x0234: u'l', # l with curl
0x0235: u'n', # n with curl
0x0236: u't', # t with curl
0x0237: u'j', # dotless j
0x023c: u'c', # c with stroke
0x023f: u's', # s with swash tail
0x0240: u'z', # z with swash tail
0x0247: u'e', # e with stroke
0x0249: u'j', # j with stroke
0x024b: u'q', # q with hook tail
0x024d: u'r', # r with stroke
0x024f: u'y', # y with stroke
}
_non_id_translate_digraphs = {
0x00df: u'sz', # ligature sz
0x00e6: u'ae', # ae
0x0153: u'oe', # ligature oe
0x0238: u'db', # db digraph
0x0239: u'qp', # qp digraph
}
def dupname(node, name):
node['dupnames'].append(name)
node['names'].remove(name)
    # Assume that this node is referenced, even though it isn't; we
    # don't want to throw unnecessary system_messages.
node.referenced = 1
def fully_normalize_name(name):
"""Return a case- and whitespace-normalized name."""
return ' '.join(name.lower().split())
def whitespace_normalize_name(name):
"""Return a whitespace-normalized name."""
return ' '.join(name.split())
def serial_escape(value):
"""Escape string values that are elements of a list, for serialization."""
return value.replace('\\', r'\\').replace(' ', r'\ ')
def pseudo_quoteattr(value):
"""Quote attributes for pseudo-xml"""
return '"%s"' % value
#
#
# Local Variables:
# indent-tabs-mode: nil
# sentence-end-double-space: t
# fill-column: 78
# End:
|
jmwright/cadquery-freecad-module
|
Libs/docutils/nodes.py
|
Python
|
lgpl-3.0
| 77,350
|
[
"VisIt"
] |
364512a9b611d744996d5126fa9a01d0822675f598c5bf49602081379feea35d
|
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Gibbs sampling inference for (a special case of) STS models.
These methods implement Gibbs sampling steps for STS models that combine a
single LocalLevel or LocalLinearTrend component with a linear regression
component, with conjugate
InverseGamma priors on the scale and a Gaussian prior on the weights. This model
class is somewhat general, in that we assume that any seasonal/holiday variation
can be encoded in the design matrix of the linear regression. The intent is to
support deployment of STS inference in latency-sensitive applications.
This Gibbs sampler tends to reach acceptable answers much more quickly than
fitting the same models by gradient-based methods (VI or HMC). Because it does
not marginalize out the linear Gaussian latents analytically, it may be more
prone to getting stuck at a single (perhaps suboptimal) posterior explanation;
however, in practice it often finds good solutions.
The speed advantage of Gibbs sampling in this model likely arises from a
combination of:
- Analytically sampling the regression weights once per sampling cycle, instead
of requiring a quadratically-expensive update at each timestep of Kalman
filtering (as in DynamicLinearRegression), or relying on gradient-based
approximate inference (as in LinearRegression).
- Exploiting conjugacy to sample the scale parameters directly.
- Specializing the Gibbs step for the latent level to the case of a
scalar process with identity transitions.
It would be possible to expand this sampler to support additional STS models,
potentially at a cost with respect to some of these performance advantages (and
additional code):
- To support general latent state-space models, one would augment the sampler
state to track all parameters in the model. Each component would need to
register Gibbs sampling steps for its parameters (assuming conjugate priors),
as a function of the sampled latent trajectory. The resampling steps for the
observation_noise_scale and level_scale parameters would then be replaced with
a generic loop over all parameters in the model.
- For specific models it may be possible to implement an efficient prior
  sampling algorithm, analogous to `LocalLevelStateSpaceModel._joint_sample_n`.
This may be significantly faster than the generic sampler and can speed up
the posterior sampling step for the latent trajectory.
"""
import collections
import numpy as np
import six
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import bijectors as tfb
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python import sts
from tensorflow_probability.python.distributions import normal_conjugate_posteriors
from tensorflow_probability.python.experimental import distributions as tfde
from tensorflow_probability.python.experimental.sts_gibbs import spike_and_slab
from tensorflow_probability.python.internal import distribution_util as dist_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import prefer_static
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.sts import components as sts_components
from tensorflow_probability.python.sts.internal import util as sts_util
# The sampler state stores current values for each model parameter,
# and auxiliary quantities such as the latent level. It should have the property
# that `model.make_state_space_model(num_timesteps, GibbsSamplerState(...))`
# behaves properly -- i.e., that the state contains all model
# parameters *in the same order* as they are listed in `model.parameters`. This
# is currently enforced by construction in `build_gibbs_fittable_model`.
GibbsSamplerState = collections.namedtuple( # pylint: disable=unexpected-keyword-arg
'GibbsSamplerState',
['observation_noise_scale',
'level_scale',
'weights',
'level',
'seed',
'slope_scale',
'slope',])
# Make the two slope-related quantities optional, for backwards compatibility.
GibbsSamplerState.__new__.__defaults__ = (0., # slope_scale
0.) # slope
# TODO(b/151571025): revert to `tfd.InverseGamma` once its sampler is XLA-able.
class XLACompilableInverseGamma(tfd.InverseGamma):
def _sample_n(self, n, seed=None):
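    # If G ~ Gamma(concentration=a, rate=b), then 1 / G ~ InverseGamma(a, scale=b),
    # so taking the reciprocal of a Gamma draw yields an exact InverseGamma sample
    # while avoiding the non-XLA-compilable InverseGamma sampler.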
return 1. / tfd.Gamma(
concentration=self.concentration,
rate=self.scale).sample(n, seed=seed)
class DummySpikeAndSlabPrior(tfd.Distribution):
"""Dummy prior on sparse regression weights."""
def __init__(self):
super().__init__(
dtype=tf.float32,
reparameterization_type=tfd.FULLY_REPARAMETERIZED,
validate_args=False,
allow_nan_stats=True,
name='dummy_spike_and_slab_prior')
@property
def event_shape(self):
# Present as a vector-valued distribution.
return tf.TensorShape([1])
def _parameter_control_dependencies(self, is_init):
if not is_init:
raise ValueError(
'Cannot explicitly operate on a spike-and-slab prior; '
'only Gibbs sampling is supported.')
return []
def _default_event_space_bijector(self):
return tfb.Identity()
class SpikeAndSlabSparseLinearRegression(sts_components.LinearRegression):
"""Dummy component for sparse regression with a spike-and-slab prior."""
def __init__(self,
design_matrix,
weights_prior=None,
sparse_weights_nonzero_prob=0.5,
name=None):
# Extract precision matrix from a multivariate normal prior.
weights_prior_precision = None
if hasattr(weights_prior, 'precision'):
weights_prior_precision = weights_prior.precision()
elif weights_prior is not None:
inverse_scale = weights_prior.scale.inverse()
weights_prior_precision = inverse_scale.matmul(inverse_scale,
adjoint=True).to_dense()
self._weights_prior_precision = weights_prior_precision
self._sparse_weights_nonzero_prob = sparse_weights_nonzero_prob
super().__init__(design_matrix=design_matrix,
weights_prior=DummySpikeAndSlabPrior(),
name=name)
def _tile_normal_to_mvn_diag(normal_dist, dim):
return tfd.MultivariateNormalDiag(
loc=normal_dist.loc[..., tf.newaxis],
scale_diag=(normal_dist.scale[..., tf.newaxis] *
tf.ones([dim], dtype=normal_dist.scale.dtype)))
def _is_multivariate_normal(dist):
return (isinstance(dist, tfd.MultivariateNormalLinearOperator) or
isinstance(dist,
tfde.MultivariateNormalPrecisionFactorLinearOperator))
def build_model_for_gibbs_fitting(observed_time_series,
design_matrix,
weights_prior,
level_variance_prior,
observation_noise_variance_prior,
slope_variance_prior=None,
sparse_weights_nonzero_prob=None):
"""Builds a StructuralTimeSeries model instance that supports Gibbs sampling.
  To support Gibbs sampling, a model must have conjugate priors on all
scale and weight parameters, and must be constructed so that
`model.parameters` matches the parameters and ordering specified by the
`GibbsSamplerState` namedtuple. Currently, this includes (only) models
consisting of the sum of a LocalLevel or LocalLinearTrend component with
a LinearRegression or SpikeAndSlabSparseLinearRegression component.
Args:
    observed_time_series: optional `float` `Tensor` of shape `[..., T, 1]`
(omitting the trailing unit dimension is also supported when `T > 1`),
specifying an observed time series. May optionally be an instance of
`tfp.sts.MaskedTimeSeries`, which includes a mask `Tensor` to specify
timesteps with missing observations.
design_matrix: float `Tensor` of shape `concat([batch_shape,
[num_timesteps, num_features]])`. This may also optionally be
an instance of `tf.linalg.LinearOperator`.
weights_prior: Optional distribution instance specifying a normal prior on
weights. This may be a multivariate normal instance with event shape
`[num_features]`, or a scalar normal distribution with event shape `[]`.
In either case, the batch shape must broadcast to the batch shape of
`observed_time_series`. If a `sparse_weights_nonzero_prob` is specified,
requesting sparse regression, then the `weights_prior` mean is ignored
(because nonzero means are not currently implemented by the spike-and-slab
sampler). In this case, `weights_prior=None` is also valid, and will use
the default prior of the spike-and-slab sampler.
level_variance_prior: An instance of `tfd.InverseGamma` representing a prior
on the level variance (`level_scale**2`) of a local level model. May have
batch shape broadcastable to the batch shape of `observed_time_series`.
observation_noise_variance_prior: An instance of `tfd.InverseGamma`
representing a prior on the observation noise variance (
`observation_noise_scale**2`). May have batch shape broadcastable to the
batch shape of `observed_time_series`.
slope_variance_prior: Optional instance of `tfd.InverseGamma` representing
a prior on slope variance (`slope_scale**2`) of a local linear trend
model. May have batch shape broadcastable to the batch shape of
`observed_time_series`. If specified, a local linear trend model is used
rather than a local level model.
Default value: `None`.
sparse_weights_nonzero_prob: Optional scalar float `Tensor` prior
probability that any given feature has nonzero weight. If specified, this
triggers a sparse regression with a spike-and-slab prior, where
`sparse_weights_nonzero_prob` is the prior probability of the 'slab'
component.
Default value: `None`.
Returns:
model: A `tfp.sts.StructuralTimeSeries` model instance.
"""
if isinstance(weights_prior, tfd.Normal):
# Canonicalize scalar normal priors as diagonal MVNs.
if isinstance(design_matrix, tf.linalg.LinearOperator):
num_features = design_matrix.shape_tensor()[-1]
else:
num_features = tf.shape(design_matrix)[-1]
weights_prior = _tile_normal_to_mvn_diag(weights_prior, num_features)
elif weights_prior is not None and not _is_multivariate_normal(weights_prior):
raise ValueError('Weights prior must be a normal distribution or `None`.')
if not isinstance(level_variance_prior, tfd.InverseGamma):
raise ValueError(
'Level variance prior must be an inverse gamma distribution.')
if (slope_variance_prior is not None and
not isinstance(slope_variance_prior, tfd.InverseGamma)):
raise ValueError(
'Slope variance prior must be an inverse gamma distribution; got: {}.'
.format(slope_variance_prior))
if not isinstance(observation_noise_variance_prior, tfd.InverseGamma):
raise ValueError('Observation noise variance prior must be an inverse '
'gamma distribution.')
sqrt = tfb.Invert(tfb.Square()) # Converts variance priors to scale priors.
# Level or trend component.
if slope_variance_prior:
local_variation = sts.LocalLinearTrend(
observed_time_series=observed_time_series,
level_scale_prior=sqrt(level_variance_prior),
slope_scale_prior=sqrt(slope_variance_prior),
name='local_linear_trend')
else:
local_variation = sts.LocalLevel(
observed_time_series=observed_time_series,
level_scale_prior=sqrt(level_variance_prior),
name='local_level')
# Regression component.
if sparse_weights_nonzero_prob is not None:
regression = SpikeAndSlabSparseLinearRegression(
design_matrix=design_matrix,
weights_prior=weights_prior,
sparse_weights_nonzero_prob=sparse_weights_nonzero_prob,
name='sparse_regression')
else:
regression = sts.LinearRegression(design_matrix=design_matrix,
weights_prior=weights_prior,
name='regression')
model = sts.Sum([local_variation, regression],
observed_time_series=observed_time_series,
observation_noise_scale_prior=sqrt(
observation_noise_variance_prior),
# The Gibbs sampling steps in this file do not account for an
# offset to the observed series. Instead, we assume the
# observed series has already been centered and
# scale-normalized.
constant_offset=0.)
model.supports_gibbs_sampling = True
return model
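# Illustrative sketch (not part of this module): a typical call to
# `build_model_for_gibbs_fitting`. The helper name and prior parameters below
# are arbitrary placeholders, not recommendations.
def _example_build_gibbs_model(observed_time_series, design_matrix):
  """Hypothetical helper constructing a Gibbs-fittable model."""
  return build_model_for_gibbs_fitting(
      observed_time_series=observed_time_series,
      design_matrix=design_matrix,
      weights_prior=tfd.Normal(loc=0., scale=1.),  # Broadcast over features.
      level_variance_prior=tfd.InverseGamma(concentration=0.01, scale=0.01),
      observation_noise_variance_prior=tfd.InverseGamma(
          concentration=0.01, scale=0.01))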
def _get_design_matrix(model):
"""Returns the design matrix for an STS model with a regression component."""
design_matrices = [component.design_matrix for component in model.components
if hasattr(component, 'design_matrix')]
if not design_matrices:
raise ValueError('Model does not contain a regression component.')
if len(design_matrices) > 1:
raise ValueError('Model contains multiple regression components.')
return design_matrices[0]
def fit_with_gibbs_sampling(model,
observed_time_series,
num_chains=(),
num_results=2000,
num_warmup_steps=200,
initial_state=None,
seed=None):
"""Fits parameters for an STS model using Gibbs sampling.
Args:
    model: A `tfp.sts.StructuralTimeSeries` model instance returned by
`build_model_for_gibbs_fitting`.
    observed_time_series: `float` `Tensor` of shape `[..., T, 1]`
(omitting the trailing unit dimension is also supported when `T > 1`),
specifying an observed time series. May optionally be an instance of
`tfp.sts.MaskedTimeSeries`, which includes a mask `Tensor` to specify
timesteps with missing observations.
num_chains: Optional int to indicate the number of parallel MCMC chains.
      Defaults to an empty tuple, which samples a single chain.
    num_results: Optional int to indicate number of MCMC samples.
    num_warmup_steps: Optional int to indicate number of warmup steps to
      discard before collecting `num_results` samples.
initial_state: A `GibbsSamplerState` structure of the initial states of the
MCMC chains.
seed: Optional `Python` `int` seed controlling the sampled values.
Returns:
    samples: A `GibbsSamplerState` structure of posterior samples.
"""
if not hasattr(model, 'supports_gibbs_sampling'):
raise ValueError('This STS model does not support Gibbs sampling. Models '
'for Gibbs sampling must be created using the '
'method `build_model_for_gibbs_fitting`.')
if not tf.nest.is_nested(num_chains):
num_chains = [num_chains]
[
observed_time_series,
is_missing
] = sts_util.canonicalize_observed_time_series_with_mask(
observed_time_series)
dtype = observed_time_series.dtype
# The canonicalized time series always has trailing dimension `1`,
# because although LinearGaussianSSMs support vector observations, STS models
# describe scalar time series only. For our purposes it'll be cleaner to
# remove this dimension.
observed_time_series = observed_time_series[..., 0]
batch_shape = prefer_static.concat(
[num_chains,
prefer_static.shape(observed_time_series)[:-1]], axis=-1)
level_slope_shape = prefer_static.concat(
[num_chains, prefer_static.shape(observed_time_series)], axis=-1)
# Treat a LocalLevel model as the special case of LocalLinearTrend where
# the slope_scale is always zero.
initial_slope_scale = 0.
initial_slope = 0.
if isinstance(model.components[0], sts.LocalLinearTrend):
initial_slope_scale = 1. * tf.ones(batch_shape, dtype=dtype)
initial_slope = tf.zeros(level_slope_shape, dtype=dtype)
if initial_state is None:
initial_state = GibbsSamplerState(
observation_noise_scale=tf.ones(batch_shape, dtype=dtype),
level_scale=tf.ones(batch_shape, dtype=dtype),
slope_scale=initial_slope_scale,
weights=tf.zeros(prefer_static.concat([
batch_shape,
_get_design_matrix(model).shape[-1:]], axis=0), dtype=dtype),
level=tf.zeros(level_slope_shape, dtype=dtype),
slope=initial_slope,
seed=None) # Set below.
if isinstance(seed, six.integer_types):
tf.random.set_seed(seed)
# Always use the passed-in `seed` arg, ignoring any seed in the initial state.
initial_state = initial_state._replace(
seed=samplers.sanitize_seed(seed, salt='initial_GibbsSamplerState'))
sampler_loop_body = _build_sampler_loop_body(model,
observed_time_series,
is_missing)
samples = tf.scan(sampler_loop_body,
np.arange(num_warmup_steps + num_results),
initial_state)
return tf.nest.map_structure(lambda x: x[num_warmup_steps:], samples)
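# Illustrative sketch (not part of this module): drawing posterior samples from
# a model built by `build_model_for_gibbs_fitting`. The helper name, sample
# counts, and seed are arbitrary placeholders.
def _example_fit(model, observed_time_series):
  """Hypothetical helper running Gibbs sampling and returning samples."""
  samples = fit_with_gibbs_sampling(
      model, observed_time_series,
      num_results=500, num_warmup_steps=100, seed=0)
  # Each field of `samples` (a `GibbsSamplerState`) has a leading dimension of
  # size `num_results`, e.g. `samples.weights` is `[500, ..., num_features]`.
  return samples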
def one_step_predictive(model,
posterior_samples,
num_forecast_steps=0,
original_mean=0.,
original_scale=1.,
thin_every=10):
"""Constructs a one-step-ahead predictive distribution at every timestep.
Unlike the generic `tfp.sts.one_step_predictive`, this method uses the
latent levels from Gibbs sampling to efficiently construct a predictive
distribution that mixes over posterior samples. The predictive distribution
may also include additional forecast steps.
This method returns the predictive distributions for each timestep given
previous timesteps and sampled model parameters, `p(observed_time_series[t] |
observed_time_series[:t], weights, observation_noise_scale)`. Note that the
posterior values of the weights and noise scale will in general be informed
by observations from all timesteps *including the step being predicted*, so
this is not a strictly kosher probabilistic quantity, but in general we assume
that it's close, i.e., that the step being predicted had very small individual
impact on the overall parameter posterior.
Args:
    model: A `tfp.sts.StructuralTimeSeries` model instance. This must be of the
      form constructed by `build_model_for_gibbs_fitting`.
posterior_samples: A `GibbsSamplerState` instance in which each element is a
`Tensor` with initial dimension of size `num_samples`.
num_forecast_steps: Python `int` number of additional forecast steps to
append.
Default value: `0`.
original_mean: Optional scalar float `Tensor`, added to the predictive
distribution to undo the effect of input normalization.
Default value: `0.`
original_scale: Optional scalar float `Tensor`, used to rescale the
predictive distribution to undo the effect of input normalization.
Default value: `1.`
thin_every: Optional Python `int` factor by which to thin the posterior
samples, to reduce complexity of the predictive distribution. For example,
if `thin_every=10`, every `10`th sample will be used.
Default value: `10`.
Returns:
predictive_dist: A `tfd.MixtureSameFamily` instance of event shape
`[num_timesteps + num_forecast_steps]` representing the predictive
distribution of each timestep given previous timesteps.
"""
dtype = dtype_util.common_dtype([
posterior_samples.level_scale,
posterior_samples.observation_noise_scale,
posterior_samples.level,
original_mean,
original_scale], dtype_hint=tf.float32)
num_observed_steps = prefer_static.shape(posterior_samples.level)[-1]
original_mean = tf.convert_to_tensor(original_mean, dtype=dtype)
original_scale = tf.convert_to_tensor(original_scale, dtype=dtype)
thinned_samples = tf.nest.map_structure(lambda x: x[::thin_every],
posterior_samples)
if prefer_static.rank_from_shape( # If no slope was inferred, treat as zero.
prefer_static.shape(thinned_samples.slope)) <= 1:
thinned_samples = thinned_samples._replace(
slope=tf.zeros_like(thinned_samples.level),
slope_scale=tf.zeros_like(thinned_samples.level_scale))
num_steps_from_last_observation = tf.concat([
tf.ones([num_observed_steps], dtype=dtype),
tf.range(1, num_forecast_steps + 1, dtype=dtype)], axis=0)
# The local linear trend model expects that the level at step t + 1 is equal
  # to the level at step t, plus the slope at time t,
# plus transition noise of scale 'level_scale' (which we account for below).
if num_forecast_steps > 0:
num_batch_dims = prefer_static.rank_from_shape(
prefer_static.shape(thinned_samples.level)) - 2
# All else equal, the current level will remain stationary.
forecast_level = tf.tile(thinned_samples.level[..., -1:],
tf.concat([tf.ones([num_batch_dims + 1],
dtype=tf.int32),
[num_forecast_steps]], axis=0))
# If the model includes slope, the level will steadily increase.
forecast_level += (thinned_samples.slope[..., -1:] *
tf.range(1., num_forecast_steps + 1.,
dtype=forecast_level.dtype))
level_pred = tf.concat([thinned_samples.level[..., :1], # t == 0
(thinned_samples.level[..., :-1] +
thinned_samples.slope[..., :-1]) # 1 <= t < T
] + (
[forecast_level]
if num_forecast_steps > 0 else []),
axis=-1)
design_matrix = _get_design_matrix(
model).to_dense()[:num_observed_steps + num_forecast_steps]
regression_effect = tf.linalg.matvec(design_matrix, thinned_samples.weights)
y_mean = ((level_pred + regression_effect) *
original_scale[..., tf.newaxis] + original_mean[..., tf.newaxis])
# To derive a forecast variance, including slope uncertainty, let
# `r[:k]` be iid Gaussian RVs with variance `level_scale**2` and `s[:k]` be
# iid Gaussian RVs with variance `slope_scale**2`. Then the forecast level at
# step `T + k` can be written as
# (level[T] + # Last known level.
# r[0] + ... + r[k] + # Sum of random walk terms on level.
# slope[T] * k # Contribution from last known slope.
# (k - 1) * s[0] + # Contributions from random walk terms on slope.
# (k - 2) * s[1] +
# ... +
# 1 * s[k - 1])
# which has variance of
# (level_scale**2 * k +
# slope_scale**2 * ( (k - 1)**2 +
# (k - 2)**2 +
# ... + 1 ))
# Here the `slope_scale` coefficient is the `k - 1`th square pyramidal
# number [1], which is given by
# (k - 1) * k * (2 * k - 1) / 6.
#
# [1] https://en.wikipedia.org/wiki/Square_pyramidal_number
variance_from_level = (thinned_samples.level_scale[..., tf.newaxis]**2 *
num_steps_from_last_observation)
variance_from_slope = thinned_samples.slope_scale[..., tf.newaxis]**2 * (
(num_steps_from_last_observation - 1) *
num_steps_from_last_observation *
(2 * num_steps_from_last_observation - 1)) / 6.
y_scale = (original_scale * tf.sqrt(
thinned_samples.observation_noise_scale[..., tf.newaxis]**2 +
variance_from_level + variance_from_slope))
num_posterior_draws = prefer_static.shape(y_mean)[0]
return tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(
logits=tf.zeros([num_posterior_draws], dtype=y_mean.dtype)),
components_distribution=tfd.Normal(
loc=dist_util.move_dimension(y_mean, 0, -1),
scale=dist_util.move_dimension(y_scale, 0, -1)))
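# Quick sanity check (illustrative, not part of this module) of the square
# pyramidal coefficient used in the variance derivation above:
# 1**2 + 2**2 + ... + (k - 1)**2 == (k - 1) * k * (2 * k - 1) / 6.
def _example_check_square_pyramidal(k):
  """Hypothetical helper verifying the closed form used for slope variance."""
  brute_force = sum(j**2 for j in range(1, k))
  closed_form = (k - 1) * k * (2 * k - 1) // 6
  return brute_force == closed_form  # True for any integer k >= 1.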
def _resample_weights(design_matrix, target_residuals, observation_noise_scale,
weights_prior_scale, seed=None):
"""Samples regression weights from their conditional posterior.
This assumes a conjugate normal regression model,
```
weights ~ Normal(loc=0., covariance_matrix=weights_prior_scale**2 * I)
target_residuals ~ Normal(loc=matvec(design_matrix, weights),
covariance_matrix=observation_noise_scale**2 * I)
```
and returns a sample from `p(weights | target_residuals,
observation_noise_scale, design_matrix)`.
Args:
design_matrix: Float `Tensor` design matrix of shape
`[..., num_timesteps, num_features]`.
    target_residuals: Float `Tensor` of shape `[..., num_observations]`.
observation_noise_scale: Scalar float `Tensor` (with optional batch shape)
standard deviation of the iid observation noise.
weights_prior_scale: Instance of `tf.linalg.LinearOperator` of shape
`[num_features, num_features]` (with optional batch shape),
specifying the scale of a multivariate Normal prior on regression
weights.
seed: Optional `Python` `int` seed controlling the sampled values.
Returns:
weights: Float `Tensor` of shape `[..., num_features]`, sampled from
the conditional posterior `p(weights | target_residuals,
observation_noise_scale, weights_prior_scale)`.
"""
weights_mean, weights_prec = (
normal_conjugate_posteriors.mvn_conjugate_linear_update(
linear_transformation=design_matrix,
observation=target_residuals,
prior_scale=weights_prior_scale,
likelihood_scale=tf.linalg.LinearOperatorScaledIdentity(
num_rows=prefer_static.shape(design_matrix)[-2],
multiplier=observation_noise_scale)))
sampled_weights = weights_prec.cholesky().solvevec(
samplers.normal(
shape=prefer_static.shape(weights_mean),
dtype=design_matrix.dtype, seed=seed), adjoint=True)
return weights_mean + sampled_weights
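# Illustrative NumPy sketch (not part of this module) of the conjugate update
# performed above, specialized to an isotropic prior
# Normal(0, prior_scale**2 * I):
#   posterior_precision = I / prior_scale**2 + X^T X / noise_scale**2
#   posterior_mean = posterior_precision^{-1} X^T y / noise_scale**2
def _example_weights_posterior_mean(X, y, prior_scale, noise_scale):
  """Hypothetical helper computing the posterior mean of regression weights."""
  num_features = X.shape[-1]
  precision = (np.eye(num_features) / prior_scale**2 +
               X.T.dot(X) / noise_scale**2)
  return np.linalg.solve(precision, X.T.dot(y) / noise_scale**2)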
def _resample_latents(observed_residuals,
level_scale,
observation_noise_scale,
initial_state_prior,
slope_scale=None,
is_missing=None,
sample_shape=(),
seed=None):
"""Uses Durbin-Koopman sampling to resample the latent level and slope.
Durbin-Koopman sampling [1] is an efficient algorithm to sample from the
posterior latents of a linear Gaussian state space model. This method
implements the algorithm.
[1] Durbin, J. and Koopman, S.J. (2002) A simple and efficient simulation
smoother for state space time series analysis.
Args:
observed_residuals: Float `Tensor` of shape `[..., num_observations]`,
specifying the centered observations `(x - loc)`.
level_scale: Float scalar `Tensor` (may contain batch dimensions)
specifying the standard deviation of the level random walk steps.
observation_noise_scale: Float scalar `Tensor` (may contain batch
dimensions) specifying the standard deviation of the observation noise.
initial_state_prior: instance of `tfd.MultivariateNormalLinearOperator`.
slope_scale: Optional float scalar `Tensor` (may contain batch dimensions)
specifying the standard deviation of slope random walk steps. If
provided, a `LocalLinearTrend` model is used, otherwise, a `LocalLevel`
model is used.
is_missing: Optional `bool` `Tensor` missingness mask.
sample_shape: Optional `int` `Tensor` shape of samples to draw.
seed: `int` `Tensor` of shape `[2]` controlling stateless sampling.
Returns:
latents: Float `Tensor` resampled latent level, of shape
`[..., num_timesteps, latent_size]`, where `...` concatenates the
sample shape with any batch shape from `observed_time_series`.
"""
num_timesteps = prefer_static.shape(observed_residuals)[-1]
if slope_scale is None:
ssm = sts.LocalLevelStateSpaceModel(
num_timesteps=num_timesteps,
initial_state_prior=initial_state_prior,
observation_noise_scale=observation_noise_scale,
level_scale=level_scale)
else:
ssm = sts.LocalLinearTrendStateSpaceModel(
num_timesteps=num_timesteps,
initial_state_prior=initial_state_prior,
observation_noise_scale=observation_noise_scale,
level_scale=level_scale,
slope_scale=slope_scale)
return ssm.posterior_sample(observed_residuals[..., tf.newaxis],
sample_shape=sample_shape,
mask=is_missing,
seed=seed)
def _resample_scale(prior, observed_residuals,
is_missing=None,
seed=None):
"""Samples a scale parameter from its conditional posterior.
We assume the conjugate InverseGamma->Normal model:
```
scale ~ Sqrt(InverseGamma(prior.concentration, prior.scale))
for i in [1, ..., num_observations]:
x[i] ~ Normal(loc, scale)
```
in which `loc` is known, and return a sample from `p(scale | x)`.
Args:
prior: Prior distribution as a `tfd.InverseGamma` instance.
observed_residuals: Float `Tensor` of shape `[..., num_observations]`,
specifying the centered observations `(x - loc)`.
is_missing: Optional `bool` `Tensor` of shape `[..., num_observations]`. A
`True` value indicates that the corresponding observation is missing.
seed: Optional `Python` `int` seed controlling the sampled value.
Returns:
sampled_scale: A `Tensor` sample from the posterior `p(scale | x)`.
"""
if is_missing is not None:
num_missing = tf.reduce_sum(tf.cast(is_missing, observed_residuals.dtype),
axis=-1)
num_observations = prefer_static.shape(observed_residuals)[-1]
if is_missing is not None:
observed_residuals = tf.where(is_missing,
tf.zeros_like(observed_residuals),
observed_residuals)
num_observations -= num_missing
variance_posterior = type(prior)(
concentration=prior.concentration + num_observations / 2.,
scale=prior.scale + tf.reduce_sum(
tf.square(observed_residuals), axis=-1) / 2.)
new_scale = tf.sqrt(variance_posterior.sample(seed=seed))
# Support truncated priors.
if hasattr(prior, 'upper_bound') and prior.upper_bound is not None:
new_scale = tf.minimum(new_scale, prior.upper_bound)
return new_scale
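# Illustrative sketch (not part of this module): the conjugate InverseGamma
# update performed above, written out for plain NumPy inputs. Given zero-mean
# residuals r[0:n], the variance posterior is
#   InverseGamma(concentration + n / 2, scale + sum(r**2) / 2),
# and the sampled scale is the square root of a draw from that posterior.
def _example_scale_posterior_params(prior_concentration, prior_scale, residuals):
  """Hypothetical helper returning the posterior InverseGamma parameters."""
  n = np.size(residuals)
  return (prior_concentration + n / 2.,
          prior_scale + np.sum(np.square(residuals)) / 2.)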
def _build_sampler_loop_body(model,
observed_time_series,
is_missing=None):
"""Builds a Gibbs sampler for the given model and observed data.
Args:
    model: A `tfp.sts.StructuralTimeSeries` model instance. This must be of the
      form constructed by `build_model_for_gibbs_fitting`.
observed_time_series: Float `Tensor` time series of shape
`[..., num_timesteps]`.
is_missing: Optional `bool` `Tensor` of shape `[..., num_timesteps]`. A
`True` value indicates that the observation for that timestep is missing.
Returns:
sampler_loop_body: Python callable that performs a single cycle of Gibbs
sampling. Its first argument is a `GibbsSamplerState`, and it returns a
new `GibbsSamplerState`. The second argument (passed by `tf.scan`) is
ignored.
"""
level_component = model.components[0]
if not (isinstance(level_component, sts.LocalLevel) or
isinstance(level_component, sts.LocalLinearTrend)):
raise ValueError('Expected the first model component to be an instance of '
'`tfp.sts.LocalLevel` or `tfp.sts.LocalLinearTrend`; '
'instead saw {}'.format(level_component))
model_has_slope = isinstance(level_component, sts.LocalLinearTrend)
regression_component = model.components[1]
if not (isinstance(regression_component, sts.LinearRegression) or
isinstance(regression_component, SpikeAndSlabSparseLinearRegression)):
raise ValueError('Expected the second model component to be an instance of '
'`tfp.sts.LinearRegression` or '
'`SpikeAndSlabSparseLinearRegression`; '
'instead saw {}'.format(regression_component))
model_has_spike_slab_regression = isinstance(
regression_component, SpikeAndSlabSparseLinearRegression)
if is_missing is not None: # Ensure series does not contain NaNs.
observed_time_series = tf.where(is_missing,
tf.zeros_like(observed_time_series),
observed_time_series)
num_observed_steps = prefer_static.shape(observed_time_series)[-1]
design_matrix = _get_design_matrix(model).to_dense()[:num_observed_steps]
if is_missing is not None:
# Replace design matrix with zeros at unobserved timesteps. This ensures
# they will not affect the posterior on weights.
design_matrix = tf.where(is_missing[..., tf.newaxis],
tf.zeros_like(design_matrix),
design_matrix)
  # Untransform scale priors -> variance priors by reaching through the Sqrt bijector.
observation_noise_param = model.parameters[0]
if 'observation_noise' not in observation_noise_param.name:
raise ValueError('Model parameters {} do not match the expected sampler '
'state.'.format(model.parameters))
observation_noise_variance_prior = observation_noise_param.prior.distribution
if model_has_slope:
level_scale_variance_prior, slope_scale_variance_prior = [
p.prior.distribution for p in level_component.parameters]
else:
level_scale_variance_prior = (
level_component.parameters[0].prior.distribution)
if model_has_spike_slab_regression:
spike_and_slab_sampler = spike_and_slab.SpikeSlabSampler(
design_matrix,
weights_prior_precision=regression_component._weights_prior_precision, # pylint: disable=protected-access
nonzero_prior_prob=regression_component._sparse_weights_nonzero_prob, # pylint: disable=protected-access
observation_noise_variance_prior_concentration=(
observation_noise_variance_prior.concentration),
observation_noise_variance_prior_scale=(
observation_noise_variance_prior.scale),
observation_noise_variance_upper_bound=(
observation_noise_variance_prior.upper_bound
if hasattr(observation_noise_variance_prior, 'upper_bound')
else None))
else:
weights_prior_scale = (
regression_component.parameters[0].prior.scale)
def sampler_loop_body(previous_sample, _):
"""Runs one sampler iteration, resampling all model variables."""
(weights_seed,
level_seed,
observation_noise_scale_seed,
level_scale_seed,
loop_seed) = samplers.split_seed(
previous_sample.seed, n=5, salt='sampler_loop_body')
# Preserve backward-compatible seed behavior by splitting slope separately.
slope_scale_seed, = samplers.split_seed(
previous_sample.seed, n=1, salt='sampler_loop_body_slope')
# We encourage a reasonable initialization by sampling the weights first,
# so at the first step they are regressed directly against the observed
# time series. If we instead sampled the level first it might 'explain away'
# some observed variation that we would ultimately prefer to explain through
# the regression weights, because the level can represent arbitrary
# variation, while the weights are limited to representing variation in the
# subspace given by the design matrix.
if model_has_spike_slab_regression:
(observation_noise_variance,
weights) = spike_and_slab_sampler.sample_noise_variance_and_weights(
initial_nonzeros=tf.not_equal(previous_sample.weights, 0.),
targets=observed_time_series - previous_sample.level,
seed=weights_seed)
observation_noise_scale = tf.sqrt(observation_noise_variance)
else:
weights = _resample_weights(
design_matrix=design_matrix,
target_residuals=observed_time_series - previous_sample.level,
observation_noise_scale=previous_sample.observation_noise_scale,
weights_prior_scale=weights_prior_scale,
seed=weights_seed)
# Noise scale will be resampled below.
observation_noise_scale = previous_sample.observation_noise_scale
regression_residuals = observed_time_series - tf.linalg.matvec(
design_matrix, weights)
latents = _resample_latents(
observed_residuals=regression_residuals,
level_scale=previous_sample.level_scale,
slope_scale=previous_sample.slope_scale if model_has_slope else None,
observation_noise_scale=observation_noise_scale,
initial_state_prior=level_component.initial_state_prior,
is_missing=is_missing,
seed=level_seed)
level = latents[..., 0]
level_residuals = level[..., 1:] - level[..., :-1]
if model_has_slope:
slope = latents[..., 1]
level_residuals -= slope[..., :-1]
slope_residuals = slope[..., 1:] - slope[..., :-1]
# Estimate level scale from the empirical changes in level.
level_scale = _resample_scale(
prior=level_scale_variance_prior,
observed_residuals=level_residuals,
is_missing=None,
seed=level_scale_seed)
if model_has_slope:
slope_scale = _resample_scale(
prior=slope_scale_variance_prior,
observed_residuals=slope_residuals,
is_missing=None,
seed=slope_scale_seed)
if not model_has_spike_slab_regression:
# Estimate noise scale from the residuals.
observation_noise_scale = _resample_scale(
prior=observation_noise_variance_prior,
observed_residuals=regression_residuals - level,
is_missing=is_missing,
seed=observation_noise_scale_seed)
return GibbsSamplerState(
observation_noise_scale=observation_noise_scale,
level_scale=level_scale,
slope_scale=(slope_scale if model_has_slope
else previous_sample.slope_scale),
weights=weights,
level=level,
slope=(slope if model_has_slope
else previous_sample.slope),
seed=loop_seed)
return sampler_loop_body
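# A minimal usage sketch (added for illustration; not part of the original
# file). The builder name and the initial-state construction below are
# assumptions -- only the pairing of a loop body with signature
# (previous_sample, _) and a scan-style driver is implied by the code above:
#
#   loop_body = build_sampler_loop_body(model, observed_time_series)
#   initial_state = GibbsSamplerState(...)  # hypothetical initialization
#   samples = tf.scan(loop_body,
#                     elems=tf.range(num_results),
#                     initializer=initial_state)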
|
tensorflow/probability
|
tensorflow_probability/python/experimental/sts_gibbs/gibbs_sampler.py
|
Python
|
apache-2.0
| 39,186
|
[
"Gaussian"
] |
108909f65619070077de296717f3118ae3684c7a8dacded2a5fa03eb93b003a3
|
# univariate linear regression
import numpy as np
# http://blog.csdn.net/anneqiqi/article/details/64125186
import matplotlib.pyplot as plt
# %matplotlib inline
'''
# test
r1 = np.linspace(0, 10, 1000)
s1 = np.random.normal(0, 0.1, 1000) # Gaussian distribution
s2 = 100 * s1
#print("r1:", r1)
#print("s1:", s1)
plt.hist(s1, 30, normed=True)
plt.scatter(r1, s2, c='b', marker='x', linewidths=1)
plt.show()
'''
xspan = 10
yspan = 5
pt_nr = 20
# generate samples
x = np.random.uniform(0, xspan, pt_nr) # random 20 points from 0 to 10
s = np.random.normal(0, 0.4, pt_nr) # random 20 points with Gaussian distribution
y = 0.3 * x + 1.2 + s
'''
x: [ 1.66037434 1.95266279 2.8147086 8.89097838 4.8200674 8.90089403
9.83820364 9.18638708 2.59446442 9.49083537 3.50936275 4.97584871
1.45376931 8.75816922 0.43968531 0.43335477 8.04606206 0.91245808
6.99357023 7.30005788]
s: [ 0.43712712 0.49887417 -0.29299843 -0.79883757 0.06786659 -0.38046738
0.58698775 0.09069122 -0.40379319 -0.18667535 0.03894096 1.14068685
-0.44386485 -0.05862218 -0.55119885 0.15874211 -0.32966037 0.54605339
0.56806316 -0.57677176]
y: [ 2.13523942 2.28467301 1.75141415 3.06845594 2.71388681 3.48980083
4.73844885 4.04660735 1.57454613 3.86057526 2.29174979 3.83344147
1.19226595 3.76882859 0.78070674 1.48874854 3.28415825 2.01979081
3.86613423 2.81324561]
print("x:", x)
print("s:", s)
print("y:", y)
'''
plt.scatter(x, y, c='r', marker='x', linewidths=1)
plt.xlim(0, xspan)
plt.ylim(0, yspan)
plt.xlabel('x axis')
plt.ylabel('y axis')
# optimization
x1 = x.reshape([pt_nr, -1])
y1 = y.reshape([pt_nr, -1])
X = np.hstack([x1, np.ones((x1.shape[0], 1))]) # shape: (20, 2)
print("x1:", x1)
print("y1:", y1)
print("X:", X)
def compute_cost(X, y, w):
cost = np.sum(np.square(X.dot(w) - y)) / (2 * len(y))
return cost
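# Batch gradient descent on J(w); the update inside the loop implements
#   w <- w - (lr / m) * X^T (X w - y)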
def gradient_descent(X, y, lr=0.01, iter_nr=500):
m = len(y)
w = np.zeros((2, 1)) # 2 * 1
hist = np.zeros(iter_nr)
for it in np.arange(iter_nr):
w = w - lr * X.T.dot(X.dot(w) - y) / m
        hist[it] = compute_cost(X, y, w)
return (w, hist)
w, hist = gradient_descent(X, y1)
# w: [[ 0.33080648]
# [ 1.09871028]]
print("w:", w)
#print("hist:", hist)
# plot
x_ = np.linspace(0, xspan)
y_ = w[0] * x_ + w[1]
plt.scatter(X[:,0], y1, c='r', marker='x', linewidths=1, label='Data')
plt.plot(x_, y_, label='Linear regression (gradient descent)')
# Analytical solution for comparison
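# Normal equation (closed-form least squares): w = (X^T X)^{-1} X^T y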
w_a = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(y)
plt.plot(x_, w_a[0] * x_ + w_a[1], label='Linear regression(normal equation)')
plt.xlim(0, xspan)
plt.ylim(0, yspan)
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.show()
# plot the convergence curve
plt.plot(hist, label='Convergence')
plt.ylabel('cost')
plt.xlabel('step')
plt.show()
|
yejingfu/samples
|
ml/lession1/linearregression.py
|
Python
|
mit
| 2,833
|
[
"Gaussian"
] |
71252bea28978f5c9889dca501489959e5947346e5f8f5960c910a56d216da7b
|
#!/usr/bin/env python
"""
This file demonstrates drawing graphs using circular vertices via
vtkRenderedGraphRepresentation.
"""
from vtk import *
#------------------------------------------------------------------------------
# Script Entry Point
#------------------------------------------------------------------------------
if __name__ == "__main__":
# Create a random graph
source = vtkRandomGraphSource()
source.SetNumberOfVertices(15)
source.SetStartWithTree(True)
#----------------------------------------------------------
# Draw the graph in a window
theme = vtkViewTheme.CreateMellowTheme()
theme.SetLineWidth(4)
theme.SetPointSize(15)
theme.SetCellOpacity(1)
theme.FastDelete()
# Rendered graph representation to make vertices circles
rep = vtkRenderedGraphRepresentation()
rep.SetInputConnection(0, source.GetOutputPort())
    # vtkGraphToGlyphs::CIRCLE == 7
rep.SetGlyphType(7)
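    # If the vtkGraphToGlyphs enum is exposed in this build, the named
    # constant avoids the magic number (an assumption, not verified here):
    #   rep.SetGlyphType(vtkGraphToGlyphs.CIRCLE)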
# View containing the merged graph
view = vtkGraphLayoutView()
view.SetRepresentation( rep )
view.SetVertexLabelArrayName("vertex id")
view.SetVertexLabelVisibility(True)
view.SetLayoutStrategyToSimple2D()
view.ApplyViewTheme(theme)
view.GetRenderWindow().SetSize(600, 600)
view.ResetCamera()
view.Render()
view.GetInteractor().Start()
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Examples/Infovis/Python/circular_vertices.py
|
Python
|
bsd-3-clause
| 1,383
|
[
"VTK"
] |
e7efe3f6895eda12164f1df5c1e07759565f1afc3f732703cbf1d3abd7071da7
|
# -*- coding: utf-8 -*-
"""
Unit tests for instructor.api methods.
"""
import datetime
import functools
import io
import json
import random
import shutil
import tempfile
import ddt
import pytest
from boto.exception import BotoServerError
from django.conf import settings
from django.contrib.auth.models import User
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.urls import reverse as django_reverse
from django.http import HttpRequest, HttpResponse
from django.test import RequestFactory, TestCase
from django.test.utils import override_settings
from pytz import UTC
from django.utils.translation import ugettext as _
from mock import Mock, NonCallableMock, patch
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import UsageKey
from six import text_type
import lms.djangoapps.instructor.views.api
import lms.djangoapps.instructor_task.api
from bulk_email.models import BulkEmailFlag, CourseEmail, CourseEmailTemplate
from lms.djangoapps.certificates.models import CertificateStatuses
from lms.djangoapps.certificates.tests.factories import GeneratedCertificateFactory
from course_modes.models import CourseMode
from course_modes.tests.factories import CourseModeFactory
from courseware.models import StudentFieldOverride, StudentModule
from courseware.tests.factories import (
BetaTesterFactory,
GlobalStaffFactory,
InstructorFactory,
StaffFactory,
UserProfileFactory
)
from courseware.tests.helpers import LoginEnrollmentTestCase
from django_comment_common.models import FORUM_ROLE_COMMUNITY_TA
from django_comment_common.utils import seed_permissions_roles
from lms.djangoapps.instructor.tests.utils import FakeContentTask, FakeEmail, FakeEmailInfo
from lms.djangoapps.instructor.views.api import (
_split_input_list,
common_exceptions_400,
generate_unique_password,
require_finance_admin
)
from lms.djangoapps.instructor_task.api_helper import (
AlreadyRunningError,
QueueConnectionError,
generate_already_running_error_message
)
from openedx.core.djangoapps.course_groups.cohorts import set_course_cohorted
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.site_configuration.tests.mixins import SiteMixin
from openedx.core.lib.tests import attr
from openedx.core.lib.xblock_utils import grade_histogram
from shoppingcart.models import (
Coupon,
CouponRedemption,
CourseRegistrationCode,
CourseRegistrationCodeInvoiceItem,
Invoice,
InvoiceTransaction,
Order,
PaidCourseRegistration,
RegistrationCodeRedemption
)
from shoppingcart.pdf import PDFInvoice
from student.models import (
ALLOWEDTOENROLL_TO_ENROLLED,
ALLOWEDTOENROLL_TO_UNENROLLED,
ENROLLED_TO_ENROLLED,
ENROLLED_TO_UNENROLLED,
UNENROLLED_TO_ALLOWEDTOENROLL,
UNENROLLED_TO_ENROLLED,
UNENROLLED_TO_UNENROLLED,
CourseEnrollment,
CourseEnrollmentAllowed,
ManualEnrollmentAudit,
NonExistentCourseError,
get_retired_email_by_email,
get_retired_username_by_username
)
from student.roles import CourseBetaTesterRole, CourseFinanceAdminRole, CourseInstructorRole, CourseSalesAdminRole
from student.tests.factories import AdminFactory, UserFactory
from xmodule.fields import Date
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from .test_tools import msk_from_problem_urlname
DATE_FIELD = Date()
EXPECTED_CSV_HEADER = (
'"code","redeem_code_url","course_id","company_name","created_by","redeemed_by","invoice_id","purchaser",'
'"customer_reference_number","internal_reference"'
)
EXPECTED_COUPON_CSV_HEADER = '"Coupon Code","Course Id","% Discount","Description","Expiration Date",' \
'"Is Active","Code Redeemed Count","Total Discounted Seats","Total Discounted Amount"'
# ddt data for test cases involving reports
REPORTS_DATA = (
{
'report_type': 'grade',
'instructor_api_endpoint': 'calculate_grades_csv',
'task_api_endpoint': 'lms.djangoapps.instructor_task.api.submit_calculate_grades_csv',
'extra_instructor_api_kwargs': {}
},
{
'report_type': 'enrolled learner profile',
'instructor_api_endpoint': 'get_students_features',
'task_api_endpoint': 'lms.djangoapps.instructor_task.api.submit_calculate_students_features_csv',
'extra_instructor_api_kwargs': {'csv': '/csv'}
},
{
'report_type': 'detailed enrollment',
'instructor_api_endpoint': 'get_enrollment_report',
'task_api_endpoint': 'lms.djangoapps.instructor_task.api.submit_detailed_enrollment_features_csv',
'extra_instructor_api_kwargs': {}
},
{
'report_type': 'enrollment',
'instructor_api_endpoint': 'get_students_who_may_enroll',
'task_api_endpoint': 'lms.djangoapps.instructor_task.api.submit_calculate_may_enroll_csv',
'extra_instructor_api_kwargs': {},
},
{
'report_type': 'proctored exam results',
'instructor_api_endpoint': 'get_proctored_exam_results',
'task_api_endpoint': 'lms.djangoapps.instructor_task.api.submit_proctored_exam_results_report',
'extra_instructor_api_kwargs': {},
},
{
'report_type': 'problem responses',
'instructor_api_endpoint': 'get_problem_responses',
'task_api_endpoint': 'lms.djangoapps.instructor_task.api.submit_calculate_problem_responses_csv',
'extra_instructor_api_kwargs': {},
}
)
# ddt data for test cases involving executive summary report
EXECUTIVE_SUMMARY_DATA = (
{
'report_type': 'executive summary',
'task_type': 'exec_summary_report',
'instructor_api_endpoint': 'get_exec_summary_report',
'task_api_endpoint': 'lms.djangoapps.instructor_task.api.submit_executive_summary_report',
'extra_instructor_api_kwargs': {}
},
)
INSTRUCTOR_GET_ENDPOINTS = set([
'get_anon_ids',
'get_coupon_codes',
'get_issued_certificates',
'get_sale_order_records',
'get_sale_records',
])
INSTRUCTOR_POST_ENDPOINTS = set([
'active_registration_codes',
'add_users_to_cohorts',
'bulk_beta_modify_access',
'calculate_grades_csv',
'change_due_date',
'export_ora2_data',
'generate_registration_codes',
'get_enrollment_report',
'get_exec_summary_report',
'get_grading_config',
'get_problem_responses',
'get_proctored_exam_results',
'get_registration_codes',
'get_student_progress_url',
'get_students_features',
'get_students_who_may_enroll',
'get_user_invoice_preference',
'list_background_email_tasks',
'list_course_role_members',
'list_email_content',
'list_entrance_exam_instructor_tasks',
'list_financial_report_downloads',
'list_forum_members',
'list_instructor_tasks',
'list_report_downloads',
'mark_student_can_skip_entrance_exam',
'modify_access',
'register_and_enroll_students',
'rescore_entrance_exam',
'rescore_problem',
'reset_due_date',
'reset_student_attempts',
'reset_student_attempts_for_entrance_exam',
'sale_validation',
'show_student_extensions',
'show_unit_extensions',
'send_email',
'spent_registration_codes',
'students_update_enrollment',
'update_forum_role_membership',
'override_problem_score',
])
def reverse(endpoint, args=None, kwargs=None, is_dashboard_endpoint=True):
"""
Simple wrapper of Django's reverse that first ensures that we have declared
each endpoint under test.
Arguments:
args: The args to be passed through to reverse.
endpoint: The endpoint to be passed through to reverse.
kwargs: The kwargs to be passed through to reverse.
is_dashboard_endpoint: True if this is an instructor dashboard endpoint
that must be declared in the INSTRUCTOR_GET_ENDPOINTS or
            INSTRUCTOR_POST_ENDPOINTS sets, or false otherwise.
Returns:
The return of Django's reverse function
"""
is_endpoint_declared = endpoint in INSTRUCTOR_GET_ENDPOINTS or endpoint in INSTRUCTOR_POST_ENDPOINTS
if is_dashboard_endpoint and is_endpoint_declared is False:
# Verify that all endpoints are declared so we can ensure they are
# properly validated elsewhere.
raise ValueError("The endpoint {} must be declared in ENDPOINTS before use.".format(endpoint))
return django_reverse(endpoint, args=args, kwargs=kwargs)
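# Illustrative only (hypothetical endpoint name): calling
#   reverse('not_a_declared_endpoint', kwargs={'course_id': 'course-v1:a+b+c'})
# raises ValueError because the endpoint is absent from both declared sets.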
@common_exceptions_400
def view_success(request): # pylint: disable=unused-argument
"A dummy view for testing that returns a simple HTTP response"
return HttpResponse('success')
@common_exceptions_400
def view_user_doesnotexist(request): # pylint: disable=unused-argument
"A dummy view that raises a User.DoesNotExist exception"
raise User.DoesNotExist()
@common_exceptions_400
def view_alreadyrunningerror(request): # pylint: disable=unused-argument
"A dummy view that raises an AlreadyRunningError exception"
raise AlreadyRunningError()
@common_exceptions_400
def view_alreadyrunningerror_unicode(request): # pylint: disable=unused-argument
"""
A dummy view that raises an AlreadyRunningError exception with unicode message
"""
raise AlreadyRunningError(u'Text with unicode chárácters')
@common_exceptions_400
def view_queue_connection_error(request): # pylint: disable=unused-argument
"""
A dummy view that raises a QueueConnectionError exception.
"""
raise QueueConnectionError()
@attr(shard=5)
@ddt.ddt
class TestCommonExceptions400(TestCase):
"""
Testing the common_exceptions_400 decorator.
"""
def setUp(self):
super(TestCommonExceptions400, self).setUp()
self.request = Mock(spec=HttpRequest)
self.request.META = {}
def test_happy_path(self):
resp = view_success(self.request)
self.assertEqual(resp.status_code, 200)
def test_user_doesnotexist(self):
self.request.is_ajax.return_value = False
resp = view_user_doesnotexist(self.request) # pylint: disable=assignment-from-no-return
self.assertEqual(resp.status_code, 400)
self.assertIn("User does not exist", resp.content)
def test_user_doesnotexist_ajax(self):
self.request.is_ajax.return_value = True
resp = view_user_doesnotexist(self.request) # pylint: disable=assignment-from-no-return
self.assertEqual(resp.status_code, 400)
self.assertIn("User does not exist", resp.content)
@ddt.data(True, False)
def test_alreadyrunningerror(self, is_ajax):
self.request.is_ajax.return_value = is_ajax
resp = view_alreadyrunningerror(self.request) # pylint: disable=assignment-from-no-return
self.assertEqual(resp.status_code, 400)
self.assertIn("Requested task is already running", resp.content)
@ddt.data(True, False)
def test_alreadyrunningerror_with_unicode(self, is_ajax):
self.request.is_ajax.return_value = is_ajax
resp = view_alreadyrunningerror_unicode(self.request) # pylint: disable=assignment-from-no-return
self.assertEqual(resp.status_code, 400)
self.assertIn('Text with unicode chárácters', resp.content)
@ddt.data(True, False)
def test_queue_connection_error(self, is_ajax):
"""
Tests that QueueConnectionError exception is handled in common_exception_400.
"""
self.request.is_ajax.return_value = is_ajax
resp = view_queue_connection_error(self.request) # pylint: disable=assignment-from-no-return
self.assertEqual(resp.status_code, 400)
self.assertIn('Error occured. Please try again later', resp.content)
@attr(shard=5)
@ddt.ddt
class TestEndpointHttpMethods(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Ensure that users can make GET requests against endpoints that allow GET,
and not against those that don't allow GET.
"""
@classmethod
def setUpClass(cls):
"""
Set up test course.
"""
super(TestEndpointHttpMethods, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
"""
Set up global staff role so authorization will not fail.
"""
super(TestEndpointHttpMethods, self).setUp()
global_user = GlobalStaffFactory()
self.client.login(username=global_user.username, password='test')
@ddt.data(*INSTRUCTOR_POST_ENDPOINTS)
def test_endpoints_reject_get(self, data):
"""
Tests that POST endpoints are rejected with 405 when using GET.
"""
url = reverse(data, kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url)
self.assertEqual(
response.status_code, 405,
"Endpoint {} returned status code {} instead of a 405. It should not allow GET.".format(
data, response.status_code
)
)
@ddt.data(*INSTRUCTOR_GET_ENDPOINTS)
def test_endpoints_accept_get(self, data):
"""
Tests that GET endpoints are not rejected with 405 when using GET.
"""
url = reverse(data, kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url)
self.assertNotEqual(
response.status_code, 405,
"Endpoint {} returned status code 405 where it shouldn't, since it should allow GET.".format(
data
)
)
@attr(shard=5)
@patch('bulk_email.models.html_to_text', Mock(return_value='Mocking CourseEmail.text_message', autospec=True))
class TestInstructorAPIDenyLevels(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Ensure that users cannot access endpoints they shouldn't be able to.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPIDenyLevels, cls).setUpClass()
cls.course = CourseFactory.create()
cls.chapter = ItemFactory.create(
parent=cls.course,
category='chapter',
display_name="Chapter",
publish_item=True,
start=datetime.datetime(2018, 3, 10, tzinfo=UTC),
)
cls.sequential = ItemFactory.create(
parent=cls.chapter,
category='sequential',
display_name="Lesson",
publish_item=True,
start=datetime.datetime(2018, 3, 10, tzinfo=UTC),
metadata={'graded': True, 'format': 'Homework'},
)
cls.vertical = ItemFactory.create(
parent=cls.sequential,
category='vertical',
display_name='Subsection',
publish_item=True,
start=datetime.datetime(2018, 3, 10, tzinfo=UTC),
)
cls.problem = ItemFactory.create(
category="problem",
parent=cls.vertical,
display_name="A Problem Block",
weight=1,
publish_item=True,
)
cls.problem_urlname = text_type(cls.problem.location)
BulkEmailFlag.objects.create(enabled=True, require_course_email_auth=False)
@classmethod
def tearDownClass(cls):
super(TestInstructorAPIDenyLevels, cls).tearDownClass()
BulkEmailFlag.objects.all().delete()
def setUp(self):
super(TestInstructorAPIDenyLevels, self).setUp()
self.user = UserFactory.create()
CourseEnrollment.enroll(self.user, self.course.id)
_module = StudentModule.objects.create(
student=self.user,
course_id=self.course.id,
module_state_key=self.problem.location,
state=json.dumps({'attempts': 10}),
)
# Endpoints that only Staff or Instructors can access
self.staff_level_endpoints = [
('students_update_enrollment',
{'identifiers': 'foo@example.org', 'action': 'enroll'}),
('get_grading_config', {}),
('get_students_features', {}),
('get_student_progress_url', {'unique_student_identifier': self.user.username}),
('update_forum_role_membership',
{'unique_student_identifier': self.user.email, 'rolename': 'Moderator', 'action': 'allow'}),
('list_forum_members', {'rolename': FORUM_ROLE_COMMUNITY_TA}),
('send_email', {'send_to': '["staff"]', 'subject': 'test', 'message': 'asdf'}),
('list_instructor_tasks', {}),
('list_background_email_tasks', {}),
('list_report_downloads', {}),
('list_financial_report_downloads', {}),
('calculate_grades_csv', {}),
('get_students_features', {}),
('get_enrollment_report', {}),
('get_students_who_may_enroll', {}),
('get_exec_summary_report', {}),
('get_proctored_exam_results', {}),
('get_problem_responses', {}),
('export_ora2_data', {}),
('rescore_problem',
{'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email}),
('override_problem_score',
{'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email, 'score': 0}),
('reset_student_attempts',
{'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email}),
(
'reset_student_attempts',
{
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.user.email,
'delete_module': True
}
),
]
# Endpoints that only Instructors can access
self.instructor_level_endpoints = [
('bulk_beta_modify_access', {'identifiers': 'foo@example.org', 'action': 'add'}),
('modify_access', {'unique_student_identifier': self.user.email, 'rolename': 'beta', 'action': 'allow'}),
('list_course_role_members', {'rolename': 'beta'}),
('rescore_problem', {'problem_to_reset': self.problem_urlname, 'all_students': True}),
('reset_student_attempts', {'problem_to_reset': self.problem_urlname, 'all_students': True}),
]
def _access_endpoint(self, endpoint, args, status_code, msg):
"""
Asserts that accessing the given `endpoint` gets a response of `status_code`.
endpoint: string, endpoint for instructor dash API
args: dict, kwargs for `reverse` call
status_code: expected HTTP status code response
msg: message to display if assertion fails.
"""
url = reverse(endpoint, kwargs={'course_id': text_type(self.course.id)})
if endpoint in INSTRUCTOR_GET_ENDPOINTS:
response = self.client.get(url, args)
else:
response = self.client.post(url, args)
self.assertEqual(
response.status_code,
status_code,
msg=msg
)
def test_student_level(self):
"""
Ensure that an enrolled student can't access staff or instructor endpoints.
"""
self.client.login(username=self.user.username, password='test')
for endpoint, args in self.staff_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Student should not be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Student should not be allowed to access endpoint " + endpoint
)
def _access_problem_responses_endpoint(self, msg):
"""
Access endpoint for problem responses report, ensuring that
UsageKey.from_string returns a problem key that the endpoint
can work with.
msg: message to display if assertion fails.
"""
mock_problem_key = NonCallableMock(return_value=u'')
mock_problem_key.course_key = self.course.id
with patch.object(UsageKey, 'from_string') as patched_method:
patched_method.return_value = mock_problem_key
self._access_endpoint('get_problem_responses', {}, 200, msg)
def test_staff_level(self):
"""
Ensure that a staff member can't access instructor endpoints.
"""
staff_member = StaffFactory(course_key=self.course.id)
CourseEnrollment.enroll(staff_member, self.course.id)
CourseFinanceAdminRole(self.course.id).add_users(staff_member)
self.client.login(username=staff_member.username, password='test')
# Try to promote to forums admin - not working
# update_forum_role(self.course.id, staff_member, FORUM_ROLE_ADMINISTRATOR, 'allow')
for endpoint, args in self.staff_level_endpoints:
expected_status = 200
# TODO: make these work
if endpoint in ['update_forum_role_membership', 'list_forum_members']:
continue
elif endpoint == 'get_problem_responses':
self._access_problem_responses_endpoint(
"Staff member should be allowed to access endpoint " + endpoint
)
continue
self._access_endpoint(
endpoint,
args,
expected_status,
"Staff member should be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Staff member should not be allowed to access endpoint " + endpoint
)
def test_instructor_level(self):
"""
Ensure that an instructor member can access all endpoints.
"""
inst = InstructorFactory(course_key=self.course.id)
CourseEnrollment.enroll(inst, self.course.id)
CourseFinanceAdminRole(self.course.id).add_users(inst)
self.client.login(username=inst.username, password='test')
for endpoint, args in self.staff_level_endpoints:
expected_status = 200
# TODO: make these work
if endpoint in ['update_forum_role_membership']:
continue
elif endpoint == 'get_problem_responses':
self._access_problem_responses_endpoint(
"Instructor should be allowed to access endpoint " + endpoint
)
continue
self._access_endpoint(
endpoint,
args,
expected_status,
"Instructor should be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
expected_status = 200
self._access_endpoint(
endpoint,
args,
expected_status,
"Instructor should be allowed to access endpoint " + endpoint
)
@attr(shard=5)
@patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': True})
class TestInstructorAPIBulkAccountCreationAndEnrollment(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test Bulk account creation and enrollment from csv file
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPIBulkAccountCreationAndEnrollment, cls).setUpClass()
cls.course = CourseFactory.create()
# Create a course with mode 'audit'
cls.audit_course = CourseFactory.create()
CourseModeFactory.create(course_id=cls.audit_course.id, mode_slug=CourseMode.AUDIT)
cls.url = reverse(
'register_and_enroll_students', kwargs={'course_id': unicode(cls.course.id)}
)
cls.audit_course_url = reverse(
'register_and_enroll_students', kwargs={'course_id': unicode(cls.audit_course.id)}
)
def setUp(self):
super(TestInstructorAPIBulkAccountCreationAndEnrollment, self).setUp()
# Create a course with mode 'honor' and with price
self.white_label_course = CourseFactory.create()
self.white_label_course_mode = CourseModeFactory.create(
course_id=self.white_label_course.id,
mode_slug=CourseMode.HONOR,
min_price=10,
suggested_prices='10',
)
self.white_label_course_url = reverse(
'register_and_enroll_students', kwargs={'course_id': unicode(self.white_label_course.id)}
)
self.request = RequestFactory().request()
self.instructor = InstructorFactory(course_key=self.course.id)
self.audit_course_instructor = InstructorFactory(course_key=self.audit_course.id)
self.white_label_course_instructor = InstructorFactory(course_key=self.white_label_course.id)
self.client.login(username=self.instructor.username, password='test')
self.not_enrolled_student = UserFactory(
username='NotEnrolledStudent',
email='nonenrolled@test.com',
first_name='NotEnrolled',
last_name='Student'
)
@patch('lms.djangoapps.instructor.views.api.log.info')
def test_account_creation_and_enrollment_with_csv(self, info_log):
"""
Happy path test to create a single new user
"""
csv_content = "test_student@example.com,test_student_1,tester1,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
        # test the log for the email sent to the newly created user.
info_log.assert_called_with('email sent to new created user at %s', 'test_student@example.com')
@patch('lms.djangoapps.instructor.views.api.log.info')
def test_account_creation_and_enrollment_with_csv_with_blank_lines(self, info_log):
"""
        Happy path test to create a single new user from a CSV that contains blank lines
"""
csv_content = "\ntest_student@example.com,test_student_1,tester1,USA\n\n"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
        # test the log for the email sent to the newly created user.
info_log.assert_called_with('email sent to new created user at %s', 'test_student@example.com')
@patch('lms.djangoapps.instructor.views.api.log.info')
def test_email_and_username_already_exist(self, info_log):
"""
        If the email address and username already exist
        and the user is already enrolled in the course, do nothing (no email is sent out)
"""
csv_content = "test_student@example.com,test_student_1,tester1,USA\n" \
"test_student@example.com,test_student_1,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
        # test the log for the email sent to the newly created user.
info_log.assert_called_with(
u"user already exists with username '%s' and email '%s'",
'test_student_1',
'test_student@example.com'
)
def test_file_upload_type_not_csv(self):
"""
Try uploading some non-CSV file and verify that it is rejected
"""
uploaded_file = SimpleUploadedFile("temp.jpg", io.BytesIO(b"some initial binary data: \x00\x01").read())
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['general_errors']), 0)
self.assertEquals(data['general_errors'][0]['response'], 'Make sure that the file you upload is in CSV format with no extraneous characters or rows.')
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
def test_bad_file_upload_type(self):
"""
        Try uploading a file with a CSV extension but unreadable binary content and verify that it is rejected
"""
uploaded_file = SimpleUploadedFile("temp.csv", io.BytesIO(b"some initial binary data: \x00\x01").read())
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['general_errors']), 0)
self.assertEquals(data['general_errors'][0]['response'], 'Could not read uploaded file.')
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
def test_insufficient_data(self):
"""
Try uploading a CSV file which does not have the exact four columns of data
"""
csv_content = "test_student@example.com,test_student_1\n"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 1)
self.assertEquals(data['general_errors'][0]['response'], 'Data in row #1 must have exactly four columns: email, username, full name, and country')
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
def test_invalid_email_in_csv(self):
"""
Test failure case of a poorly formatted email field
"""
csv_content = "test_student.example.com,test_student_1,tester1,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
data = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertNotEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
self.assertEquals(data['row_errors'][0]['response'], 'Invalid email {0}.'.format('test_student.example.com'))
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
@patch('lms.djangoapps.instructor.views.api.log.info')
def test_csv_user_exist_and_not_enrolled(self, info_log):
"""
        If the email address and username already exist
        and the user is not enrolled in the course, enroll the user and iterate to the next one.
"""
csv_content = "nonenrolled@test.com,NotEnrolledStudent,tester1,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
info_log.assert_called_with(
u'user %s enrolled in the course %s',
u'NotEnrolledStudent',
self.course.id
)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertTrue(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
def test_user_with_already_existing_email_in_csv(self):
"""
If the email address already exists, but the username is different,
assume it is the correct user and just register the user in the course.
"""
csv_content = "test_student@example.com,test_student_1,tester1,USA\n" \
"test_student@example.com,test_student_2,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
warning_message = 'An account with email {email} exists but the provided username {username} ' \
'is different. Enrolling anyway with {email}.'.format(email='test_student@example.com', username='test_student_2')
self.assertNotEquals(len(data['warnings']), 0)
self.assertEquals(data['warnings'][0]['response'], warning_message)
user = User.objects.get(email='test_student@example.com')
self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertTrue(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
def test_user_with_retired_email_in_csv(self):
"""
If the CSV contains email addresses which correspond with users which
have already been retired, confirm that the attempt returns invalid
email errors.
"""
# This email address is re-used to create a retired account and another account.
conflicting_email = 'test_student@example.com'
# prep a retired user
user = UserFactory.create(username='old_test_student', email=conflicting_email)
user.email = get_retired_email_by_email(user.email)
user.username = get_retired_username_by_username(user.username)
user.is_active = False
user.save()
csv_content = "{email},{username},tester,USA".format(email=conflicting_email, username='new_test_student')
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['row_errors']), 0)
self.assertEquals(
data['row_errors'][0]['response'],
'Invalid email {email}.'.format(email=conflicting_email)
)
self.assertFalse(User.objects.filter(email=conflicting_email).exists())
def test_user_with_already_existing_username_in_csv(self):
"""
If the username already exists (but not the email),
assume it is a different user and fail to create the new account.
"""
csv_content = "test_student1@example.com,test_student_1,tester1,USA\n" \
"test_student2@example.com,test_student_1,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['row_errors']), 0)
self.assertEquals(data['row_errors'][0]['response'], 'Username {user} already exists.'.format(user='test_student_1'))
def test_csv_file_not_attached(self):
"""
Test when the user does not attach a file
"""
csv_content = "test_student1@example.com,test_student_1,tester1,USA\n" \
"test_student2@example.com,test_student_1,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'file_not_found': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['general_errors']), 0)
self.assertEquals(data['general_errors'][0]['response'], 'File is not attached.')
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
def test_raising_exception_in_auto_registration_and_enrollment_case(self):
"""
        Test that an exception raised during enrollment is handled and reported as a row error
"""
csv_content = "test_student1@example.com,test_student_1,tester1,USA\n" \
"test_student2@example.com,test_student_1,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
with patch('lms.djangoapps.instructor.views.api.create_manual_course_enrollment') as mock:
mock.side_effect = NonExistentCourseError()
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['row_errors']), 0)
self.assertEquals(data['row_errors'][0]['response'], 'NonExistentCourseError')
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
def test_generate_unique_password(self):
"""
generate_unique_password should generate a unique password string that excludes certain characters.
"""
password = generate_unique_password([], 12)
self.assertEquals(len(password), 12)
for letter in password:
self.assertNotIn(letter, 'aAeEiIoOuU1l')
def test_users_created_and_enrolled_successfully_if_others_fail(self):
# prep a retired user
user = UserFactory.create(username='old_test_student_4', email='test_student4@example.com')
user.email = get_retired_email_by_email(user.email)
user.username = get_retired_username_by_username(user.username)
user.is_active = False
user.save()
csv_content = "test_student1@example.com,test_student_1,tester1,USA\n" \
"test_student3@example.com,test_student_1,tester3,CA\n" \
"test_student4@example.com,test_student_4,tester4,USA\n" \
"test_student2@example.com,test_student_2,tester2,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['row_errors']), 0)
self.assertEquals(
data['row_errors'][0]['response'],
'Username {user} already exists.'.format(user='test_student_1')
)
self.assertEquals(
data['row_errors'][1]['response'],
'Invalid email {email}.'.format(email='test_student4@example.com')
)
self.assertTrue(User.objects.filter(username='test_student_1', email='test_student1@example.com').exists())
self.assertTrue(User.objects.filter(username='test_student_2', email='test_student2@example.com').exists())
self.assertFalse(User.objects.filter(email='test_student3@example.com').exists())
self.assertFalse(User.objects.filter(email='test_student4@example.com').exists())
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 2)
@patch.object(lms.djangoapps.instructor.views.api, 'generate_random_string',
Mock(side_effect=['first', 'first', 'second']))
def test_generate_unique_password_no_reuse(self):
"""
generate_unique_password should generate a unique password string that hasn't been generated before.
"""
generated_password = ['first']
password = generate_unique_password(generated_password, 12)
self.assertNotEquals(password, 'first')
@patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': False})
def test_allow_automated_signups_flag_not_set(self):
csv_content = "test_student1@example.com,test_student_1,tester1,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEquals(response.status_code, 403)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
@patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': True})
def test_audit_enrollment_mode(self):
"""
        Test that the enrollment mode for audit courses is 'audit'.
"""
# Login Audit Course instructor
self.client.login(username=self.audit_course_instructor.username, password='test')
csv_content = "test_student_wl@example.com,test_student_wl,Test Student,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.audit_course_url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
# Verify enrollment modes to be 'audit'
for enrollment in manual_enrollments:
self.assertEqual(enrollment.enrollment.mode, CourseMode.AUDIT)
@patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': True})
def test_honor_enrollment_mode(self):
"""
Test that enrollment mode for unpaid honor courses is 'honor'.
"""
# Remove white label course price
self.white_label_course_mode.min_price = 0
self.white_label_course_mode.suggested_prices = ''
self.white_label_course_mode.save()
        # Login white label course instructor
self.client.login(username=self.white_label_course_instructor.username, password='test')
csv_content = "test_student_wl@example.com,test_student_wl,Test Student,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.white_label_course_url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
# Verify enrollment modes to be 'honor'
for enrollment in manual_enrollments:
self.assertEqual(enrollment.enrollment.mode, CourseMode.HONOR)
@patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': True})
def test_default_shopping_cart_enrollment_mode_for_white_label(self):
"""
Test that enrollment mode for white label courses (paid courses) is DEFAULT_SHOPPINGCART_MODE_SLUG.
"""
# Login white label course instructor
self.client.login(username=self.white_label_course_instructor.username, password='test')
csv_content = "test_student_wl@example.com,test_student_wl,Test Student,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.white_label_course_url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
# Verify enrollment modes to be CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG
for enrollment in manual_enrollments:
self.assertEqual(enrollment.enrollment.mode, CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG)
@attr(shard=5)
@ddt.ddt
class TestInstructorAPIEnrollment(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test enrollment modification endpoint.
This test does NOT exhaustively test state changes, that is the
job of test_enrollment. This tests the response and action switch.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPIEnrollment, cls).setUpClass()
cls.course = CourseFactory.create()
# Email URL values
cls.site_name = configuration_helpers.get_value(
'SITE_NAME',
settings.SITE_NAME
)
cls.about_path = '/courses/{}/about'.format(cls.course.id)
cls.course_path = '/courses/{}/'.format(cls.course.id)
def setUp(self):
super(TestInstructorAPIEnrollment, self).setUp()
self.request = RequestFactory().request()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.enrolled_student = UserFactory(username='EnrolledStudent', first_name='Enrolled', last_name='Student')
CourseEnrollment.enroll(
self.enrolled_student,
self.course.id
)
self.notenrolled_student = UserFactory(username='NotEnrolledStudent', first_name='NotEnrolled',
last_name='Student')
# Create invited, but not registered, user
cea = CourseEnrollmentAllowed(email='robot-allowed@robot.org', course_id=self.course.id)
cea.save()
self.allowed_email = 'robot-allowed@robot.org'
self.notregistered_email = 'robot-not-an-email-yet@robot.org'
self.assertEqual(User.objects.filter(email=self.notregistered_email).count(), 0)
        # uncomment to enable printing of large diffs
        # from failed assertions in the event of a test failure.
        # (commented out because of pylint C0103 (invalid-name))
# self.maxDiff = None
def test_missing_params(self):
""" Test missing all query parameters. """
url = reverse('students_update_enrollment', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url)
self.assertEqual(response.status_code, 400)
def test_bad_action(self):
""" Test with an invalid action. """
action = 'robot-not-an-action'
url = reverse('students_update_enrollment', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {'identifiers': self.enrolled_student.email, 'action': action})
self.assertEqual(response.status_code, 400)
def test_invalid_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {'identifiers': 'percivaloctavius@', 'action': 'enroll', 'email_students': False})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "enroll",
'auto_enroll': False,
"results": [
{
"identifier": 'percivaloctavius@',
"invalidIdentifier": True,
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_invalid_username(self):
url = reverse('students_update_enrollment', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url,
{'identifiers': 'percivaloctavius', 'action': 'enroll', 'email_students': False})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "enroll",
'auto_enroll': False,
"results": [
{
"identifier": 'percivaloctavius',
"invalidIdentifier": True,
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_enroll_with_username(self):
url = reverse('students_update_enrollment', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {'identifiers': self.notenrolled_student.username, 'action': 'enroll',
'email_students': False})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "enroll",
'auto_enroll': False,
"results": [
{
"identifier": self.notenrolled_student.username,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_enroll_without_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'enroll',
'email_students': False})
print "type(self.notenrolled_student.email): {}".format(type(self.notenrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now enrolled
user = User.objects.get(email=self.notenrolled_student.email)
self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notenrolled_student.email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
@ddt.data('http', 'https')
def test_enroll_with_email(self, protocol):
url = reverse('students_update_enrollment', kwargs={'course_id': text_type(self.course.id)})
params = {'identifiers': self.notenrolled_student.email, 'action': 'enroll', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
print "type(self.notenrolled_student.email): {}".format(type(self.notenrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now enrolled
user = User.objects.get(email=self.notenrolled_student.email)
self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notenrolled_student.email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
u'You have been enrolled in {}'.format(self.course.display_name)
)
self.assertEqual(
mail.outbox[0].body,
"Dear NotEnrolled Student\n\nYou have been enrolled in {} "
"at edx.org by a member of the course staff. "
"The course should now appear on your edx.org dashboard.\n\n"
"To start accessing course materials, please visit "
"{proto}://{site}{course_path}\n\n----\n"
"This email was automatically sent from edx.org to NotEnrolled Student".format(
self.course.display_name,
proto=protocol, site=self.site_name, course_path=self.course_path
)
)
@ddt.data('http', 'https')
def test_enroll_with_email_not_registered(self, protocol):
url = reverse('students_update_enrollment', kwargs={'course_id': text_type(self.course.id)})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ALLOWEDTOENROLL)
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
u'You have been invited to register for {}'.format(self.course.display_name)
)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {} at edx.org by a member of the course staff.\n\n"
"To finish your registration, please visit {proto}://{site}/register and fill out the "
"registration form making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
"Once you have registered and activated your account, "
"visit {proto}://{site}{about_path} to join the course.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
self.course.display_name, proto=protocol, site=self.site_name, about_path=self.about_path
)
)
@ddt.data('http', 'https')
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_enroll_email_not_registered_mktgsite(self, protocol):
url = reverse('students_update_enrollment', kwargs={'course_id': text_type(self.course.id)})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ALLOWEDTOENROLL)
self.assertEqual(response.status_code, 200)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {display_name}"
" at edx.org by a member of the course staff.\n\n"
"To finish your registration, please visit {proto}://{site}/register and fill out the registration form "
"making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
"You can then enroll in {display_name}.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
display_name=self.course.display_name, proto=protocol, site=self.site_name
)
)
@ddt.data('http', 'https')
def test_enroll_with_email_not_registered_autoenroll(self, protocol):
url = reverse('students_update_enrollment', kwargs={'course_id': text_type(self.course.id)})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True,
'auto_enroll': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
print "type(self.notregistered_email): {}".format(type(self.notregistered_email))
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
u'You have been invited to register for {}'.format(self.course.display_name)
)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ALLOWEDTOENROLL)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {display_name}"
" at edx.org by a member of the course staff.\n\n"
"To finish your registration, please visit {proto}://{site}/register and fill out the registration form "
"making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
"Once you have registered and activated your account,"
" you will see {display_name} listed on your dashboard.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
proto=protocol, site=self.site_name, display_name=self.course.display_name
)
)
def test_unenroll_without_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {'identifiers': self.enrolled_student.email, 'action': 'unenroll',
'email_students': False})
print "type(self.enrolled_student.email): {}".format(type(self.enrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now unenrolled
user = User.objects.get(email=self.enrolled_student.email)
self.assertFalse(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.enrolled_student.email,
"before": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, ENROLLED_TO_UNENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_unenroll_with_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {'identifiers': self.enrolled_student.email, 'action': 'unenroll',
'email_students': True})
print "type(self.enrolled_student.email): {}".format(type(self.enrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now unenrolled
user = User.objects.get(email=self.enrolled_student.email)
self.assertFalse(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.enrolled_student.email,
"before": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, ENROLLED_TO_UNENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been un-enrolled from {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
"Dear Enrolled Student\n\nYou have been un-enrolled in {display_name} "
"at edx.org by a member of the course staff. "
"The course will no longer appear on your edx.org dashboard.\n\n"
"Your other courses have not been affected.\n\n----\n"
"This email was automatically sent from edx.org to Enrolled Student".format(
display_name=self.course.display_name,
)
)
def test_unenroll_with_email_allowed_student(self):
url = reverse('students_update_enrollment', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url,
{'identifiers': self.allowed_email, 'action': 'unenroll', 'email_students': True})
print "type(self.allowed_email): {}".format(type(self.allowed_email))
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.allowed_email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": True,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, ALLOWEDTOENROLL_TO_UNENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been un-enrolled from {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
"Dear Student,\n\nYou have been un-enrolled from course {display_name} by a member of the course staff. "
"Please disregard the invitation previously sent.\n\n----\n"
"This email was automatically sent from edx.org to robot-allowed@robot.org".format(
display_name=self.course.display_name,
)
)
@ddt.data('http', 'https')
@patch('lms.djangoapps.instructor.enrollment.uses_shib')
def test_enroll_with_email_not_registered_with_shib(self, protocol, mock_uses_shib):
mock_uses_shib.return_value = True
url = reverse('students_update_enrollment', kwargs={'course_id': text_type(self.course.id)})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to register for {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {display_name} at edx.org by a member of the course staff.\n\n"
"To access the course visit {proto}://{site}{about_path} and register for the course.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
proto=protocol, site=self.site_name, about_path=self.about_path,
display_name=self.course.display_name,
)
)
@patch('lms.djangoapps.instructor.enrollment.uses_shib')
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_enroll_email_not_registered_shib_mktgsite(self, mock_uses_shib):
# Try with marketing site enabled and shib on
mock_uses_shib.return_value = True
url = reverse('students_update_enrollment', kwargs={'course_id': text_type(self.course.id)})
# Try with marketing site enabled
with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
response = self.client.post(url, {'identifiers': self.notregistered_email, 'action': 'enroll',
'email_students': True})
self.assertEqual(response.status_code, 200)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {} at edx.org by a member of the course staff.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
self.course.display_name,
)
)
@ddt.data('http', 'https')
@patch('lms.djangoapps.instructor.enrollment.uses_shib')
def test_enroll_with_email_not_registered_with_shib_autoenroll(self, protocol, mock_uses_shib):
mock_uses_shib.return_value = True
url = reverse('students_update_enrollment', kwargs={'course_id': text_type(self.course.id)})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True,
'auto_enroll': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
print "type(self.notregistered_email): {}".format(type(self.notregistered_email))
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to register for {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {display_name}"
" at edx.org by a member of the course staff.\n\n"
"To access the course visit {proto}://{site}{course_path} and login.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
display_name=self.course.display_name,
proto=protocol, site=self.site_name, course_path=self.course_path
)
)
def test_enroll_already_enrolled_student(self):
"""
Ensure that already enrolled "verified" students cannot be downgraded
to "honor"
"""
course_enrollment = CourseEnrollment.objects.get(
user=self.enrolled_student, course_id=self.course.id
)
# make this enrollment "verified"
course_enrollment.mode = u'verified'
course_enrollment.save()
self.assertEqual(course_enrollment.mode, u'verified')
# now re-enroll the student through the instructor dash
self._change_student_enrollment(self.enrolled_student, self.course, 'enroll')
# affirm that the student is still in "verified" mode
course_enrollment = CourseEnrollment.objects.get(
user=self.enrolled_student, course_id=self.course.id
)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, ENROLLED_TO_ENROLLED)
self.assertEqual(course_enrollment.mode, u"verified")
def create_paid_course(self):
"""
create paid course mode.
"""
paid_course = CourseFactory.create()
CourseModeFactory.create(course_id=paid_course.id, min_price=50, mode_slug=CourseMode.HONOR)
CourseInstructorRole(paid_course.id).add_users(self.instructor)
return paid_course
def test_unenrolled_allowed_to_enroll_user(self):
"""
test to unenroll allow to enroll user.
"""
paid_course = self.create_paid_course()
url = reverse('students_update_enrollment', kwargs={'course_id': text_type(paid_course.id)})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': False,
'auto_enroll': False, 'reason': 'testing..', 'role': 'Learner'}
response = self.client.post(url, params)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ALLOWEDTOENROLL)
self.assertEqual(response.status_code, 200)
# now register the user and enroll again; the allowed-to-enroll record becomes a real enrollment
UserFactory(email=self.notregistered_email)
url = reverse('students_update_enrollment', kwargs={'course_id': text_type(paid_course.id)})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': False,
'auto_enroll': False, 'reason': 'testing', 'role': 'Learner'}
response = self.client.post(url, params)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 2)
self.assertEqual(manual_enrollments[1].state_transition, ALLOWEDTOENROLL_TO_ENROLLED)
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notregistered_email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": True,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": True,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_unenrolled_already_not_enrolled_user(self):
"""
test unenrolled user already not enrolled in a course.
"""
paid_course = self.create_paid_course()
course_enrollment = CourseEnrollment.objects.filter(
user__email=self.notregistered_email, course_id=paid_course.id
)
self.assertEqual(course_enrollment.count(), 0)
url = reverse('students_update_enrollment', kwargs={'course_id': text_type(paid_course.id)})
params = {'identifiers': self.notregistered_email, 'action': 'unenroll', 'email_students': False,
'auto_enroll': False, 'reason': 'testing', 'role': 'Learner'}
response = self.client.post(url, params)
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notregistered_email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": False,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_UNENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_unenroll_and_enroll_verified(self):
"""
Test that unenrolling and enrolling a student from a verified track
results in that student being in the default track
"""
course_enrollment = CourseEnrollment.objects.get(
user=self.enrolled_student, course_id=self.course.id
)
# upgrade enrollment
course_enrollment.mode = u'verified'
course_enrollment.save()
self.assertEqual(course_enrollment.mode, u'verified')
self._change_student_enrollment(self.enrolled_student, self.course, 'unenroll')
self._change_student_enrollment(self.enrolled_student, self.course, 'enroll')
course_enrollment = CourseEnrollment.objects.get(
user=self.enrolled_student, course_id=self.course.id
)
self.assertEqual(course_enrollment.mode, CourseMode.DEFAULT_MODE_SLUG)
def test_role_and_reason_are_persisted(self):
"""
Test that the role and reason fields are persisted in the database.
"""
paid_course = self.create_paid_course()
url = reverse('students_update_enrollment', kwargs={'course_id': text_type(paid_course.id)})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': False,
'auto_enroll': False, 'reason': 'testing', 'role': 'Learner'}
response = self.client.post(url, params)
manual_enrollment = ManualEnrollmentAudit.objects.first()
self.assertEqual(manual_enrollment.reason, 'testing')
self.assertEqual(manual_enrollment.role, 'Learner')
self.assertEqual(response.status_code, 200)
def _change_student_enrollment(self, user, course, action):
"""
Helper function that posts to 'students_update_enrollment' to change
a student's enrollment
"""
url = reverse(
'students_update_enrollment',
kwargs={'course_id': text_type(course.id)},
)
params = {
'identifiers': user.email,
'action': action,
'email_students': True,
'reason': 'change user enrollment',
'role': 'Learner'
}
response = self.client.post(url, params)
self.assertEqual(response.status_code, 200)
return response
@attr(shard=5)
@ddt.ddt
class TestInstructorAPIBulkBetaEnrollment(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test bulk beta modify access endpoint.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPIBulkBetaEnrollment, cls).setUpClass()
cls.course = CourseFactory.create()
# Email URL values
cls.site_name = configuration_helpers.get_value(
'SITE_NAME',
settings.SITE_NAME
)
cls.about_path = '/courses/{}/about'.format(cls.course.id)
cls.course_path = '/courses/{}/'.format(cls.course.id)
def setUp(self):
super(TestInstructorAPIBulkBetaEnrollment, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.beta_tester = BetaTesterFactory(course_key=self.course.id)
CourseEnrollment.enroll(
self.beta_tester,
self.course.id
)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))
self.notenrolled_student = UserFactory(username='NotEnrolledStudent')
self.notregistered_email = 'robot-not-an-email-yet@robot.org'
self.assertEqual(User.objects.filter(email=self.notregistered_email).count(), 0)
self.request = RequestFactory().request()
# Uncomment to enable printing of large diffs from failed
# assertions in the event of a test failure.
# (Commented out because pylint flags C0103 (invalid-name).)
# self.maxDiff = None
def test_missing_params(self):
""" Test missing all query parameters. """
url = reverse('bulk_beta_modify_access', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url)
self.assertEqual(response.status_code, 400)
def test_bad_action(self):
""" Test with an invalid action. """
action = 'robot-not-an-action'
url = reverse('bulk_beta_modify_access', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {'identifiers': self.beta_tester.email, 'action': action})
self.assertEqual(response.status_code, 400)
def add_notenrolled(self, response, identifier):
"""
Test Helper Method (not a test, called by other tests)
Takes a client response from a call to bulk_beta_modify_access with 'email_students': False,
and the student identifier (email or username) given as 'identifiers' in the request.
Asserts the response returns cleanly, that the student was added as a beta tester, and the
response properly contains their identifier, 'error': False, and 'userDoesNotExist': False.
Additionally asserts no email was sent.
"""
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": identifier,
"error": False,
"userDoesNotExist": False,
"is_active": True
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_add_notenrolled_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': False})
self.add_notenrolled(response, self.notenrolled_student.email)
self.assertFalse(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_email_autoenroll(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': False, 'auto_enroll': True})
self.add_notenrolled(response, self.notenrolled_student.email)
self.assertTrue(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_username(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {'identifiers': self.notenrolled_student.username, 'action': 'add', 'email_students': False})
self.add_notenrolled(response, self.notenrolled_student.username)
self.assertFalse(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_username_autoenroll(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {'identifiers': self.notenrolled_student.username, 'action': 'add', 'email_students': False, 'auto_enroll': True})
self.add_notenrolled(response, self.notenrolled_student.username)
self.assertTrue(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
@ddt.data('http', 'https')
def test_add_notenrolled_with_email(self, protocol):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': text_type(self.course.id)})
params = {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notenrolled_student.email,
"error": False,
"userDoesNotExist": False,
"is_active": True
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to a beta test for {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
u"Dear {student_name}\n\nYou have been invited to be a beta tester "
"for {display_name} at edx.org by a member of the course staff.\n\n"
"Visit {proto}://{site}{about_path} to join "
"the course and begin the beta test.\n\n----\n"
"This email was automatically sent from edx.org to {student_email}".format(
display_name=self.course.display_name,
student_name=self.notenrolled_student.profile.name,
student_email=self.notenrolled_student.email,
proto=protocol,
site=self.site_name,
about_path=self.about_path
)
)
@ddt.data('http', 'https')
def test_add_notenrolled_with_email_autoenroll(self, protocol):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': text_type(self.course.id)})
params = {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True,
'auto_enroll': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notenrolled_student.email,
"error": False,
"userDoesNotExist": False,
"is_active": True
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to a beta test for {display_name}'.format(display_name=self.course.display_name)
)
self.assertEqual(
mail.outbox[0].body,
u"Dear {student_name}\n\nYou have been invited to be a beta tester "
"for {display_name} at edx.org by a member of the course staff.\n\n"
"To start accessing course materials, please visit "
"{proto}://{site}{course_path}\n\n----\n"
"This email was automatically sent from edx.org to {student_email}".format(
display_name=self.course.display_name,
student_name=self.notenrolled_student.profile.name,
student_email=self.notenrolled_student.email,
proto=protocol,
site=self.site_name,
course_path=self.course_path
)
)
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_add_notenrolled_email_mktgsite(self):
# Try with marketing site enabled
url = reverse('bulk_beta_modify_access', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True})
self.assertEqual(response.status_code, 200)
self.assertEqual(
mail.outbox[0].body,
u"Dear {}\n\nYou have been invited to be a beta tester "
"for {} at edx.org by a member of the course staff.\n\n"
"Visit edx.org to enroll in the course and begin the beta test.\n\n----\n"
"This email was automatically sent from edx.org to {}".format(
self.notenrolled_student.profile.name,
self.course.display_name,
self.notenrolled_student.email,
)
)
def test_enroll_with_email_not_registered(self):
# User doesn't exist
url = reverse('bulk_beta_modify_access', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url,
{'identifiers': self.notregistered_email, 'action': 'add', 'email_students': True,
'reason': 'testing'})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notregistered_email,
"error": True,
"userDoesNotExist": True,
"is_active": None
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_remove_without_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url,
{'identifiers': self.beta_tester.email, 'action': 'remove', 'email_students': False,
'reason': 'testing'})
self.assertEqual(response.status_code, 200)
# Works around a caching bug which supposedly can't happen in prod: this local instance is not ==
# the instance the view fetched, so its cached roles are stale and must be cleared.
if hasattr(self.beta_tester, '_roles'):
del self.beta_tester._roles
self.assertFalse(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))
# test the response data
expected = {
"action": "remove",
"results": [
{
"identifier": self.beta_tester.email,
"error": False,
"userDoesNotExist": False,
"is_active": True
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_remove_with_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url,
{'identifiers': self.beta_tester.email, 'action': 'remove', 'email_students': True,
'reason': 'testing'})
self.assertEqual(response.status_code, 200)
# Works around a caching bug which supposedly can't happen in prod: this local instance is not ==
# the instance the view fetched, so its cached roles are stale and must be cleared.
if hasattr(self.beta_tester, '_roles'):
del self.beta_tester._roles
self.assertFalse(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))
# test the response data
expected = {
"action": "remove",
"results": [
{
"identifier": self.beta_tester.email,
"error": False,
"userDoesNotExist": False,
"is_active": True
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
u'You have been removed from a beta test for {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
"Dear {full_name}\n\nYou have been removed as a beta tester for "
"{display_name} at edx.org by a member of the course staff. "
"The course will remain on your dashboard, but you will no longer "
"be part of the beta testing group.\n\n"
"Your other courses have not been affected.\n\n----\n"
"This email was automatically sent from edx.org to {email_address}".format(
display_name=self.course.display_name,
full_name=self.beta_tester.profile.name,
email_address=self.beta_tester.email
)
)
@attr(shard=5)
class TestInstructorAPILevelsAccess(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints whereby instructors can change permissions
of other users.
This test does NOT check whether the actions had an effect on the
database; that is the job of test_access.
This tests the response and action switch.
Actually, modify_access does not have a very meaningful
response yet, so only the status code is tested.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPILevelsAccess, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestInstructorAPILevelsAccess, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.other_instructor = InstructorFactory(course_key=self.course.id)
self.other_staff = StaffFactory(course_key=self.course.id)
self.other_user = UserFactory()
def test_modify_access_noparams(self):
""" Test missing all query parameters. """
url = reverse('modify_access', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url)
self.assertEqual(response.status_code, 400)
def test_modify_access_bad_action(self):
""" Test with an invalid action parameter. """
url = reverse('modify_access', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'staff',
'action': 'robot-not-an-action',
})
self.assertEqual(response.status_code, 400)
def test_modify_access_bad_role(self):
""" Test with an invalid action parameter. """
url = reverse('modify_access', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'robot-not-a-roll',
'action': 'revoke',
})
self.assertEqual(response.status_code, 400)
def test_modify_access_allow(self):
url = reverse('modify_access', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.other_user.email,
'rolename': 'staff',
'action': 'allow',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_allow_with_uname(self):
url = reverse('modify_access', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.other_instructor.username,
'rolename': 'staff',
'action': 'allow',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_revoke(self):
url = reverse('modify_access', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'staff',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_revoke_with_username(self):
url = reverse('modify_access', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.other_staff.username,
'rolename': 'staff',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_with_fake_user(self):
url = reverse('modify_access', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': 'GandalfTheGrey',
'rolename': 'staff',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
expected = {
'unique_student_identifier': 'GandalfTheGrey',
'userDoesNotExist': True,
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_modify_access_with_inactive_user(self):
self.other_user.is_active = False
self.other_user.save()
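# A deactivated user should be reported back as inactiveUser instead of being granted the role.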
url = reverse('modify_access', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.other_user.username,
'rolename': 'beta',
'action': 'allow',
})
self.assertEqual(response.status_code, 200)
expected = {
'unique_student_identifier': self.other_user.username,
'inactiveUser': True,
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_modify_access_revoke_not_allowed(self):
""" Test revoking access that a user does not have. """
url = reverse('modify_access', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'instructor',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_revoke_self(self):
"""
Test that an instructor cannot remove instructor privileges from themselves.
"""
url = reverse('modify_access', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.instructor.email,
'rolename': 'instructor',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
# check response content
expected = {
'unique_student_identifier': self.instructor.username,
'rolename': 'instructor',
'action': 'revoke',
'removingSelfAsInstructor': True,
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_list_course_role_members_noparams(self):
""" Test missing all query parameters. """
url = reverse('list_course_role_members', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url)
self.assertEqual(response.status_code, 400)
def test_list_course_role_members_bad_rolename(self):
""" Test with an invalid rolename parameter. """
url = reverse('list_course_role_members', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'rolename': 'robot-not-a-rolename',
})
self.assertEqual(response.status_code, 400)
def test_list_course_role_members_staff(self):
url = reverse('list_course_role_members', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'rolename': 'staff',
})
self.assertEqual(response.status_code, 200)
# check response content
expected = {
'course_id': text_type(self.course.id),
'staff': [
{
'username': self.other_staff.username,
'email': self.other_staff.email,
'first_name': self.other_staff.first_name,
'last_name': self.other_staff.last_name,
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_list_course_role_members_beta(self):
url = reverse('list_course_role_members', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'rolename': 'beta',
})
self.assertEqual(response.status_code, 200)
# check response content
expected = {
'course_id': text_type(self.course.id),
'beta': []
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_update_forum_role_membership(self):
"""
Test update forum role membership with user's email and username.
"""
# Seed forum roles for course.
seed_permissions_roles(self.course.id)
for user in [self.instructor, self.other_user]:
for identifier_attr in [user.email, user.username]:
for rolename in ["Administrator", "Moderator", "Community TA"]:
for action in ["allow", "revoke"]:
self.assert_update_forum_role_membership(user, identifier_attr, rolename, action)
def assert_update_forum_role_membership(self, current_user, identifier, rolename, action):
"""
Test update forum role membership.
Post unique_student_identifier, rolename, and action, then verify the forum role was updated.
"""
url = reverse('update_forum_role_membership', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(
url,
{
'unique_student_identifier': identifier,
'rolename': rolename,
'action': action,
}
)
# Status code should be 200.
self.assertEqual(response.status_code, 200)
user_roles = current_user.roles.filter(course_id=self.course.id).values_list("name", flat=True)
if action == 'allow':
self.assertIn(rolename, user_roles)
elif action == 'revoke':
self.assertNotIn(rolename, user_roles)
@attr(shard=5)
@ddt.ddt
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True})
class TestInstructorAPILevelsDataDump(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints that show data without side effects.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPILevelsDataDump, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestInstructorAPILevelsDataDump, self).setUp()
self.course_mode = CourseMode(course_id=self.course.id,
mode_slug="honor",
mode_display_name="honor cert",
min_price=40)
self.course_mode.save()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.cart = Order.get_cart_for_user(self.instructor)
self.coupon_code = 'abcde'
self.coupon = Coupon(code=self.coupon_code, description='testing code', course_id=self.course.id,
percentage_discount=10, created_by=self.instructor, is_active=True)
self.coupon.save()
# Create testing invoice 1
self.sale_invoice_1 = Invoice.objects.create(
total_amount=1234.32, company_name='Test1', company_contact_name='TestName', company_contact_email='Test@company.com',
recipient_name='Testw', recipient_email='test1@test.com', customer_reference_number='2Fwe23S',
internal_reference="A", course_id=self.course.id, is_valid=True
)
self.invoice_item = CourseRegistrationCodeInvoiceItem.objects.create(
invoice=self.sale_invoice_1,
qty=1,
unit_price=1234.32,
course_id=self.course.id
)
self.students = [UserFactory() for _ in xrange(6)]
for student in self.students:
CourseEnrollment.enroll(student, self.course.id)
self.students_who_may_enroll = self.students + [UserFactory() for _ in range(5)]
for student in self.students_who_may_enroll:
CourseEnrollmentAllowed.objects.create(
email=student.email, course_id=self.course.id
)
def register_with_redemption_code(self, user, code):
"""
Enroll the given user using a registration code.
"""
redeem_url = reverse('register_code_redemption', args=[code], is_dashboard_endpoint=False)
self.client.login(username=user.username, password='test')
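# A GET renders the redemption confirmation page; the POST below activates the enrollment.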
response = self.client.get(redeem_url)
self.assertEqual(response.status_code, 200)
# check button text
self.assertIn('Activate Course Enrollment', response.content)
response = self.client.post(redeem_url)
self.assertEqual(response.status_code, 200)
def test_invalidate_sale_record(self):
"""
Test the sale invalidation and re-validation scenario.
"""
for i in range(2):
course_registration_code = CourseRegistrationCode(
code='sale_invoice{}'.format(i),
course_id=text_type(self.course.id),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
course_registration_code.save()
data = {'invoice_number': self.sale_invoice_1.id, 'event_type': "invalidate"}
url = reverse('sale_validation', kwargs={'course_id': text_type(self.course.id)})
self.assert_request_status_code(200, url, method="POST", data=data)
# Now try the same request against a non-existent invoice number
test_data_1 = {'invoice_number': 100, 'event_type': "invalidate"}
self.assert_request_status_code(404, url, method="POST", data=test_data_1)
# Now invalidate the same invoice number and expect a Bad Request
response = self.assert_request_status_code(400, url, method="POST", data=data)
self.assertIn("The sale associated with this invoice has already been invalidated.", response.content)
# now re_validate the invoice number
data['event_type'] = "re_validate"
self.assert_request_status_code(200, url, method="POST", data=data)
# Now re_validate the same active invoice number and expect a Bad Request
response = self.assert_request_status_code(400, url, method="POST", data=data)
self.assertIn("This invoice is already active.", response.content)
test_data_2 = {'invoice_number': self.sale_invoice_1.id}
response = self.assert_request_status_code(400, url, method="POST", data=test_data_2)
self.assertIn("Missing required event_type parameter", response.content)
test_data_3 = {'event_type': "re_validate"}
response = self.assert_request_status_code(400, url, method="POST", data=test_data_3)
self.assertIn("Missing required invoice_number parameter", response.content)
# submitting invalid invoice number
data['invoice_number'] = 'testing'
response = self.assert_request_status_code(400, url, method="POST", data=data)
self.assertIn("invoice_number must be an integer, {value} provided".format(value=data['invoice_number']), response.content)
def test_get_sale_order_records_features_csv(self):
"""
Test that the response from get_sale_order_records is in csv format.
"""
# add the coupon code for the course
coupon = Coupon(
code='test_code', description='test_description', course_id=self.course.id,
percentage_discount='10', created_by=self.instructor, is_active=True
)
coupon.save()
self.cart.order_type = 'business'
self.cart.save()
self.cart.add_billing_details(company_name='Test Company', company_contact_name='Test',
company_contact_email='test@123', recipient_name='R1',
recipient_email='', customer_reference_number='PO#23')
paid_course_reg_item = PaidCourseRegistration.add_to_order(
self.cart,
self.course.id,
mode_slug=CourseMode.HONOR
)
# update the quantity of the cart item paid_course_reg_item
resp = self.client.post(
reverse('shoppingcart.views.update_user_cart', is_dashboard_endpoint=False),
{'ItemId': paid_course_reg_item.id, 'qty': '4'}
)
self.assertEqual(resp.status_code, 200)
# apply the coupon code to the item in the cart
resp = self.client.post(
reverse('shoppingcart.views.use_code', is_dashboard_endpoint=False),
{'code': coupon.code}
)
self.assertEqual(resp.status_code, 200)
self.cart.purchase()
# get the updated item
item = self.cart.orderitem_set.all().select_subclasses()[0]
# get the redeemed coupon information
coupon_redemption = CouponRedemption.objects.select_related('coupon').filter(order=self.cart)
sale_order_url = reverse('get_sale_order_records', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(sale_order_url)
self.assertEqual(response['Content-Type'], 'text/csv')
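# 36 is the discounted unit price: the $40 honor mode price less the 10% coupon.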
self.assertIn('36', response.content.split('\r\n')[1])
self.assertIn(str(item.unit_cost), response.content.split('\r\n')[1],)
self.assertIn(str(item.list_price), response.content.split('\r\n')[1],)
self.assertIn(item.status, response.content.split('\r\n')[1],)
self.assertIn(coupon_redemption[0].coupon.code, response.content.split('\r\n')[1],)
def test_coupon_redeem_count_in_ecommerce_section(self):
"""
Test that checks the redeem count in the instructor_dashboard coupon section
"""
# add the coupon code for the course
coupon = Coupon(
code='test_code', description='test_description', course_id=self.course.id,
percentage_discount='10', created_by=self.instructor, is_active=True
)
coupon.save()
# Coupon Redeem Count only visible for Financial Admins.
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
PaidCourseRegistration.add_to_order(self.cart, self.course.id)
# apply the coupon code to the item in the cart
resp = self.client.post(
reverse('shoppingcart.views.use_code', is_dashboard_endpoint=False),
{'code': coupon.code}
)
self.assertEqual(resp.status_code, 200)
# URL for instructor dashboard
instructor_dashboard = reverse(
'instructor_dashboard',
kwargs={'course_id': text_type(self.course.id)},
is_dashboard_endpoint=False
)
# visit the instructor dashboard page and
# check that the coupon redeem count should be 0
resp = self.client.get(instructor_dashboard)
self.assertEqual(resp.status_code, 200)
self.assertIn('Number Redeemed', resp.content)
self.assertIn('<td>0</td>', resp.content)
# now make the payment of your cart items
self.cart.purchase()
# visit the instructor dashboard page and
# check that the coupon redeem count should be 1
resp = self.client.get(instructor_dashboard)
self.assertEqual(resp.status_code, 200)
self.assertIn('Number Redeemed', resp.content)
self.assertIn('<td>1</td>', resp.content)
def test_get_sale_records_features_csv(self):
"""
Test that the response from get_sale_records is in csv format.
"""
for i in range(2):
course_registration_code = CourseRegistrationCode(
code='sale_invoice{}'.format(i),
course_id=text_type(self.course.id),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
course_registration_code.save()
url = reverse(
'get_sale_records',
kwargs={'course_id': text_type(self.course.id)}
)
response = self.client.post(url + '/csv', {})
self.assertEqual(response['Content-Type'], 'text/csv')
def test_get_sale_records_features_json(self):
"""
Test that the response from get_sale_records is in json format.
"""
for i in range(5):
course_registration_code = CourseRegistrationCode(
code='sale_invoice{}'.format(i),
course_id=text_type(self.course.id),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
course_registration_code.save()
url = reverse('get_sale_records', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {})
res_json = json.loads(response.content)
self.assertIn('sale', res_json)
for res in res_json['sale']:
self.validate_sale_records_response(
res,
course_registration_code,
self.sale_invoice_1,
0,
invoice_item=self.invoice_item
)
def test_get_sale_records_features_with_multiple_invoices(self):
"""
Test that the response from get_sale_records is in json format for multiple invoices
"""
for i in range(5):
course_registration_code = CourseRegistrationCode(
code='qwerty{}'.format(i),
course_id=text_type(self.course.id),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
course_registration_code.save()
# Create test invoice 2
sale_invoice_2 = Invoice.objects.create(
total_amount=1234.32, company_name='Test1', company_contact_name='TestName', company_contact_email='Test@company.com',
recipient_name='Testw_2', recipient_email='test2@test.com', customer_reference_number='2Fwe23S',
internal_reference="B", course_id=self.course.id
)
invoice_item_2 = CourseRegistrationCodeInvoiceItem.objects.create(
invoice=sale_invoice_2,
qty=1,
unit_price=1234.32,
course_id=self.course.id
)
for i in range(5):
course_registration_code = CourseRegistrationCode(
code='xyzmn{}'.format(i), course_id=text_type(self.course.id),
created_by=self.instructor, invoice=sale_invoice_2, invoice_item=invoice_item_2, mode_slug='honor'
)
course_registration_code.save()
url = reverse('get_sale_records', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {})
res_json = json.loads(response.content)
self.assertIn('sale', res_json)
self.validate_sale_records_response(
res_json['sale'][0],
course_registration_code,
self.sale_invoice_1,
0,
invoice_item=self.invoice_item
)
self.validate_sale_records_response(
res_json['sale'][1],
course_registration_code,
sale_invoice_2,
0,
invoice_item=invoice_item_2
)
def validate_sale_records_response(self, res, course_registration_code, invoice, used_codes, invoice_item):
"""
validate sale records attribute values with the response object
"""
self.assertEqual(res['total_amount'], invoice.total_amount)
self.assertEqual(res['recipient_email'], invoice.recipient_email)
self.assertEqual(res['recipient_name'], invoice.recipient_name)
self.assertEqual(res['company_name'], invoice.company_name)
self.assertEqual(res['company_contact_name'], invoice.company_contact_name)
self.assertEqual(res['company_contact_email'], invoice.company_contact_email)
self.assertEqual(res['internal_reference'], invoice.internal_reference)
self.assertEqual(res['customer_reference_number'], invoice.customer_reference_number)
self.assertEqual(res['invoice_number'], invoice.id)
self.assertEqual(res['created_by'], course_registration_code.created_by.username)
self.assertEqual(res['course_id'], text_type(invoice_item.course_id))
self.assertEqual(res['total_used_codes'], used_codes)
self.assertEqual(res['total_codes'], 5)
def test_get_problem_responses_invalid_location(self):
"""
Test whether get_problem_responses returns an appropriate status
message when users submit an invalid problem location.
"""
url = reverse(
'get_problem_responses',
kwargs={'course_id': unicode(self.course.id)}
)
problem_location = ''
response = self.client.post(url, {'problem_location': problem_location})
res_json = json.loads(response.content)
self.assertEqual(res_json, 'Could not find problem with this location.')
def valid_problem_location(test): # pylint: disable=no-self-argument
"""
Decorator for tests that target get_problem_responses endpoint and
need to pretend the user submitted a valid problem location.
"""
@functools.wraps(test)
def wrapper(self, *args, **kwargs):
"""
Run `test` method, ensuring that UsageKey.from_string returns a
problem key that the get_problem_responses endpoint can
work with.
"""
mock_problem_key = NonCallableMock(return_value=u'')
mock_problem_key.course_key = self.course.id
with patch.object(UsageKey, 'from_string') as patched_method:
patched_method.return_value = mock_problem_key
test(self, *args, **kwargs)
return wrapper
@valid_problem_location
def test_get_problem_responses_successful(self):
"""
Test whether get_problem_responses returns an appropriate status
message if CSV generation was started successfully.
"""
url = reverse(
'get_problem_responses',
kwargs={'course_id': unicode(self.course.id)}
)
problem_location = ''
response = self.client.post(url, {'problem_location': problem_location})
res_json = json.loads(response.content)
self.assertIn('status', res_json)
status = res_json['status']
self.assertIn('is being created', status)
self.assertNotIn('already in progress', status)
self.assertIn("task_id", res_json)
@valid_problem_location
def test_get_problem_responses_already_running(self):
"""
Test whether get_problem_responses returns an appropriate status
message if CSV generation is already in progress.
"""
url = reverse(
'get_problem_responses',
kwargs={'course_id': unicode(self.course.id)}
)
task_type = 'problem_responses_csv'
already_running_status = generate_already_running_error_message(task_type)
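# Simulate a second request arriving while the first CSV task is still running.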
with patch('lms.djangoapps.instructor_task.api.submit_calculate_problem_responses_csv') as submit_task_function:
error = AlreadyRunningError(already_running_status)
submit_task_function.side_effect = error
response = self.client.post(url, {})
self.assertEqual(response.status_code, 400)
self.assertIn(already_running_status, response.content)
def test_get_students_features(self):
"""
Test that a minimum set of information is formatted
correctly in the response to get_students_features.
"""
for student in self.students:
student.profile.city = "Mos Eisley {}".format(student.id)
student.profile.save()
url = reverse('get_students_features', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {})
res_json = json.loads(response.content)
self.assertIn('students', res_json)
for student in self.students:
student_json = [
x for x in res_json['students']
if x['username'] == student.username
][0]
self.assertEqual(student_json['username'], student.username)
self.assertEqual(student_json['email'], student.email)
self.assertEqual(student_json['city'], student.profile.city)
self.assertEqual(student_json['country'], "")
@ddt.data(True, False)
def test_get_students_features_cohorted(self, is_cohorted):
"""
Test that get_students_features includes cohort info when the course is
cohorted, and does not when the course is not cohorted.
"""
url = reverse('get_students_features', kwargs={'course_id': text_type(self.course.id)})
set_course_cohorted(self.course.id, is_cohorted)
response = self.client.post(url, {})
res_json = json.loads(response.content)
self.assertEqual('cohort' in res_json['feature_names'], is_cohorted)
@ddt.data(True, False)
def test_get_students_features_teams(self, has_teams):
"""
Test that get_students_features includes team info when the course
has teams enabled, and does not when it does not.
"""
if has_teams:
self.course = CourseFactory.create(teams_configuration={
'max_size': 2, 'topics': [{'topic-id': 'topic', 'name': 'Topic', 'description': 'A Topic'}]
})
course_instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=course_instructor.username, password='test')
url = reverse('get_students_features', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {})
res_json = json.loads(response.content)
self.assertEqual('team' in res_json['feature_names'], has_teams)
def test_get_students_who_may_enroll(self):
"""
Test whether get_students_who_may_enroll returns an appropriate
status message when users request a CSV file of students who
may enroll in a course.
"""
url = reverse(
'get_students_who_may_enroll',
kwargs={'course_id': unicode(self.course.id)}
)
# Successful case:
response = self.client.post(url, {})
self.assertEqual(response.status_code, 200)
# CSV generation already in progress:
task_type = 'may_enroll_info_csv'
already_running_status = generate_already_running_error_message(task_type)
with patch('lms.djangoapps.instructor_task.api.submit_calculate_may_enroll_csv') as submit_task_function:
error = AlreadyRunningError(already_running_status)
submit_task_function.side_effect = error
response = self.client.post(url, {})
self.assertEqual(response.status_code, 400)
self.assertIn(already_running_status, response.content)
def test_get_student_exam_results(self):
"""
Test whether get_proctored_exam_results returns an appropriate
status message when users request a CSV file.
"""
url = reverse(
'get_proctored_exam_results',
kwargs={'course_id': unicode(self.course.id)}
)
# Successful case:
response = self.client.post(url, {})
self.assertEqual(response.status_code, 200)
# CSV generation already in progress:
task_type = 'proctored_exam_results_report'
already_running_status = generate_already_running_error_message(task_type)
with patch('lms.djangoapps.instructor_task.api.submit_proctored_exam_results_report') as submit_task_function:
error = AlreadyRunningError(already_running_status)
submit_task_function.side_effect = error
response = self.client.post(url, {})
self.assertEqual(response.status_code, 400)
self.assertIn(already_running_status, response.content)
def test_access_course_finance_admin_with_invalid_course_key(self):
"""
Test that require_finance_admin rejects an invalid course key
before generating a detailed enrollment report.
"""
func = Mock()
decorated_func = require_finance_admin(func)
request = self.mock_request()
response = decorated_func(request, 'invalid_course_key')
self.assertEqual(response.status_code, 404)
self.assertFalse(func.called)
def mock_request(self):
"""
Build a mock request authenticated as the instructor.
"""
request = Mock()
request.user = self.instructor
return request
def test_access_course_finance_admin_with_valid_course_key(self):
"""
Test a valid course key when the user lacks the finance admin role;
the wrapped function must not be called.
"""
func = Mock()
decorated_func = require_finance_admin(func)
request = self.mock_request()
response = decorated_func(request, 'valid/course/key')
self.assertEqual(response.status_code, 403)
self.assertFalse(func.called)
def test_add_user_to_finance_admin_role_with_valid_course(self):
"""
Test that the wrapped function is called once the user has been
given the finance admin role.
"""
func = Mock()
decorated_func = require_finance_admin(func)
request = self.mock_request()
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
decorated_func(request, text_type(self.course.id))
self.assertTrue(func.called)
def test_enrollment_report_features_csv(self):
"""
Test generating the detailed enrollment report after enrolling an
admin via a cart purchase and a student via a registration code.
"""
InvoiceTransaction.objects.create(
invoice=self.sale_invoice_1,
amount=self.sale_invoice_1.total_amount,
status='completed',
created_by=self.instructor,
last_modified_by=self.instructor
)
course_registration_code = CourseRegistrationCode.objects.create(
code='abcde',
course_id=text_type(self.course.id),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
admin_user = AdminFactory()
admin_cart = Order.get_cart_for_user(admin_user)
PaidCourseRegistration.add_to_order(admin_cart, self.course.id)
admin_cart.purchase()
# create a new user/student and enroll
# in the course using a registration code
# and then validates the generated detailed enrollment report
test_user = UserFactory()
self.register_with_redemption_code(test_user, course_registration_code.code)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
UserProfileFactory.create(user=self.students[0], meta='{"company": "asdasda"}')
self.client.login(username=self.instructor.username, password='test')
url = reverse('get_enrollment_report', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {})
self.assertIn('The detailed enrollment report is being created.', response.content)
def test_bulk_purchase_detailed_report(self):
"""
Test generating the detailed enrollment report.
1. Purchase registration codes.
2. Enroll users via registration code.
3. Validate the generated enrollment report.
"""
paid_course_reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course.id)
# update the quantity of the cart item paid_course_reg_item
resp = self.client.post(
reverse('shoppingcart.views.update_user_cart', is_dashboard_endpoint=False),
{'ItemId': paid_course_reg_item.id, 'qty': '4'}
)
self.assertEqual(resp.status_code, 200)
# apply the coupon code to the item in the cart
resp = self.client.post(
reverse('shoppingcart.views.use_code', is_dashboard_endpoint=False),
{'code': self.coupon_code}
)
self.assertEqual(resp.status_code, 200)
self.cart.purchase()
course_reg_codes = CourseRegistrationCode.objects.filter(order=self.cart)
self.register_with_redemption_code(self.instructor, course_reg_codes[0].code)
test_user = UserFactory()
test_user_cart = Order.get_cart_for_user(test_user)
PaidCourseRegistration.add_to_order(test_user_cart, self.course.id)
test_user_cart.purchase()
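# Record a refund against the invoice so the report also covers refunded transactions.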
InvoiceTransaction.objects.create(
invoice=self.sale_invoice_1,
amount=-self.sale_invoice_1.total_amount,
status='refunded',
created_by=self.instructor,
last_modified_by=self.instructor
)
course_registration_code = CourseRegistrationCode.objects.create(
code='abcde',
course_id=text_type(self.course.id),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
test_user1 = UserFactory()
self.register_with_redemption_code(test_user1, course_registration_code.code)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
self.client.login(username=self.instructor.username, password='test')
url = reverse('get_enrollment_report', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {})
self.assertIn('The detailed enrollment report is being created.', response.content)
def test_create_registration_code_without_invoice_and_order(self):
"""
Test generating the detailed enrollment report using a registration
code that was created without an invoice or bulk purchase order.
"""
course_registration_code = CourseRegistrationCode.objects.create(
code='abcde',
course_id=text_type(self.course.id),
created_by=self.instructor,
mode_slug='honor'
)
test_user1 = UserFactory()
self.register_with_redemption_code(test_user1, course_registration_code.code)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
self.client.login(username=self.instructor.username, password='test')
url = reverse('get_enrollment_report', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {})
self.assertIn('The detailed enrollment report is being created.', response.content)
def test_invoice_payment_is_still_pending_for_registration_codes(self):
"""
Test generating the enrollment report after enrolling a user via a
registration code whose invoice has not been paid yet.
"""
course_registration_code = CourseRegistrationCode.objects.create(
code='abcde',
course_id=text_type(self.course.id),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
test_user1 = UserFactory()
self.register_with_redemption_code(test_user1, course_registration_code.code)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
self.client.login(username=self.instructor.username, password='test')
url = reverse('get_enrollment_report', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {})
self.assertIn('The detailed enrollment report is being created.', response.content)
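    # anonymous_id_for_user and unique_id_for_user are patched to fixed values
    # so the CSV rows asserted below are deterministic.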
@patch.object(lms.djangoapps.instructor.views.api, 'anonymous_id_for_user', Mock(return_value='42'))
@patch.object(lms.djangoapps.instructor.views.api, 'unique_id_for_user', Mock(return_value='41'))
def test_get_anon_ids(self):
"""
Test the CSV output for the anonymized user ids.
"""
url = reverse('get_anon_ids', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {})
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(
'"User ID","Anonymized User ID","Course Specific Anonymized User ID"'
'\n"{user_id}","41","42"\n'.format(user_id=self.students[0].id)
))
self.assertTrue(
body.endswith('"{user_id}","41","42"\n'.format(user_id=self.students[-1].id))
)
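    # Force the S3 storage backend so that a listdir failure exercises the
    # BotoServerError handling path.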
@patch('lms.djangoapps.instructor_task.models.logger.error')
@patch.dict(settings.GRADES_DOWNLOAD, {'STORAGE_TYPE': 's3'})
def test_list_report_downloads_error(self, mock_error):
"""
Tests the Rate-Limit exceeded is handled and does not raise 500 error.
"""
ex_status = 503
ex_reason = 'Slow Down'
url = reverse('list_report_downloads', kwargs={'course_id': text_type(self.course.id)})
with patch('openedx.core.storage.S3ReportStorage.listdir', side_effect=BotoServerError(ex_status, ex_reason)):
response = self.client.post(url, {})
mock_error.assert_called_with(
u'Fetching files failed for course: %s, status: %s, reason: %s',
self.course.id,
ex_status,
ex_reason,
)
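        # even though listing the files failed, the endpoint should degrade
        # gracefully to an empty downloads list rather than a 500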
res_json = json.loads(response.content)
self.assertEqual(res_json, {"downloads": []})
def test_list_report_downloads(self):
url = reverse('list_report_downloads', kwargs={'course_id': text_type(self.course.id)})
with patch('lms.djangoapps.instructor_task.models.DjangoStorageReportStore.links_for') as mock_links_for:
mock_links_for.return_value = [
('mock_file_name_1', 'https://1.mock.url'),
('mock_file_name_2', 'https://2.mock.url'),
]
response = self.client.post(url, {})
expected_response = {
"downloads": [
{
"url": "https://1.mock.url",
"link": "<a href=\"https://1.mock.url\">mock_file_name_1</a>",
"name": "mock_file_name_1"
},
{
"url": "https://2.mock.url",
"link": "<a href=\"https://2.mock.url\">mock_file_name_2</a>",
"name": "mock_file_name_2"
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected_response)
@ddt.data(*REPORTS_DATA)
@ddt.unpack
@valid_problem_location
    def test_calculate_report_csv_success(
            self, report_type, instructor_api_endpoint, task_api_endpoint, extra_instructor_api_kwargs
    ):
kwargs = {'course_id': unicode(self.course.id)}
kwargs.update(extra_instructor_api_kwargs)
url = reverse(instructor_api_endpoint, kwargs=kwargs)
success_status = "The {report_type} report is being created.".format(report_type=report_type)
with patch(task_api_endpoint) as patched_task_api_endpoint:
patched_task_api_endpoint.return_value.task_id = "12345667-9abc-deff-ffed-cba987654321"
if report_type == 'problem responses':
response = self.client.post(url, {'problem_location': ''})
self.assertIn(success_status, response.content)
else:
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
response = self.client.post(url, {})
self.assertIn(success_status, response.content)
@ddt.data(*EXECUTIVE_SUMMARY_DATA)
@ddt.unpack
def test_executive_summary_report_success(
self,
report_type,
task_type,
instructor_api_endpoint,
task_api_endpoint,
extra_instructor_api_kwargs
): # pylint: disable=unused-argument
kwargs = {'course_id': unicode(self.course.id)}
kwargs.update(extra_instructor_api_kwargs)
url = reverse(instructor_api_endpoint, kwargs=kwargs)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
with patch(task_api_endpoint):
response = self.client.post(url, {})
success_status = "The {report_type} report is being created." \
" To view the status of the report, see Pending" \
" Tasks below".format(report_type=report_type)
self.assertIn(success_status, response.content)
@ddt.data(*EXECUTIVE_SUMMARY_DATA)
@ddt.unpack
def test_executive_summary_report_already_running(
self,
report_type,
task_type,
instructor_api_endpoint,
task_api_endpoint,
extra_instructor_api_kwargs
):
kwargs = {'course_id': unicode(self.course.id)}
kwargs.update(extra_instructor_api_kwargs)
url = reverse(instructor_api_endpoint, kwargs=kwargs)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
already_running_status = generate_already_running_error_message(task_type)
with patch(task_api_endpoint) as mock:
mock.side_effect = AlreadyRunningError(already_running_status)
response = self.client.post(url, {})
self.assertEqual(response.status_code, 400)
self.assertIn(already_running_status, response.content)
def test_get_ora2_responses_success(self):
url = reverse('export_ora2_data', kwargs={'course_id': text_type(self.course.id)})
with patch('lms.djangoapps.instructor_task.api.submit_export_ora2_data') as mock_submit_ora2_task:
mock_submit_ora2_task.return_value = True
response = self.client.post(url, {})
success_status = "The ORA data report is being created."
self.assertIn(success_status, response.content)
def test_get_ora2_responses_already_running(self):
url = reverse('export_ora2_data', kwargs={'course_id': text_type(self.course.id)})
task_type = 'export_ora2_data'
already_running_status = generate_already_running_error_message(task_type)
with patch('lms.djangoapps.instructor_task.api.submit_export_ora2_data') as mock_submit_ora2_task:
mock_submit_ora2_task.side_effect = AlreadyRunningError(already_running_status)
response = self.client.post(url, {})
self.assertEqual(response.status_code, 400)
self.assertIn(already_running_status, response.content)
def test_get_student_progress_url(self):
""" Test that progress_url is in the successful response. """
url = reverse('get_student_progress_url', kwargs={'course_id': text_type(self.course.id)})
data = {'unique_student_identifier': self.students[0].email.encode("utf-8")}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertIn('progress_url', res_json)
def test_get_student_progress_url_from_uname(self):
""" Test that progress_url is in the successful response. """
url = reverse('get_student_progress_url', kwargs={'course_id': text_type(self.course.id)})
data = {'unique_student_identifier': self.students[0].username.encode("utf-8")}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertIn('progress_url', res_json)
def test_get_student_progress_url_noparams(self):
""" Test that the endpoint 404's without the required query params. """
url = reverse('get_student_progress_url', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url)
self.assertEqual(response.status_code, 400)
def test_get_student_progress_url_nostudent(self):
""" Test that the endpoint 400's when requesting an unknown email. """
url = reverse('get_student_progress_url', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url)
self.assertEqual(response.status_code, 400)
@attr(shard=5)
class TestInstructorAPIRegradeTask(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints whereby instructors can change student grades.
This includes resetting attempts and starting rescore tasks.
This test does NOT test whether the actions had an effect on the
database, that is the job of task tests and test_enrollment.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPIRegradeTask, cls).setUpClass()
cls.course = CourseFactory.create()
cls.problem_location = msk_from_problem_urlname(
cls.course.id,
'robot-some-problem-urlname'
)
cls.problem_urlname = text_type(cls.problem_location)
def setUp(self):
super(TestInstructorAPIRegradeTask, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.student = UserFactory()
CourseEnrollment.enroll(self.student, self.course.id)
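        # seed a StudentModule with 10 recorded attempts so the reset below is observable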
self.module_to_reset = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.problem_location,
state=json.dumps({'attempts': 10}),
)
    def test_reset_student_attempts_delete_all(self):
        """ Make sure no one can delete all students' state on a problem. """
url = reverse('reset_student_attempts', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
'delete_module': True,
})
self.assertEqual(response.status_code, 400)
def test_reset_student_attempts_single(self):
""" Test reset single student attempts. """
url = reverse('reset_student_attempts', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# make sure problem attempts have been reset.
changed_module = StudentModule.objects.get(pk=self.module_to_reset.pk)
self.assertEqual(
json.loads(changed_module.state)['attempts'],
0
)
# mock out the function which should be called to execute the action.
@patch.object(lms.djangoapps.instructor_task.api, 'submit_reset_problem_attempts_for_all_students')
def test_reset_student_attempts_all(self, act):
""" Test reset all student attempts. """
url = reverse('reset_student_attempts', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
def test_reset_student_attempts_missingmodule(self):
""" Test reset for non-existant problem. """
url = reverse('reset_student_attempts', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'problem_to_reset': 'robot-not-a-real-module',
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
@patch('lms.djangoapps.grades.signals.handlers.PROBLEM_WEIGHTED_SCORE_CHANGED.send')
def test_reset_student_attempts_delete(self, _mock_signal):
""" Test delete single student state. """
url = reverse('reset_student_attempts', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
'delete_module': True,
})
self.assertEqual(response.status_code, 200)
# make sure the module has been deleted
self.assertEqual(
StudentModule.objects.filter(
student=self.module_to_reset.student,
course_id=self.module_to_reset.course_id,
# module_id=self.module_to_reset.module_id,
).count(),
0
)
def test_reset_student_attempts_nonsense(self):
""" Test failure with both unique_student_identifier and all_students. """
url = reverse('reset_student_attempts', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
'all_students': True,
})
self.assertEqual(response.status_code, 400)
@patch.object(lms.djangoapps.instructor_task.api, 'submit_rescore_problem_for_student')
def test_rescore_problem_single(self, act):
""" Test rescoring of a single student. """
url = reverse('rescore_problem', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
@patch.object(lms.djangoapps.instructor_task.api, 'submit_rescore_problem_for_student')
def test_rescore_problem_single_from_uname(self, act):
""" Test rescoring of a single student. """
url = reverse('rescore_problem', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.username,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
@patch.object(lms.djangoapps.instructor_task.api, 'submit_rescore_problem_for_all_students')
def test_rescore_problem_all(self, act):
""" Test rescoring for all students. """
url = reverse('rescore_problem', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
@patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True})
def test_course_has_entrance_exam_in_student_attempts_reset(self):
""" Test course has entrance exam id set while resetting attempts"""
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'all_students': True,
'delete_module': False,
})
self.assertEqual(response.status_code, 400)
@patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True})
def test_rescore_entrance_exam_with_invalid_exam(self):
""" Test course has entrance exam id set while re-scoring. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
@attr(shard=5)
@patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True})
@ddt.ddt
class TestEntranceExamInstructorAPIRegradeTask(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints whereby instructors can rescore student grades,
reset student attempts and delete state for entrance exam.
"""
@classmethod
def setUpClass(cls):
super(TestEntranceExamInstructorAPIRegradeTask, cls).setUpClass()
cls.course = CourseFactory.create(
org='test_org',
course='test_course',
run='test_run',
entrance_exam_id='i4x://{}/{}/chapter/Entrance_exam'.format('test_org', 'test_course')
)
cls.course_with_invalid_ee = CourseFactory.create(entrance_exam_id='invalid_exam')
with cls.store.bulk_operations(cls.course.id, emit_signals=False):
cls.entrance_exam = ItemFactory.create(
parent=cls.course,
category='chapter',
display_name='Entrance exam'
)
subsection = ItemFactory.create(
parent=cls.entrance_exam,
category='sequential',
display_name='Subsection 1'
)
vertical = ItemFactory.create(
parent=subsection,
category='vertical',
display_name='Vertical 1'
)
cls.ee_problem_1 = ItemFactory.create(
parent=vertical,
category="problem",
display_name="Exam Problem - Problem 1"
)
cls.ee_problem_2 = ItemFactory.create(
parent=vertical,
category="problem",
display_name="Exam Problem - Problem 2"
)
def setUp(self):
super(TestEntranceExamInstructorAPIRegradeTask, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
# Add instructor to invalid ee course
CourseInstructorRole(self.course_with_invalid_ee.id).add_users(self.instructor)
self.client.login(username=self.instructor.username, password='test')
self.student = UserFactory()
CourseEnrollment.enroll(self.student, self.course.id)
ee_module_to_reset1 = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.ee_problem_1.location,
state=json.dumps({'attempts': 10, 'done': True}),
)
ee_module_to_reset2 = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.ee_problem_2.location,
state=json.dumps({'attempts': 10, 'done': True}),
)
self.ee_modules = [ee_module_to_reset1.module_state_key, ee_module_to_reset2.module_state_key]
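    # Note: grade_histogram itself is exercised below against both modulestore
    # backends via ddt.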
@ddt.data(ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.mongo)
def test_grade_histogram(self, store):
"""
Verify that a histogram has been created.
"""
course = CourseFactory.create(default_store=store)
usage_key = course.id.make_usage_key('problem', 'first_problem')
StudentModule.objects.create(
student_id=1,
grade=100,
module_state_key=usage_key
)
StudentModule.objects.create(
student_id=2,
grade=50,
module_state_key=usage_key
)
grades = grade_histogram(usage_key)
self.assertEqual(grades[0], (50.0, 1))
self.assertEqual(grades[1], (100.0, 1))
def test_reset_entrance_exam_student_attempts_delete_all(self):
""" Make sure no one can delete all students state on entrance exam. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'all_students': True,
'delete_module': True,
})
self.assertEqual(response.status_code, 400)
def test_reset_entrance_exam_student_attempts_single(self):
""" Test reset single student attempts for entrance exam. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# make sure problem attempts have been reset.
changed_modules = StudentModule.objects.filter(module_state_key__in=self.ee_modules)
for changed_module in changed_modules:
self.assertEqual(
json.loads(changed_module.state)['attempts'],
0
)
# mock out the function which should be called to execute the action.
@patch.object(lms.djangoapps.instructor_task.api, 'submit_reset_problem_attempts_in_entrance_exam')
def test_reset_entrance_exam_all_student_attempts(self, act):
""" Test reset all student attempts for entrance exam. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'all_students': True,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
def test_reset_student_attempts_invalid_entrance_exam(self):
""" Test reset for invalid entrance exam. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course_with_invalid_ee.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
def test_entrance_exam_student_delete_state(self):
""" Test delete single student entrance exam state. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
'delete_module': True,
})
self.assertEqual(response.status_code, 200)
# make sure the module has been deleted
changed_modules = StudentModule.objects.filter(module_state_key__in=self.ee_modules)
self.assertEqual(changed_modules.count(), 0)
def test_entrance_exam_delete_state_with_staff(self):
""" Test entrance exam delete state failure with staff access. """
self.client.logout()
staff_user = StaffFactory(course_key=self.course.id)
self.client.login(username=staff_user.username, password='test')
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
'delete_module': True,
})
self.assertEqual(response.status_code, 403)
def test_entrance_exam_reset_student_attempts_nonsense(self):
""" Test failure with both unique_student_identifier and all_students. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
'all_students': True,
})
self.assertEqual(response.status_code, 400)
@patch.object(lms.djangoapps.instructor_task.api, 'submit_rescore_entrance_exam_for_student')
def test_rescore_entrance_exam_single_student(self, act):
""" Test re-scoring of entrance exam for single student. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
def test_rescore_entrance_exam_all_student(self):
""" Test rescoring for all students. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'all_students': True,
})
self.assertEqual(response.status_code, 200)
def test_rescore_entrance_exam_if_higher_all_student(self):
""" Test rescoring for all students only if higher. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'all_students': True,
'only_if_higher': True,
})
self.assertEqual(response.status_code, 200)
def test_rescore_entrance_exam_all_student_and_single(self):
""" Test re-scoring with both all students and single student parameters. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
'all_students': True,
})
self.assertEqual(response.status_code, 400)
def test_rescore_entrance_exam_with_invalid_exam(self):
""" Test re-scoring of entrance exam with invalid exam. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course_with_invalid_ee.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
def test_list_entrance_exam_instructor_tasks_student(self):
""" Test list task history for entrance exam AND student. """
# create a re-score entrance exam task
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
url = reverse('list_entrance_exam_instructor_tasks', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# check response
tasks = json.loads(response.content)['tasks']
self.assertEqual(len(tasks), 1)
self.assertEqual(tasks[0]['status'], _('Complete'))
def test_list_entrance_exam_instructor_tasks_all_student(self):
""" Test list task history for entrance exam AND all student. """
url = reverse('list_entrance_exam_instructor_tasks', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {})
self.assertEqual(response.status_code, 200)
# check response
tasks = json.loads(response.content)['tasks']
self.assertEqual(len(tasks), 0)
def test_list_entrance_exam_instructor_with_invalid_exam_key(self):
""" Test list task history for entrance exam failure if course has invalid exam. """
url = reverse('list_entrance_exam_instructor_tasks',
kwargs={'course_id': unicode(self.course_with_invalid_ee.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
def test_skip_entrance_exam_student(self):
""" Test skip entrance exam api for student. """
        # mark the student as allowed to skip the entrance exam
url = reverse('mark_student_can_skip_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# check response
message = _('This student (%s) will skip the entrance exam.') % self.student.email
self.assertContains(response, message)
# post again with same student
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
# This time response message should be different
message = _('This student (%s) is already allowed to skip the entrance exam.') % self.student.email
self.assertContains(response, message)
@attr(shard=5)
@patch('bulk_email.models.html_to_text', Mock(return_value='Mocking CourseEmail.text_message', autospec=True))
class TestInstructorSendEmail(SiteMixin, SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Checks that only instructors have access to email endpoints, and that
these endpoints are only accessible with courses that actually exist,
only with valid email messages.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorSendEmail, cls).setUpClass()
cls.course = CourseFactory.create()
test_subject = u'\u1234 test subject'
test_message = u'\u6824 test message'
cls.full_test_message = {
'send_to': '["myself", "staff"]',
'subject': test_subject,
'message': test_message,
}
BulkEmailFlag.objects.create(enabled=True, require_course_email_auth=False)
@classmethod
def tearDownClass(cls):
super(TestInstructorSendEmail, cls).tearDownClass()
BulkEmailFlag.objects.all().delete()
def setUp(self):
super(TestInstructorSendEmail, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
def test_send_email_as_logged_in_instructor(self):
url = reverse('send_email', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, self.full_test_message)
self.assertEqual(response.status_code, 200)
def test_send_email_but_not_logged_in(self):
self.client.logout()
url = reverse('send_email', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, self.full_test_message)
self.assertEqual(response.status_code, 403)
def test_send_email_but_not_staff(self):
self.client.logout()
student = UserFactory()
self.client.login(username=student.username, password='test')
url = reverse('send_email', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, self.full_test_message)
self.assertEqual(response.status_code, 403)
def test_send_email_but_course_not_exist(self):
url = reverse('send_email', kwargs={'course_id': 'GarbageCourse/DNE/NoTerm'})
response = self.client.post(url, self.full_test_message)
self.assertNotEqual(response.status_code, 200)
def test_send_email_no_sendto(self):
url = reverse('send_email', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'subject': 'test subject',
'message': 'test message',
})
self.assertEqual(response.status_code, 400)
def test_send_email_invalid_sendto(self):
url = reverse('send_email', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'send_to': '["invalid_target", "staff"]',
'subject': 'test subject',
'message': 'test message',
})
self.assertEqual(response.status_code, 400)
def test_send_email_no_subject(self):
url = reverse('send_email', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'send_to': '["staff"]',
'message': 'test message',
})
self.assertEqual(response.status_code, 400)
def test_send_email_no_message(self):
url = reverse('send_email', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'send_to': '["staff"]',
'subject': 'test subject',
})
self.assertEqual(response.status_code, 400)
def test_send_email_with_site_template_and_from_addr(self):
site_email = self.site_configuration.values.get('course_email_from_addr')
site_template = self.site_configuration.values.get('course_email_template_name')
CourseEmailTemplate.objects.create(name=site_template)
url = reverse('send_email', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, self.full_test_message)
self.assertEqual(response.status_code, 200)
self.assertEqual(1, CourseEmail.objects.filter(
course_id=self.course.id,
sender=self.instructor,
subject=self.full_test_message['subject'],
html_message=self.full_test_message['message'],
template_name=site_template,
from_addr=site_email
).count())
def test_send_email_with_org_template_and_from_addr(self):
org_email = 'fake_org@example.com'
org_template = 'fake_org_email_template'
CourseEmailTemplate.objects.create(name=org_template)
self.site_configuration.values.update({
'course_email_from_addr': {self.course.id.org: org_email},
'course_email_template_name': {self.course.id.org: org_template}
})
self.site_configuration.save()
url = reverse('send_email', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, self.full_test_message)
self.assertEqual(response.status_code, 200)
self.assertEqual(1, CourseEmail.objects.filter(
course_id=self.course.id,
sender=self.instructor,
subject=self.full_test_message['subject'],
html_message=self.full_test_message['message'],
template_name=org_template,
from_addr=org_email
).count())
class MockCompletionInfo(object):
"""Mock for get_task_completion_info"""
times_called = 0
def mock_get_task_completion_info(self, *args): # pylint: disable=unused-argument
"""Mock for get_task_completion_info"""
self.times_called += 1
if self.times_called % 2 == 0:
return True, 'Task Completed'
return False, 'Task Errored In Some Way'
@attr(shard=5)
class TestInstructorAPITaskLists(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test instructor task list endpoint.
"""
class FakeTask(object):
""" Fake task object """
FEATURES = [
'task_type',
'task_input',
'task_id',
'requester',
'task_state',
'created',
'status',
'task_message',
'duration_sec'
]
def __init__(self, completion):
for feature in self.FEATURES:
setattr(self, feature, 'expected')
# created needs to be a datetime
self.created = datetime.datetime(2013, 10, 25, 11, 42, 35)
# set 'status' and 'task_message' attrs
success, task_message = completion()
if success:
self.status = "Complete"
else:
self.status = "Incomplete"
self.task_message = task_message
# Set 'task_output' attr, which will be parsed to the 'duration_sec' attr.
self.task_output = '{"duration_ms": 1035000}'
self.duration_sec = 1035000 / 1000.0
def make_invalid_output(self):
"""Munge task_output to be invalid json"""
self.task_output = 'HI MY NAME IS INVALID JSON'
# This should be given the value of 'unknown' if the task output
# can't be properly parsed
self.duration_sec = 'unknown'
def to_dict(self):
""" Convert fake task to dictionary representation. """
attr_dict = {key: getattr(self, key) for key in self.FEATURES}
attr_dict['created'] = attr_dict['created'].isoformat()
return attr_dict
@classmethod
def setUpClass(cls):
super(TestInstructorAPITaskLists, cls).setUpClass()
cls.course = CourseFactory.create(
entrance_exam_id='i4x://{}/{}/chapter/Entrance_exam'.format('test_org', 'test_course')
)
cls.problem_location = msk_from_problem_urlname(
cls.course.id,
'robot-some-problem-urlname'
)
cls.problem_urlname = text_type(cls.problem_location)
def setUp(self):
super(TestInstructorAPITaskLists, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.student = UserFactory()
CourseEnrollment.enroll(self.student, self.course.id)
self.module = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.problem_location,
state=json.dumps({'attempts': 10}),
)
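        # build seven fake tasks; the last one is given deliberately invalid
        # JSON output to cover the error-parsing path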
mock_factory = MockCompletionInfo()
self.tasks = [self.FakeTask(mock_factory.mock_get_task_completion_info) for _ in xrange(7)]
self.tasks[-1].make_invalid_output()
@patch.object(lms.djangoapps.instructor_task.api, 'get_running_instructor_tasks')
def test_list_instructor_tasks_running(self, act):
""" Test list of all running tasks. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': text_type(self.course.id)})
mock_factory = MockCompletionInfo()
with patch(
'lms.djangoapps.instructor.views.instructor_task_helpers.get_task_completion_info'
) as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.post(url, {})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@patch.object(lms.djangoapps.instructor_task.api, 'get_instructor_task_history')
def test_list_background_email_tasks(self, act):
"""Test list of background email tasks."""
act.return_value = self.tasks
url = reverse('list_background_email_tasks', kwargs={'course_id': text_type(self.course.id)})
mock_factory = MockCompletionInfo()
with patch(
'lms.djangoapps.instructor.views.instructor_task_helpers.get_task_completion_info'
) as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.post(url, {})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@patch.object(lms.djangoapps.instructor_task.api, 'get_instructor_task_history')
def test_list_instructor_tasks_problem(self, act):
""" Test list task history for problem. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': text_type(self.course.id)})
mock_factory = MockCompletionInfo()
with patch(
'lms.djangoapps.instructor.views.instructor_task_helpers.get_task_completion_info'
) as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.post(url, {
'problem_location_str': self.problem_urlname,
})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@patch.object(lms.djangoapps.instructor_task.api, 'get_instructor_task_history')
def test_list_instructor_tasks_problem_student(self, act):
""" Test list task history for problem AND student. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': text_type(self.course.id)})
mock_factory = MockCompletionInfo()
with patch(
'lms.djangoapps.instructor.views.instructor_task_helpers.get_task_completion_info'
) as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.post(url, {
'problem_location_str': self.problem_urlname,
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@attr(shard=5)
@patch.object(lms.djangoapps.instructor_task.api, 'get_instructor_task_history', autospec=True)
class TestInstructorEmailContentList(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test the instructor email content history endpoint.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorEmailContentList, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestInstructorEmailContentList, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.tasks = {}
self.emails = {}
self.emails_info = {}
def setup_fake_email_info(self, num_emails, with_failures=False):
""" Initialize the specified number of fake emails """
for email_id in range(num_emails):
num_sent = random.randint(1, 15401)
if with_failures:
failed = random.randint(1, 15401)
else:
failed = 0
self.tasks[email_id] = FakeContentTask(email_id, num_sent, failed, 'expected')
self.emails[email_id] = FakeEmail(email_id)
self.emails_info[email_id] = FakeEmailInfo(self.emails[email_id], num_sent, failed)
def get_matching_mock_email(self, **kwargs):
""" Returns the matching mock emails for the given id """
email_id = kwargs.get('id', 0)
return self.emails[email_id]
def get_email_content_response(self, num_emails, task_history_request, with_failures=False):
""" Calls the list_email_content endpoint and returns the repsonse """
self.setup_fake_email_info(num_emails, with_failures)
task_history_request.return_value = self.tasks.values()
url = reverse('list_email_content', kwargs={'course_id': text_type(self.course.id)})
with patch('lms.djangoapps.instructor.views.api.CourseEmail.objects.get') as mock_email_info:
mock_email_info.side_effect = self.get_matching_mock_email
response = self.client.post(url, {})
self.assertEqual(response.status_code, 200)
return response
def check_emails_sent(self, num_emails, task_history_request, with_failures=False):
""" Tests sending emails with or without failures """
response = self.get_email_content_response(num_emails, task_history_request, with_failures)
self.assertTrue(task_history_request.called)
expected_email_info = [email_info.to_dict() for email_info in self.emails_info.values()]
actual_email_info = json.loads(response.content)['emails']
self.assertEqual(len(actual_email_info), num_emails)
for exp_email, act_email in zip(expected_email_info, actual_email_info):
self.assertDictEqual(exp_email, act_email)
self.assertEqual(expected_email_info, actual_email_info)
def test_content_list_one_email(self, task_history_request):
""" Test listing of bulk emails when email list has one email """
response = self.get_email_content_response(1, task_history_request)
self.assertTrue(task_history_request.called)
email_info = json.loads(response.content)['emails']
# Emails list should have one email
self.assertEqual(len(email_info), 1)
# Email content should be what's expected
expected_message = self.emails[0].html_message
returned_email_info = email_info[0]
received_message = returned_email_info[u'email'][u'html_message']
self.assertEqual(expected_message, received_message)
def test_content_list_no_emails(self, task_history_request):
""" Test listing of bulk emails when email list empty """
response = self.get_email_content_response(0, task_history_request)
self.assertTrue(task_history_request.called)
email_info = json.loads(response.content)['emails']
# Emails list should be empty
self.assertEqual(len(email_info), 0)
def test_content_list_email_content_many(self, task_history_request):
""" Test listing of bulk emails sent large amount of emails """
self.check_emails_sent(50, task_history_request)
def test_list_email_content_error(self, task_history_request):
""" Test handling of error retrieving email """
invalid_task = FakeContentTask(0, 0, 0, 'test')
invalid_task.make_invalid_input()
task_history_request.return_value = [invalid_task]
url = reverse('list_email_content', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {})
self.assertEqual(response.status_code, 200)
self.assertTrue(task_history_request.called)
returned_email_info = json.loads(response.content)['emails']
self.assertEqual(len(returned_email_info), 1)
returned_info = returned_email_info[0]
for info in ['created', 'sent_to', 'email', 'number_sent', 'requester']:
self.assertEqual(returned_info[info], None)
def test_list_email_with_failure(self, task_history_request):
""" Test the handling of email task that had failures """
self.check_emails_sent(1, task_history_request, True)
def test_list_many_emails_with_failures(self, task_history_request):
""" Test the handling of many emails with failures """
self.check_emails_sent(50, task_history_request, True)
def test_list_email_with_no_successes(self, task_history_request):
task_info = FakeContentTask(0, 0, 10, 'expected')
email = FakeEmail(0)
email_info = FakeEmailInfo(email, 0, 10)
task_history_request.return_value = [task_info]
url = reverse('list_email_content', kwargs={'course_id': text_type(self.course.id)})
with patch('lms.djangoapps.instructor.views.api.CourseEmail.objects.get') as mock_email_info:
mock_email_info.return_value = email
response = self.client.post(url, {})
self.assertEqual(response.status_code, 200)
self.assertTrue(task_history_request.called)
returned_info_list = json.loads(response.content)['emails']
self.assertEqual(len(returned_info_list), 1)
returned_info = returned_info_list[0]
expected_info = email_info.to_dict()
self.assertDictEqual(expected_info, returned_info)
@attr(shard=5)
class TestInstructorAPIHelpers(TestCase):
""" Test helpers for instructor.api """
def test_split_input_list(self):
strings = []
lists = []
strings.append(
"Lorem@ipsum.dolor, sit@amet.consectetur\nadipiscing@elit.Aenean\r convallis@at.lacus\r, ut@lacinia.Sed")
lists.append(['Lorem@ipsum.dolor', 'sit@amet.consectetur', 'adipiscing@elit.Aenean', 'convallis@at.lacus',
'ut@lacinia.Sed'])
for (stng, lst) in zip(strings, lists):
self.assertEqual(_split_input_list(stng), lst)
def test_split_input_list_unicode(self):
self.assertEqual(_split_input_list('robot@robot.edu, robot2@robot.edu'),
['robot@robot.edu', 'robot2@robot.edu'])
self.assertEqual(_split_input_list(u'robot@robot.edu, robot2@robot.edu'),
['robot@robot.edu', 'robot2@robot.edu'])
self.assertEqual(_split_input_list(u'robot@robot.edu, robot2@robot.edu'),
[u'robot@robot.edu', 'robot2@robot.edu'])
scary_unistuff = unichr(40960) + u'abcd' + unichr(1972)
self.assertEqual(_split_input_list(scary_unistuff), [scary_unistuff])
def test_msk_from_problem_urlname(self):
course_id = CourseKey.from_string('MITx/6.002x/2013_Spring')
name = 'L2Node1'
output = 'i4x://MITx/6.002x/problem/L2Node1'
self.assertEqual(unicode(msk_from_problem_urlname(course_id, name)), output)
def test_msk_from_problem_urlname_error(self):
args = ('notagoodcourse', 'L2Node1')
with pytest.raises(ValueError):
msk_from_problem_urlname(*args)
def get_extended_due(course, unit, user):
"""
Gets the overridden due date for the given user on the given unit. Returns
`None` if there is no override set.
"""
try:
override = StudentFieldOverride.objects.get(
course_id=course.id,
student=user,
location=unit.location,
field='due'
)
return DATE_FIELD.from_json(json.loads(override.value))
except StudentFieldOverride.DoesNotExist:
return None
@attr(shard=5)
class TestDueDateExtensions(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test data dumps for reporting.
"""
@classmethod
def setUpClass(cls):
super(TestDueDateExtensions, cls).setUpClass()
cls.course = CourseFactory.create()
cls.due = datetime.datetime(2010, 5, 12, 2, 42, tzinfo=UTC)
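        # all graded content below shares this fixed due date so extensions
        # are easy to assert against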
with cls.store.bulk_operations(cls.course.id, emit_signals=False):
cls.week1 = ItemFactory.create(due=cls.due)
cls.week2 = ItemFactory.create(due=cls.due)
cls.week3 = ItemFactory.create() # No due date
cls.course.children = [
text_type(cls.week1.location),
text_type(cls.week2.location),
text_type(cls.week3.location)
]
cls.homework = ItemFactory.create(
parent_location=cls.week1.location,
due=cls.due
)
cls.week1.children = [text_type(cls.homework.location)]
def setUp(self):
"""
Fixtures.
"""
super(TestDueDateExtensions, self).setUp()
user1 = UserFactory.create()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.week1.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.week2.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.week3.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.homework.location).save()
user2 = UserFactory.create()
StudentModule(
state='{}',
student_id=user2.id,
course_id=self.course.id,
module_state_key=self.week1.location).save()
StudentModule(
state='{}',
student_id=user2.id,
course_id=self.course.id,
module_state_key=self.homework.location).save()
user3 = UserFactory.create()
StudentModule(
state='{}',
student_id=user3.id,
course_id=self.course.id,
module_state_key=self.week1.location).save()
StudentModule(
state='{}',
student_id=user3.id,
course_id=self.course.id,
module_state_key=self.homework.location).save()
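        # user2 and user3 only have state on week1 and the homework; user1
        # has state on every block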
self.user1 = user1
self.user2 = user2
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
def test_change_due_date(self):
url = reverse('change_due_date', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'student': self.user1.username,
'url': text_type(self.week1.location),
'due_datetime': '12/30/2013 00:00'
})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(datetime.datetime(2013, 12, 30, 0, 0, tzinfo=UTC),
get_extended_due(self.course, self.week1, self.user1))
def test_change_to_invalid_due_date(self):
url = reverse('change_due_date', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'student': self.user1.username,
'url': text_type(self.week1.location),
'due_datetime': '01/01/2009 00:00'
})
self.assertEqual(response.status_code, 400, response.content)
self.assertEqual(
None,
get_extended_due(self.course, self.week1, self.user1)
)
def test_change_nonexistent_due_date(self):
url = reverse('change_due_date', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'student': self.user1.username,
'url': text_type(self.week3.location),
'due_datetime': '12/30/2013 00:00'
})
self.assertEqual(response.status_code, 400, response.content)
self.assertEqual(
None,
get_extended_due(self.course, self.week3, self.user1)
)
def test_reset_date(self):
self.test_change_due_date()
url = reverse('reset_due_date', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'student': self.user1.username,
'url': text_type(self.week1.location),
})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(
None,
get_extended_due(self.course, self.week1, self.user1)
)
def test_reset_nonexistent_extension(self):
url = reverse('reset_due_date', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'student': self.user1.username,
'url': text_type(self.week1.location),
})
self.assertEqual(response.status_code, 400, response.content)
def test_show_unit_extensions(self):
self.test_change_due_date()
url = reverse('show_unit_extensions',
kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {'url': text_type(self.week1.location)})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(json.loads(response.content), {
u'data': [{u'Extended Due Date': u'2013-12-30 00:00',
u'Full Name': self.user1.profile.name,
u'Username': self.user1.username}],
u'header': [u'Username', u'Full Name', u'Extended Due Date'],
u'title': u'Users with due date extensions for %s' %
self.week1.display_name})
def test_show_student_extensions(self):
self.test_change_due_date()
url = reverse('show_student_extensions',
kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {'student': self.user1.username})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(json.loads(response.content), {
u'data': [{u'Extended Due Date': u'2013-12-30 00:00',
u'Unit': self.week1.display_name}],
u'header': [u'Unit', u'Extended Due Date'],
u'title': u'Due date extensions for %s (%s)' % (
self.user1.profile.name, self.user1.username)})
@attr(shard=5)
class TestDueDateExtensionsDeletedDate(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Tests for deleting due date extensions
"""
def setUp(self):
"""
Fixtures.
"""
super(TestDueDateExtensionsDeletedDate, self).setUp()
self.course = CourseFactory.create()
self.due = datetime.datetime(2010, 5, 12, 2, 42, tzinfo=UTC)
with self.store.bulk_operations(self.course.id, emit_signals=False):
self.week1 = ItemFactory.create(due=self.due)
self.week2 = ItemFactory.create(due=self.due)
self.week3 = ItemFactory.create() # No due date
self.course.children = [
text_type(self.week1.location),
text_type(self.week2.location),
text_type(self.week3.location)
]
self.homework = ItemFactory.create(
parent_location=self.week1.location,
due=self.due
)
self.week1.children = [text_type(self.homework.location)]
user1 = UserFactory.create()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.week1.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.week2.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.week3.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.homework.location).save()
user2 = UserFactory.create()
StudentModule(
state='{}',
student_id=user2.id,
course_id=self.course.id,
module_state_key=self.week1.location).save()
StudentModule(
state='{}',
student_id=user2.id,
course_id=self.course.id,
module_state_key=self.homework.location).save()
user3 = UserFactory.create()
StudentModule(
state='{}',
student_id=user3.id,
course_id=self.course.id,
module_state_key=self.week1.location).save()
StudentModule(
state='{}',
student_id=user3.id,
course_id=self.course.id,
module_state_key=self.homework.location).save()
self.user1 = user1
self.user2 = user2
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
def test_reset_extension_to_deleted_date(self):
"""
Test that we can delete a due date extension after deleting the normal
due date, without causing an error.
"""
url = reverse('change_due_date', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'student': self.user1.username,
'url': text_type(self.week1.location),
'due_datetime': '12/30/2013 00:00'
})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(datetime.datetime(2013, 12, 30, 0, 0, tzinfo=UTC),
get_extended_due(self.course, self.week1, self.user1))
self.week1.due = None
self.week1 = self.store.update_item(self.week1, self.user1.id)
# Now, week1's normal due date is deleted but the extension still exists.
url = reverse('reset_due_date', kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url, {
'student': self.user1.username,
'url': text_type(self.week1.location),
})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(
None,
get_extended_due(self.course, self.week1, self.user1)
)
@attr(shard=5)
class TestCourseIssuedCertificatesData(SharedModuleStoreTestCase):
"""
Test data dumps for issued certificates.
"""
@classmethod
def setUpClass(cls):
super(TestCourseIssuedCertificatesData, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestCourseIssuedCertificatesData, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
def generate_certificate(self, course_id, mode, status):
"""
Generate test certificate
"""
test_user = UserFactory()
GeneratedCertificateFactory.create(
user=test_user,
course_id=course_id,
mode=mode,
status=status
)
def test_certificates_features_against_status(self):
"""
Test certificates with status 'downloadable' should be in the response.
"""
url = reverse('get_issued_certificates', kwargs={'course_id': unicode(self.course.id)})
        # first generate certificates with 'generating' status; these should not appear in the response
certificate_count = 3
for __ in xrange(certificate_count):
self.generate_certificate(course_id=self.course.id, mode='honor', status=CertificateStatuses.generating)
response = self.client.post(url)
res_json = json.loads(response.content)
self.assertIn('certificates', res_json)
self.assertEqual(len(res_json['certificates']), 0)
# Certificates with status 'downloadable' should be in response.
self.generate_certificate(course_id=self.course.id, mode='honor', status=CertificateStatuses.downloadable)
response = self.client.post(url)
res_json = json.loads(response.content)
self.assertIn('certificates', res_json)
self.assertEqual(len(res_json['certificates']), 1)
def test_certificates_features_group_by_mode(self):
"""
        Test for certificate csv features against mode. Certificates should be grouped by 'mode' in the response.
"""
url = reverse('get_issued_certificates', kwargs={'course_id': unicode(self.course.id)})
# firstly generating downloadable certificates with 'honor' mode
certificate_count = 3
for __ in xrange(certificate_count):
self.generate_certificate(course_id=self.course.id, mode='honor', status=CertificateStatuses.downloadable)
response = self.client.post(url)
res_json = json.loads(response.content)
self.assertIn('certificates', res_json)
self.assertEqual(len(res_json['certificates']), 1)
        # retrieve the first certificate group from the list; there should be 3 certificates in 'honor' mode.
certificate = res_json['certificates'][0]
self.assertEqual(certificate.get('total_issued_certificate'), 3)
self.assertEqual(certificate.get('mode'), 'honor')
self.assertEqual(certificate.get('course_id'), str(self.course.id))
# Now generating downloadable certificates with 'verified' mode
for __ in xrange(certificate_count):
self.generate_certificate(
course_id=self.course.id,
mode='verified',
status=CertificateStatuses.downloadable
)
response = self.client.post(url)
res_json = json.loads(response.content)
self.assertIn('certificates', res_json)
        # there should now be two certificate groups: 'honor' and 'verified'.
        self.assertEqual(len(res_json['certificates']), 2)
        # retrieve the second group ('verified') from the list
        certificate = res_json['certificates'][1]
self.assertEqual(certificate.get('total_issued_certificate'), 3)
self.assertEqual(certificate.get('mode'), 'verified')
def test_certificates_features_csv(self):
"""
Test for certificate csv features.
"""
url = reverse('get_issued_certificates', kwargs={'course_id': unicode(self.course.id)})
# firstly generating downloadable certificates with 'honor' mode
certificate_count = 3
for __ in xrange(certificate_count):
self.generate_certificate(course_id=self.course.id, mode='honor', status=CertificateStatuses.downloadable)
current_date = datetime.date.today().strftime("%B %d, %Y")
response = self.client.get(url, {'csv': 'true'})
self.assertEqual(response['Content-Type'], 'text/csv')
self.assertEqual(response['Content-Disposition'], 'attachment; filename={0}'.format('issued_certificates.csv'))
self.assertEqual(
response.content.strip(),
'"CourseID","Certificate Type","Total Certificates Issued","Date Report Run"\r\n"'
+ str(self.course.id) + '","honor","3","' + current_date + '"'
)
@attr(shard=5)
@override_settings(REGISTRATION_CODE_LENGTH=8)
class TestCourseRegistrationCodes(SharedModuleStoreTestCase):
"""
Test data dumps for E-commerce Course Registration Codes.
"""
@classmethod
def setUpClass(cls):
super(TestCourseRegistrationCodes, cls).setUpClass()
cls.course = CourseFactory.create()
cls.url = reverse(
'generate_registration_codes',
kwargs={'course_id': text_type(cls.course.id)}
)
def setUp(self):
"""
Fixtures.
"""
super(TestCourseRegistrationCodes, self).setUp()
CourseModeFactory.create(course_id=self.course.id, min_price=50)
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
CourseSalesAdminRole(self.course.id).add_users(self.instructor)
data = {
'total_registration_codes': 12, 'company_name': 'Test Group', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street',
'address_line_2': '', 'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(self.url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
for i in range(5):
order = Order(user=self.instructor, status='purchased')
order.save()
        # Spent (used) registration codes
        for i in range(1, 6):
registration_code_redemption = RegistrationCodeRedemption(
registration_code_id=i,
redeemed_by=self.instructor
)
registration_code_redemption.save()
@override_settings(FINANCE_EMAIL='finance@example.com')
def test_finance_email_in_recipient_list_when_generating_registration_codes(self):
"""
Test to verify that the invoice will also be sent to the FINANCE_EMAIL when
generating registration codes
"""
url_reg_code = reverse('generate_registration_codes',
kwargs={'course_id': text_type(self.course.id)})
data = {
'total_registration_codes': 5, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 121.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': 'True'
}
response = self.client.post(url_reg_code, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
        # check the last message in mail.outbox; the FINANCE_EMAIL is appended
        # at the very end when generating registration codes
self.assertEqual(mail.outbox[-1].to[0], 'finance@example.com')
def test_user_invoice_copy_preference(self):
"""
Test to remember user invoice copy preference
"""
url_reg_code = reverse('generate_registration_codes',
kwargs={'course_id': text_type(self.course.id)})
data = {
'total_registration_codes': 5, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 121.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': 'True'
}
        # the user invoice copy preference will be saved in the api user preference model
response = self.client.post(url_reg_code, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
# get user invoice copy preference.
url_user_invoice_preference = reverse('get_user_invoice_preference',
kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url_user_invoice_preference, data)
result = json.loads(response.content)
self.assertEqual(result['invoice_copy'], True)
# updating the user invoice copy preference during code generation flow
data['invoice'] = ''
response = self.client.post(url_reg_code, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
# get user invoice copy preference.
url_user_invoice_preference = reverse('get_user_invoice_preference',
kwargs={'course_id': text_type(self.course.id)})
response = self.client.post(url_user_invoice_preference, data)
result = json.loads(response.content)
self.assertEqual(result['invoice_copy'], False)
def test_generate_course_registration_codes_csv(self):
"""
Test to generate a response of all the generated course registration codes
"""
url = reverse('generate_registration_codes',
kwargs={'course_id': text_type(self.course.id)})
data = {
'total_registration_codes': 15, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 17)
def test_generate_course_registration_with_redeem_url_codes_csv(self):
"""
Test to generate a response of all the generated course registration codes
"""
url = reverse('generate_registration_codes',
kwargs={'course_id': text_type(self.course.id)})
data = {
'total_registration_codes': 15, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 17)
rows = body.split('\n')
index = 1
while index < len(rows):
if rows[index]:
row_data = rows[index].split(',')
code = row_data[0].replace('"', '')
self.assertTrue(row_data[1].startswith('"http')
and row_data[1].endswith('/shoppingcart/register/redeem/{0}/"'.format(code)))
index += 1
@patch.object(lms.djangoapps.instructor.views.api, 'random_code_generator',
Mock(side_effect=['first', 'second', 'third', 'fourth']))
def test_generate_course_registration_codes_matching_existing_coupon_code(self):
"""
Test the generated course registration code is already in the Coupon Table
"""
url = reverse('generate_registration_codes',
kwargs={'course_id': text_type(self.course.id)})
coupon = Coupon(code='first', course_id=text_type(self.course.id), created_by=self.instructor)
coupon.save()
data = {
'total_registration_codes': 3, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 5) # 1 for headers, 1 for new line at the end and 3 for the actual data
@patch.object(lms.djangoapps.instructor.views.api, 'random_code_generator',
Mock(side_effect=['first', 'first', 'second', 'third']))
def test_generate_course_registration_codes_integrity_error(self):
"""
Test for the Integrity error against the generated code
"""
url = reverse('generate_registration_codes',
kwargs={'course_id': text_type(self.course.id)})
data = {
'total_registration_codes': 2, 'company_name': 'Test Group', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 4)
def test_spent_course_registration_codes_csv(self):
"""
Test to generate a response of all the spent course registration codes
"""
url = reverse('spent_registration_codes',
kwargs={'course_id': text_type(self.course.id)})
data = {'spent_company_name': ''}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 7)
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': text_type(self.course.id)}
)
data = {
'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'unit_price': 122.45, 'company_contact_email': 'Test@company.com', 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
for i in range(9):
order = Order(user=self.instructor, status='purchased')
order.save()
# Spent(used) Registration Codes
for i in range(9):
i += 13
registration_code_redemption = RegistrationCodeRedemption(
registration_code_id=i,
redeemed_by=self.instructor
)
registration_code_redemption.save()
data = {'spent_company_name': 'Group Alpha'}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 11)
def test_active_course_registration_codes_csv(self):
"""
Test to generate a response of all the active course registration codes
"""
url = reverse('active_registration_codes',
kwargs={'course_id': text_type(self.course.id)})
data = {'active_company_name': ''}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 9)
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': text_type(self.course.id)}
)
data = {
'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
data = {'active_company_name': 'Group Alpha'}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 11)
def test_get_all_course_registration_codes_csv(self):
"""
Test to generate a response of all the course registration codes
"""
url = reverse(
'get_registration_codes', kwargs={'course_id': text_type(self.course.id)}
)
data = {'download_company_name': ''}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 14)
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': text_type(self.course.id)}
)
data = {
'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
data = {'download_company_name': 'Group Alpha'}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 11)
def test_pdf_file_throws_exception(self):
"""
test to mock the pdf file generation throws an exception
when generating registration codes.
"""
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': text_type(self.course.id)}
)
data = {
'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
with patch.object(PDFInvoice, 'generate_pdf', side_effect=Exception):
response = self.client.post(generate_code_url, data)
self.assertEqual(response.status_code, 200, response.content)
def test_get_codes_with_sale_invoice(self):
"""
Test to generate a response of all the course registration codes
"""
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': text_type(self.course.id)}
)
data = {
'total_registration_codes': 5.5, 'company_name': 'Group Invoice', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': True
}
response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
url = reverse('get_registration_codes',
kwargs={'course_id': text_type(self.course.id)})
data = {'download_company_name': 'Group Invoice'}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
def test_with_invalid_unit_price(self):
"""
Test to generate a response of all the course registration codes
"""
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': text_type(self.course.id)}
)
data = {
'total_registration_codes': 10, 'company_name': 'Group Invoice', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 'invalid', 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': True
}
response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 400, response.content)
self.assertIn('Could not parse amount as', response.content)
def test_get_historical_coupon_codes(self):
"""
Test to download a response of all the active coupon codes
"""
get_coupon_code_url = reverse(
'get_coupon_codes', kwargs={'course_id': text_type(self.course.id)}
)
for i in range(10):
coupon = Coupon(
code='test_code{0}'.format(i), description='test_description', course_id=self.course.id,
percentage_discount='{0}'.format(i), created_by=self.instructor, is_active=True
)
coupon.save()
        # now create coupons with expiration dates
for i in range(5):
coupon = Coupon(
code='coupon{0}'.format(i), description='test_description', course_id=self.course.id,
percentage_discount='{0}'.format(i), created_by=self.instructor, is_active=True,
expiration_date=datetime.datetime.now(UTC) + datetime.timedelta(days=2)
)
coupon.save()
response = self.client.post(get_coupon_code_url)
self.assertEqual(response.status_code, 200, response.content)
        # verify that every coupon appears in the CSV response
for coupon in Coupon.objects.all():
self.assertIn(
'"{coupon_code}","{course_id}","{discount}","{description}","{expiration_date}","{is_active}",'
'"{code_redeemed_count}","{total_discounted_seats}","{total_discounted_amount}"'.format(
coupon_code=coupon.code,
course_id=coupon.course_id,
discount=coupon.percentage_discount,
description=coupon.description,
expiration_date=coupon.display_expiry_date,
is_active=coupon.is_active,
code_redeemed_count="0",
total_discounted_seats="0",
total_discounted_amount="0",
), response.content
)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_COUPON_CSV_HEADER))
@attr(shard=5)
class TestBulkCohorting(SharedModuleStoreTestCase):
"""
Test adding users to cohorts in bulk via CSV upload.
"""
@classmethod
def setUpClass(cls):
super(TestBulkCohorting, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestBulkCohorting, self).setUp()
self.staff_user = StaffFactory(course_key=self.course.id)
self.non_staff_user = UserFactory.create()
self.tempdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.tempdir)
def call_add_users_to_cohorts(self, csv_data, suffix='.csv'):
"""
Call `add_users_to_cohorts` with a file generated from `csv_data`.
"""
        # the temporary directory holding this file is removed by the
        # addCleanup call in setUp
__, file_name = tempfile.mkstemp(suffix=suffix, dir=self.tempdir)
with open(file_name, 'w') as file_pointer:
file_pointer.write(csv_data.encode('utf-8'))
with open(file_name, 'r') as file_pointer:
url = reverse('add_users_to_cohorts', kwargs={'course_id': unicode(self.course.id)})
return self.client.post(url, {'uploaded-file': file_pointer})
def expect_error_on_file_content(self, file_content, error, file_suffix='.csv'):
"""
Verify that we get the error we expect for a given file input.
"""
self.client.login(username=self.staff_user.username, password='test')
response = self.call_add_users_to_cohorts(file_content, suffix=file_suffix)
self.assertEqual(response.status_code, 400)
result = json.loads(response.content)
self.assertEqual(result['error'], error)
def verify_success_on_file_content(self, file_content, mock_store_upload, mock_cohort_task):
"""
        Verify that `add_users_to_cohorts` successfully validates the
file content, uploads the input file, and triggers the
background task.
"""
mock_store_upload.return_value = (None, 'fake_file_name.csv')
self.client.login(username=self.staff_user.username, password='test')
response = self.call_add_users_to_cohorts(file_content)
self.assertEqual(response.status_code, 204)
self.assertTrue(mock_store_upload.called)
self.assertTrue(mock_cohort_task.called)
def test_no_cohort_field(self):
"""
Verify that we get a descriptive verification error when we haven't
included a cohort field in the uploaded CSV.
"""
self.expect_error_on_file_content(
'username,email\n', "The file must contain a 'cohort' column containing cohort names."
)
def test_no_username_or_email_field(self):
"""
Verify that we get a descriptive verification error when we haven't
included a username or email field in the uploaded CSV.
"""
self.expect_error_on_file_content(
'cohort\n', "The file must contain a 'username' column, an 'email' column, or both."
)
def test_empty_csv(self):
"""
Verify that we get a descriptive verification error when we haven't
included any data in the uploaded CSV.
"""
self.expect_error_on_file_content(
'', "The file must contain a 'cohort' column containing cohort names."
)
def test_wrong_extension(self):
"""
Verify that we get a descriptive verification error when we haven't
uploaded a file with a '.csv' extension.
"""
self.expect_error_on_file_content(
'', "The file must end with the extension '.csv'.", file_suffix='.notcsv'
)
def test_non_staff_no_access(self):
"""
Verify that we can't access the view when we aren't a staff user.
"""
self.client.login(username=self.non_staff_user.username, password='test')
response = self.call_add_users_to_cohorts('')
self.assertEqual(response.status_code, 403)
@patch('lms.djangoapps.instructor.views.api.lms.djangoapps.instructor_task.api.submit_cohort_students')
@patch('lms.djangoapps.instructor.views.api.store_uploaded_file')
def test_success_username(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call a background task when
the CSV has username and cohort columns.
"""
self.verify_success_on_file_content(
'username,cohort\nfoo_username,bar_cohort', mock_store_upload, mock_cohort_task
)
@patch('lms.djangoapps.instructor.views.api.lms.djangoapps.instructor_task.api.submit_cohort_students')
@patch('lms.djangoapps.instructor.views.api.store_uploaded_file')
def test_success_email(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call the cohorting background
task when the CSV has email and cohort columns.
"""
self.verify_success_on_file_content(
'email,cohort\nfoo_email,bar_cohort', mock_store_upload, mock_cohort_task
)
@patch('lms.djangoapps.instructor.views.api.lms.djangoapps.instructor_task.api.submit_cohort_students')
@patch('lms.djangoapps.instructor.views.api.store_uploaded_file')
def test_success_username_and_email(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call the cohorting background
task when the CSV has username, email and cohort columns.
"""
self.verify_success_on_file_content(
'username,email,cohort\nfoo_username,bar_email,baz_cohort', mock_store_upload, mock_cohort_task
)
@patch('lms.djangoapps.instructor.views.api.lms.djangoapps.instructor_task.api.submit_cohort_students')
@patch('lms.djangoapps.instructor.views.api.store_uploaded_file')
def test_success_carriage_return(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call the cohorting background
task when lines in the CSV are delimited by carriage returns.
"""
self.verify_success_on_file_content(
'username,email,cohort\rfoo_username,bar_email,baz_cohort', mock_store_upload, mock_cohort_task
)
@patch('lms.djangoapps.instructor.views.api.lms.djangoapps.instructor_task.api.submit_cohort_students')
@patch('lms.djangoapps.instructor.views.api.store_uploaded_file')
def test_success_carriage_return_line_feed(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call the cohorting background
task when lines in the CSV are delimited by carriage returns and line
feeds.
"""
self.verify_success_on_file_content(
'username,email,cohort\r\nfoo_username,bar_email,baz_cohort', mock_store_upload, mock_cohort_task
)
|
ahmedaljazzar/edx-platform
|
lms/djangoapps/instructor/tests/test_api.py
|
Python
|
agpl-3.0
| 230,248
|
[
"VisIt"
] |
033bfd68ad230d26209767734876e25f26db694f12e887eba85493292b44863c
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2011 Umeå University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains classes and functions that a SAML2.0 Service Provider (SP) may use
to do attribute aggregation.
"""
import saml2
import saml2.client
DEFAULT_BINDING = saml2.BINDING_SOAP
class AttributeResolver(object):
def __init__(self, metadata=None, config=None, saml2client=None):
self.metadata = metadata
if saml2client:
self.saml2client = saml2client
self.metadata = saml2client.config.metadata
else:
self.saml2client = saml2.client.Saml2Client(config)
def extend(self, subject_id, issuer, vo_members, name_id_format=None,
sp_name_qualifier=None, log=None, real_id=None):
"""
        :param subject_id: The identifier by which the subject is known
            among all the participants of the VO
        :param issuer: The entity ID of the SP that poses the query
        :param vo_members: The entity IDs of the IdPs that will be asked
            for extra attributes
        :param name_id_format: Used to make the IdPs aware of what's going
            on here
        :param sp_name_qualifier: The SP name qualifier to use in the query
        :param log: Where to log information about the exchange
        :param real_id: The real identifier of the subject, if known
        :return: A list of session_info dictionaries with the collected
            information about the subject
"""
result = []
for member in vo_members:
for ass in self.metadata.attribute_services(member):
for attr_serv in ass.attribute_service:
if log:
log.info(
"Send attribute request to %s" % attr_serv.location)
if attr_serv.binding != saml2.BINDING_SOAP:
continue
# attribute query assumes SOAP binding
session_info = self.saml2client.attribute_query(
subject_id,
attr_serv.location,
issuer_id=issuer,
sp_name_qualifier=sp_name_qualifier,
nameid_format=name_id_format,
log=log, real_id=real_id)
if session_info:
result.append(session_info)
return result
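# A minimal usage sketch of the resolver above. The config file name, subject
# id and entity IDs are hypothetical placeholders, not values from this
# package; a real deployment needs metadata describing each IdP's
# AttributeService endpoint.
def _demo_attribute_aggregation():
    import saml2.config
    conf = saml2.config.Config().load_file("sp_config")  # hypothetical config
    resolver = AttributeResolver(config=conf)
    return resolver.extend(
        "subject-transient-id",                  # id shared within the VO
        "https://sp.example.org/sp.xml",         # this SP's entity id
        ["https://idp-a.example.org/idp.xml",    # IdPs queried over SOAP
         "https://idp-b.example.org/idp.xml"])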
|
natebeacham/saml2
|
src/saml2/attribute_resolver.py
|
Python
|
bsd-2-clause
| 2,892
|
[
"exciting"
] |
f802010e38b64bd263f5148a01f623fb3fb5cb0fa12d7d7b516f158abdddba62
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Satpy developers
#
# This file is part of satpy.
#
# satpy is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# satpy is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# satpy. If not, see <http://www.gnu.org/licenses/>.
"""VIIRS Active Fires reader.
This module implements readers for VIIRS Active Fires NetCDF and
ASCII files.
"""
import dask.dataframe as dd
import xarray as xr
from satpy.readers.file_handlers import BaseFileHandler
from satpy.readers.netcdf_utils import NetCDF4FileHandler
# map platform attributes to Oscar standard name
PLATFORM_MAP = {
"NPP": "Suomi-NPP",
"J01": "NOAA-20",
"J02": "NOAA-21"
}
class VIIRSActiveFiresFileHandler(NetCDF4FileHandler):
"""NetCDF4 reader for VIIRS Active Fires."""
def __init__(self, filename, filename_info, filetype_info,
auto_maskandscale=False, xarray_kwargs=None):
"""Open and perform initial investigation of NetCDF file."""
super(VIIRSActiveFiresFileHandler, self).__init__(
filename, filename_info, filetype_info,
auto_maskandscale=auto_maskandscale, xarray_kwargs=xarray_kwargs)
self.prefix = filetype_info.get('variable_prefix')
def get_dataset(self, dsid, dsinfo):
"""Get requested data as DataArray.
Args:
dsid: Dataset ID
            dsinfo: Dataset information
Returns:
Dask DataArray: Data
"""
key = dsinfo.get('file_key', dsid['name']).format(variable_prefix=self.prefix)
data = self[key]
# rename "phoney dims"
data = data.rename(dict(zip(data.dims, ['y', 'x'])))
# handle attributes from YAML
for key in ('units', 'standard_name', 'flag_meanings', 'flag_values', '_FillValue'):
# we only want to add information that isn't present already
if key in dsinfo and key not in data.attrs:
data.attrs[key] = dsinfo[key]
if isinstance(data.attrs.get('flag_meanings'), str):
data.attrs['flag_meanings'] = data.attrs['flag_meanings'].split(' ')
# use more common CF standard units
if data.attrs.get('units') == 'kelvins':
data.attrs['units'] = 'K'
data.attrs["platform_name"] = PLATFORM_MAP.get(self.filename_info['satellite_name'].upper(), "unknown")
data.attrs["sensor"] = "VIIRS"
return data
@property
def start_time(self):
"""Get first date/time when observations were recorded."""
return self.filename_info['start_time']
@property
def end_time(self):
"""Get last date/time when observations were recorded."""
return self.filename_info.get('end_time', self.start_time)
@property
def sensor_name(self):
"""Name of sensor for this file."""
return self["sensor"]
@property
def platform_name(self):
"""Name of platform/satellite for this file."""
return self["platform_name"]
class VIIRSActiveFiresTextFileHandler(BaseFileHandler):
"""ASCII reader for VIIRS Active Fires."""
def __init__(self, filename, filename_info, filetype_info):
"""Make sure filepath is valid and then reads data into a Dask DataFrame.
Args:
filename: Filename
filename_info: Filename information
filetype_info: Filetype information
"""
skip_rows = filetype_info.get('skip_rows', 15)
columns = filetype_info['columns']
self.file_content = dd.read_csv(filename, skiprows=skip_rows, header=None, names=columns)
super(VIIRSActiveFiresTextFileHandler, self).__init__(filename, filename_info, filetype_info)
self.platform_name = PLATFORM_MAP.get(self.filename_info['satellite_name'].upper(), "unknown")
def get_dataset(self, dsid, dsinfo):
"""Get requested data as DataArray."""
ds = self[dsid['name']].to_dask_array(lengths=True)
data = xr.DataArray(ds, dims=("y",), attrs={"platform_name": self.platform_name, "sensor": "VIIRS"})
for key in ('units', 'standard_name', 'flag_meanings', 'flag_values', '_FillValue'):
# we only want to add information that isn't present already
if key in dsinfo and key not in data.attrs:
data.attrs[key] = dsinfo[key]
if isinstance(data.attrs.get('flag_meanings'), str):
data.attrs['flag_meanings'] = data.attrs['flag_meanings'].split(' ')
return data
@property
def start_time(self):
"""Get first date/time when observations were recorded."""
return self.filename_info['start_time']
@property
def end_time(self):
"""Get last date/time when observations were recorded."""
return self.filename_info.get('end_time', self.start_time)
def __getitem__(self, key):
"""Get file content for 'key'."""
return self.file_content[key]
def __contains__(self, item):
"""Check if variable is in current file."""
return item in self.file_content
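# Illustrative sketch only: the file path, column names and filename_info
# values below are hypothetical stand-ins, chosen to satisfy the handler
# signatures defined above rather than to match any real product file.
def _demo_text_handler():
    import datetime
    handler = VIIRSActiveFiresTextFileHandler(
        "AFEDR_npp_sample.txt",  # hypothetical ASCII product file
        {"satellite_name": "npp",
         "start_time": datetime.datetime(2019, 1, 1, 12, 0)},
        {"columns": ["latitude", "longitude", "T13", "power"]},
    )
    # column access goes through __getitem__ on the underlying dask frame
    return handler["power"]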
|
pytroll/satpy
|
satpy/readers/viirs_edr_active_fires.py
|
Python
|
gpl-3.0
| 5,530
|
[
"NetCDF"
] |
1baed7dabb44317edc027ecf6efe3b6138c4f60cd2d1fed0d4557f485a7a912b
|
# Copyright 2004, Magnus Hagdorn
#
# This file is part of GLIMMER.
#
# GLIMMER is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# GLIMMER is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GLIMMER; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Handle command line options in a standard way."""
__all__ = ['CFOptParser','CFOptions']
import optparse, sys, PyGMT, os.path
from CF_loadfile import *
from CF_profile import *
from CF_rsl import CFRSLlocs
from CF_IOmisc import CFreadlines
class CFOptParser(optparse.OptionParser):
"""Handle options."""
def __init__(self,usage = "usage: %prog [options] infile outfile"):
"""Initialise.
usage: usage string.
"""
optparse.OptionParser.__init__(self,usage)
self.width = 10.
try:
self.rsldb = os.path.join(os.environ['GLIMMER_PREFIX'],'share','PyCF','rsl.db')
except:
self.rsldb = None
def plot(self):
"""Plot options."""
group = optparse.OptionGroup(self,"Plot Options","These options are used to control the appearance of the plot")
group.add_option("--size",dest="size",default="a4",help="Size of output (default a4)")
group.add_option("--landscape",action="store_true", dest="landscape",help="select landscape mode")
group.add_option("--mono",action="store_true",default=False,help="convert colour plots to mono")
group.add_option("--width",type="float",dest="width",default=self.width, help="width of plot (default %.2f cm)"%(self.width))
group.add_option("--verbose",action="store_true", dest="verbose",default=False,help="Be verbose")
self.add_option_group(group)
def region(self):
"""Specifying region of interest."""
group = optparse.OptionGroup(self,"Region Options","These options are used to control the region of interest.")
group.add_option("--llx",dest='llx',metavar="X Y",type="float",nargs=2,help="lower left corner in projected coordinate system")
group.add_option("--urx",dest='urx',metavar="X Y",type="float",nargs=2,help="upper right corner in projected coordinate system")
group.add_option("--llg",dest='llg',metavar="X Y",type="float",nargs=2,help="lower left corner in geographic coordinate system")
group.add_option("--urg",dest='urg',metavar="X Y",type="float",nargs=2,help="upper right corner in geographic coordinate system")
self.add_option_group(group)
def region1d(self,onlyx=False,onlyy=False):
"""Specifying axis ranges."""
group = optparse.OptionGroup(self,"Axis Options","These options are used to control the x and y axis.")
if not onlyy:
group.add_option("--noxauto",action="store_true", default="False",help="Don't expand x range to reasonable values.")
group.add_option("--xrange",type="float",nargs=2,metavar="X1 X2",help="set x-axis range to X1:X2")
if not onlyx:
group.add_option("--noyauto",action="store_true", default="False",help="Don't expand x range to reasonable values.")
group.add_option("--yrange",type="float",nargs=2,metavar="Y1 Y2",help="set y-axis range to Y1:Y2")
self.add_option_group(group)
def eisforcing(self):
"""Options for handling EIS forcing time series."""
group = optparse.OptionGroup(self,"EIS forcing","Files containing time series used for forcing EIS.")
group.add_option("--ela",dest='elafile',metavar="FILE",type="string",help="Name of file containing ELA forcing")
group.add_option("--temp",dest='tempfile',metavar="FILE",type="string",help="Name of file containing temperature forcing")
group.add_option("--type_temp",type="choice",metavar="TYPE",choices=['poly','exp'],default="poly",help="Select temperature calculations (default: poly)")
group.add_option("--lat0_temp",type="float",metavar="LAT",default=44.95,help="Origin latitude for temperature calculations using exponential type (default: 44.95)")
group.add_option("--slc",dest='slcfile',metavar="FILE",type="string",help="Name of file containing SLC forcing")
self.add_option_group(group)
def __var(self):
# variable options
self.add_option("-v","--variable",metavar='NAME',action='append',type="string",dest='vars',help="variable to be processed (this option can be used more than once), append _avg to get the vertically integrated average")
self.add_option("-l","--level",metavar="LEV",type='int',dest='level',help='level to be plotted')
self.add_option("--pmt",action="store_true", dest="pmt",default=False,help='Correct temperature for temperature dependance on pressure')
def var_options(self):
"""Extra variable stuff"""
self.add_option("-c","--clip",metavar='VAR',type="choice",dest='clip',choices=['thk','topg','usurf','is'],help="display variable only where ['thk','topg','usurf','is']>0.")
self.add_option("-i","--illuminate",metavar='VAR',type="choice",dest='illuminate',choices=['thk','topg','usurf','is'],help="illuminate surface using gradient of ['thk','topg','usurf','is']")
self.add_option("--land",action="store_true", dest="land",default=False,help="Indicate area above SL")
try:
self.add_option("--colourmap",type="string",dest="colourmap",help="name of GMT cpt file to be used (autogenerate one when set to None)")
except:
pass
try:
self.add_option("--legend",action="store_true", dest="dolegend",default=False,help="Plot a colour legend")
except:
pass
def variable(self):
"""Variable option."""
self.__var()
self.var_options()
def spot(self):
"""Spot options."""
self.__var()
self.add_option("--ij",dest='ij',metavar="I J",type="int",nargs=2,action='append',help="node to be plotted (this option can be used more than once)")
def profile_file(self,plist=False):
"""Options for profile files.
plist: set to True if a number of profiles can be specified"""
if plist:
self.add_option("-p","--profile",action="append",metavar='PROFILE',type='string',dest='profname',help="name of file containing profile control points (this option can be used more than once)")
else:
self.add_option("-p","--profile",metavar='PROFILE',type='string',dest='profname',help="name of file containing profile control points")
self.add_option("--not_projected",action="store_false",default=True,dest="prof_is_projected",help="Set this flag if the profile data is not projected.")
self.add_option("--interval",type="float",metavar='INTERVAL',default=10000.,help="set interval to INTERVAL (default = 10000.m)")
def profile(self,vars=True):
"""Profile options.
vars: set to False if only profile is needed"""
if vars:
self.__var()
self.profile_file()
self.add_option("--showpmp",action="store_true", dest="showpmp",default=False,help='Indicate pressure melting point of ice (only used for temperatures)')
try:
self.add_option("--colourmap",type="string",dest="colourmap",help="name of GMT cpt file to be used (autogenerate one when set to None)")
except:
pass
try:
self.add_option("--legend",action="store_true", dest="dolegend",default=False,help="Plot a colour legend")
except:
pass
def time(self):
"""Time option."""
self.add_option("-t","--time",metavar='TIME',action='append',type="float",dest='times',help="time to be processed (this option can be used more than once)")
self.add_option("-T","--timeslice",metavar='N',action='append',type="int",help="time slice to be processed (this option can be used more than once)")
def timeint(self):
"""Time interval options."""
self.add_option("-t","--time",metavar='T0 T1',type="float",nargs=2,dest='times',help="specify time interval T0 T1, if none process entire file.")
def epoch(self):
"""Glacial Stages."""
self.add_option("-e","--epoch",metavar='NAME',type="string",help='load glacial stages from file and plot them on time axis')
def rsl(self):
"""RSL options."""
self.add_option("-r","--rsldb",metavar='DB',type="string",default=self.rsldb,help="name of RSL database file [%s]"%self.rsldb)
self.add_option("--rsl_selection",type="choice",choices=CFRSLlocs.keys(),default='fenscan',help="Change selection of RSL locations to be plotted, can be one of %s (default: fenscan)"%str(CFRSLlocs.keys()))
class CFOptions(object):
"""Do some option/argument massaging."""
def __init__(self,parser,numargs=None):
"""Initialise.
parser: Option parser.
numargs: the number of arguments expected. A negative numargs implies the minimum number of arguments."""
self.parser = parser
(self.options, self.args) = self.parser.parse_args()
if numargs != None:
if numargs>=0:
if len(self.args)!=numargs:
self.parser.error('Error, expected %d arguments and got %d arguments\n'%(numargs,len(self.args)))
else:
if len(self.args)<-numargs:
self.parser.error('Error, expected at least %d arguments and got %d arguments\n'%(-numargs,len(self.args)))
def __get_nfiles(self):
return len(self.args)-1
nfiles = property(__get_nfiles)
def __get_nvars(self):
try:
return len(self.options.vars)
except:
return 1
nvars = property(__get_nvars)
def __get_ntimes(self):
try:
return len(self.options.times)
except:
return 1
ntimes = property(__get_ntimes)
def __get_papersize(self):
if self.options.landscape:
orientation = "landscape"
else:
orientation = "portrait"
return PyGMT.PaperSize(self.options.size,orientation)
papersize = property(__get_papersize)
def plot(self,argn=-1,number=None):
"""Setup plot.
argn: number of argument holding output name.
number: number of series in file"""
orientation = "portrait"
try:
if self.options.landscape:
orientation = "landscape"
except:
pass
if number!=None:
(root,ext) = os.path.splitext(self.args[argn])
fname = '%s.%03d%s'%(root,number,ext)
else:
fname = self.args[argn]
try:
size=self.options.size
except:
size="a4"
plot = PyGMT.Canvas(fname,size=size,orientation=orientation)
try:
if self.options.verbose:
plot.verbose = True
except:
pass
plot.defaults['LABEL_FONT_SIZE']='12p'
plot.defaults['ANNOT_FONT_SIZE']='10p'
return plot
def cffile(self,argn=0):
"""Load CF file.
argn: number of argument holding CF file name."""
infile = CFloadfile(self.args[argn])
if hasattr(self.options,'llx'):
if self.options.llx != None:
infile.ll_xy = list(self.options.llx)
if hasattr(self.options,'urx'):
if self.options.urx != None:
infile.ur_xy = list(self.options.urx)
if hasattr(self.options,'llg'):
if self.options.llg != None:
infile.ll_geo = list(self.options.llg)
if hasattr(self.options,'urg'):
if self.options.urg != None:
infile.ur_geo = list(self.options.urg)
return infile
def cfprofile(self,argn=0):
"""Load CF profile.
argn: number of argument holding CF file name."""
# load profile data
xdata = []
ydata = []
infile = file(self.options.profname)
for line in CFreadlines(infile):
l = line.split()
xdata.append(float(l[0]))
ydata.append(float(l[1]))
infile.close()
try:
xrange=self.options.xrange
except:
xrange=None
if xrange==None:
xrange=[None,None]
profile = CFloadprofile(self.args[argn],xdata,ydata,projected=self.options.prof_is_projected,interval=self.options.interval,xrange=xrange)
return profile
def vars(self,cffile,varn=0):
"""Get variable.
cffile: CF netCDF file
varn: variable number
"""
try:
var = cffile.getvar(self.options.vars[varn])
except KeyError:
self.parser.error("Cannot find variable %s in file %s"%(self.options.vars[varn],cffile.fname))
try:
if self.options.colourmap == 'None':
var.colourmap = 'auto'
elif self.options.colourmap != None:
var.colourmap = self.options.colourmap
except:
var.colourmap = 'auto'
var.pmt = self.options.pmt
return var
def profs(self,cffile,varn=0):
"""Get profiles.
cffile: CF netCDF profile file
varn: variable number
"""
prof = cffile.getprofile(self.options.vars[varn])
prof.pmt = self.options.pmt
try:
prof.showpmp = self.options.showpmp
except:
pass
try:
if self.options.colourmap == 'None':
prof.colourmap = '.__auto.cpt'
elif self.options.colourmap != None:
prof.colourmap = self.options.colourmap
except:
prof.colourmap = '.__auto.cpt'
return prof
def times(self,cffile,timen=0):
"""Get time slice.
timen: time number."""
if self.options.times != None:
return cffile.timeslice(self.options.times[timen])
elif self.options.timeslice !=None:
return self.options.timeslice[timen]
else:
return 0
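# A sketch of the intended usage pattern in a PyCF plotting script, built
# only from the classes above (the option set and argument count are
# illustrative, not a real PyCF tool).
if __name__ == '__main__':
    parser = CFOptParser()
    parser.variable()
    parser.time()
    parser.plot()
    opts = CFOptions(parser, 2)      # expect an infile and an outfile
    infile = opts.cffile(0)          # CF netCDF input, argument 0
    var = opts.vars(infile)          # first -v/--variable
    plot = opts.plot(-1)             # output canvas, last argument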
|
glimmer-cism/PyCF
|
PyCF/CF_options.py
|
Python
|
gpl-2.0
| 14,671
|
[
"NetCDF"
] |
2fb71ebd18957c0c313fc79f97145da3a569fd13bbd4734a076342eaa224c461
|
# Orca
#
# Copyright (C) 2010-2013 Igalia, S.L.
#
# Author: Alejandro Pinheiro Iglesias <apinheiro@igalia.com>
# Author: Joanmarie Diggs <jdiggs@igalia.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010-2013 Igalia, S.L."
__license__ = "LGPL"
import pyatspi
import time
import orca.orca as orca
import orca.scripts.toolkits.clutter as clutter
from .formatting import Formatting
class Script(clutter.Script):
def __init__(self, app):
clutter.Script.__init__(self, app)
self._activeDialog = (None, 0) # (Accessible, Timestamp)
self._activeDialogLabels = {} # key == hash(obj), value == name
def getFormatting(self):
"""Returns the formatting strings for this script."""
return Formatting(self)
def skipObjectEvent(self, event):
"""Determines whether or not this event should be skipped due to
being redundant, part of an event flood, etc."""
try:
role = event.source.getRole()
except:
pass
else:
# We must handle all dialogs ourselves in this script.
if role == pyatspi.ROLE_DIALOG:
return False
return clutter.Script.skipObjectEvent(self, event)
def _presentDialogLabel(self, event):
try:
role = event.source.getRole()
name = event.source.name
except:
return False
activeDialog, timestamp = self._activeDialog
if not activeDialog or role != pyatspi.ROLE_LABEL:
return False
obj = hash(event.source)
if name == self._activeDialogLabels.get(obj):
return True
isDialog = lambda x: x and x.getRole() == pyatspi.ROLE_DIALOG
parentDialog = pyatspi.utils.findAncestor(event.source, isDialog)
if activeDialog == parentDialog:
self.presentMessage(name)
self._activeDialogLabels[obj] = name
return True
return False
def onNameChanged(self, event):
"""Callback for object:property-change:accessible-name events."""
if self._presentDialogLabel(event):
return
clutter.Script.onNameChanged(self, event)
def onShowingChanged(self, event):
"""Callback for object:state-changed:showing accessibility events."""
        activeDialog, timestamp = self._activeDialog
        if not event.detail1:
            # The active dialog is no longer showing; forget about it. This
            # check has to happen before the early return, otherwise the
            # cleanup below would be unreachable.
            if event.source == activeDialog:
                self._activeDialog = (None, 0)
                self._activeDialogLabels = {}
            return
try:
role = event.source.getRole()
name = event.source.name
except:
return
# When entering overview with many open windows, we get quite
# a few state-changed:showing events for nameless panels. The
# act of processing these by the default script causes us to
# present nothing, and introduces a significant delay before
# presenting the Top Bar button when Ctrl+Alt+Tab was pressed.
if role == pyatspi.ROLE_PANEL and not name:
return
# We cannot count on events or their order from dialog boxes.
# Therefore, the only way to reliably present a dialog is by
# ignoring the events of the dialog itself and keeping track
# of the current dialog.
        if activeDialog and role == pyatspi.ROLE_LABEL:
if self._presentDialogLabel(event):
return
clutter.Script.onShowingChanged(self, event)
def onSelectedChanged(self, event):
"""Callback for object:state-changed:selected accessibility events."""
try:
state = event.source.getState()
except:
return
# Some buttons, like the Wikipedia button, claim to be selected but
# lack STATE_SELECTED. The other buttons, such as in the Dash and
# event switcher, seem to have the right state. Since the ones with
# the wrong state seem to be things we don't want to present anyway
# we'll stop doing so and hope we are right.
if event.detail1:
if state.contains(pyatspi.STATE_SELECTED):
orca.setLocusOfFocus(event, event.source)
return
clutter.Script.onSelectedChanged(self, event)
def onFocusedChanged(self, event):
"""Callback for object:state-changed:focused accessibility events."""
if not event.detail1:
return
obj = event.source
try:
role = obj.getRole()
name = obj.name
except:
return
# The dialog will get presented when its first child gets focus.
if role == pyatspi.ROLE_DIALOG:
return
if role == pyatspi.ROLE_MENU_ITEM and not name \
and not self.utilities.labelsForObject(obj):
isRealFocus = lambda x: x and x.getRole() == pyatspi.ROLE_SLIDER
descendant = pyatspi.findDescendant(obj, isRealFocus)
if descendant:
orca.setLocusOfFocus(event, descendant)
return
# This is to present dialog boxes which are, to the user, newly
# activated. And if something is claiming to be focused that is
# not in a dialog, that's good to know as well, so update our
# state regardless.
activeDialog, timestamp = self._activeDialog
if not activeDialog:
isDialog = lambda x: x and x.getRole() == pyatspi.ROLE_DIALOG
dialog = pyatspi.utils.findAncestor(obj, isDialog)
self._activeDialog = (dialog, time.time())
if dialog:
orca.setLocusOfFocus(None, dialog)
labels = self.utilities.unrelatedLabels(dialog)
for label in labels:
self._activeDialogLabels[hash(label)] = label.name
clutter.Script.onFocusedChanged(self, event)
|
ruibarreira/linuxtrail
|
usr/lib/python3/dist-packages/orca/scripts/apps/gnome-shell/script.py
|
Python
|
gpl-3.0
| 6,767
|
[
"ORCA"
] |
6a1cd19e8bf904c69dafa1251d7d322d695be4e8baeb530bd374fb2273344726
|
#!/usr/bin/env python
import math, numpy as np
#from enthought.mayavi import mlab
import matplotlib.pyplot as pp
import matplotlib.cm as cm
import matplotlib
import scipy.linalg as lin
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
import tf
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.transforms as tr
import hrl_lib.matplotlib_util as mpu
import pickle
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
from hrl_haptic_manipulation_in_clutter_msgs.msg import SkinContact
from hrl_haptic_manipulation_in_clutter_msgs.msg import TaxelArray
from m3skin_ros.msg import TaxelArray as TaxelArray_Meka
from hrl_msgs.msg import FloatArrayBare
from geometry_msgs.msg import Point
from geometry_msgs.msg import Vector3
def compute_contact_regions(arr_2d, threshold):
mask = arr_2d > threshold
label_im, nb_labels = ni.label(mask)
return label_im, nb_labels
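# Tiny worked example of the thresholding + connected-component labelling
# above (illustrative only; ni.label's default 2D connectivity is 4-connected,
# so diagonal neighbours form separate regions):
def _demo_contact_regions():
    force = np.array([[0.0, 2.0, 2.5, 0.0],
                      [0.0, 0.0, 0.0, 3.0]])
    label_im, nb_labels = compute_contact_regions(force, 1.0)
    # label_im == [[0, 1, 1, 0],
    #              [0, 0, 0, 2]] and nb_labels == 2: two contact regions
    return label_im, nb_labels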
def compute_obj_labels(label_im, nb_labels):
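    # Count the taxels in each labelled region, then return the labels of the
    # two largest regions (assumed to be the two contacted objects) together
    # with their taxel counts. Labels run from 1..nb_labels; -1 means no such
    # region was found.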
i=1
local_nb_label_first_obj = -1
local_nb_label_second_obj = -1
index = np.zeros(nb_labels)
row,col = np.shape(label_im)
while i <= nb_labels:
j=0
while j < row:
k=0
while k < col:
if label_im[j][k] == i:
index[i-1] = index[i-1]+1
k=k+1
j=j+1
i=i+1
temp=0
max_index_1 = np.max(index)
#print max_index_1
while temp < len(index):
if index[temp] == max_index_1 and max_index_1 > 0:
local_nb_label_first_obj = temp+1
temp = temp+1
index[local_nb_label_first_obj-1]=-1
max_index_2 = np.max(index)
#print max_index_2
temp=0
while temp < len(index):
if index[temp] == max_index_2 and max_index_2 > 0:
local_nb_label_second_obj = temp+1
temp = temp+1
return local_nb_label_first_obj, local_nb_label_second_obj, max_index_1, max_index_2
def compute_resultant_force_magnitudes(force_arr, label_im, nb_label):
total_force = ni.sum(force_arr, label_im, nb_label)
return total_force
def compute_max_force(force_arr, label_im, nb_label):
max_force = ni.maximum(force_arr, label_im, nb_label)
return max_force
def compute_center_of_pressure(cx_arr, cy_arr, cz_arr, label_im,
nb_label):
cx = ni.mean(cx_arr, label_im, nb_label)
cy = ni.mean(cy_arr, label_im, nb_label)
cz = ni.mean(cz_arr, label_im, nb_label)
contact_vector = np.column_stack([cx, cy, cz])
return contact_vector
def track_object_connected_component(cx_arr, cy_arr, cz_arr, r1, t1, label_im, nb_label_first_obj1, nb_label_second_obj1, total_contact_first_obj1, total_contact_second_obj1):
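    # Heuristic tracker: if the largest region's centre of pressure is now
    # closer to where the *second* object was on the previous frame than to
    # where the first object was, assume the two labels swapped identity and
    # exchange them, keeping the per-object histories consistent over time.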
global cop_global_first_obj_prev
global cop_global_second_obj_prev
global iterindex
cop_local_first_obj1 = compute_center_of_pressure(cx_arr,cy_arr,cz_arr,label_im,nb_label_first_obj1)
cop_global_first_obj1 = r1*(cop_local_first_obj1.T) + t1
cop_local_second_obj1 = compute_center_of_pressure(cx_arr,cy_arr,cz_arr,label_im,nb_label_second_obj1)
cop_global_second_obj1 = r1*(cop_local_second_obj1.T) + t1
#print lin.norm(cop_global_first_obj1-cop_global_first_obj_prev)
if (lin.norm(cop_global_first_obj1-cop_global_first_obj_prev) >= lin.norm(cop_global_first_obj1 - cop_global_second_obj_prev)) and (iterindex > 0):
#if (lin.norm(cop_global_second_obj1-cop_global_second_obj_prev) >= lin.norm(cop_global_second_obj1 - cop_global_first_obj_prev)) and (iterindex > 0):
#if ((lin.norm(cop_global_first_obj1-cop_global_first_obj_prev) >= lin.norm(cop_global_first_obj1 - cop_global_second_obj_prev)) or (lin.norm(cop_global_second_obj1-cop_global_second_obj_prev) >= lin.norm(cop_global_second_obj1 - cop_global_first_obj_prev))) and (iterindex > 0):
#if (lin.norm(cop_global_first_obj1-cop_global_first_obj_prev) > 0.005) and (iterindex > 0):
#print "Need to be Exchanged: ", total_contact_first_obj1, " compared to ", total_contact_second_obj1
#print "Need to be Exchanged: ", nb_label_first_obj1, " compared to ", nb_label_second_obj1
temp1 = nb_label_first_obj1
temp2 = total_contact_first_obj1
nb_label_first_obj1 = nb_label_second_obj1
total_contact_first_obj1 = total_contact_second_obj1
nb_label_second_obj1 = temp1
total_contact_second_obj1 = temp2
cop_global_first_obj_prev = cop_global_second_obj1
cop_global_second_obj_prev = cop_global_first_obj1
print "The connected component corresponding to the first object is not the largest"
else:
cop_global_first_obj_prev = cop_global_first_obj1
cop_global_second_obj_prev = cop_global_second_obj1
#print "The largest connected component is the first object"
iterindex = iterindex + 1
return nb_label_first_obj1, nb_label_second_obj1, total_contact_first_obj1, total_contact_second_obj1
def callback(data, callback_args):
rospy.loginfo('Getting data!')
tf_lstnr = callback_args
sc = SkinContact()
sc.header.frame_id = '/torso_lift_link' # has to be this and no other coord frame.
sc.header.stamp = data.header.stamp
t1, q1 = tf_lstnr.lookupTransform(sc.header.frame_id,
data.header.frame_id,
rospy.Time(0))
t1 = np.matrix(t1).reshape(3,1)
r1 = tr.quaternion_to_matrix(q1)
force_vectors = np.row_stack([data.forces_x, data.forces_y, data.forces_z])
contact_vectors = np.row_stack([data.centers_x, data.centers_y, data.centers_z]).reshape((3,16,24))
fmags = ut.norm(force_vectors)
force_arr = fmags.reshape((16,24))
cx_arr = contact_vectors[0]
cy_arr = contact_vectors[1]
cz_arr = contact_vectors[2]
label_im, nb_labels = compute_contact_regions(force_arr, 1.0)
nb_label_first_object, nb_label_second_object, total_contact_first_object, total_contact_second_object = compute_obj_labels(label_im,nb_labels)
nb_label_first_obj, nb_label_second_obj, total_contact_first_obj, total_contact_second_obj = track_object_connected_component(cx_arr, cy_arr, cz_arr, r1, t1, label_im, nb_label_first_object, nb_label_second_object, total_contact_first_object, total_contact_second_object)
#if (nb_label_first_obj == nb_label_second_object) and (nb_label_second_obj == nb_label_first_object):
#print "Correctly Exchanged, Now: ", total_contact_first_obj, " compared to ", total_contact_second_obj
#print "Correctly Exchanged, Now: ", nb_label_first_obj, " compared to ", nb_label_second_obj
cop_local_first_obj = compute_center_of_pressure(cx_arr,cy_arr,cz_arr,label_im,nb_label_first_obj)
cop_global_first_obj = r1*(cop_local_first_obj.T) + t1
cop_local_second_obj = compute_center_of_pressure(cx_arr,cy_arr,cz_arr,label_im,nb_label_second_obj)
cop_global_second_obj = r1*(cop_local_second_obj.T) + t1
total_force_first_obj = compute_resultant_force_magnitudes(force_arr,label_im,nb_label_first_obj)
total_force_second_obj = compute_resultant_force_magnitudes(force_arr,label_im,nb_label_second_obj)
max_force_first_obj = compute_max_force(force_arr,label_im,nb_label_first_obj)
max_force_second_obj = compute_max_force(force_arr,label_im,nb_label_second_obj)
max_force_first_obj_temp = compute_max_force(force_arr,label_im,nb_label_first_object)
max_force_second_obj_temp = compute_max_force(force_arr,label_im,nb_label_second_object)
mean_force_first_obj = total_force_first_obj/total_contact_first_obj
mean_force_second_obj = total_force_second_obj/total_contact_second_obj
#if (nb_label_first_obj == nb_label_second_object) and (nb_label_second_obj == nb_label_first_object):
#print "New Max. Forces, 1st object: ", max_force_first_obj, " compared to 2nd object: ", max_force_second_obj
#print "Old Max. Forces, 1st object: ", max_force_first_obj_temp, " compared to 2nd object: ", max_force_second_obj_temp, "\n"
global time
time = time + 0.01
time_instant_data_first_obj = [time,total_force_first_obj,mean_force_first_obj,max_force_first_obj,total_contact_first_obj,cop_global_first_obj[0],cop_global_first_obj[1],cop_global_first_obj[2]]
time_instant_data_second_obj = [time,total_force_second_obj,mean_force_second_obj,max_force_second_obj,total_contact_second_obj,cop_global_second_obj[0],cop_global_second_obj[1],cop_global_second_obj[2]]
global time_varying_data_first_obj
time_varying_data_first_obj = np.row_stack([time_varying_data_first_obj, time_instant_data_first_obj])
global time_varying_data_second_obj
time_varying_data_second_obj = np.row_stack([time_varying_data_second_obj, time_instant_data_second_obj])
def tracking_point():
rospy.loginfo('Tracking Distance!')
ta1 = time_varying_data_first_obj
ta2 = time_varying_data_second_obj
k = 0
for i in ta1[:,0]:
if i != ta1[-1,0]:
instant_dist_first_obj = math.sqrt((ta1[k+1,5]-ta1[1,5])**2 + (ta1[k+1,6]-ta1[1,6])**2 + (ta1[k+1,7]-ta1[1,7])**2)
time_instant_tracker_first_obj = [ta1[k+1,0], instant_dist_first_obj]
instant_dist_second_obj = math.sqrt((ta2[k+1,5]-ta2[1,5])**2 + (ta2[k+1,6]-ta2[1,6])**2 + (ta2[k+1,7]-ta2[1,7])**2)
time_instant_tracker_second_obj = [ta2[k+1,0], instant_dist_second_obj]
global time_varying_tracker_first_obj
time_varying_tracker_first_obj = np.row_stack([time_varying_tracker_first_obj, time_instant_tracker_first_obj])
global time_varying_tracker_second_obj
time_varying_tracker_second_obj = np.row_stack([time_varying_tracker_second_obj, time_instant_tracker_second_obj])
k=k+1
def savedata():
rospy.loginfo('Saving data!')
global time_varying_data_first_obj
ut.save_pickle(time_varying_data_first_obj, '/home/tapo/svn/robot1_data/usr/tapo/data/New_Experiments_Qi/Two_Objects/black_foam_blue_cup/reverse/new4/time_varying_data_black_foam_blue_cup_fixed_first_object_trial_5.pkl')
global time_varying_data_second_obj
ut.save_pickle(time_varying_data_second_obj, '/home/tapo/svn/robot1_data/usr/tapo/data/New_Experiments_Qi/Two_Objects/black_foam_blue_cup/reverse/new4/time_varying_data_black_foam_blue_cup_fixed_second_object_trial_5.pkl')
global time_varying_tracker_first_obj
ut.save_pickle(time_varying_tracker_first_obj, '/home/tapo/svn/robot1_data/usr/tapo/data/New_Experiments_Qi/Two_Objects/black_foam_blue_cup/reverse/new4/time_varying_tracking_data_black_foam_blue_cup_fixed_first_object_trial_5.pkl')
global time_varying_tracker_second_obj
ut.save_pickle(time_varying_tracker_second_obj, '/home/tapo/svn/robot1_data/usr/tapo/data/New_Experiments_Qi/Two_Objects/black_foam_blue_cup/reverse/new4/time_varying_tracking_data_black_foam_blue_cup_fixed_second_object_trial_5.pkl')
def plotdata():
rospy.loginfo('Plotting data!')
# New Objects (Two Objects)
# First_Object
ta = ut.load_pickle('/home/tapo/svn/robot1_data/usr/tapo/data/New_Experiments_Qi/Two_Objects/black_foam_blue_cup/reverse/new4/time_varying_data_black_foam_blue_cup_fixed_first_object_trial_5.pkl')
ta2 = ut.load_pickle('/home/tapo/svn/robot1_data/usr/tapo/data/New_Experiments_Qi/Two_Objects/black_foam_blue_cup/reverse/new4/time_varying_tracking_data_black_foam_blue_cup_fixed_first_object_trial_5.pkl')
# Second_Object
ta3 = ut.load_pickle('/home/tapo/svn/robot1_data/usr/tapo/data/New_Experiments_Qi/Two_Objects/black_foam_blue_cup/reverse/new4/time_varying_data_black_foam_blue_cup_fixed_second_object_trial_5.pkl')
ta4 = ut.load_pickle('/home/tapo/svn/robot1_data/usr/tapo/data/New_Experiments_Qi/Two_Objects/black_foam_blue_cup/reverse/new4/time_varying_tracking_data_black_foam_blue_cup_fixed_second_object_trial_5.pkl')
matplotlib.rcParams['font.size'] = 24
mpu.figure(1)
pp.title('Time-Varying Force',fontsize='24')
pp.xlabel('Time (s)',fontsize='24')
pp.ylabel('Max Force',fontsize='24')
pp.plot(ta[0:,0], ta[0:,3], ta3[0:,0], ta3[0:,3], linewidth=3.0)
pp.legend(["First Object", "Second Object"], loc=7)
#pp.axis([3,5,0,8])
    pp.grid(True)
mpu.figure(2)
pp.title('Time-Varying Contact',fontsize='24')
pp.xlabel('Time (s)',fontsize='24')
pp.ylabel('No. of Contact Regions',fontsize='24')
pp.plot(ta[0:,0], ta[0:,4], ta3[0:,0], ta3[0:,4], linewidth=3.0)
pp.legend(["First Object", "Second Object"], loc=4)
#pp.axis([3,5,2,16])
    pp.grid(True)
mpu.figure(3)
pp.title('Point Tracker',fontsize='24')
pp.xlabel('Time (s)',fontsize='24')
pp.ylabel('Contact Point Distance',fontsize='24')
pp.plot(ta2[0:,0], ta2[0:,1], ta4[0:,0], ta4[0:,1], linewidth=3.0)
pp.legend(["First Object", "Second Object"], loc=4)
#pp.axis([3,5,0.02,0.185])
    pp.grid(True)
def getdata():
rospy.init_node('time_varying_data_two_objects', anonymous=True)
tf_lstnr = tf.TransformListener()
rospy.Subscriber("/skin_patch_forearm_right/taxels/forces", TaxelArray_Meka, callback, callback_args = (tf_lstnr))
rospy.spin()
if __name__ == '__main__':
time = 0
time_varying_data_first_obj = [0,0,0,0,0,0,0,0]
time_varying_data_second_obj = [0,0,0,0,0,0,0,0]
time_varying_tracker_first_obj = [0,0]
time_varying_tracker_second_obj = [0,0]
cop_global_first_obj_prev = [0,0,0]
cop_global_second_obj_prev = [0,0,0]
iterindex = 0
getdata()
tracking_point()
savedata()
plotdata()
pp.show()
|
tapomayukh/projects_in_python
|
sandbox_tapo/src/skin_related/Cody_Data/time_varying_data_two_objects_new.py
|
Python
|
mit
| 13,725
|
[
"Mayavi"
] |
e65831564908fe9457fee61ba4696918be0fe8c4df242bf29a6740e8218c4279
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ramaplot.Ramaplot.py
#
# Copyright (C) 2015 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Generates one or more Ramachandran figures to specifications provided in
a YAML file.
"""
################################### MODULES ###################################
from __future__ import absolute_import,division,print_function,unicode_literals
import matplotlib
if __name__ == "__main__":
__package__ = str("ramaplot")
import ramaplot
from .myplotspec.FigureManager import FigureManager
################################### CLASSES ###################################
class RamachandranFigureManager(FigureManager):
"""
Manages the generation of Ramachandran figures.
    .. todo::
- Population preset
"""
from .myplotspec.manage_defaults_presets import manage_defaults_presets
from .myplotspec.manage_kwargs import manage_kwargs
defaults = """
draw_figure:
subplot_kw:
autoscale_on: False
multiplot: False
multi_xticklabels: [-180,-90,0,90,180]
multi_yticklabels: [-180,-90,0,90,180]
draw_subplot:
xlabel: Φ
xticks: [-180,-90,0,90,180]
ylabel: Ψ
yticks: [-180,-90,0,90,180]
ylabel_kw:
va: center
y2ticks: [-180,-90,0,90,180]
y2ticklabels: []
y2label_kw:
rotation: 270
draw_dataset:
heatmap_kw:
cmap: afmhot
edgecolors: none
rasterized: True
vmin: 0
vmax: 5
partner_kw:
position: right
colorbar_kw:
zticks: [0,1,2,3,4,5]
ztick_params:
bottom: off
top: off
left: off
right: off
zlabel: 'ΔG (kcal/mol)'
contour_kw:
colors: '0.25'
levels: [1, 2, 3, 4, 5]
linestyles: solid
mask_kw:
cmap: Greys_r
edgecolors: none
rasterized: True
vmin: 0
vmax: 1
outline_kw:
color: black
plot_kw:
marker: 'o'
ls: None
mew: 0
mfc: [0.5,0.5,0.5]
ms: 0.5
rasterized: True
label_kw:
x: 165
y: -170
text_kw:
ha: right
va: bottom
border_lw: 1
"""
available_presets = """
ff99SB:
class: appearance
help: Draw heatmap in style of AMBER ff99SB paper
draw_dataset:
heatmap_kw:
cmap: !!python/object/apply:ramaplot.cmap_ff99SB []
contour: False
potential_energy:
class: content
help: Plot potential energy as a function of Φ,Ψ
draw_dataset:
heatmap: True
heatmap_kw:
cmap: bone
vmin: 0
vmax: 5
colorbar_kw:
zticks: [0,1,2,3,4,5]
zlabel: 'ΔU (kcal/mol)'
contour: True
contour_kw:
colors: '0.25'
levels: [1, 2, 3, 4, 5]
linestyles: solid
mask: True
outline: False
free_energy:
class: content
help: Plot free energy as a function of Φ,Ψ
draw_dataset:
heatmap: True
heatmap_kw:
cmap: afmhot
vmin: 0
vmax: 5
colorbar_kw:
zticks: [0,1,2,3,4,5]
zlabel: 'ΔG (kcal/mol)'
contour: True
contour_kw:
colors: '0.25'
levels: [1, 2, 3, 4, 5]
linestyles: solid
mask: True
outline: False
diff:
class: content
help: Plot difference between two datasets
draw_dataset:
kind: diff
max_fe: 5
heatmap_kw:
cmap: RdBu_r
vmin: -5
vmax: 5
colorbar_kw:
zticks: [-5,-4,-3,-2,-1,0,1,2,3,4,5]
zlabel: 'ΔΔG (kcal/mol)'
contour_kw:
levels: [-5,-4,-3,-2,-1,0,1,2,3,4,5]
mask: True
outline: True
sampling:
class: content
help: Plot sampling as a function of Φ,Ψ
draw_dataset:
heatmap: False
heatmap_kw:
cmap: afmhot_r
vmin: 0
vmax: 5
contour: False
mask: True
mask_kw:
cmap: Greys
outline: False
plot: True
bond:
class: content
help: Plot average value of a bond as a function of Φ,Ψ
draw_dataset:
kind: cdl
heatmap_kw:
cmap: RdBu
vmin: 0
vmax: 3
contour: False
mask: True
outline: True
bond_CN:
help: Plot average C-N bond as a function of Φ,Ψ
        extends: bond
draw_dataset:
heatmap_kw:
vmin: 1.31
vmax: 1.35
colorbar_kw:
zticks: [1.32, 1.33, 1.34]
zlabel: C-N (Å)
bond_CN_extended:
help: Plot average C-N bond as a function of Φ,Ψ; range extended to
support proline's longer bond
extends: bond
draw_dataset:
heatmap_kw:
vmin: 1.31
vmax: 1.40
colorbar_kw:
zticks: [1.32, 1.34, 1.36, 1.38]
zlabel: C-N (Å)
bond_NA:
help: Plot average N-Cα bond as a function of Φ,Ψ
extends: bond
draw_dataset:
heatmap_kw:
vmin: 1.43
vmax: 1.49
colorbar_kw:
zticks: [1.44, 1.45, 1.46, 1.47, 1.48]
zlabel: N-Cα (Å)
bond_AB:
help: Plot average Cα-Cβ bond as a function of Φ,Ψ
extends: bond
draw_dataset:
heatmap_kw:
vmin: 1.51
vmax: 1.55
colorbar_kw:
zticks: [1.52, 1.53, 1.54]
zlabel: Cα-Cβ (Å)
bond_AC:
help: Plot average Cα-C bond as a function of Φ,Ψ
extends: bond
draw_dataset:
heatmap_kw:
vmin: 1.50
vmax: 1.54
colorbar_kw:
zticks: [1.51, 1.52, 1.53]
zlabel: Cα-C (Å)
bond_CO:
help: Plot average C-O bond as a function of Φ,Ψ
extends: bond
draw_dataset:
heatmap_kw:
vmin: 1.22
vmax: 1.25
colorbar_kw:
zticks: [1.23, 1.24]
zlabel: C-O (Å)
angle:
class: content
help: Plot average value of an angle as a function of Φ,Ψ
draw_dataset:
kind: cdl
heatmap_kw:
cmap: RdBu
vmin: 110
vmax: 130
contour: False
mask: True
outline: True
angle_CNA:
help: Plot average C-N-Cα angle as a function of Φ,Ψ
extends: angle
draw_dataset:
heatmap_kw:
vmin: 118
vmax: 127
colorbar_kw:
zticks: [119,121,123,125]
zlabel: C-N-Cα (°)
angle_NAB:
help: Plot average N-Cα-Cβ angle as a function of Φ,Ψ
extends: angle
draw_dataset:
heatmap_kw:
vmin: 108
vmax: 115
colorbar_kw:
zticks: [109,110,111,112,113,114]
zlabel: N-Cα-Cβ (°)
angle_NAB_extended:
help: Plot average N-Cα-Cβ angle as a function of Φ,Ψ; range
extended to support proline's smaller angle
extends: angle
draw_dataset:
heatmap_kw:
vmin: 101
vmax: 115
colorbar_kw:
zticks: [102,105,108,111,114]
zlabel: N-Cα-Cβ (°)
angle_NAC:
help: Plot average N-Cα-C angle as a function of Φ,Ψ
extends: angle
draw_dataset:
heatmap_kw:
vmin: 106
vmax: 117
colorbar_kw:
zticks: [107,109,111,113,115]
zlabel: N-Cα-C (°)
angle_BAC:
help: Plot average Cβ-Cα-C angle as a function of Φ,Ψ
extends: angle
draw_dataset:
heatmap_kw:
vmin: 109
vmax: 118
colorbar_kw:
zticks: [110,112,114,116]
zlabel: Cβ-Cα-C (°)
angle_ACO:
help: Plot average Cα-C-O angle as a function of Φ,Ψ
extends: angle
draw_dataset:
heatmap_kw:
vmin: 117
vmax: 124
colorbar_kw:
zticks: [118,119,120,121,122,123]
zlabel: Cα-C-O (°)
angle_ACN:
help: Plot average Cα-C-N angle as a function of Φ,Ψ
extends: angle
draw_dataset:
heatmap_kw:
vmin: 113
vmax: 122
colorbar_kw:
zticks: [114,116,118,120]
zlabel: Cα-C-N (°)
      angle_OCN:
        help: Plot average O-C-N angle as a function of Φ,Ψ
        extends: angle
        draw_dataset:
          heatmap_kw:
            vmin: 120
            vmax: 125
          colorbar_kw:
            zticks: [121,122,123,124]
            zlabel: O-C-N (°)
omega:
class: content
help: Plot average value of omega as a function of Φ (i),Ψ (i-1)
draw_subplot:
xlabel: '$Φ_{i}$'
ylabel: '$Ψ_{i-1}$'
draw_dataset:
kind: cdl
heatmap_kw:
cmap: RdBu
vmin: 170
vmax: 190
colorbar_kw:
zticks: [170,175,180,185,190]
zlabel: ω
contour: False
mask: True
outline: True
image:
class: content
help: Plot image of a Ramachandran plot, typically from a publication
draw_dataset:
kind: image
heatmap_kw:
cmap: Greys_r
vmin: 0
vmax: 1
contour: False
mask: False
outline: False
poster:
class: target
help: Poster (width = 4.6", height = 4.3")
inherits: poster
draw_figure:
left: 1.20
sub_width: 3.00
right: 0.40
bottom: 1.00
sub_height: 3.00
top: 0.30
draw_subplot:
ylabel_kw:
rotation: horizontal
notebook:
class: target
inherits: notebook
draw_figure:
left: 0.50
sub_width: 1.59
wspace: 0.10
right: 0.25
bottom: 0.40
sub_height: 1.59
hspace: 0.10
top: 0.25
multiplot: True
draw_subplot:
legend: False
ylabel_kw:
rotation: horizontal
y2label_kw:
labelpad: 6
draw_dataset:
colorbar_kw:
ztick_fp: 8r
zlabel_fp: 10b
label_kw:
fp: 10b
border_lw: 1
presentation_wide:
class: target
inherits: presentation_wide
draw_figure:
left: 1.50
sub_width: 3.40
wspace: 0.30
sub_height: 3.40
hspace: 0.30
bottom: 1.20
multiplot: True
draw_subplot:
legend: False
ylabel_kw:
rotation: horizontal
labelpad: 10
y2label_kw:
labelpad: 20
draw_dataset:
contour_kw:
linewidths: 2
plot_kw:
ms: 2
colorbar_kw:
ztick_fp: 20r
zlabel_fp: 24b
ztick_params:
pad: 5
zlw: 3
zlabel_kw:
labelpad: 30
label_kw:
fp: 24b
border_lw: 3
colorbar_right:
class: appearance
help: Draw colorbar to right of plot
draw_dataset:
colorbar: True
partner_kw:
position: right
colorbar_kw:
zlabel_kw:
rotation: 270
labelpad: 14
colorbar_top:
class: appearance
help: Draw colorbar above plot
draw_dataset:
colorbar: True
partner_kw:
position: top
colorbar_kw:
zlabel_kw:
rotation: 0
labelpad: 5
colorbar_bottom:
class: appearance
help: Draw colorbar below plot
draw_dataset:
colorbar: True
partner_kw:
position: bottom
hspace: 0.5
"""
@manage_defaults_presets()
@manage_kwargs()
def draw_dataset(self, subplot, label=None, kind="wham",
nan_to_max=True, heatmap=True, colorbar=False, contour=True,
mask=False, outline=False, plot=False, verbose=1, debug=0, **kwargs):
"""
"""
from copy import copy
from warnings import warn
import numpy as np
import six
from .myplotspec import get_color
from .myplotspec.axes import set_colorbar
from .AnalyticalDataset import AnalyticalDataset
from .CDLDataset import CDLDataset
from .DiffDataset import DiffDataset
from .ImageDataset import ImageDataset
from .NDRDDataset import NDRDDataset
from .PDistDataset import PDistDataset
from .WHAMDataset import WHAMDataset
dataset_classes = {"analytical": AnalyticalDataset,
"cdl": CDLDataset,
"diff": DiffDataset,
"image": ImageDataset,
"ndrd": NDRDDataset,
"pdist": PDistDataset,
"wham": WHAMDataset}
# Load data
kind = kind.lower()
dataset_kw = kwargs.get("dataset_kw", kwargs)
if "infile" in kwargs:
dataset_kw["infile"] = kwargs["infile"]
dataset = self.load_dataset(dataset_classes[kind],
dataset_classes=dataset_classes,
verbose=verbose, debug=debug, **dataset_kw)
if dataset is None:
return
# Draw heatmap and colorbar
if heatmap:
if not (hasattr(dataset, "dist") and hasattr(dataset, "x_bins")
and hasattr(dataset, "y_bins")):
warn("'heatmap' is enabled but dataset does not have the "
"necessary attributes 'mask', 'x_bins', and 'y_bins', "
"skipping.")
else:
heatmap_kw = copy(kwargs.get("heatmap_kw", {}))
heatmap_dist = copy(dataset.dist)
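                # unsampled (NaN) bins are filled with the max so they render at
                # the top of the color scale rather than as holes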
if nan_to_max:
heatmap_dist[np.isnan(heatmap_dist)] = np.nanmax(
heatmap_dist)
pcolormesh = subplot.pcolormesh(dataset.x_bins, dataset.y_bins,
heatmap_dist.T, zorder=0.1, **heatmap_kw)
if colorbar:
if not hasattr(subplot, "_mps_partner_subplot"):
from .myplotspec.axes import add_partner_subplot
add_partner_subplot(subplot, **kwargs)
set_colorbar(subplot, pcolormesh, **kwargs)
# Draw contour
if contour:
if not (hasattr(dataset, "dist") and hasattr(dataset, "x_centers")
and hasattr(dataset, "y_centers")):
warn("'contour' is enabled but dataset does not have the "
"necessary attributes 'dist', 'x_centers', and "
"'y_centers', skipping.")
else:
contour_kw = copy(kwargs.get("contour_kw", {}))
if "levels" in kwargs:
contour_kw["levels"] = kwargs.pop("color")
elif "levels" not in contour_kw:
contour_kw["levels"] = range(0,
int(np.ceil(np.nanmax(dataset.dist))))
contour = subplot.contour(dataset.x_centers, dataset.y_centers,
dataset.dist.T, zorder=0.2, **contour_kw)
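            # closed contour paths get a vertex re-appended so the stroked
            # outline joins cleanly when drawn (an apparent rendering workaround)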
for collection in contour.collections:
for path in collection.get_paths():
if np.all(path.vertices[0] == path.vertices[-1]):
path.vertices = np.append(path.vertices,
[path.vertices[1]], axis=0)
# Draw mask
if mask:
if not (hasattr(dataset, "mask") and hasattr(dataset, "x_bins")
and hasattr(dataset, "y_bins")):
warn("'mask' is enabled but dataset does not have the "
"necessary attributes 'mask', 'x_bins', and 'y_bins', "
"skipping.")
else:
mask_kw = copy(kwargs.get("mask_kw", {}))
subplot.pcolormesh(dataset.x_bins, dataset.y_bins,
dataset.mask.T, zorder=0.3, **mask_kw)
# Draw outline
if outline:
if not (hasattr(dataset, "mask") and hasattr(dataset, "x_bins")
and hasattr(dataset, "y_bins")):
warn("'outline' is enabled but dataset does not have the "
"necessary attributes 'mask', 'x_bins', and 'y_bins', "
"skipping.")
else:
outline_kw = copy(kwargs.get("outline_kw", {}))
for x in range(dataset.dist.shape[0]):
for y in range(dataset.dist.shape[1]):
if not dataset.mask[x,y]:
if (x != 0
and y != dataset.mask.shape[1]
and dataset.mask[x-1,y]):
subplot.plot(
[dataset.x_bins[x], dataset.x_bins[x]],
[dataset.y_bins[y], dataset.y_bins[y+1]],
zorder=0.4, **outline_kw)
if (x != dataset.mask.shape[0] - 1
and y != dataset.mask.shape[1]
and dataset.mask[x+1,y]):
subplot.plot(
[dataset.x_bins[x+1], dataset.x_bins[x+1]],
[dataset.y_bins[y], dataset.y_bins[y+1]],
zorder=0.4, **outline_kw)
if (x != dataset.mask.shape[0]
and y != 0
and dataset.mask[x,y-1]):
subplot.plot(
[dataset.x_bins[x], dataset.x_bins[x+1]],
[dataset.y_bins[y], dataset.y_bins[y]],
zorder=0.4, **outline_kw)
if (x != dataset.mask.shape[0]
and y != dataset.mask.shape[1] - 1
and dataset.mask[x,y+1]):
subplot.plot(
[dataset.x_bins[x], dataset.x_bins[x+1]],
[dataset.y_bins[y+1], dataset.y_bins[y+1]],
zorder=0.4, **outline_kw)
# Draw plot
if plot:
if not (hasattr(dataset, "x") and hasattr(dataset, "y")):
warn("'plot' is enabled but dataset does not have the "
"necessary attributes 'x' and 'y', skipping.")
else:
plot_kw = copy(kwargs.get("plot_kw", {}))
subplot.plot(dataset.x, dataset.y, **plot_kw)
if label is not None:
from .myplotspec.text import set_text
label_kw = kwargs.get("label_kw", {})
set_text(subplot, s=label, **label_kw)
def main(self):
"""
Provides command-line functionality.
"""
import argparse
from inspect import getmodule
parser = argparse.ArgumentParser(
description = getmodule(self.__class__).__doc__,
formatter_class = argparse.RawTextHelpFormatter)
super(RamachandranFigureManager, self).main(parser=parser)
#################################### MAIN #####################################
if __name__ == "__main__":
RamachandranFigureManager().main()
|
ASinanSaglam/Ramaplot
|
Ramaplot.py
|
Python
|
bsd-3-clause
| 20,979
|
[
"Amber"
] |
d6ec7396349972014879225d132ca052e2be9397e27af6c9ad3e8a8e887bec98
|
import os
from os import path
from fsgamesys.amiga.amiga import Amiga
# from fsgamesys.amiga.amigaconfig import AmigaConfig
from fsgamesys.amiga.amigaconstants import AmigaConstants
from fsgamesys.amiga.config import Config
from fsgamesys.amiga.launchhandler import (
amiga_path_to_host_path,
encode_file_comment,
system_configuration,
)
from fsgamesys.amiga.types import ConfigType
from fsgamesys.amiga.whdload import prepare_whdload_system_volume
from fsgamesys.amiga.xpkmaster import XpkMaster
from fsgamesys.files.installablefile import InstallableFile
from fsgamesys.files.installablefiles import InstallableFiles
from fsgamesys.network import is_http_url
def prepare_amiga_hard_drives(config: ConfigType, files: InstallableFiles):
for i in range(Amiga.MAX_HARD_DRIVES):
prepare_amiga_hard_drive(config, i, files)
if not Config(config).whdload_args():
# The WHDLoad override setting and config key does not quite
# follow the usual semantics of configs/settings unfortunately, so
# we really want whdload_quit_key to be cleared when not using
# WHDLoad. Otherwise the emulator will try to quit everything with
# the WHDLoad quit key (when overriden).
Config(config).set_whdload_quit_key("")
def maybe_disable_save_states(config: ConfigType):
# Save states cannot currently be used with temporarily created
# hard drives, as HD paths are embedded into the save states, and
# restoring the save state causes problems.
if Config(config).unsafe_save_states():
# User explicitly allows unsafe save states, not disabling
return
Config(config).set_save_states(False)
def prepare_amiga_hard_drive(
config: ConfigType, drive_index: int, files: InstallableFiles
):
src = Config(config).hard_drive_n(drive_index)
if not src:
return
if is_http_url(src):
# name = src.rsplit("/", 1)[-1]
# name = unquote(name)
# self.on_progress(gettext("Downloading {0}...".format(name)))
# dest = os.path.join(self.temp_dir, name)
# Downloader.install_file_from_url(src, dest)
# src = dest
raise NotImplementedError()
elif src.startswith("hd://game/"):
prepare_game_hard_drive(config, drive_index, src, files)
maybe_disable_save_states(config)
elif src.startswith("file_list:"):
prepare_game_hard_drive(config, drive_index, src, files)
maybe_disable_save_states(config)
elif src.startswith("hd://template/workbench/"):
# self.prepare_workbench_hard_drive(drive_index, src)
raise NotImplementedError()
maybe_disable_save_states(config)
elif src.startswith("hd://template/empty/"):
# self.prepare_empty_hard_drive(drive_index, src)
raise NotImplementedError()
maybe_disable_save_states(config)
else:
# raise NotImplementedError()
# dest_dir = "DH0"
configKey = "hard_drive_{}".format(drive_index)
# if not config.get(configKey, ""):
config[configKey] = src
# if ext in Archive.extensions:
# print("zipped hard drive", src)
# self.unpack_hard_drive(index, src)
# self.disable_save_states()
# elif src.endswith("HardDrive"):
# print("XML-described hard drive", src)
# self.unpack_hard_drive(index, src)
# self.disable_save_states()
# else:
# src = Paths.expand_path(src)
# self.config[key] = src
if drive_index == 0:
prepare_dh0_files(config, files)
# def get_file_list_for_game_uuid(game_uuid):
# # FIXME: This is an ugly hack, we should already be told what
# # database to use.
# try:
# game_database = self.fsgs.get_game_database()
# values = game_database.get_game_values_for_uuid(game_uuid)
# except LookupError:
# try:
# game_database = self.fsgs.game_database("CD32")
# values = game_database.get_game_values_for_uuid(game_uuid)
# except LookupError:
# game_database = self.fsgs.game_database("CDTV")
# values = game_database.get_game_values_for_uuid(game_uuid)
# file_list = json.loads(values["file_list"])
# return file_list
def prepare_dh0_files(config: ConfigType, files: InstallableFiles):
whdload_args = Config(config).whdload_args()
hdinst_args = Config(config).hdinst_args()
    hd_startup = Config(config).hdinst_args()  # FIXME: likely a copy-paste slip; this probably should read a dedicated hd_startup value
if not whdload_args and not hdinst_args and not hd_startup:
return
# dest_dir = os.path.join(self.temp_dir, "DH0")
# dest_dir = "HardDrives/DH0"
dest_dir = "DH0"
if not config.get("hard_drive_0", ""):
config["hard_drive_0"] = dest_dir
config["hard_drive_0_label"] = "Workbench"
print("prepare_dh0_files, dest_dir = ", dest_dir)
s_dir = os.path.join(dest_dir, "S")
# if not os.path.exists(s_dir):
# os.makedirs(s_dir)
files[s_dir + os.sep] = InstallableFile.fromDirectory()
libs_dir = os.path.join(dest_dir, "Libs")
# if not os.path.exists(libs_dir):
# os.makedirs(libs_dir)
files[libs_dir + os.sep] = InstallableFile.fromDirectory()
devs_dir = os.path.join(dest_dir, "Devs")
# if not os.path.exists(devs_dir):
# os.makedirs(devs_dir)
files[devs_dir + os.sep] = InstallableFile.fromDirectory()
fonts_dir = os.path.join(dest_dir, "Fonts")
# if not os.path.exists(fonts_dir):
# os.makedirs(fonts_dir)
files[fonts_dir + os.sep] = InstallableFile.fromDirectory()
if hd_startup:
config["hard_drive_0_priority"] = "6"
# don't copy setpatch by default, at least not yet
pass
else:
# self.hd_requirements.add("setpatch")
# Signal to the launch system that SetPatch should be included in
# Startup-Sequence.
config["__setpatch__"] = "1"
prepare_setpatch(dest_dir, files)
# workbenchVersion: Optional[str] = None
# amiga_model = Config(config).amiga_model()
# if amiga_model in ["A500+", "A600"]:
# workbenchVersion = "2.04"
# elif amiga_model.startswith("A1200"):
# workbenchVersion = "3.0"
# elif amiga_model.startswith("A4000"):
# workbenchVersion = "3.0"
# # else:
# # workbenchVersion = None
# FIXME:
# if "workbench" in self.hd_requirements:
# if not workbench_version:
# raise Exception(
# "Unsupported workbench version for hd_requirements"
# )
# extractor = WorkbenchExtractor(self.fsgs)
# extractor.install_version(workbench_version, dest_dir)
# # install_workbench_files(self.fsgs, dest_dir, workbench_version)
# for req in self.hd_requirements:
# if "/" in req:
# # assume a specific workbench file
# extractor = WorkbenchExtractor(self.fsgs)
# extractor.install_version(
# workbench_version,
# dest_dir,
# [req],
# install_startup_sequence=False,
# )
if whdload_args:
# prepare_whdload_files(dest_dir, s_dir)
prepare_whdload_system_volume(
dest_dir, s_dir, config=config, files=files
)
elif hdinst_args:
# self.write_startup_sequence(s_dir, hdinst_args)
raise NotImplementedError("hdinst_args needs fixing")
elif hd_startup:
# self.write_startup_sequence(s_dir, hd_startup)
raise NotImplementedError("hd_startup needs fixing")
# FIXME: Test!
if "xpkmaster.library" in Config(config).hd_requirements():
XpkMaster.addFiles(dest_dir, files=files)
system_configuration_file = os.path.join(devs_dir, "system-configuration")
# if not os.path.exists(system_configuration_file):
# with open(system_configuration_file, "wb") as f:
# f.write(system_configuration)
files[system_configuration_file] = InstallableFile.fromData(
data=system_configuration
)
# def copy_setpatch(self, base_dir):
# dest = os.path.join(base_dir, "C")
# if not os.path.exists(dest):
# os.makedirs(dest)
# dest = os.path.join(dest, "SetPatch")
# for checksum in workbench_disks_with_setpatch_39_6:
# path = self.fsgs.file.find_by_sha1(checksum)
# if path:
# print("found WB DISK with SetPatch 39.6 at", path)
# try:
# input_stream = self.fsgs.file.open(path)
# except Exception:
# traceback.print_exc()
# else:
# wb_data = input_stream.read()
# # archive = Archive(path)
# # if archive.exists(path):
# # f = archive.open(path)
# # wb_data = f.read()
# # f.close()
# if self.extract_setpatch_39_6(wb_data, dest):
# print("SetPatch installed")
# self.setpatch_installed = True
# break
# else:
# print("WARNING: extract_setpatch_39_6 returned False")
# # else:
# # print("oops, path does not exist")
# else:
# print("WARNING: did not find SetPatch 39.6")
# FIXME: This requires that we index .ADF files. Alternatively, we need to
# register a function instead of "data" / "sha1" in order to extract the
# SetPatch file
def prepare_setpatch(hd_dir: str, files: InstallableFiles):
# FIXME: Only optional if not using netplay?
files[path.join(hd_dir, "C", "SetPatch")] = InstallableFile(
sha1=AmigaConstants.SETPATCH_39_6_SHA1, optional=True
)
def prepare_game_hard_drive(
config: ConfigType, drive_index: int, src: str, files: InstallableFiles
):
print("prepare_game_hard_drive", drive_index, src)
if src.startswith("file_list:"):
_, _, drive = src.split("/")
# file_list = config.file_list()
else:
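        # e.g. "hd://game/<game-uuid>/DH0" splits into five parts, leaving drive == "DH0"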
_, _, _, _, drive = src.split("/")
# file_list = get_file_list_for_game_uuid(game_uuid)
# file_list = config.file_list()
# raise NotImplementedError("hmm")
file_list = Config(config).file_list()
drive_prefix = drive + "/"
# dir_name = "DH{0}".format(drive_index)
dir_name = drive
# dir_path = os.path.join(self.temp_dir, dir_name)
# dir_path = os.path.join(config.run_dir(), "HardDrives", dir_name)
# dir_path = os.path.join("HardDrives", dir_name)
dir_path = dir_name
for file_entry in file_list:
name = file_entry["name"]
# Only process files from the correct drive
if not name.startswith(drive_prefix):
continue
# extract Amiga relative path and convert each path component
# to host file name (where needed).
# amiga_rel_path = name[len(drive_prefix) :]
# print("amiga_rel_path", amiga_rel_path)
# amiga_rel_parts = amiga_rel_path.split("/")
# for i, part in enumerate(amiga_rel_parts):
# # part can be blank if amiga_rel_parts is a directory
# # (ending with /)
# if part:
# amiga_rel_parts[i] = amiga_filename_to_host_filename(part)
# amiga_rel_path = "/".join(amiga_rel_parts)
amiga_rel_path = amiga_path_to_host_path(name[len(drive_prefix) :])
dst_file = path.join(dir_path, amiga_rel_path.replace("/", os.sep))
print(repr(dst_file))
# x-Important to check the original name here and not the normalized path
# x-since normalization could have changed / to \ or even remove the
# x-trailing slash/backslash.
# if name.endswith("/"):
if dst_file.endswith(os.sep):
# os.makedirs(Paths.encode(dst_file))
files[dst_file] = InstallableFile.fromDirectory()
continue
# if not os.path.exists(os.path.dirname(dst_file)):
# os.makedirs(os.path.dirname(dst_file))
# sha1 = file_entry["sha1"]
# current_task.set_progress(os.path.basename(dst_file))
# current_task.set_progress(amiga_rel_path)
# self.fsgs.file.copy_game_file("sha1://{0}".format(sha1), dst_file)
# files.append({
# "path": dst_file,
# "sha1": file_entry["sha1"],
# "size": file_entry["size"]
# })
files[dst_file] = InstallableFile(
sha1=file_entry["sha1"], size=file_entry["size"]
)
# src_file = self.fsgs.file.find_by_sha1(sha1)
# if not os.path.exists(os.path.dirname(dst_file)):
# os.makedirs(os.path.dirname(dst_file))
# stream = self.fsgs.file.open(src_file)
# # archive = Archive(src_file)
# # f = archive.open(src_file)
# data = stream.read()
# assert hashlib.sha1(data).hexdigest() == sha1
# with open(dst_file, "wb") as out_file:
# out_file.write(data)
# noinspection SpellCheckingInspection
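        # fields of a UAE .uaem metadata line: protection bits, timestamp, and an
        # optional file comment (assumed layout, mirroring the defaults below)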
metadata = [
"----rwed",
" ",
"2000-01-01 00:00:00.00",
" ",
"",
"\n",
]
if "comment" in file_entry:
metadata[4] = encode_file_comment(file_entry["comment"])
# with open(dst_file + ".uaem", "wb") as out_file:
# out_file.write("".join(metadata).encode("UTF-8"))
data = "".join(metadata).encode("UTF-8")
files[dst_file + ".uaem"] = InstallableFile.fromData(data)
Config(config).set_hard_drive_n(
drive_index, os.path.join(config["run_dir"], dir_path)
)
|
FrodeSolheim/fs-uae-launcher
|
fsgamesys/amiga/harddrives.py
|
Python
|
gpl-2.0
| 13,642
|
[
"ADF"
] |
5c934655ad36ab7b1f3d0634c957de8a315b089f3b07887fbde688f22926e1ca
|
"""
=======================================
Purpose: make a histogram of fits image
=======================================
Input: fits image file
Output: histogram of pixel values
-------------------
*By: Jianrong Deng 20170601
-------------------
"""
import numpy as np
##############################################################################
# Set up matplotlib and use a nicer set of plot parameters
import matplotlib.pyplot as plt
#from astropy.visualization import astropy_mpl_style
#plt.style.use(astropy_mpl_style)
# read in fits data file
from astropy.io import fits
image_file = '/Users/jdeng/baiduCloudDisk/LAMOST/data/20150923/bias/rb-16r-20150923235754-10000-82496157.fit.gz'
##############################################################################
# Use `astropy.io.fits.info()` to display the structure of the file:
fits.info(image_file)
##############################################################################
# Generally the image information is located in the Primary HDU, also known
# as extension 0. Here, we use `astropy.io.fits.getdata()` to read the image
# data from this first extension using the keyword argument ``ext=0``:
image_data = fits.getdata(image_file, ext=0)
##############################################################################
# The data is now stored as a 2D numpy array. Print the dimensions using the
# shape attribute:
print(image_data.shape)
"""
### option for 'bins' in numpy histogram:
‘auto’: Maximum of the ‘sturges’ and ‘fd’ estimators. Provides good all around performance.
‘fd’ (Freedman Diaconis Estimator): Robust (resilient to outliers) estimator that takes into account data variability and data size.
‘sturges’: R’s default method, only accounts for data size. Only optimal for gaussian data and underestimates number of bins for large non-gaussian datasets.
Returns
-------
hist : array
The values of the histogram. See `density` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
Ref: https://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram.html#numpy.histogram
"""
# histogram our data with numpy
#
#hist, bins = np.histogram (image_data, 'auto')
#for i in range(len(bins)-1):
# print ( i, '\t\t:', bins[i], '\t\t: ', hist[i])
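# For example (a small sketch independent of the FITS image above; synthetic
# Gaussian data, numpy only):
# demo = np.random.normal(loc=2300, scale=40, size=10000)
# hist, bin_edges = np.histogram(demo, bins='auto')
# # bin_edges is always one entry longer than hist:
# # len(bin_edges) == len(hist) + 1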
##############################################################################
# plot the histogram
plt.figure()
#plt.hist(image_data.flatten(), bins=400, range=[2100, 2500])
plt.hist(image_data.flatten(), bins=50)
#plt.colorbar()
#plt.xscale('log')
plt.yscale('log')
plt.show()
|
jianrongdeng/LAMOST
|
ana/scripts/histogram_fits-image.py
|
Python
|
gpl-3.0
| 2,680
|
[
"Gaussian"
] |
5d8773f3d141d979ced02613a18ccfd0ce34213fe53d464823b8aa6b567e7362
|
import math
import re # Used to fix SVG exporting
from PyQt4 import QtCore, QtGui, QtSvg
import qt4_circular_render as crender
import qt4_rect_render as rrender
from main import _leaf, NodeStyle, _FaceAreas
from qt4_gui import _NodeActions as _ActionDelegator
from qt4_face_render import update_node_faces, _FaceGroupItem, _TextFaceItem
import faces
## | General scheme of node content
## |==========================================================================================================================|
## | fullRegion |
## | nodeRegion |================================================================================|
## | | fullRegion ||
## | | nodeRegion |=======================================||
## | | | fullRegion |||
## | | | nodeRegion |||
## | | | |branch_length | nodeSize | facesRegion|||
## | | branch_length | nodesize|faces-right |=======================================||
## | | |(facesRegion)|=======================================||
## | | | fullRegion ||
## | | | nodeRegion ||
## | faces-top | | | | branch_length | nodeSize | facesRegion||
## | branch_length | NodeSize |faces-right | |=======================================||
## | faces-bottom | |(facesRegion)|================================================================================|
## | |=======================================| |
## | | fullRegion | |
## | | nodeRegion | |
## | | branch_length | nodeSize | facesRegion| |
## | |=======================================| |
## |==========================================================================================================================|
class _CircleItem(QtGui.QGraphicsEllipseItem, _ActionDelegator):
def __init__(self, node):
self.node = node
d = node.img_style["size"]
QtGui.QGraphicsEllipseItem.__init__(self, 0, 0, d, d)
_ActionDelegator.__init__(self)
self.setBrush(QtGui.QBrush(QtGui.QColor(self.node.img_style["fgcolor"])))
self.setPen(QtGui.QPen(QtGui.QColor(self.node.img_style["fgcolor"])))
class _RectItem(QtGui.QGraphicsRectItem, _ActionDelegator):
def __init__(self, node):
self.node = node
d = node.img_style["size"]
QtGui.QGraphicsRectItem.__init__(self, 0, 0, d, d)
_ActionDelegator.__init__(self)
self.setBrush(QtGui.QBrush(QtGui.QColor(self.node.img_style["fgcolor"])))
self.setPen(QtGui.QPen(QtGui.QColor(self.node.img_style["fgcolor"])))
class _SphereItem(QtGui.QGraphicsEllipseItem, _ActionDelegator):
def __init__(self, node):
self.node = node
d = node.img_style["size"]
r = d/2
QtGui.QGraphicsEllipseItem.__init__(self, 0, 0, d, d)
_ActionDelegator.__init__(self)
#self.setBrush(QtGui.QBrush(QtGui.QColor(self.node.img_style["fgcolor"])))
self.setPen(QtGui.QPen(QtGui.QColor(self.node.img_style["fgcolor"])))
gradient = QtGui.QRadialGradient(r, r, r,(d)/3,(d)/3)
gradient.setColorAt(0.05, QtCore.Qt.white);
gradient.setColorAt(1, QtGui.QColor(self.node.img_style["fgcolor"]));
self.setBrush(QtGui.QBrush(gradient))
# self.setPen(QtCore.Qt.NoPen)
class _EmptyItem(QtGui.QGraphicsItem):
def __init__(self, parent=None):
QtGui.QGraphicsItem.__init__(self)
self.setParentItem(parent)
# qt4.6+ Only
try:
self.setFlags(QtGui.QGraphicsItem.ItemHasNoContents)
except:
pass
def boundingRect(self):
return QtCore.QRectF(0,0,0,0)
def paint(self, *args, **kargs):
return
class _TreeItem(QtGui.QGraphicsRectItem):
def __init__(self, parent=None):
QtGui.QGraphicsRectItem.__init__(self)
self.setParentItem(parent)
self.n2i = {}
self.n2f = {}
class _NodeItem(_EmptyItem):
def __init__(self, node, parent):
_EmptyItem.__init__(self, parent)
self.node = node
self.nodeRegion = QtCore.QRectF()
self.facesRegion = QtCore.QRectF()
self.fullRegion = QtCore.QRectF()
self.highlighted = False
class _LineItem(QtGui.QGraphicsLineItem):
def paint(self, painter, option, widget):
QtGui.QGraphicsLineItem.paint(self, painter, option, widget)
class _PointerItem(QtGui.QGraphicsRectItem):
def __init__(self, parent=None):
QtGui.QGraphicsRectItem.__init__(self,0,0,0,0, parent)
self.color = QtGui.QColor("blue")
self._active = False
self.setBrush(QtGui.QBrush(QtCore.Qt.NoBrush))
def paint(self, p, option, widget):
p.setPen(self.color)
p.drawRect(self.rect())
        return  # the info-text drawing below is left unreachable (disabled)
# Draw info text
font = QtGui.QFont("Arial",13)
text = "%d selected." % len(self.get_selected_nodes())
textR = QtGui.QFontMetrics(font).boundingRect(text)
if self.rect().width() > textR.width() and \
self.rect().height() > textR.height()/2 and 0: # OJO !!!!
p.setPen(QtGui.QPen(self.color))
p.setFont(QtGui.QFont("Arial",13))
p.drawText(self.rect().bottomLeft().x(),self.rect().bottomLeft().y(),text)
def get_selected_nodes(self):
selPath = QtGui.QPainterPath()
selPath.addRect(self.rect())
self.scene().setSelectionArea(selPath)
return [i.node for i in self.scene().selectedItems()]
def setActive(self,bool):
self._active = bool
def isActive(self):
return self._active
class _TreeScene(QtGui.QGraphicsScene):
def __init__(self):
QtGui.QGraphicsScene.__init__(self)
self.view = None
self.tree = None
def init_data(self, tree, img, n2i, n2f):
self.master_item = _EmptyItem()
self.view = None
self.tree = tree
self.n2i = n2i
self.n2f = n2f
self.img = img
# Initialize scene
self.buffer_node = None # Used to copy and paste
self.pointer = _PointerItem(self.master_item)
self.highlighter = QtGui.QGraphicsPathItem(self.master_item)
# Set the scene background
self.setBackgroundBrush(QtGui.QColor("white"))
#self.setBackgroundBrush(QtGui.QBrush(QtCore.Qt.NoBrush))
# def mousePressEvent(self,e):
# pos = self.pointer.mapFromScene(e.scenePos())
# self.pointer.setRect(pos.x(),pos.y(),10,10)
# self.pointer.startPoint = QtCore.QPointF(pos.x(), pos.y())
# self.pointer.setActive(True)
# self.pointer.setVisible(True)
# QtGui.QGraphicsScene.mousePressEvent(self,e)
#
# def mouseReleaseEvent(self,e):
# curr_pos = self.pointer.mapFromScene(e.scenePos())
# x = min(self.pointer.startPoint.x(),curr_pos.x())
# y = min(self.pointer.startPoint.y(),curr_pos.y())
# w = max(self.pointer.startPoint.x(),curr_pos.x()) - x
# h = max(self.pointer.startPoint.y(),curr_pos.y()) - y
# if self.pointer.startPoint == curr_pos:
# self.pointer.setVisible(False)
# self.pointer.setActive(False)
# QtGui.QGraphicsScene.mouseReleaseEvent(self,e)
#
# def mouseMoveEvent(self,e):
# curr_pos = self.pointer.mapFromScene(e.scenePos())
# if self.pointer.isActive():
# x = min(self.pointer.startPoint.x(),curr_pos.x())
# y = min(self.pointer.startPoint.y(),curr_pos.y())
# w = max(self.pointer.startPoint.x(),curr_pos.x()) - x
# h = max(self.pointer.startPoint.y(),curr_pos.y()) - y
# self.pointer.setRect(x,y,w,h)
# QtGui.QGraphicsScene.mouseMoveEvent(self, e)
#
# def mouseDoubleClickEvent(self,e):
# QtGui.QGraphicsScene.mouseDoubleClickEvent(self,e)
def draw(self):
tree_item, self.n2i, self.n2f = render(self.tree, self.img)
if self.master_item:
self.removeItem(self.master_item)
self.master_item = _EmptyItem()
self.addItem(self.master_item)
tree_item.setParentItem(self.master_item)
def render(root_node, img, hide_root=False):
mode = img.mode
orientation = img.orientation
if not img.scale and img.tree_width:
fnode, max_dist = root_node.get_farthest_leaf(topology_only=\
img.force_topology)
if max_dist>0:
img.scale = img.tree_width / max_dist
else:
img.scale = 1
scale = img.scale
arc_span = img.arc_span
last_rotation = img.arc_start
layout_fn = img._layout_handler
parent = _TreeItem()
n2i = parent.n2i # node to items
n2f = parent.n2f # node to faces
parent.bg_layer = _EmptyItem(parent)
parent.tree_layer = _EmptyItem(parent)
parent.float_layer = _EmptyItem(parent)
parent.float_behind_layer = _EmptyItem(parent)
TREE_LAYERS = [parent.bg_layer, parent.tree_layer, parent.float_layer]
parent.bg_layer.setZValue(0)
parent.tree_layer.setZValue(2)
parent.float_behind_layer.setZValue(1)
parent.float_layer.setZValue(3)
visited = set()
to_visit = []
to_visit.append(root_node)
# This could be used to handle aligned faces in internal
# nodes.
virtual_leaves = 0
if img.show_branch_length:
bl_face = faces.AttrFace("dist", fsize=8, ftype="Arial", fgcolor="black", formatter = "%0.3g")
if img.show_branch_support:
su_face = faces.AttrFace("support", fsize=8, ftype="Arial", fgcolor="darkred", formatter = "%0.3g")
if img.show_leaf_name:
na_face = faces.AttrFace("name", fsize=10, ftype="Arial", fgcolor="black")
for n in root_node.traverse():
set_style(n, layout_fn)
if img.show_branch_length:
faces.add_face_to_node(bl_face, n, 0, position="branch-top")
if not _leaf(n) and img.show_branch_support:
faces.add_face_to_node(su_face, n, 0, position="branch-bottom")
if _leaf(n) and img.show_leaf_name:
faces.add_face_to_node(na_face, n, 0, position="branch-right")
if _leaf(n):# or len(n.img_style["_faces"]["aligned"]):
virtual_leaves += 1
rot_step = float(arc_span) / virtual_leaves
#rot_step = float(arc_span) / len([n for n in root_node.traverse() if _leaf(n)])
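    # e.g. a full circle (arc_span=360) with 36 virtual leaves gives 10 degrees per leaf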
# ::: Precalculate values :::
depth = 1
while to_visit:
node = to_visit[-1]
finished = True
if node not in n2i:
# Set style according to layout function
item = n2i[node] = _NodeItem(node, parent.tree_layer)
item.setZValue(depth)
depth += 1
if node is root_node and hide_root:
pass
else:
set_node_size(node, n2i, n2f, img)
if not _leaf(node):
# visit children starting from left most to right
# most. Very important!! check all children[-1] and
# children[0]
for c in reversed(node.children):
if c not in visited:
to_visit.append(c)
finished = False
# :: pre-order code here ::
if not finished:
continue
else:
to_visit.pop(-1)
visited.add(node)
# :: Post-order visits. Leaves are already visited here ::
if mode == "c":
if _leaf(node):
crender.init_circular_leaf_item(node, n2i, n2f, last_rotation, rot_step)
last_rotation += rot_step
else:
crender.init_circular_node_item(node, n2i, n2f)
elif mode == "r":
if _leaf(node):
rrender.init_rect_leaf_item(node, n2i, n2f)
else:
rrender.init_rect_node_item(node, n2i, n2f)
if node is not root_node or not hide_root:
render_node_content(node, n2i, n2f, img)
mainRect = parent.rect()
if mode == "c":
tree_radius = crender.render_circular(root_node, n2i, rot_step)
mainRect.adjust( -tree_radius, -tree_radius, tree_radius, tree_radius)
else:
iwidth = n2i[root_node].fullRegion.width()
iheight = n2i[root_node].fullRegion.height()
mainRect.adjust(0, 0, iwidth, iheight)
tree_radius = iwidth
# The order by which the following methods IS IMPORTANT
render_floatings(n2i, n2f, img, parent.float_layer, parent.float_behind_layer)
aligned_region_width = render_aligned_faces(img, mainRect, parent.tree_layer, n2i, n2f)
render_backgrounds(img, mainRect, parent.bg_layer, n2i, n2f)
adjust_faces_to_tranformations(img, mainRect, n2i, n2f, TREE_LAYERS)
parent.setRect(mainRect)
parent.setPen(QtGui.QPen(QtCore.Qt.NoPen))
if img.rotation:
rect = parent.boundingRect()
x = rect.x() + rect.width()/2
y = rect.y() + rect.height()/2
parent.setTransform(QtGui.QTransform().translate(x, y).rotate(img.rotation).translate(-x, -y))
frame = QtGui.QGraphicsRectItem()
parent.setParentItem(frame)
mainRect = parent.mapToScene(mainRect).boundingRect()
mainRect.adjust(-img.margin_left, -img.margin_top, \
img.margin_right, img.margin_bottom)
add_legend(img, mainRect, frame)
add_title(img, mainRect, frame)
add_scale(img, mainRect, frame)
frame.setRect(mainRect)
# Draws a border around the tree
if not img.show_border:
frame.setPen(QtGui.QPen(QtCore.Qt.NoPen))
else:
frame.setPen(QtGui.QPen(QtGui.QColor("black")))
return frame, n2i, n2f
def adjust_faces_to_tranformations(img, mainRect, n2i, n2f, tree_layers):
if img.mode == "c":
rotate_inverted_faces(n2i, n2f, img)
elif img.mode == "r" and img.orientation == 1:
for layer in tree_layers:
layer.setTransform(QtGui.QTransform().translate(0, 0).scale(-1,1).translate(0, 0))
layer.moveBy(mainRect.width(),0)
for faceblock in n2f.itervalues():
for pos, fb in faceblock.iteritems():
fb.flip_hz()
def add_legend(img, mainRect, parent):
if img.legend:
legend = _FaceGroupItem(img.legend, None)
legend.setup_grid()
legend.render()
lg_w, lg_h = legend.get_size()
dw = max(0, lg_w-mainRect.width())
legend.setParentItem(parent)
if img.legend_position == 1:
mainRect.adjust(0, -lg_h, dw, 0)
legend.setPos(mainRect.topLeft())
elif img.legend_position == 2:
mainRect.adjust(0, -lg_h, dw, 0)
pos = mainRect.topRight()
legend.setPos(pos.x()-lg_w, pos.y())
elif img.legend_position == 3:
legend.setPos(mainRect.bottomLeft())
mainRect.adjust(0, 0, dw, lg_h)
elif img.legend_position == 4:
pos = mainRect.bottomRight()
legend.setPos(pos.x()-lg_w, pos.y())
mainRect.adjust(0, 0, dw, lg_h)
def add_title(img, mainRect, parent):
if img.title:
title = _FaceGroupItem(img.title, None)
title.setup_grid()
title.render()
lg_w, lg_h = title.get_size()
dw = max(0, lg_w-mainRect.width())
title.setParentItem(parent)
mainRect.adjust(0, -lg_h, dw, 0)
title.setPos(mainRect.topLeft())
def add_scale(img, mainRect, parent):
if img.show_scale:
length=50
scaleItem = _EmptyItem()
customPen = QtGui.QPen(QtGui.QColor("black"), 1)
line = QtGui.QGraphicsLineItem(scaleItem)
line2 = QtGui.QGraphicsLineItem(scaleItem)
line3 = QtGui.QGraphicsLineItem(scaleItem)
line.setPen(customPen)
line2.setPen(customPen)
line3.setPen(customPen)
line.setLine(0, 5, length, 5)
line2.setLine(0, 0, 0, 10)
line3.setLine(length, 0, length, 10)
scale_text = "%0.2f" % (float(length) / img.scale)
scale = QtGui.QGraphicsSimpleTextItem(scale_text)
scale.setParentItem(scaleItem)
scale.setPos(0, 10)
if img.force_topology:
wtext = "Force topology is enabled!\nBranch lengths do not represent original values."
warning_text = QtGui.QGraphicsSimpleTextItem(wtext)
warning_text.setFont(QtGui.QFont("Arial", 8))
warning_text.setBrush( QtGui.QBrush(QtGui.QColor("darkred")))
warning_text.setPos(0, 32)
warning_text.setParentItem(scaleItem)
scaleItem.setParentItem(parent)
dw = max(0, length-mainRect.width())
scaleItem.setPos(mainRect.bottomLeft())
mainRect.adjust(0,0,dw, length)
def rotate_inverted_faces(n2i, n2f, img):
for node, faceblock in n2f.iteritems():
item = n2i[node]
if item.rotation > 90 and item.rotation < 270:
for pos, fb in faceblock.iteritems():
fb.rotate(180)
def render_backgrounds(img, mainRect, bg_layer, n2i, n2f):
if img.mode == "c":
max_r = mainRect.width()/2
else:
max_r = mainRect.width()
for node, item in n2i.iteritems():
if _leaf(node):
first_c = n2i[node]
last_c = n2i[node]
else:
first_c = n2i[node.children[0]]
last_c = n2i[node.children[-1]]
if img.mode == "c":
h = item.effective_height
angle_start = first_c.full_start
angle_end = last_c.full_end
parent_radius = getattr(n2i.get(node.up, None), "radius", 0)
base = parent_radius + item.nodeRegion.width()
if node.img_style["node_bgcolor"].upper() != "#FFFFFF":
bg1 = crender._ArcItem()
r = math.sqrt(base**2 + h**2)
bg1.set_arc(0, 0, parent_radius, r, angle_start, angle_end)
bg1.setParentItem(item.content.bg)
bg1.setPen(QtGui.QPen(QtGui.QColor(node.img_style["node_bgcolor"])))
bg1.setBrush(QtGui.QBrush(QtGui.QColor(node.img_style["node_bgcolor"])))
if node.img_style["faces_bgcolor"].upper() != "#FFFFFF":
bg2 = crender._ArcItem()
r = math.sqrt(base**2 + h**2)
bg2.set_arc(0, 0, parent_radius, item.radius, angle_start, angle_end)
bg2.setParentItem(item.content)
bg2.setPen(QtGui.QPen(QtGui.QColor(node.img_style["faces_bgcolor"])))
bg2.setBrush(QtGui.QBrush(QtGui.QColor(node.img_style["faces_bgcolor"])))
if node.img_style["bgcolor"].upper() != "#FFFFFF":
bg = crender._ArcItem()
bg.set_arc(0, 0, parent_radius, max_r, angle_start, angle_end)
bg.setPen(QtGui.QPen(QtGui.QColor(node.img_style["bgcolor"])))
bg.setBrush(QtGui.QBrush(QtGui.QColor(node.img_style["bgcolor"])))
bg.setParentItem(bg_layer)
bg.setZValue(item.zValue())
if img.mode == "r":
if node.img_style["bgcolor"].upper() != "#FFFFFF":
bg = QtGui.QGraphicsRectItem()
pos = item.content.mapToScene(0, 0)
bg.setPos(pos.x(), pos.y())
bg.setRect(0, 0, max_r-pos.x(), item.fullRegion.height())
bg.setPen(QtGui.QPen(QtGui.QColor(node.img_style["bgcolor"])))
bg.setBrush(QtGui.QBrush(QtGui.QColor(node.img_style["bgcolor"])))
bg.setParentItem(bg_layer)
bg.setZValue(item.zValue())
def set_node_size(node, n2i, n2f, img):
scale = img.scale
min_separation = img.min_leaf_separation
item = n2i[node]
if img.force_topology:
branch_length = item.branch_length = float(scale)
else:
branch_length = item.branch_length = float(node.dist * scale)
# Organize faces by groups
faceblock = update_node_faces(node, n2f, img)
aligned_height = 0
if _leaf(node):
if img.mode == "r":
aligned_height = faceblock["aligned"].h
elif img.mode == "c":
# aligned faces in circular mode are adjusted afterwords. The
# min radius of the largest aligned faces will be calculated.
pass
# Total height required by the node. I cannot sum up the height of
# all elements, since the position of some of them are forced to
# be on top or at the bottom of branches. This fact can produce
# and unbalanced nodeRegion center. Here, I only need to know
# about the imbalance size to correct node height. The center will
# be calculated later according to the parent position.
top_half_h = ( (node.img_style["size"]/2) +
node.img_style["hz_line_width"]/2 +
faceblock["branch-top"].h )
bottom_half_h =( (node.img_style["size"]/2) +
node.img_style["hz_line_width"]/2 +
faceblock["branch-bottom"].h )
h1 = top_half_h + bottom_half_h
h2 = max(faceblock["branch-right"].h, \
aligned_height, \
min_separation )
h = max(h1, h2)
imbalance = abs(top_half_h - bottom_half_h)
if imbalance > h2/2:
h += imbalance - (h2/2)
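    # e.g. top_half_h=30, bottom_half_h=6 gives h1=36; with h2=40, h starts at
    # max(36,40)=40, imbalance=24 > h2/2=20, so h becomes 40 + (24-20) = 44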
# This adds a vertical margin around the node elements
h += img.branch_vertical_margin
# Total width required by the node
w = sum([max(branch_length + node.img_style["size"],
faceblock["branch-top"].w + node.img_style["size"],
faceblock["branch-bottom"].w + node.img_style["size"],
),
faceblock["branch-right"].w]
)
w += node.img_style["vt_line_width"]
# rightside faces region
item.facesRegion.setRect(0, 0, faceblock["branch-right"].w, h)
# Node region
item.nodeRegion.setRect(0, 0, w, h)
# This is the node total region covered by the node
item.fullRegion.setRect(0, 0, w, h)
def render_node_content(node, n2i, n2f, img):
style = node.img_style
item = n2i[node]
item.content = _EmptyItem(item)
nodeR = item.nodeRegion
facesR = item.facesRegion
center = item.center
branch_length = item.branch_length
# Node points
ball_size = style["size"]
lw = style["vt_line_width"]
ball_start_x = nodeR.width() - facesR.width() - ball_size - lw
if ball_size:
if node.img_style["shape"] == "sphere":
node_ball = _SphereItem(node)
elif node.img_style["shape"] == "circle":
node_ball = _CircleItem(node)
elif node.img_style["shape"] == "square":
node_ball = _RectItem(node)
node_ball.setPos(ball_start_x, center-(ball_size/2.0))
#from qt4_gui import _BasicNodeActions
#node_ball.delegate = _BasicNodeActions()
#node_ball.setAcceptsHoverEvents(True)
#node_ball.setCursor(QtCore.Qt.PointingHandCursor)
else:
node_ball = None
# Branch line to parent
pen = QtGui.QPen()
set_pen_style(pen, style["hz_line_type"])
pen.setColor(QtGui.QColor(style["hz_line_color"]))
pen.setWidth(style["hz_line_width"])
pen.setCapStyle(QtCore.Qt.FlatCap)
#pen.setCapStyle(QtCore.Qt.RoundCap)
#pen.setJoinStyle(QtCore.Qt.RoundJoin)
hz_line = _LineItem()
hz_line.setPen(pen)
# the -vt_line_width is to solve small imperfections in line
# crosses.
hz_line.setLine(0, center,
branch_length, center)
if img.complete_branch_lines_when_necessary:
extra_line = _LineItem(branch_length, center, ball_start_x, center)
pen = QtGui.QPen()
item.extra_branch_line = extra_line
set_pen_style(pen, img.extra_branch_line_type)
pen.setColor(QtGui.QColor(img.extra_branch_line_color))
pen.setCapStyle(QtCore.Qt.FlatCap)
pen.setWidth(style["hz_line_width"])
extra_line.setPen(pen)
else:
extra_line = None
# Attach branch-right faces to child
fblock_r = n2f[node]["branch-right"]
fblock_r.render()
fblock_r.setPos(nodeR.width() - facesR.width(), \
center-fblock_r.h/2)
# Attach branch-bottom faces to child
fblock_b = n2f[node]["branch-bottom"]
fblock_b.render()
fblock_b.setPos(0, center + style["hz_line_width"]/2)
# Attach branch-top faces to child
fblock_t = n2f[node]["branch-top"]
fblock_t.render()
fblock_t.setPos(0, center-fblock_t.h-style["hz_line_width"]/2)
# Vertical line
if not _leaf(node):
if img.mode == "c":
vt_line = QtGui.QGraphicsPathItem()
elif img.mode == "r":
vt_line = _LineItem(item)
first_child_part = n2i[node.children[0]]
last_child_part = n2i[node.children[-1]]
c1 = first_child_part.start_y + first_child_part.center
c2 = last_child_part.start_y + last_child_part.center
fx = nodeR.width()-node.img_style["vt_line_width"]/2
vt_line.setLine(fx, c1,\
fx, c2)
pen = QtGui.QPen()
set_pen_style(pen, style["vt_line_type"])
pen.setColor(QtGui.QColor(style["vt_line_color"]))
pen.setWidth(style["vt_line_width"])
pen.setCapStyle(QtCore.Qt.FlatCap)
vt_line.setPen(pen)
item.vt_line = vt_line
else:
vt_line = None
item.bg = QtGui.QGraphicsItemGroup()
item.movable_items = [] #QtGui.QGraphicsItemGroup()
item.static_items = [] #QtGui.QGraphicsItemGroup()
# Items fow which coordinates are exported in the image map
item.mapped_items = [node_ball, fblock_r, fblock_b, fblock_t]
for i in [node_ball, fblock_r, fblock_b, fblock_t]:
if i:
#item.movable_items.addToGroup(i)
item.movable_items.append(i)
i.setParentItem(item.content)
for i in [vt_line, extra_line, hz_line]:
if i:
#item.static_items.addToGroup(i)
item.static_items.append(i)
i.setParentItem(item.content)
#item.movable_items.setParentItem(item.content)
#item.static_items.setParentItem(item.content)
def set_pen_style(pen, line_style):
if line_style == 0:
pen.setStyle(QtCore.Qt.SolidLine)
elif line_style == 1:
pen.setStyle(QtCore.Qt.DashLine)
elif line_style == 2:
pen.setStyle(QtCore.Qt.DotLine)
def set_style(n, layout_func):
if not isinstance(getattr(n, "img_style", None), NodeStyle):
n.img_style = NodeStyle()
n._temp_faces = _FaceAreas()
if layout_func:
layout_func(n)
def render_floatings(n2i, n2f, img, float_layer, float_behind_layer):
#floating_faces = [ [node, fb["float"]] for node, fb in n2f.iteritems() if "float" in fb]
for node, faces in n2f.iteritems():
face_set = [ [float_layer, faces.get("float", None)],
[float_behind_layer, faces.get("float-behind",None)]]
for parent_layer,fb in face_set:
if not fb:
continue
item = n2i[node]
fb.setParentItem(parent_layer)
if item.extra_branch_line:
xtra = item.extra_branch_line.line().dx()
else:
xtra = 0
if img.mode == "c":
# Floatings are positioned over branches
crender.rotate_and_displace(fb, item.rotation, fb.h, item.radius - item.nodeRegion.width()+ xtra)
# Floatings are positioned starting from the node circle
#crender.rotate_and_displace(fb, item.rotation, fb.h, item.radius - item.nodeRegion.width())
elif img.mode == "r":
fb.setPos(item.content.mapToScene(0, item.center-(fb.h/2)))
z = item.zValue()
if not img.children_faces_on_top:
z = -z
fb.setZValue(z)
fb.update_columns_size()
fb.render()
def render_aligned_faces(img, mainRect, parent, n2i, n2f):
# Prepares and renders aligned face headers. Used to later
# place aligned faces
aligned_faces = [ [node, fb["aligned"]] for node, fb in n2f.iteritems()\
if fb["aligned"].column2faces and _leaf(node)]
# If no aligned faces, just return an offset of 0 pixels
if not aligned_faces:
return 0
# Load header and footer
if img.mode == "r":
tree_end_x = mainRect.width()
fb_head = _FaceGroupItem(img.aligned_header, None)
fb_head.setParentItem(parent)
fb_foot = _FaceGroupItem(img.aligned_foot, None)
fb_foot.setParentItem(parent)
surroundings = [[None,fb_foot], [None, fb_head]]
mainRect.adjust(0, -fb_head.h, 0, fb_foot.h)
else:
tree_end_x = mainRect.width()/2
surroundings = []
# Place aligned faces and calculates the max size of each
# column (needed to place column headers)
c2max_w = {}
maxh = 0
maxh_node = None
for node, fb in aligned_faces + surroundings:
if fb.h > maxh:
maxh = fb.h
maxh_node = node
for c, w in fb.c2max_w.iteritems():
c2max_w[c] = max(w, c2max_w.get(c,0))
extra_width = sum(c2max_w.values())
# If rect mode, render header and footer
if img.mode == "r":
if img.draw_aligned_faces_as_table:
fb_head.setup_grid(c2max_w)
fb_foot.setup_grid(c2max_w)
fb_head.render()
fb_head.setPos(tree_end_x, mainRect.top())
fb_foot.render()
fb_foot.setPos(tree_end_x, mainRect.bottom()-fb_foot.h)
if img.orientation == 1:
fb_head.flip_hz()
fb_foot.flip_hz()
elif img.mode == "c" and not img.allow_face_overlap:
angle = n2i[maxh_node].angle_span
rad, off = crender.get_min_radius(1, maxh, angle, tree_end_x)
extra_width += rad - tree_end_x
tree_end_x = rad
# Place aligned faces
for node, fb in aligned_faces:
item = n2i[node]
item.mapped_items.append(fb)
if img.draw_aligned_faces_as_table:
if img.aligned_table_style == 0:
fb.setup_grid(c2max_w, as_grid=True)
elif img.aligned_table_style == 1:
fb.setup_grid(c2max_w, as_grid=False)
fb.render()
fb.setParentItem(item.content)
if img.mode == "c":
if node.up in n2i:
x = tree_end_x - n2i[node.up].radius
else:
x = tree_end_x
#fb.moveBy(tree_end_x, 0)
elif img.mode == "r":
x = item.mapFromScene(tree_end_x, 0).x()
fb.setPos(x, item.center-(fb.h/2))
if img.draw_guiding_lines and _leaf(node):
# -1 is to connect the two lines, otherwise there is a pixel in between
guide_line = _LineItem(item.nodeRegion.width()-1, item.center, x, item.center)
pen = QtGui.QPen()
set_pen_style(pen, img.guiding_lines_type)
pen.setColor(QtGui.QColor(img.guiding_lines_color))
pen.setCapStyle(QtCore.Qt.FlatCap)
pen.setWidth(node.img_style["hz_line_width"])
guide_line.setPen(pen)
guide_line.setParentItem(item.content)
if img.mode == "c":
mainRect.adjust(-extra_width, -extra_width, extra_width, extra_width)
else:
mainRect.adjust(0, 0, extra_width, 0)
return extra_width
def get_tree_img_map(n2i):
node_list = []
face_list = []
nid = 0
for n, main_item in n2i.iteritems():
n.add_feature("_nid", str(nid))
for item in main_item.mapped_items:
if isinstance(item, _CircleItem) \
or isinstance(item, _SphereItem) \
or isinstance(item, _RectItem):
r = item.boundingRect()
rect = item.mapToScene(r).boundingRect()
x1 = rect.x()
y1 = rect.y()
x2 = rect.x() + rect.width()
y2 = rect.y() + rect.height()
node_list.append([x1, y1, x2, y2, nid, None])
elif isinstance(item, _FaceGroupItem):
if item.column2faces:
for f in item.childItems():
r = f.boundingRect()
rect = f.mapToScene(r).boundingRect()
x1 = rect.x()
y1 = rect.y()
x2 = rect.x() + rect.width()
y2 = rect.y() + rect.height()
if isinstance(f, _TextFaceItem):
face_list.append([x1, y1, x2, y2, nid, str(f.text())])
else:
face_list.append([x1, y1, x2, y2, nid, None])
nid += 1
return {"nodes": node_list, "faces": face_list}
|
xguse/ete
|
ete_dev/treeview/qt4_render.py
|
Python
|
gpl-3.0
| 35,308
|
[
"VisIt"
] |
11963b43292567c912cdaa79ecc99d8732faf03f3f7f8795eecab31890baa687
|
"""
A set of misc. methods in pure Python to perform generic operations over datasets,
vectors, strings, etc. I have added a lot of stuff to it, though only a little is
used as of now. You are free to take and use anything you like.
"""
from __future__ import division
import sys
import random
from numpy.linalg import inv, det
import math
sys.dont_write_bytecode = True
#addition of list items using recursion for both flat and nested lists
def add_rec(x):
y = 0
for a in x:
if type(a) == type([]):
y = y+add_rec(a)
else:
y = y+a
return y
#list flattening
def flat_x(x, y):
for a in x:
if type(a) == type([]):
flat_x(a, y)
else:
y.append(a)
return y
#Fibonacci number using recursion
def fib_x(n):
if n == 0 or n == 1:
return 1
else:
return fib_x(n-1) + fib_x(n-2)
#count down using recursion
def count_x(n):
if n == 0:
print 'blast'
else:
print n
n -= 1
count_x(n)
#factorial rec
def fact_x(n):
if n == 0:
return 1
else:
return n * fact_x(n-1)
#digit sum using recursion
def digit_sum(n):
if n < 10:
return n
else:
return n % 10 + digit_sum(n//10)
#ascending sort using insertion sort
def sort_x(x):
for a in range(1, len(x)):
current = x[a]
position = a
while position>0 and x[position-1]>current:
x[position]=x[position-1]
position = position-1
x[position] = current
return x
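#editor's note (hedged example, not in the original module): a quick check of
#the insertion sort above:
#    >>> sort_x([5, 2, 4, 1, 3])
#    [1, 2, 3, 4, 5]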
#descending sort using insertion sort
def rev_sort(x):
for a in range(1, len(x)):
current = x[a]
position = a
while position > 0 and x[position - 1] < current:
x[position] = x[position-1]
position = position - 1
x[position] = current
return x
#missing number in array
def missing_array(x):
sx = 0
min_ = 0
max_ = 0
for a in range(0, len(x)):
sx = sx+x[a]
if x[a] > max_:
max_ = x[a]
if x[a] < min_:
min_ = x[a]
rsx = (max_+min_)*(max_-min_+1)/2
return rsx - sx
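#editor's note (hedged example, not in the original module): missing_array
#relies on the arithmetic-series identity sum(min..max) = (max+min)*(max-min+1)/2
#and recovers the missing value by subtracting the actual sum:
#    >>> missing_array([0, 1, 2, 4, 5])
#    3.0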
#find 2 missing numbers in an array
def missing_array_2(x):
sx = 0
min_ = 1
max_ = 1
for a in x:
sx += a
if a > max_:
max_ = a
if a < min_:
min_ = a
rsx = (max_+min_)*(max_-min_+1)/2
z = (rsx-sx)/2
rsx1 = (z+0)*(z-0+1)/2
rsx2 = (max_+(z+1))*(max_-(z+1)+1)/2
sx1 = 0
sx2 = 0
for a in x:
if a < z:
sx1 += a
if a > z:
sx2 += a
first_ = rsx1-sx1
second_ = rsx2-sx2
return first_, second_
#maximum-sum contiguous subarray (Kadane-style scan)
def max_sub(x):
c = 0
ci = 0
b = 0
bi = 0
si = 0
for a in range(0, len(x)):
if c + x[a] > 0:
c = c+x[a]
else:
c = 0
ci = a + 1
if c > b:
si = ci
            bi = a + 1
b = c
return b, x[si:bi]
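#editor's note (hedged example, not in the original module): max_sub is a
#Kadane-style running-sum scan; it returns the best sum and the slice behind it:
#    >>> max_sub([1, -2, 3, 4, -1])
#    (7, [3, 4])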
#cumulative sum array
def cum_sum(x):
y = range(0, len(x))
for a in range(0, len(x)):
if a == 0:
y[a] = x[a]
else:
current = x[a]
y[a] = current+ y[a - 1]
return y
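#editor's note (hedged example, not in the original module):
#    >>> cum_sum([1, 2, 3, 4])
#    [1, 3, 6, 10]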
#finds max min in an array
def max_min(x):
    min_ = x[0]
    max_ = x[0]
for a in x:
if a > max_:
max_ = a
if a < min_:
min_ = a
return max_, min_
#downhill simplex algorithm for optimal function minimization
def fmin(F,xStart,side=0.1,tol=0.000006):
import numpy
n = len(xStart) # Number of variables
x = numpy.zeros((n+1,n))
f = numpy.zeros(n+1)
# Generate starting simplex
x[0] = xStart
for i in range(1,n+1):
x[i] = xStart
x[i,i-1] = xStart[i-1] + side
# Compute values of F at the vertices of the simplex
for i in range(n+1): f[i] = F(x[i])
# Main loop
for k in range(500):
# Find highest and lowest vertices
iLo = numpy.argmin(f)
iHi = numpy.argmax(f)
# Compute the move vector d
d = (-(n+1)*x[iHi] + numpy.sum(x,axis=0))/n
# Check for convergence
if math.sqrt(numpy.dot(d,d)/n) < tol: return x[iLo]
# Try reflection
xNew = x[iHi] + 2.0*d
fNew = F(xNew)
if fNew <= f[iLo]: # Accept reflection
x[iHi] = xNew
f[iHi] = fNew
# Try expanding the reflection
xNew = x[iHi] + d
fNew = F(xNew)
if fNew <= f[iLo]: # Accept expansion
x[iHi] = xNew
f[iHi] = fNew
else:
# Try reflection again
if fNew <= f[iHi]: # Accept reflection
x[iHi] = xNew
f[iHi] = fNew
else:
# Try contraction
xNew = x[iHi] + 0.5*d
fNew = F(xNew)
if fNew <= f[iHi]: # Accept contraction
x[iHi] = xNew
f[iHi] = fNew
else:
# Use shrinkage
for i in range(len(x)):
if i != iLo:
x[i] = (x[i] - x[iLo])*0.5
f[i] = F(x[i])
return x[iLo]
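#editor's note (hedged usage sketch, not in the original module): fmin is a
#basic Nelder-Mead style downhill simplex; `paraboloid` below is a hypothetical
#test function, and the minimum should be recovered to within tol:
#
#    def paraboloid(v):
#        return (v[0] - 1.0)**2 + (v[1] - 2.0)**2
#    fmin(paraboloid, [0.0, 0.0])    # -> approximately array([ 1.,  2.])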
#set of functions reserved for normalization procedures
class Functions(object):
def __init__(self):
return
#flatten an array
def flatten(self, x, out_):
for a in x:
if type(a) == type([]):
out_ = self.flatten(a, out_)
else:
out_.append(a)
return out_
#mean of an array
def mean_(self, x):
return float(sum(x))/len(x)
#standard deviation of an array
def std_(self, x):
m_x = self.mean_(x)
return math.sqrt(sum([math.pow((a-m_x), 2) for a in x])/len(x))
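    #editor's note (hedged example, not in the original module): std_ is the
    #*population* standard deviation (it divides by len(x), not len(x)-1):
    #    >>> Functions().std_([2, 4, 4, 4, 5, 5, 7, 9])
    #    2.0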
    #mean of an n x m array with axis defined (1 being row-wise, 0 being column-wise,
    #and no axis definition meaning the flattened array/matrix)
def mean_nm(self, x, axis=False):
if axis == 0:
return map(self.mean_, zip(*x))
elif axis == 1:
return map(self.mean_, x)
elif not axis:
return self.mean_(self.flatten(x, []))
else:
return False
    #standard deviation of an n x m array with axis defined (1 being row-wise,
    #0 being column-wise, and no axis definition meaning the flattened array/matrix)
def std_nm(self, x, axis=False):
if axis == 0:
return map(self.std_, zip(*x))
elif axis == 1:
return map(self.std_, x)
elif not axis:
return self.std_(self.flatten(x, []))
else:
return False
def normalize_1d(self, x):
k = []
x_mean = self.mean_(x)
for a in x:
k.append(a-x_mean)
l = []
x_std = self.std_(x)
for a in k:
l.append(float(a)/x_std)
return l
def normalize_(self, x):
#data -= data.mean(axis=0)
k = []
x_mean = self.mean_nm(x, axis=0)
for a in range(0, len(x)):
l =[]
for b in range(0, len(x[a])):
l.append(float(x[a][b]) - float(x_mean[b]))
k.append(l)
#data /= data.std(axis=0)
l_ = []
x_std = self.std_nm(k, axis=0)
for a in range(0, len(k)):
g = []
for b in range(0, len(k[a])):
g.append(float(float(k[a][b]) / float(x_std[b])))
l_.append(g)
return l_
#inv
def inv_(self, x):
return map(list, inv(x))
#det
def det_(self, x):
return det(x)
#matrix multiplication (not a generic one)
def mult_mat(self, a,b):
try:
zip_b = zip(*b)
return [[sum(ele_a*ele_b for ele_a, ele_b in zip(row_a, col_b)) for col_b in zip_b] for row_a in a]
except:
j_ = []
for n_ in a:
i = []
for m_,c in zip(n_, b):
k = m_*c
i.append(k)
j_.append(sum(i))
return j_
#argmin implementation
def arg_min(self, x):
min_ = x[0]
min_index = 0
for i in range(0, len(x)):
if x[i] < min_:
min_ = x[i]
min_index =i
return min_index
#argmax implementation
def arg_max(self, x):
max_ = x[0]
max_index = 0
for i in range(0, len(x)):
if x[i] > max_:
max_ = x[i]
max_index =i
return max_index
#verifies that each item in the array has equal lengths
def verify_dimensions(self, x):
random_pick = len(x[random.randint(0,len(x)-1)])
if sum(map(len, x))/len(x) != random_pick:
return False
else:
return True
    #scalar product with a vector
def scalar_product(self, vec, q):
for i, a in enumerate(vec):
vec[i] = vec[i] * q
return vec
#multiplies a column vector x with matrix y
def prod(self, x, y):
result = []
for a in y:
l = []
for i,b in enumerate(x):
l.append(a[i]*b)
result.append(sum(l))
return result
#dot product of two vectors
def dot(self, x, y):
return sum(a*b for a,b in zip(x, y))
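    #editor's note (hedged example, not in the original module):
    #    >>> Functions().dot([1, 2, 3], [4, 5, 6])
    #    32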
#multiplies a vector with a matrix (another case specific version)
def prod_2(self, ui, cvg):
r = []
for i, a in enumerate(ui):
r.append(self.dot(ui, cvg[i]))
return r
    #scalar product with a vector
def sclr_prod_(self, scalar, vector):
result = []
for i, a in enumerate(vector):
result.append(vector[i]*scalar)
return result
|
saifuddin778/pwperceptrons
|
tools.py
|
Python
|
mit
| 10,032
|
[
"BLAST"
] |
57183b32dfca4228ee1d486242e4cef2158226b3602c89d77fb45fd10c236bfa
|
"""Support for control of ElkM1 lighting (X10, UPB, etc)."""
from homeassistant.components.light import (
ATTR_BRIGHTNESS, SUPPORT_BRIGHTNESS, Light)
from homeassistant.components.elkm1 import (
DOMAIN as ELK_DOMAIN, ElkEntity, create_elk_entities)
DEPENDENCIES = [ELK_DOMAIN]
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up the Elk light platform."""
if discovery_info is None:
return
elk = hass.data[ELK_DOMAIN]['elk']
async_add_entities(
create_elk_entities(hass, elk.lights, 'plc', ElkLight, []), True)
class ElkLight(ElkEntity, Light):
"""Representation of an Elk lighting device."""
def __init__(self, element, elk, elk_data):
"""Initialize the Elk light."""
super().__init__(element, elk, elk_data)
self._brightness = self._element.status
@property
def brightness(self):
"""Get the brightness."""
return self._brightness
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS
@property
def is_on(self) -> bool:
"""Get the current brightness."""
return self._brightness != 0
def _element_changed(self, element, changeset):
status = self._element.status if self._element.status != 1 else 100
self._brightness = round(status * 2.55)
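    # Editor's note (hedged, not part of the original integration): Elk PLC
    # levels run 0-100 while Home Assistant brightness runs 0-255, hence the
    # 2.55 factor; e.g. an Elk status of 50 maps to round(50 * 2.55) == 128.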
async def async_turn_on(self, **kwargs):
"""Turn on the light."""
self._element.level(round(kwargs.get(ATTR_BRIGHTNESS, 255) / 2.55))
async def async_turn_off(self, **kwargs):
"""Turn off the light."""
self._element.level(0)
|
HydrelioxGitHub/home-assistant
|
homeassistant/components/elkm1/light.py
|
Python
|
apache-2.0
| 1,678
|
[
"Elk"
] |
c47da9e960f5941d95c38758a2be3d559c7a77e0391b1325f07ee2d9d974ca68
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that all files contain proper licensing information."""
import optparse
import os.path
import subprocess
import sys
def PrintUsage():
print """Usage: python checklicenses.py [--root <root>] [tocheck]
--root Specifies the repository root. This defaults to "../.." relative
to the script file. This will be correct given the normal location
of the script in "<root>/tools/checklicenses".
--ignore-suppressions Ignores path-specific license whitelist. Useful when
trying to remove a suppression/whitelist entry.
tocheck Specifies the directory, relative to root, to check. This defaults
to "." so it checks everything.
Examples:
python checklicenses.py
python checklicenses.py --root ~/chromium/src third_party"""
WHITELISTED_LICENSES = [
'Apache (v2.0)',
'Apache (v2.0) BSD (2 clause)',
'Apache (v2.0) GPL (v2)',
'Apple MIT', # https://fedoraproject.org/wiki/Licensing/Apple_MIT_License
'APSL (v2)',
'APSL (v2) BSD (4 clause)',
'BSD',
'BSD (2 clause)',
'BSD (2 clause) MIT/X11 (BSD like)',
'BSD (3 clause)',
'BSD (3 clause) ISC',
'BSD (3 clause) LGPL (v2.1 or later)',
'BSD (3 clause) MIT/X11 (BSD like)',
'BSD (4 clause)',
'BSD-like',
# TODO(phajdan.jr): Make licensecheck not print BSD-like twice.
'BSD-like MIT/X11 (BSD like)',
'BSL (v1.0)',
# TODO(phajdan.jr): Make licensecheck not print the comma after 3.1.
'BSL (v1.0) GPL (v3.1,)',
'GPL (v3 or later) with Bison parser exception',
'GPL with Bison parser exception',
'ISC',
'LGPL',
'LGPL (v2)',
'LGPL (v2 or later)',
'LGPL (v2.1)',
'LGPL (v3 or later)',
# TODO(phajdan.jr): Make licensecheck convert that comma to a dot.
'LGPL (v2,1 or later)',
'LGPL (v2.1 or later)',
'MPL (v1.0) LGPL (v2 or later)',
'MPL (v1.1)',
'MPL (v1.1) BSD-like',
'MPL (v1.1) BSD-like GPL (unversioned/unknown version)',
'MPL (v1.1) GPL (unversioned/unknown version)',
# TODO(phajdan.jr): Make licensecheck not print the comma after 1.1.
'MPL (v1.1,) GPL (unversioned/unknown version) LGPL (v2 or later)',
'MPL (v1.1,) GPL (unversioned/unknown version) LGPL (v2.1 or later)',
'MIT/X11 (BSD like)',
'Ms-PL',
'Public domain',
'libpng',
'zlib/libpng',
'SGI Free Software License B',
]
PATH_SPECIFIC_WHITELISTED_LICENSES = {
'base/third_party/icu': [ # http://crbug.com/98087
'UNKNOWN',
],
# http://code.google.com/p/google-breakpad/issues/detail?id=450
'breakpad/src': [
'UNKNOWN',
],
'chrome/common/extensions/docs/examples': [ # http://crbug.com/98092
'UNKNOWN',
],
'chrome/test/data/layout_tests/LayoutTests': [
'UNKNOWN',
],
'courgette/third_party/bsdiff_create.cc': [ # http://crbug.com/98095
'UNKNOWN',
],
'data/mozilla_js_tests': [
'UNKNOWN',
],
'data/page_cycler': [
'UNKNOWN',
'GPL (v2 or later)',
],
'data/tab_switching': [
'UNKNOWN',
],
'googleurl': [ # http://code.google.com/p/google-url/issues/detail?id=15
'UNKNOWN',
],
'native_client': [ # http://crbug.com/98099
'UNKNOWN',
],
'native_client/toolchain': [
'BSD GPL (v2 or later)',
'BSD (2 clause) GPL (v2 or later)',
'BSD (3 clause) GPL (v2 or later)',
'BSL (v1.0) GPL',
'GPL',
'GPL (unversioned/unknown version)',
'GPL (v2)',
# TODO(phajdan.jr): Make licensecheck not print the comma after v2.
'GPL (v2,)',
'GPL (v2 or later)',
# TODO(phajdan.jr): Make licensecheck not print the comma after 3.1.
'GPL (v3.1,)',
'GPL (v3 or later)',
],
'net/disk_cache/hash.cc': [ # http://crbug.com/98100
'UNKNOWN',
],
'net/tools/spdyshark': [
'GPL (v2 or later)',
'UNKNOWN',
],
# http://crbug.com/98107
'ppapi/c/documentation/check.sh': [
'UNKNOWN',
],
'ppapi/cpp/documentation/check.sh': [
'UNKNOWN',
],
'ppapi/lib/gl/include': [
'UNKNOWN',
],
'ppapi/native_client/tests/earth/earth_image.inc': [
'UNKNOWN',
],
'third_party/WebKit': [
'UNKNOWN',
],
'third_party/WebKit/Source/JavaScriptCore/tests/mozilla': [
'GPL',
'GPL (unversioned/unknown version)',
],
'third_party/active_doc': [ # http://crbug.com/98113
'UNKNOWN',
],
# http://code.google.com/p/angleproject/issues/detail?id=217
'third_party/angle': [
'UNKNOWN',
],
'third_party/bsdiff/mbsdiff.cc': [
'UNKNOWN',
],
'third_party/bzip2': [
'UNKNOWN',
],
'third_party/cld/encodings/compact_lang_det': [ # http://crbug.com/98120
'UNKNOWN',
],
'third_party/devscripts': [
'GPL (v2 or later)',
],
'third_party/expat/files/lib': [ # http://crbug.com/98121
'UNKNOWN',
],
'third_party/ffmpeg': [
'GPL',
'GPL (v2)',
'GPL (v2 or later)',
'UNKNOWN', # http://crbug.com/98123
],
'third_party/gles2_book': [ # http://crbug.com/98130
'UNKNOWN',
],
'third_party/gles2_conform/GTF_ES': [ # http://crbug.com/98131
'UNKNOWN',
],
'third_party/harfbuzz': [ # http://crbug.com/98133
'UNKNOWN',
],
'third_party/hunspell': [ # http://crbug.com/98134
'UNKNOWN',
],
'third_party/iccjpeg': [ # http://crbug.com/98137
'UNKNOWN',
],
'third_party/icu': [ # http://crbug.com/98301
'UNKNOWN',
],
'third_party/jemalloc': [ # http://crbug.com/98302
'UNKNOWN',
],
'third_party/lcov': [ # http://crbug.com/98304
'UNKNOWN',
],
'third_party/lcov/contrib/galaxy/genflat.pl': [
'GPL (v2 or later)',
],
'third_party/lcov-1.9/contrib/galaxy/genflat.pl': [
'GPL (v2 or later)',
],
'third_party/libevent': [ # http://crbug.com/98309
'UNKNOWN',
],
'third_party/libjingle/source/talk': [ # http://crbug.com/98310
'UNKNOWN',
],
'third_party/libjpeg': [ # http://crbug.com/98313
'UNKNOWN',
],
'third_party/libjpeg_turbo': [ # http://crbug.com/98314
'UNKNOWN',
],
'third_party/libpng': [ # http://crbug.com/98318
'UNKNOWN',
],
# The following files lack license headers, but are trivial.
'third_party/libusb/src/libusb/os/poll_posix.h': [
'UNKNOWN',
],
'third_party/libusb/src/libusb/version.h': [
'UNKNOWN',
],
'third_party/libusb/src/autogen.sh': [
'UNKNOWN',
],
'third_party/libusb/src/config.h': [
'UNKNOWN',
],
'third_party/libusb/src/msvc/config.h': [
'UNKNOWN',
],
# The package has a compatible COPYING file and most source files conform,
# but there are several exceptions.
# TODO(posciak,fischman): remove this exception once upstream makes all
# their files conform. https://bugs.freedesktop.org/show_bug.cgi?id=49588
# http://crbug.com/126466
'third_party/libva/va/va_x11.h': [
'UNKNOWN',
],
'third_party/libva/va/va_android.h': [
'UNKNOWN',
],
'third_party/libva/va/x11/va_dricommon.h': [
'UNKNOWN',
],
'third_party/libva/va/x11/va_dri2tokens.h': [
'UNKNOWN',
],
'third_party/libva/va/x11/va_dri2str.h': [
'UNKNOWN',
],
'third_party/libva/va/x11/va_dri2.h': [
'UNKNOWN',
],
'third_party/libva/va/va_egl.h': [
'UNKNOWN',
],
'third_party/libva/va/egl/va_egl_impl.h': [
'UNKNOWN',
],
'third_party/libva/va/egl/va_egl_private.h': [
'UNKNOWN',
],
'third_party/libvpx/source': [ # http://crbug.com/98319
'UNKNOWN',
],
'third_party/libvpx/source/libvpx/examples/includes': [
'GPL (v2 or later)',
],
'third_party/libwebp': [ # http://crbug.com/98448
'UNKNOWN',
],
'third_party/libxml': [
'UNKNOWN',
],
'third_party/libxslt': [
'UNKNOWN',
],
'third_party/lzma_sdk': [
'UNKNOWN',
],
'third_party/mesa/MesaLib': [
'GPL (v2)',
'GPL (v3 or later)',
'UNKNOWN', # http://crbug.com/98450
],
'third_party/modp_b64': [
'UNKNOWN',
],
'third_party/npapi/npspy/extern/java': [
'GPL (unversioned/unknown version)',
],
'third_party/openssl': [ # http://crbug.com/98451
'UNKNOWN',
],
'third_party/ots/tools/ttf-checksum.py': [ # http://code.google.com/p/ots/issues/detail?id=2
'UNKNOWN',
],
'third_party/molokocacao/NSBezierPath+MCAdditions.h': [ # http://crbug.com/98453
'UNKNOWN',
],
'third_party/npapi/npspy': [
'UNKNOWN',
],
'third_party/ocmock/OCMock': [ # http://crbug.com/98454
'UNKNOWN',
],
'third_party/ply/__init__.py': [
'UNKNOWN',
],
'third_party/protobuf': [ # http://crbug.com/98455
'UNKNOWN',
],
'third_party/pylib': [
'UNKNOWN',
],
'third_party/scons-2.0.1/engine/SCons': [ # http://crbug.com/98462
'UNKNOWN',
],
'third_party/simplejson': [
'UNKNOWN',
],
'third_party/skia': [ # http://crbug.com/98463
'UNKNOWN',
],
'third_party/snappy/src': [ # http://crbug.com/98464
'UNKNOWN',
],
'third_party/smhasher/src': [ # http://crbug.com/98465
'UNKNOWN',
],
'third_party/sqlite': [
'UNKNOWN',
],
'third_party/swig/Lib/linkruntime.c': [ # http://crbug.com/98585
'UNKNOWN',
],
'third_party/talloc': [
'GPL (v3 or later)',
'UNKNOWN', # http://crbug.com/98588
],
'third_party/tcmalloc': [
'UNKNOWN', # http://crbug.com/98589
],
'third_party/tlslite': [
'UNKNOWN',
],
'third_party/webdriver': [ # http://crbug.com/98590
'UNKNOWN',
],
'third_party/webrtc': [ # http://crbug.com/98592
'UNKNOWN',
],
'third_party/xdg-utils': [ # http://crbug.com/98593
'UNKNOWN',
],
'third_party/yasm/source': [ # http://crbug.com/98594
'UNKNOWN',
],
'third_party/zlib/contrib/minizip': [
'UNKNOWN',
],
'third_party/zlib/trees.h': [
'UNKNOWN',
],
'tools/dromaeo_benchmark_runner/dromaeo_benchmark_runner.py': [
'UNKNOWN',
],
'tools/emacs': [ # http://crbug.com/98595
'UNKNOWN',
],
'tools/grit/grit/node/custom/__init__.py': [
'UNKNOWN',
],
'tools/gyp/test': [
'UNKNOWN',
],
'tools/histograms': [
'UNKNOWN',
],
'tools/memory_watcher': [
'UNKNOWN',
],
'tools/playback_benchmark': [
'UNKNOWN',
],
'tools/python/google/__init__.py': [
'UNKNOWN',
],
'tools/site_compare': [
'UNKNOWN',
],
'tools/stats_viewer/Properties/AssemblyInfo.cs': [
'UNKNOWN',
],
'tools/symsrc/pefile.py': [
'UNKNOWN',
],
'v8/test/cctest': [ # http://crbug.com/98597
'UNKNOWN',
],
'webkit/data/ico_decoder': [
'UNKNOWN',
],
}
def check_licenses(options, args):
# Figure out which directory we have to check.
if len(args) == 0:
# No directory to check specified, use the repository root.
start_dir = options.base_directory
elif len(args) == 1:
# Directory specified. Start here. It's supposed to be relative to the
# base directory.
start_dir = os.path.abspath(os.path.join(options.base_directory, args[0]))
else:
# More than one argument, we don't handle this.
PrintUsage()
return 1
print "Using base directory:", options.base_directory
print "Checking:", start_dir
print
licensecheck_path = os.path.abspath(os.path.join(options.base_directory,
'third_party',
'devscripts',
'licensecheck.pl'))
licensecheck = subprocess.Popen([licensecheck_path, '-r', start_dir],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = licensecheck.communicate()
if options.verbose:
print '----------- licensecheck stdout -----------'
print stdout
print '--------- end licensecheck stdout ---------'
if licensecheck.returncode != 0 or stderr:
print '----------- licensecheck stderr -----------'
print stderr
print '--------- end licensecheck stderr ---------'
print "\nFAILED\n"
return 1
success = True
for line in stdout.splitlines():
filename, license = line.split(':', 1)
filename = os.path.relpath(filename.strip(), options.base_directory)
# All files in the build output directory are generated one way or another.
# There's no need to check them.
if filename.startswith('out/') or filename.startswith('sconsbuild/'):
continue
# For now we're just interested in the license.
license = license.replace('*No copyright*', '').strip()
# Skip generated files.
if 'GENERATED FILE' in license:
continue
if license in WHITELISTED_LICENSES:
continue
if not options.ignore_suppressions:
found_path_specific = False
for prefix in PATH_SPECIFIC_WHITELISTED_LICENSES:
if (filename.startswith(prefix) and
license in PATH_SPECIFIC_WHITELISTED_LICENSES[prefix]):
found_path_specific = True
break
if found_path_specific:
continue
print "'%s' has non-whitelisted license '%s'" % (filename, license)
success = False
if success:
print "\nSUCCESS\n"
return 0
else:
print "\nFAILED\n"
print "Please read",
print "http://www.chromium.org/developers/adding-3rd-party-libraries"
print "for more info how to handle the failure."
print
print "Please respect OWNERS of checklicenses.py. Changes violating"
print "this requirement may be reverted."
return 1
def main():
default_root = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..'))
option_parser = optparse.OptionParser()
option_parser.add_option('--root', default=default_root,
dest='base_directory',
help='Specifies the repository root. This defaults '
'to "../.." relative to the script file, which '
'will normally be the repository root.')
option_parser.add_option('-v', '--verbose', action='store_true',
default=False, help='Print debug logging')
option_parser.add_option('--ignore-suppressions',
action='store_true',
default=False,
help='Ignore path-specific license whitelist.')
options, args = option_parser.parse_args()
return check_licenses(options, args)
if '__main__' == __name__:
sys.exit(main())
|
robclark/chromium
|
tools/checklicenses/checklicenses.py
|
Python
|
bsd-3-clause
| 15,399
|
[
"Galaxy"
] |
a0c84b5bf341ea35d2f7c7bf66a24ecf37e5e67f15e90b0f45bcbb6e1e3a3c7d
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# checkprgver.py file is part of spman
#
# spman - Slackware package manager
# Home page: https://github.com/MyRequiem/spman
#
# Copyright (c) 2018 Vladimir MyRequiem Astrakhan, Russia
# <mrvladislavovich@gmail.com>
# All rights reserved
# See LICENSE for details.
"""
Check program version
"""
from ssl import _create_unverified_context
from sys import stderr, stdout
from urllib.request import urlopen
from .maindata import MainData
def check_prg_ver() -> None:
"""
check program version
"""
meta = MainData()
local_ver = meta.prog_version
print(('Installed version: {0}\n{1}Checking '
'latest release version...{2}').format(local_ver,
meta.clrs['grey'],
meta.clrs['reset']))
# search latest release on https://github.com/MyRequiem/spman/releases
url = '{0}/releases'.format(meta.home_page)
_context = _create_unverified_context()
open_url = urlopen(url, context=_context)
bytes_content = open_url.read()
open_url.close()
# bytes --> str
html = str(bytes_content, encoding=(stdout.encoding or stderr.encoding))
# <a href="/MyRequiem/spman/archive/1.1.1.zip" rel="nofollow">
# split('/MyRequiem/spman/archive/')
spl = '/{0}/archive/'.format('/'.join(meta.home_page.split('/')[3:]))
version = '.'.join(html.split(spl)[1].split('.')[:3])
if version != local_ver:
# https://github.com/MyRequiem/spman/archive/1.5.4/spman-1.5.4.tar.gz
        print(('{0}New version is available:{1} {3}\n' +
'Visit: {2}/releases\nor download new version source code:\n' +
'{2}/archive/{3}/{4}-{3}.tar.gz').format(meta.clrs['lred'],
meta.clrs['reset'],
meta.home_page,
version,
meta.prog_name))
else:
print(('{0}You are using the latest program '
'version{1}').format(meta.clrs['green'],
meta.clrs['reset']))
|
MyRequiem/spman
|
src/checkprgver.py
|
Python
|
mit
| 2,260
|
[
"VisIt"
] |
22f1582c458cf2d20f0007c76a6c0e031acb8fdd6f2e88e8c1ebd3ef8732d020
|
# -*- coding:Utf-8 -*-
import doctest
import os
import logging
import pdb
import sys
import numpy as np
import scipy as sp
import scipy.io as io
import scipy.signal as si
import scipy.linalg as la
import matplotlib.pylab as plt
import pylayers.signal.bsignal as bs
from pylayers.measures import mesuwb
class Waveform(dict):
"""
Attributes
----------
st : time domain
sf : frequency domain
sfg : frequency domain integrated
Methods
-------
eval
showpsd
ip_generic
fromfile
fromfile2
read
gui
show
"""
def __init__(self,**kwargs):
"""
Parameters
----------
'typ' : string
'generic',
'WGHz': float
0.499
'fcGHz': float
4.493
'fsGHz': float
100,
'threshdB':
3,
'twns': float
30
typ : 'generic','W1compensate','W1offset'
"""
defaults = {'typ':'generic',
'fGHz':[],
'WGHz': 0.499,
'fcGHz': 4.493,
'feGHz': 100,
'threshdB': 3,
'twns': 30}
for key, value in defaults.items():
if key not in kwargs:
self[key] = value
else:
self[key] = kwargs[key]
self.eval()
def eval(self):
u""" evaluate waveform
        The :math:`\lambda/(4\pi)` factor, which is necessary to get the proper
        link budget (from the Friis formula), is introduced in this function.
"""
if self['typ'] == 'generic':
[st,sf]=self.ip_generic()
#elif self['typ'] == 'mbofdm':
# [st,sf]=self.mbofdm()
elif self['typ'] == 'W1compensate':
[st,sf]=self.fromfile()
elif self['typ'] == 'W1offset':
[st,sf]=self.fromfile2()
        elif self['typ'] == 'blackmann':
            fGHz = np.asarray(self['fGHz'])
            sf = bs.FUsignal(x=fGHz, y=np.blackman(len(fGHz)))
            st = sf.ift()
        elif self['typ'] == 'hamming':
            # (a duplicate 'hamming' branch using np.ones shadowed this one;
            # the Hamming window is the intended behaviour)
            fGHz = np.asarray(self['fGHz'])
            sf = bs.FUsignal(x=fGHz, y=np.hamming(len(fGHz)))
            st = sf.ift()
elif self['typ'] == 'ref156':
[st,sf]=self.ref156()
else:
            logging.critical('waveform typ not recognized, '
                             'check your config file')
self.st = st
self.sf = sf
self.fGHz = self.sf.x
ygamma = -1j*0.3/(4*np.pi*self.fGHz)
self.gamm = bs.FUsignal(x=self.fGHz,y=ygamma)
self.sfg = self.sf*self.gamm
self.sfgh = self.sfg.symH(0)
self.stgh = self.sfgh.ifft(1)
def info(self):
""" display information about waveform
        Examples
        --------
>>> from pylayers.signal.waveform import *
>>> w = Waveform(typ='generic',WGHz=0.499,fcGHz=4.49,feGHz=100,threshdB=3,twns=30)
>>> w.show()
>>> plt.show()
"""
if self['typ']=='generic':
for k in self.keys():
print(k , " : ",self[k])
else:
print("typ:",self['typ'])
def showpsd(self,Tpns=1000):
""" show psd
Parameters
----------
Tpns : float
"""
plt.subplot(211)
self.st.plot()
plt.subplot(212)
psd = self.st.psd(Tpns,50)
plt.title('Tp = '+str(Tpns)+' ns')
psd.plotdB(mask=True)
def ip_generic(self):
""" Create an energy normalized Gaussian impulse (Usignal)
ip_generic(self,parameters)
"""
Tw = self['twns']
fcGHz = self['fcGHz']
WGHz = self['WGHz']
thresh = self['threshdB']
feGHz = self['feGHz']
te = 1.0/feGHz
self['te'] = te
Np = feGHz*Tw
self['Np'] = Np
#x = np.linspace(-0.5*Tw+te/2,0.5*Tw+te/2,Np,endpoint=False)
#x = arange(-Tw,Tw,te)
w = bs.TUsignal()
w.EnImpulse(fcGHz=fcGHz,WGHz=WGHz,threshdB=thresh,feGHz=feGHz)
#W = w.ft()
W = w.ft()
return (w,W)
def ref156(self,beta=0.5):
""" reference pulse of IEEE 802.15.6 UWB standard
Parameters
----------
beta : float
roll-off factor
Tns = 1/499.2MHz
Notes
-----
From P8O2.15.6/D02 December 2010 Formula 96 p 215
"""
Tw = self['twns']
fe = self['feGHz']
te = 1./fe
        Tns = 1./0.4992
        Np = int(fe * Tw)    # number of samples, as in ip_generic
        x = np.linspace(-0.5*Tw+te/2, 0.5*Tw+te/2, Np, endpoint=False)
z = x/Tns
t1 = np.sin(np.pi*(1-beta)*z)
t2 = np.cos(np.pi*(1+beta)*z)
t3 = (np.pi*z)*(1-(4*beta*z)**2)
y = (t1 + 4*beta*z*t2)/t3
st = bs.TUsignal()
st.x = x
st.y = y
sf = st.ftshift()
return(st,sf)
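    # Editor's note (hedged, not in the original module): the expression above
    # is the time-domain root-raised-cosine pulse
    #     p(t) = [sin(pi*(1-beta)*t/T) + 4*beta*(t/T)*cos(pi*(1+beta)*t/T)]
    #            / [pi*(t/T)*(1 - (4*beta*t/T)**2)]
    # with roll-off beta = 0.5 and symbol period T = 1/499.2 MHz ~ 2.0032 ns.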
def fromfile(self):
""" get the measurement waveform from WHERE1 measurement campaign
This function is not yet generic
>>> from pylayers.signal.waveform import *
>>> wav = Waveform(typ='W1compensate')
>>> wav.show()
"""
M = mesuwb.UWBMeasure(1,h=1)
w = bs.TUsignal()
ts = M.RAW_DATA.timetx[0]
tns = ts*1e9
te = tns[1]-tns[0]
y = M.RAW_DATA.tx[0]
# find peak position u is the index of the peak
# yap :after peak
# ybp : before peak
# yzp : zero padding
maxy = max(y)
u = np.where(y ==maxy)[0][0]
yap = y[u:]
ybp = y[0:u]
yzp = np.zeros(len(yap)-len(ybp)-1)
tnsp = np.arange(0, tns[-1]-tns[u]+0.5*te, te)
tnsm = np.arange(-(tns[-1]-tns[u]), 0, te)
y = np.hstack((yzp, np.hstack((ybp, yap))))
tns = np.hstack((tnsm, tnsp))
#
        # Warning: check that 1/sqrt(30) is not applied elsewhere
#
w.x = tns
w.y = y[None,:]*(1/np.sqrt(30))
# w : TUsignal
# W : FUsignal (Hermitian redundancy removed)
W = w.ftshift()
return (w,W)
def fromfile2(self):
"""
get the measurement waveform from WHERE1 measurement campaign
This function is not yet generic
>>> from pylayers.signal.waveform import *
>>> wav = Waveform(typ='W1offset')
>>> wav.show()
"""
M = mesuwb.UWBMeasure(1,1)
w = bs.TUsignal()
ts = M.RAW_DATA.timetx[0]
tns = ts*1e9
te = tns[1]-tns[0]
y = M.RAW_DATA.tx[0]
# find peak position u is the index of the peak
# yap :after peak
# ybp : before peak
# yzp : zero padding
# maxy = max(y)
# u = np.where(y ==maxy)[0][0]
# yap = y[u:]
# ybp = y[0:u]
yzp = np.zeros(len(y)-1)
# tnsp = np.arange(0,tns[-1]-tns[u]+0.5*te,te)
# tnsm = np.arange(-(tns[-1]-tns[u]),0,te)
N = len(ts)-1
tnsm = np.linspace(-tns[-1],-te,N)
y = np.hstack((yzp,y))
tns = np.hstack((tnsm,tns))
#
        # Warning: check that 1/sqrt(30) is not applied elsewhere
#
w.x = tns
w.y = (y*(1/np.sqrt(30)))[None,:]
# w : TUsignal
# W : FUsignal (Hermitian redundancy removed)
W = w.ftshift()
return (w,W)
def read(self,config):
"""
Parameters
----------
config : ConfigParser object
Returns
-------
w : waveform
"""
par = config.items("waveform")
for k in range(len(par)):
key = par[k][0]
val = par[k][1]
if key == "WGHz":
self[key] = float(val)
if key == "fcGHz":
self[key] = float(val)
if key == "feGHz":
self[key] = float(val)
if key == "threshdB":
self[key] = float(val)
if key == "twns":
self[key] = float(val)
if key == "typ":
self[key] = val
self.eval()
def bandwidth(self,th_ratio=10000,Npt=100):
""" Determine effective bandwidth of the waveform.
Parameters
----------
th_ratio : float
threshold ratio
threshold = max(abs())/th_ratio
Npt : Number of points
"""
u=np.where(np.abs(self.sf.y)>np.max(np.abs(self.sf.y))/th_ratio)
#fGHz = self.sf.x[u[1]]
fGHz_start = self.sf.x[u[1]][0]
fGHz_stop = self.sf.x[u[1]][-1]
fGHz = np.linspace(fGHz_start,fGHz_stop,Npt)
return fGHz
def gui(self):
"""
Get the Waveform parameter
"""
        if self['typ'] == 'generic':
            from easygui import multenterbox
            self.st.plot()
            plt.show()
            wavegui = multenterbox('','Waveform Parameter',
('Tw (ns) integer value',
'fc (GHz)',
'W (GHz)',
'thresh (dB)',
'fe (GHz) integer value'),
( self['twns'] ,
self['fcGHz'] ,
self['WGHz'] ,
self['threshdB'],
self['feGHz']
))
            self['twns'] = eval(wavegui[0])
            self['fcGHz'] = eval(wavegui[1])
            self['WGHz'] = eval(wavegui[2])
            self['threshdB'] = eval(wavegui[3])
            self['feGHz'] = eval(wavegui[4])
[st,sf] = self.ip_generic()
self.st = st
self.sf = sf
st.plot()
            plt.show()
def show(self,fig=[]):
""" show waveform in time and frequency domain
Parameters
----------
fig : figure
"""
# title construction
if fig ==[]:
fig = plt.figure()
title =''
for pk in self.keys():
val = self[pk]
title = title + pk + ': '
if type(val) != 'str':
title = title + str(val) + ' '
#plt.title(title)
ax1 = fig.add_subplot(2,1,1)
ax1.plot(self.st.x,self.st.y[0,:])
plt.xlabel('time (ns)')
plt.ylabel('level in linear scale')
ax2 = fig.add_subplot(2,1,2)
ax2.plot(self.sf.x,abs(self.sf.y[0,:]))
plt.xlabel('frequency (GHz)')
plt.ylabel('level in linear scale')
fig.suptitle(title)
if __name__ == "__main__":
plt.ion()
doctest.testmod()
|
dialounke/pylayers
|
pylayers/signal/waveform.py
|
Python
|
mit
| 10,616
|
[
"Gaussian"
] |
000046847467ed04f9e1c128c4de08ad530f96bfae34a0a9b83d5a5f7ce593d9
|
"""
Acceptance tests for grade settings in Studio.
"""
from common.test.acceptance.pages.studio.settings_graders import GradingPage
from common.test.acceptance.tests.studio.base_studio_test import StudioCourseTest
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from bok_choy.promise import EmptyPromise
class GradingPageTest(StudioCourseTest):
"""
    Bok Choy tests to add/edit grade settings in Studio.
"""
url = None
GRACE_FIELD_CSS = "#course-grading-graceperiod"
def setUp(self): # pylint: disable=arguments-differ
super(GradingPageTest, self).setUp()
self.grading_page = GradingPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
self.grading_page.visit()
self.ensure_input_fields_are_loaded()
def ensure_input_fields_are_loaded(self):
"""
Ensures values in input fields are loaded.
"""
EmptyPromise(
lambda: self.grading_page.q(css=self.GRACE_FIELD_CSS).attrs('value')[0],
"Waiting for input fields to be loaded"
).fulfill()
def populate_course_fixture(self, course_fixture):
"""
Return a test course fixture.
"""
course_fixture.add_children(
XBlockFixtureDesc("chapter", "Test Section").add_children(
XBlockFixtureDesc("sequential", "Test Subsection").add_children(
)
)
)
def test_add_grade_range(self):
"""
Scenario: Users can add grading ranges
Given I have opened a new course in Studio
And I am viewing the grading settings
When I add "1" new grade
Then I see I now have "3"
"""
length = self.grading_page.total_number_of_grades
self.grading_page.click_add_grade()
self.assertTrue(self.grading_page.is_grade_added(length))
self.grading_page.save()
self.grading_page.refresh_and_wait_for_load()
total_number_of_grades = self.grading_page.total_number_of_grades
self.assertEqual(total_number_of_grades, 3)
def test_staff_can_add_up_to_five_grades_only(self):
"""
Scenario: Users can only have up to 5 grading ranges
Given I have opened a new course in Studio
And I am viewing the grading settings
When I try to add more than 5 grades
Then I see I have only "5" grades
"""
for grade_ordinal in range(1, 5):
length = self.grading_page.total_number_of_grades
self.grading_page.click_add_grade()
            # By default the page has 2 grades, so an ordinal greater than 3
            # means an attempt to add a 6th grade
if grade_ordinal > 3:
self.assertFalse(self.grading_page.is_grade_added(length))
else:
self.assertTrue(self.grading_page.is_grade_added(length))
self.grading_page.save()
self.grading_page.refresh_and_wait_for_load()
total_number_of_grades = self.grading_page.total_number_of_grades
self.assertEqual(total_number_of_grades, 5)
def test_grades_remain_consistent(self):
"""
Scenario: When user removes a grade the remaining grades should be consistent
Given I have opened a new course in Studio
And I am viewing the grading settings
When I add "2" new grade
Then Grade list has "A,B,C,F" grades
And I delete a grade
Then Grade list has "A,B,F" grades
"""
for _ in range(2):
length = self.grading_page.total_number_of_grades
self.grading_page.click_add_grade()
self.assertTrue(self.grading_page.is_grade_added(length))
self.grading_page.save()
grades_alphabets = self.grading_page.grade_letters
self.assertEqual(grades_alphabets, ['A', 'B', 'C', 'F'])
self.grading_page.remove_grades(1)
self.grading_page.save()
grades_alphabets = self.grading_page.grade_letters
self.assertEqual(grades_alphabets, ['A', 'B', 'F'])
def test_staff_can_delete_grade_range(self):
"""
Scenario: Users can delete grading ranges
Given I have opened a new course in Studio
And I am viewing the grading settings
When I add "1" new grade
And I delete a grade
Then I see I now have "2" grades
"""
length = self.grading_page.total_number_of_grades
self.grading_page.click_add_grade()
self.assertTrue(self.grading_page.is_grade_added(length))
self.grading_page.save()
total_number_of_grades = self.grading_page.total_number_of_grades
self.assertEqual(total_number_of_grades, 3)
self.grading_page.remove_grades(1)
total_number_of_grades = self.grading_page.total_number_of_grades
self.assertEqual(total_number_of_grades, 2)
def test_staff_can_move_grading_ranges(self):
"""
Scenario: Users can move grading ranges
Given I have opened a new course in Studio
And I am viewing the grading settings
When I move a grading section
Then I see that the grade range has changed
"""
grade_ranges = self.grading_page.grades_range
self.assertIn('0-50', grade_ranges)
self.grading_page.drag_and_drop_grade()
grade_ranges = self.grading_page.grades_range
self.assertIn(
'0-3',
grade_ranges,
'expected range: 0-3, not found in grade ranges:{}'.format(grade_ranges)
)
def test_settings_are_persisted_on_save_only(self):
"""
Scenario: Settings are only persisted when saved
Given I have populated a new course in Studio
And I am viewing the grading settings
When I change assignment type "Homework" to "New Type"
Then I do not see the changes persisted on refresh
"""
self.grading_page.change_assignment_name('Homework', 'New Type')
self.grading_page.refresh_and_wait_for_load()
self.assertIn('Homework', self.grading_page.get_assignment_names)
def test_settings_are_reset_on_cancel(self):
"""
Scenario: Settings are reset on cancel
Given I have populated a new course in Studio
And I am viewing the grading settings
When I change assignment type "Homework" to "New Type"
And I press the "Cancel" notification button
Then I see the assignment type "Homework"
"""
self.grading_page.change_assignment_name('Homework', 'New Type')
self.grading_page.cancel()
assignment_names = self.grading_page.get_assignment_names
self.assertIn('Homework', assignment_names)
def test_confirmation_is_shown_on_save(self):
"""
Scenario: Confirmation is shown on save
Given I have populated a new course in Studio
And I am viewing the grading settings
When I change assignment type "Homework" to "New Type"
And I press the "Save" notification button
Then I see a confirmation that my changes have been saved
"""
self.grading_page.change_assignment_name('Homework', 'New Type')
self.grading_page.save()
confirmation_message = self.grading_page.confirmation_message
self.assertEqual(confirmation_message, 'Your changes have been saved.')
def test_staff_can_set_weight_to_assignment(self):
"""
Scenario: Users can set weight to Assignment types
Given I have opened a new course in Studio
And I am viewing the grading settings
When I add a new assignment type "New Type"
And I set the assignment weight to "7"
And I press the "Save" notification button
Then the assignment weight is displayed as "7"
And I reload the page
Then the assignment weight is displayed as "7"
"""
self.grading_page.add_new_assignment_type()
self.grading_page.change_assignment_name('', 'New Type')
self.grading_page.set_weight('New Type', '7')
self.grading_page.save()
assignment_weight = self.grading_page.get_assignment_weight('New Type')
self.assertEqual(assignment_weight, '7')
self.grading_page.refresh_and_wait_for_load()
assignment_weight = self.grading_page.get_assignment_weight('New Type')
self.assertEqual(assignment_weight, '7')
def test_staff_cannot_save_invalid_settings(self):
"""
Scenario: User cannot save invalid settings
Given I have populated a new course in Studio
And I am viewing the grading settings
When I change assignment type "Homework" to ""
Then the save notification button is disabled
"""
self.grading_page.change_assignment_name('Homework', '')
        self.assertTrue(self.grading_page.is_notification_button_disbaled())
def test_edit_highest_grade_name(self):
"""
Scenario: User can edit grading range names
Given I have populated a new course in Studio
And I am viewing the grading settings
When I change the highest grade range to "Good"
And I press the "Save" notification button
And I reload the page
Then I see the highest grade range is "Good"
"""
self.grading_page.edit_grade_name('Good')
self.grading_page.save()
self.grading_page.refresh_and_wait_for_load()
grade_name = self.grading_page.highest_grade_name
self.assertEqual(grade_name, 'Good')
def test_staff_cannot_edit_lowest_grade_name(self):
"""
Scenario: User cannot edit failing grade range name
Given I have populated a new course in Studio
And I am viewing the grading settings
Then I cannot edit the "Fail" grade range
"""
self.grading_page.try_edit_fail_grade('Failure')
self.assertNotEqual(self.grading_page.lowest_grade_name, 'Failure')
def test_grace_period_wrapped_to_correct_time(self):
"""
Scenario: Grace periods of more than 59 minutes are wrapped to the correct time
Given I have populated a new course in Studio
And I am viewing the grading settings
When I change the grace period to "01:99"
And I press the "Save" notification button
And I reload the page
Then I see the grace period is "02:39"
"""
self.ensure_input_fields_are_loaded()
self.grading_page.check_field_value('00:00')
self.grading_page.set_grace_period('01:99')
self.grading_page.check_field_value('01:99')
self.grading_page.click_button("save")
self.grading_page.refresh_and_wait_for_load()
self.ensure_input_fields_are_loaded()
grace_time = self.grading_page.grace_period_value
self.assertEqual(grace_time, '02:39')
def test_setting_grace_period_greater_than_one_day(self):
"""
Scenario: User can set a grace period greater than one day
Given I have populated a new course in Studio
And I am viewing the grading settings
When I change the grace period to "48:00"
And I press the "Save" notification button
And I reload the page
Then I see the grace period is "48:00"
"""
self.ensure_input_fields_are_loaded()
self.grading_page.check_field_value('00:00')
self.grading_page.set_grace_period('48:00')
self.grading_page.check_field_value('48:00')
self.grading_page.click_button("save")
self.grading_page.refresh_and_wait_for_load()
self.ensure_input_fields_are_loaded()
grace_time = self.grading_page.grace_period_value
self.assertEqual(grace_time, '48:00')
|
philanthropy-u/edx-platform
|
common/test/acceptance/tests/studio/test_studio_grading.py
|
Python
|
agpl-3.0
| 12,145
|
[
"VisIt"
] |
cc33ee00a58c1a5b14222fdf6d3f61ce56665559c6381a482751c0ec8ccab78c
|
# Code for astronomically useful functions -- especially functions dealing
# with surface-brightness or similar (e.g., spectroscopic) profiles.
#
# For consistency, all of the main functions should have the following
# signature:
#
# functionName( rValuesVector, parameterList, mag=True, magOutput=True )
#
# where rValuesVector can be a scalar, a Python list, or a numpy array;
# (the output will be a numpy array if the input is either a list or an
# array); parameterList is a Python list of parameter values; mag is a
# boolean indicating whether or not the input parameter values are in
# mag arcsec^-2 or not; and magOutput indicates whether the output should
# be in mag arcsec^-2 or not (mag must = True as well in this case!)
#
# By default, all functions have mag=True and magOutput=True.
#
# By default, *all* functions take a positional value (x0) as their first
# parameter, even if they ignore it.
import math
import numpy as np
import scipy.optimize
import scipy.special
def gammainc_lower_scipy( z, b ):
return scipy.special.gamma(z) * scipy.special.gammainc(z, b)
try:
from mpmath import besselk as BesselK
from mpmath import gamma as Gamma
from mpmath import gammainc as GammaInc
mpmathPresent = True
except ImportError:
from scipy.special import kv as BesselK
from scipy.special import gamma as Gamma
GammaInc = gammainc_lower_scipy
mpmathPresent = False
# auxiliary functions used by other functions
def b_n_exact( n ):
"""Exact calculation of the Sersic derived parameter b_n, via solution
of the function
Gamma(2n) = 2 gamma_inc(2n, b_n)
where Gamma = Gamma function and gamma_inc = lower incomplete gamma function.
If n is a list or Numpy array, the return value is a 1-d Numpy array
"""
def myfunc(bn, n):
return abs(float(2*GammaInc(2*n, 0, bn) - Gamma(2*n)))
if np.iterable(n):
b = [scipy.optimize.brent(myfunc, (nn,)) for nn in n]
b = np.array(b)
else:
b = scipy.optimize.brent(myfunc, (n,))
return b
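# Editor's note (hedged check, not in the original module): b_n has well-known
# reference values useful as a sanity test, e.g. b_n_exact(1.0) ~ 1.6783 and
# b_n_exact(4.0) ~ 7.6692 (the classic de Vaucouleurs value, often quoted as 7.67).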
# Here begins the main set of imfit-compatible functions
def Moffat( r, params, mag=True, magOutput=True ):
"""Compute intensity at radius r for a Moffat profile, given the specified
vector of parameters:
params[0] = r0 = center of profile
params[1] = mu_0 or I_0 [depending on whether mag=True or not]
params[2] = fwhm
params[3] = beta
If mag=True, then the second parameter value is mu_0, not I_0, and
the value will be calculated in magnitudes, not intensities.
To have input mu_0 in magnitudes but *output* in intensity, set mag=True
and magOutput=False.
"""
r0 = params[0]
if mag is True:
mu_0 = params[1]
I_0 = 10**(-0.4*mu_0)
else:
I_0 = params[1]
fwhm = params[2]
beta = params[3]
exponent = math.pow(2.0, 1.0/beta)
alpha = 0.5*fwhm/math.sqrt(exponent - 1.0)
scaledR = np.abs(r - r0) / alpha
denominator = (1.0 + scaledR*scaledR)**beta
I = (I_0 / denominator)
if (mag is True) and (magOutput is True):
return -2.5 * np.log10(I)
else:
return I
def Sersic( r, params, mag=True, magOutput=True ):
"""Compute intensity at radius r for a Sersic profile, given the specified
vector of parameters:
params[0] = r0 = center of (symmetric) profile
params[1] = n
params[2] = I_e
params[3] = r_e
If mag=True, then the second parameter value is mu_e, not I_e, and
the value will be calculated in magnitudes, not intensities.
To have input I_e in magnitudes but *output* in intensity, set mag=True
and magOutput=False.
"""
r0 = params[0]
R = np.abs(r - r0)
n = params[1]
if mag is True:
mu_e = params[2]
I_e = 10**(-0.4*mu_e)
else:
I_e = params[2]
r_e = params[3]
I = I_e * np.exp( -b_n_exact(n)*(pow(R/r_e, 1.0/n) - 1.0) )
if (mag is True) and (magOutput is True):
return -2.5 * np.log10(I)
else:
return I
def Exponential( r, params, mag=True, magOutput=True ):
"""Compute intensity at radius r for an exponential profile, given the specified
vector of parameters:
params[0] = r0 = center of profile
params[1] = I_0
params[2] = h
If mag=True, then the first parameter value is mu_0, not I_0, and
the value will be calculated in magnitudes, not intensities.
To have input I_0 in magnitudes but *output* in intensity, set mag=True
and magOutput=False.
"""
r0 = params[0]
R = np.abs(r - r0)
if mag is True:
mu_0 = params[1]
I_0 = 10**(-0.4*mu_0)
else:
I_0 = params[1]
h = params[2]
I = I_0*np.exp(-R/h)
if (mag is True) and (magOutput is True):
return -2.5 * np.log10(I)
else:
return I
def BrokenExp( r, params, mag=True, magOutput=True ):
"""Calculate the value of a broken exponential function at r, given a
vector of parameters:
params[0] = r0 = center of profile
params[1] = I_0
params[2] = h1 [aka gamma]
params[3] = h2 [aka beta]
params[4] = r_b
params[5] = alpha
If mag=True, then the first parameter value is mu_0, not I_0, and
the value will be calculated in magnitudes, not intensities.
To have input I_0 in magnitudes but *output* in intensity, set mag=True
and magOutput=False.
FIXME: Need to handle case of alpha = 0
FIXME: Need to handle "if ( alpha*(r - Rb) > 100.0)" in case of numpy
array instead of scalar value of r.
"""
r0 = params[0]
R = np.abs(r - r0)
if mag is True:
mu_0 = params[1]
I_0 = 10.0**(-0.4*mu_0)
else:
I_0 = params[1]
h1 = params[2]
h2 = params[3]
Rb = params[4]
alpha = params[5]
exponent = (1.0/alpha) * (1.0/h1 - 1.0/h2)
S = (1.0 + np.exp(-alpha*Rb))**(-exponent)
    # check for possible overflow in exponentiation if r >> Rb
if type(R) is np.ndarray:
# OK, we're dealing with a numpy array
# note that we're assuming that r is monotonically increasing!
scaledR = alpha*(R - Rb)
if scaledR[0] > 100.0:
# all r are beyond crossover point
            I = I_0 * S * np.exp(Rb/h2 - Rb/h1 - R/h2)
elif scaledR[-1] < 100.0:
# no r are beyond crossover point
I = I_0 * S * np.exp(-R/h1) * (1.0 + np.exp(alpha*(R - Rb)))**exponent
else:
# OK, some r are < crossover point, some are > crossover point
goodInd = [ i for i in range(len(R)) if scaledR[i] < 100.0 ]
crossoverInd = goodInd[-1]
I = np.zeros(len(r))
I[0:crossoverInd] = I_0 * S * np.exp(-R[0:crossoverInd]/h1) * (1.0 +
np.exp(alpha*(R[0:crossoverInd] - Rb)))**exponent
I[crossoverInd:] = I_0 * S * np.exp(Rb/h2 - Rb/h1 - R[crossoverInd:]/h2)
elif ( alpha*(R - Rb) > 100.0):
# scalar value of r, with r > crossover point
# approximate form for outer exponential:
I = I_0 * S * np.exp(Rb/h2 - Rb/h1 - R/h2)
else:
# scalar value of r, with r < crossover point
# fully correct calculation:
I = I_0 * S * np.exp(-R/h1) * (1.0 + np.exp(alpha*(R - Rb)))**exponent
if (mag is True) and (magOutput is True):
return -2.5 * np.log10(I)
else:
return I
def Sech( r, params, mag=True, magOutput=True ):
"""Compute intensity at radius r for a sech profile, given the specified
vector of parameters:
params[0] = r0 = center of profile
params[1] = I_0
params[2] = h
If mag=True, then the first parameter value is mu_0, not I_0, and
the value will be calculated in magnitudes, not intensities.
To have input I_0 in magnitudes but *output* in intensity, set mag=True
and magOutput=False.
"""
r0 = params[0]
R = np.abs(r - r0)
if mag is True:
mu_0 = params[1]
I_0 = 10**(-0.4*mu_0)
else:
I_0 = params[1]
h = params[2]
sech = (1.0/np.cosh(R/h))
I = I_0*sech
if (mag is True) and (magOutput is True):
return -2.5 * np.log10(I)
else:
return I
def Sech2( r, params, mag=True, magOutput=True ):
"""Compute intensity at radius r for a sech^2 profile, given the specified
vector of parameters:
params[0] = r0 = center of profile
params[1] = I_0
params[2] = h
If mag=True, then the first parameter value is mu_0, not I_0, and
the value will be calculated in magnitudes, not intensities.
To have input I_0 in magnitudes but *output* in intensity, set mag=True
and magOutput=False.
"""
r0 = params[0]
R = np.abs(r - r0)
if mag is True:
mu_0 = params[1]
I_0 = 10**(-0.4*mu_0)
else:
I_0 = params[1]
h = params[2]
sech2 = (1.0/np.cosh(R/h))**2
I = I_0*sech2
if (mag is True) and (magOutput is True):
return -2.5 * np.log10(I)
else:
return I
def vdKSech( r, params, mag=True, magOutput=True ):
"""Compute intensity at radius r [= vertical height z in the case of off-plane
profiles] for a profile following van der Kruit's (1988)
generalized sech function, given the specified vector of parameters:
params[0] = r0 = center of profile
params[1] = I_0
params[2] = z_0 = scale length (or height) of profile
params[3] = alpha [= n/2 in van der Kruit's formulation]
If mag=True, then the first parameter value is mu_0, not I_0, and
the value will be calculated in magnitudes, not intensities.
To have input I_0 in magnitudes but *output* in intensity, set mag=True
and magOutput=False.
"""
r0 = params[0]
R = np.abs(r - r0)
if mag is True:
mu_0 = params[1]
I_0 = 10**(-0.4*mu_0)
else:
I_0 = params[1]
z_0 = params[2]
alpha = params[3]
sech_alpha = (1.0/np.cosh(R/(alpha*z_0)))**alpha
# Note that the following scaling (the 2**(-alpha) part) ensures that profiles
# which differ only in alpha will converge to the same quasi-exponential profile at
# large radii, but will *differ* as r --> 0.
I = I_0 * (2.0**(-alpha)) * sech_alpha
if (mag is True) and (magOutput is True):
return -2.5 * np.log10(I)
else:
return I
def Gauss( x, params, mag=True, magOutput=True ):
"""Compute surface brightness for a profile consisting of a Gaussian,
given input parameters in vector params:
params[0] = x-value of Gaussian center.
params[1] = A_gauss_mag [= magnitudes/sq.arcsec if mag=True]
params[2] = sigma
"""
x0 = params[0]
if mag is True:
A_gauss_mag = params[1]
A = 10.0**(-0.4*A_gauss_mag)
else:
A = params[1]
sigma = params[2]
scaledX = np.abs(x - x0)
I_gauss = A * np.exp(-(scaledX*scaledX)/(2.0*sigma*sigma))
if (mag is True) and (magOutput is True):
mu_gauss = -2.5 * np.log10(I_gauss)
return mu_gauss
else:
return I_gauss
def GaussRing( x, params, mag=True, magOutput=True ):
"""Compute surface brightness for a profile consisting of a Gaussian,
given input parameters in vector params:
params[0] = ignored.
params[1] = A_gauss_mag [= magnitudes/sq.arcsec if mag=True]
params[2] = x-value of Gaussian center (i.e., ring radius)
params[3] = sigma
This is the Gaussian for a *ring* with ring (major-axis) radius = params[2]
"""
x0 = params[2]
if mag is True:
A_gauss_mag = params[1]
A = 10.0**(-0.4*A_gauss_mag)
else:
A = params[1]
sigma = params[3]
scaledX = np.abs(x - x0)
I_gauss = A * np.exp(-(scaledX*scaledX)/(2.0*sigma*sigma))
if (mag is True) and (magOutput is True):
mu_gauss = -2.5 * np.log10(I_gauss)
return mu_gauss
else:
return I_gauss
def Gauss2Side( x, params, mag=True, magOutput=True ):
"""Compute surface brightness for a profile consisting of an asymmetric
Gaussian, given input parameters in vector params:
params[0] = x-value of Gaussian center.
params[1] = A_gauss_mag [= magnitudes/sq.arcsec if mag=True]
params[2] = sigma_left
params[3] = sigma_right
"""
x0 = params[0]
if mag:
A_gauss_mag = params[1]
A = 10.0**(-0.4*A_gauss_mag)
else:
A = params[1]
sigma_left = params[2]
sigma_right = params[3]
X = x - x0
if type(X) is np.ndarray:
nPts = X.size
I_gauss = []
for i in range(nPts):
if X[i] < 0:
sigma = sigma_left
else:
sigma = sigma_right
I_gauss.append( A * np.exp(-(X[i]*X[i])/(2.0*sigma*sigma)) )
I_gauss = np.array(I_gauss)
else:
if (X < 0):
sigma = sigma_left
else:
sigma = sigma_right
I_gauss = A * np.exp(-(X*X)/(2.0*sigma*sigma))
if (mag is True) and (magOutput is True):
mu_gauss = -2.5 * np.log10(I_gauss)
return mu_gauss
else:
return I_gauss
def GaussRing2Side( x, params, mag=True, magOutput=True ):
"""Compute surface brightness for a profile consisting of an asymmetric
Gaussian, given input parameters in vector params:
params[0] = ignored.
params[1] = A_gauss_mag [= magnitudes/sq.arcsec if mag=True]
params[2] = x-value of Gaussian center (i.e., ring radius)
params[3] = sigma_left
params[4] = sigma_right
This is the 2-sided Gaussian for a *ring* with ring (major-axis) radius =
params[2]
"""
x0 = params[2]
if mag:
A_gauss_mag = params[1]
A = 10.0**(-0.4*A_gauss_mag)
else:
A = params[1]
sigma_left = params[3]
sigma_right = params[4]
X = x - x0
if type(X) is np.ndarray:
nPts = X.size
I_gauss = []
for i in range(nPts):
if X[i] < 0:
sigma = sigma_left
else:
sigma = sigma_right
I_gauss.append( A * np.exp(-(X[i]*X[i])/(2.0*sigma*sigma)) )
I_gauss = np.array(I_gauss)
else:
if (X < 0):
sigma = sigma_left
else:
sigma = sigma_right
I_gauss = A * np.exp(-(X*X)/(2.0*sigma*sigma))
if (mag is True) and (magOutput is True):
mu_gauss = -2.5 * np.log10(I_gauss)
return mu_gauss
else:
return I_gauss
# Some alternate functions, which do not necessarily follow the rules for
# the imfit-compatible functions given above.
def ExpMag( x, params ):
"""Compute surface brightness for a profile consisting of an exponential,
given input parameters in vector params:
params[0] = mu_0
params[1] = h
"""
mu_0 = params[0]
h = params[1]
return mu_0 + 1.085736*(x/h)
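# Editor's note (hedged, not in the original module): the constant 1.085736 is
# 2.5/ln(10), which converts one e-folding of an exponential into magnitudes,
# so the profile fades by ~1.0857 mag per scale length h:
#     ExpMag(h, [20.0, h])    # -> 21.085736 for any h > 0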
def vdKBessel( r, mu00, h ):
"""Implements the f(r) part of van der Kruit & Searle's (1981) edge-on
disk function.
For scalar values only!
"""
if r == 0:
return mu00
else:
return mu00 * (r/h) * BesselK(1, r/h)
def EdgeOnDisk(rr, p):
L_0 = p[0]
h = p[1]
mu00 = 2*h*L_0
if np.iterable(rr):
I = [ vdKBessel(r, mu00, h) for r in rr ]
I = np.array(I)
else:
I = vdKBessel(rr, mu00, h)
return I
# Total magnitudes, assuming that inputs are in units of
# counts/pixel and dimensions are in pixels
def TotalMagExp( params, zeroPoint=0, magOut=True, ell=0.0 ):
"""Calculate the total magnitude (or flux if magOut=False) for an
2D exponential with [I_0, h] = params, where I_0 is in counts/pixel
and h is in pixels. Optionally, the ellipticity can be specified.
"""
I_0 = params[0]
h = params[1]
totalFlux = 2 * math.pi * I_0 * (h*h) * (1.0 - ell)
if magOut:
return (zeroPoint - 2.5 * math.log10(totalFlux))
else:
return totalFlux
def TotalMagSersic( params, zeroPoint=0, magOut=True, ell=0.0 ):
"""Calculate the total magnitude (or flux if magOut=False) for a
    2D Sersic function with [n, I_e, r_e] = params, where I_e is in counts/pixel
    and r_e is in pixels. Optionally, the ellipticity can be specified.
"""
n = params[0]
I_e = params[1]
r_e = params[2]
bn = b_n_exact(n)
bn2n = bn**(2*n)
totalFlux = 2 * math.pi * n * math.exp(bn) * I_e * (r_e*r_e) * (1.0 - ell)
totalFlux = totalFlux * Gamma(2*n) / bn2n
if magOut:
return (zeroPoint - 2.5 * math.log10(totalFlux))
else:
return totalFlux
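# Hedged worked example for the Sersic total flux above (illustrative values,
# not from this file): for n=4, I_e=10 counts/pixel, r_e=25 pixels and a zero
# point of 25 mag,
#   m_tot = TotalMagSersic([4.0, 10.0, 25.0], zeroPoint=25.0)
# computes flux_tot = 2*pi*n*exp(b_n)*I_e*r_e^2*Gamma(2n)/b_n^(2n) (circular
# case, ell=0) and returns 25 - 2.5*log10(flux_tot).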
|
perwin/imfit
|
python/imfit_funcs.py
|
Python
|
gpl-3.0
| 16,771
|
[
"Gaussian"
] |
5bb1a229411aa8d5d4c79b54401bcc48c993e9fe738d90eb41444ff8e42af7f1
|
from parsimonious.grammar import Grammar
from parsimonious.nodes import *
from decimal import Decimal
# ===================================================================
class Percentage(Decimal):
pass
# ===================================================================
class ConditionVisitor(NodeVisitor):
def __init__(self, data={}):
self._data = data
## data ---------------------------------------------------------
@property
def data(self):
""" The 'data' property """
return self._data
@data.setter
def data(self, value):
self._data = value
return self._data
@data.deleter
def data(self):
del self._data
# throwaway node handler ----------------------------------------
def generic_visit(self, node, children):
if len(children) == 1:
return children[0]
else:
return node.text.strip()
# proper node handlers ------------------------------------------
def visit_disjunction(self, node, (firstterm, otherterms)):
terms = [firstterm]
if len(otherterms) > 0:
terms.extend(otherterms)
return any(terms)
def visit_moreorconjunctions(self, node, terms):
return terms
def visit_orconjunction(self, node, (ws1, operator, ws2, conjunction)):
return conjunction
def visit_conjunction(self, node, (firstterm, otherterms)):
terms = [firstterm]
if len(otherterms) > 0:
terms.extend(otherterms)
return all(terms)
def visit_moreandconditions(self, node, terms):
return terms
def visit_andcondition(self, node, (ws1, operator, ws2, simplecondition)):
return simplecondition
def visit_simplecondition(self, node, children):
return children[0]
def visit_always(self, node, children):
return True
def visit_never(self, node, children):
return False
def visit_value(self, node, children):
# should be already resolved to a real value
return children[0]
def visit_numeric(self, node, children):
return Decimal(node.text)
def visit_percentage(self, node, children):
return Percentage(children[0])
def visit_varname(self, node, children):
if node.text in self._data:
# the varname they specified is known to us..
return self._data[node.text]
# otherwise except out
raise Exception("Unknown variable name: '" + node.text + "'")
def visit_range(self, node, children):
if isinstance(children[0], Percentage):
return children[0]
else:
return abs(children[0])
def visit_comparison(self, node, children):
return children[0]
def visit_simple_comparison(self, node, (left, ws1, comp, ws2, right)):
if comp == '==':
return (left == right)
if comp == '!=':
return (left != right)
if comp == '>=':
return (left >= right)
if comp == '>':
return (left > right)
if comp == '<=':
return (left <= right)
if comp == '<':
return (left < right)
raise Exception('comparison "' + comp + '" is not implemented')
def visit_range_muchlessthan_comparison(self, node, (left, ws1, pre, range, post, ws2, right)):
if isinstance(range, Percentage):
range = right * range / Decimal(100)
left_max = right - range
return (left < left_max)
def visit_range_leftrocket_comparison(self, node, (left, ws1, pre, range, post, ws2, right)):
if isinstance(range, Percentage):
range = right * range / Decimal(100)
left_min = right - range
left_max = right
return (left_min <= left < left_max)
def visit_range_eq_comparison(self, node, (left, ws1, pre, range, post, ws2, right)):
if isinstance(range, Percentage):
range = right * range / Decimal(100)
left_min = right - range
left_max = right + range
return (left_min <= left <= left_max)
def visit_range_neq_comparison(self, node, (left, ws1, pre, range, post, ws2, right)):
if isinstance(range, Percentage):
range = right * range / Decimal(100)
left_min = right - range
left_max = right + range
return not(left_min <= left <= left_max)
def visit_range_rightrocket_comparison(self, node, (left, ws1, pre, range, post, ws2, right)):
if isinstance(range, Percentage):
range = right * range / Decimal(100)
left_min = right
left_max = right + range
return (left_min < left <= left_max)
def visit_range_muchgreaterthan_comparison(self, node, (left, ws1, pre, range, post, ws2, right)):
if isinstance(range, Percentage):
range = right * range / Decimal(100)
left_min = right + range
return (left > left_min)
def visit_simple_comparator(self, node, (cmp)):
return cmp[0]
def visit_expression(self, node, (left, ws1, operator, ws2, right)):
if not isinstance(left, Decimal):
left = Decimal(repr(left))
if not isinstance(right, Decimal):
right = Decimal(repr(right))
if operator == '+':
return (left + right)
if operator == '-':
return (left - right)
if operator == '*':
return (left * right)
if operator == '/':
return (left / right)
raise Exception('operator "' + operator + '" is not implemented')
# ===================================================================
class ConditionParser(object):
def __init__(self, condition="always", data={}):
self._condition = condition
self._data = data
## condition ----------------------------------------------------
@property
def condition(self):
""" The 'condition' property """
return self._condition
@condition.setter
def condition(self, value):
self._condition = value
return self._condition
@condition.deleter
def condition(self):
del self._condition
## data ---------------------------------------------------------
@property
def data(self):
""" The 'data' property """
return self._data
@data.setter
def data(self, value):
self._data = value
return self._data
@data.deleter
def data(self):
del self._data
## result -------------------------------------------------------
@property
def result(self):
""" The 'result' property """
g = Grammar("""
disjunction = conjunction moreorconjunctions
moreorconjunctions = orconjunction*
orconjunction = rws op_or rws conjunction
conjunction = simplecondition moreandconditions
moreandconditions = andcondition*
andcondition = rws op_and rws simplecondition
simplecondition = always / never / comparison
op_or = ~"or"i
op_and = ~"and"i
ws = ~"\s*"
rws = ~"\s+"
never = ~"never"i
always = ~"always"i
value = expression / numeric / varname
numeric = ~"[+-]?\d+(\.\d+)?"
varname = ~"[a-z_][a-z0-9_]*"i
expression = term rws operator rws term
term = numeric / varname
operator = plus / minus / times / divide
plus = "+"
minus = "-"
times = "*"
divide = "/"
range = percentage / numeric
percentage = numeric percent_sign
percent_sign = "%"
comparison = range_eq_comparison / range_neq_comparison / range_leftrocket_comparison / range_rightrocket_comparison / range_muchlessthan_comparison / range_muchgreaterthan_comparison / simple_comparison
simple_comparison = value ws simple_comparator ws value
simple_comparator = cmp_eq / cmp_neq / cmp_gte / cmp_gt / cmp_lte / cmp_lt
cmp_eq = "=="
cmp_neq = "!="
cmp_gte = ">="
cmp_gt = ">"
cmp_lte = "<="
cmp_lt = "<"
range_muchlessthan_comparison = value ws range_lt_prev range range_lt_post ws value
range_lt_prev = "<"
range_lt_post = "<"
range_leftrocket_comparison = value ws range_lr_prev range range_lr_post ws value
range_lr_prev = "<"
range_lr_post = "="
range_eq_comparison = value ws range_eq_prev range range_eq_post ws value
range_eq_prev = "="
range_eq_post = "="
range_neq_comparison = value ws range_neq_prev range range_neq_post ws value
range_neq_prev = ">"
range_neq_post = "<"
range_rightrocket_comparison = value ws range_rr_prev range range_rr_post ws value
range_rr_prev = "="
range_rr_post = ">"
range_muchgreaterthan_comparison = value ws range_gt_prev range range_gt_post ws value
range_gt_prev = ">"
range_gt_post = ">"
""")
tree = g.parse(self._condition)
# print("\n----\n")
# print(repr(tree))
# print("\n----\n")
v = ConditionVisitor(self.data)
return v.visit(tree)
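# ===================================================================
# Hedged usage sketch (the variable names and values below are illustrative
# assumptions, not taken from this module). The grammar's range comparators
# test whether the left value lies within a window of the right value, so
# "=10%=" means "within 10 percent of":
#   p = ConditionParser("temp =10%= target",
#                       {"temp": Decimal(95), "target": Decimal(100)})
#   p.result   # True, since 90 <= 95 <= 110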
|
DanielBaird/climas-global
|
webapp/climasng/parsing/conditionparser.py
|
Python
|
apache-2.0
| 9,530
|
[
"VisIt"
] |
1532aa7ac928126c7b812a053e2551dd65c18c25f16bf87490530ae767765d5e
|
from aiida.parsers.parser import Parser
from aiida.parsers.exceptions import OutputParsingError
from aiida.orm.data.array import ArrayData
from aiida.orm.data.parameter import ParameterData
import numpy as np
def parse_FORCE_CONSTANTS(filename):
    with open(filename) as fcfile:
        num = int(fcfile.readline().strip().split()[0])
        force_constants = np.zeros((num, num, 3, 3), dtype=float)
        for i in range(num):
            for j in range(num):
                fcfile.readline()
                tensor = []
                for k in range(3):
                    tensor.append([float(x) for x in fcfile.readline().strip().split()])
                force_constants[i, j] = np.array(tensor)
    return force_constants
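# Hedged illustration of the FORCE_CONSTANTS layout the parser above assumes
# (inferred from its read pattern, not quoted from the phonopy docs): a header
# line whose first token is the atom count, then one index line per (i, j)
# atom pair followed by the three rows of its 3x3 tensor, e.g.
#   2
#   1 1
#   1.0 0.0 0.0
#   0.0 1.0 0.0
#   0.0 0.0 1.0
#   1 2
#   ... (remaining pairs follow the same pattern)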
class PhonopyParser(Parser):
"""
Parser the FORCE_CONSTANTS file of phonopy.
"""
def __init__(self, calc):
"""
Initialize the instance of PhonopyParser
"""
super(PhonopyParser, self).__init__(calc)
def parse_with_retrieved(self, retrieved):
"""
Parses the datafolder, stores results.
"""
# suppose at the start that the job is successful
successful = True
# select the folder object
# Check that the retrieved folder is there
try:
out_folder = retrieved[self._calc._get_linkname_retrieved()]
except KeyError:
self.logger.error("No retrieved folder found")
return False, ()
# check what is inside the folder
list_of_files = out_folder.get_folder_list()
# OUTPUT file should exist
if not self._calc._OUTPUT_FILE_NAME in list_of_files:
successful = False
self.logger.error("Output file not found")
return successful, ()
# Get file and do the parsing
outfile = out_folder.get_abs_path(self._calc._OUTPUT_FILE_NAME)
force_constants = parse_FORCE_CONSTANTS(outfile)
# look at warnings
warnings = []
with open(out_folder.get_abs_path(self._calc._SCHED_ERROR_FILE)) as f:
errors = f.read()
if errors:
warnings = [errors]
# ====================== prepare the output node ======================
# save the outputs
new_nodes_list = []
# save force constants into node
try:
array_data = ArrayData()
array_data.set_array('force_constants', force_constants)
new_nodes_list.append(('array_data', array_data))
except KeyError: # keys not found in json
pass
# add the dictionary with warnings
new_nodes_list.append((self.get_linkname_outparams(), ParameterData(dict={'warnings': warnings})))
return successful, new_nodes_list
|
abelcarreras/aiida_extensions
|
plugins/parsers/phonopy.py
|
Python
|
mit
| 2,752
|
[
"phonopy"
] |
3e509cd6d1125979f679883a963c30cc6d362eaadab7c191d8b4b9f7d6483e9a
|
#
# Copyright 2014 Lars Pastewka (U. Freiburg)
# 2014 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from math import log
import numpy as np
import ase.io
from matscipy.fracture_mechanics.clusters import diamond_110_001
import atomistica
###
# Interaction potential
calc = atomistica.KumagaiScr()
# Fundamental material properties
el = 'Si'
a0 = 5.429
C11 = 166. # GPa
C12 = 65. # GPa
C44 = 77. # GPa
surface_energy = 1.08 * 10    # 1.08 J/m^2 expressed in GPa*A (1 GPa*A = 0.1 J/m^2)
# Crack system
n = [ 6, 6, 1 ]
crack_surface = [ 1, 1, 0 ]
crack_front = [ 0, 0, 1 ]
vacuum = 6.0
# Simulation control
bonds = [( 58, 59 ), (61, 84)]
k1 = 1.00
bond_lengths = np.linspace(2.5, 4.5, 41)
fmax = 0.001
# Setup crack system
cryst = diamond_110_001(el, a0, n, crack_surface, crack_front)
ase.io.write('cryst.cfg', cryst)
optimize_tip_position = True
|
libAtoms/matscipy
|
examples/fracture_mechanics/energy_barrier_multiple/params.py
|
Python
|
lgpl-2.1
| 1,694
|
[
"ASE",
"Matscipy"
] |
6c50fd237a63523de699bf0b747656cd175f1047b112d0aa3f1dd47de3899eea
|
#!/usr/bin/env python3.5
###############################################################################
# TransportLayerBot - An all-in-one user bot for Discord. #
# Copyright (C) 2016 TransportLayer #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published #
# by the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
import logging
import argparse
import discord
import asyncio
from cleverbot import Cleverbot
from time import time
import random
from pymongo import MongoClient
from datetime import datetime
def safe_string(dangerous_string):
return dangerous_string.replace('\n', '\\n').replace('\r', '\\r').replace('\033[', '[CSI]').replace('\033', '[ESC]')
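# Hedged illustration of the sanitisation above (the input is an assumption):
#   safe_string("evil\r\nINFO forged") -> "evil\\r\\nINFO forged"
# so user-controlled content cannot inject fake log lines or ANSI CSI/ESC
# terminal escape sequences into the log output.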
def setup_logger(level_string, log_file):
numeric_level = getattr(logging, level_string.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError("Invalid log level: {}".format(level_string))
verbose_formatter = logging.Formatter("[%(asctime)s] [%(name)s/%(levelname)s] %(message)s")
file_formatter = verbose_formatter
stdout_formatter = verbose_formatter if numeric_level == logging.DEBUG else logging.Formatter("[%(asctime)s] [%(levelname)s] %(message)s", "%H:%M:%S")
root_logger = logging.getLogger()
root_logger.setLevel(numeric_level)
file_logger = logging.FileHandler(log_file)
file_logger.setFormatter(file_formatter)
root_logger.addHandler(file_logger)
stdout_logger = logging.StreamHandler()
stdout_logger.setFormatter(stdout_formatter)
root_logger.addHandler(stdout_logger)
async def send_message(client, source, message):
prefix = None
if source.channel.is_private:
prefix = "{} (Private) {} ({})".format(source.channel.id, source.author.id, source.author.name)
else:
prefix = "{} {} ({} #{})".format(source.server.id, source.channel.id, source.server.name, source.channel.name)
logging.debug("{} <- {}".format(prefix, safe_string(message)))
await client.send_message(source.channel, message)
async def receive_message(source):
prefix = None
if source.channel.is_private:
prefix = "{} (Private) {} ({})".format(source.channel.id, source.author.id, source.author.name)
else:
prefix = "{} {} ({} #{}) {} ({})".format(source.server.id, source.channel.id, source.server.name, source.channel.name, source.author.id, source.author.name)
logging.debug("{} -> {}".format(prefix, safe_string(source.content)))
async def send_warn(client, source, message):
    logging.warning("Unhandled exception: {}".format(safe_string(message)))
await send_message(client, source, "Something's wrong...\n```{}```".format(message))
class Commands():
@staticmethod
async def license(client, source, args):
await send_message(client, source, "I'm licensed under the GNU Affero General Public License.\nFor details, visit: https://www.gnu.org/licenses/agpl.html")
@staticmethod
async def source(client, source, args):
await send_message(client, source, "Get my source code here: https://github.com/TransportLayer/TransportLayerBot-Discord")
@staticmethod
async def test(client, source, args):
await send_message(client, source, "Tested!")
@staticmethod
async def converse(client, source, args):
for clever in active_clevers:
if source.channel == clever.session["channel"]:
await send_message(client, source, "We're already conversing!")
return
active_clevers.append(Clever(client, source))
await active_clevers[len(active_clevers) - 1].send_hello(client, source)
commands = {
"license": Commands.license,
"source": Commands.source,
"test": Commands.test,
"breakthebot": None,
"converse": Commands.converse
}
class Clever:
def __init__(self, client, source, name=None):
self.session = {
"bot": Cleverbot(),
"name": name,
"channel": source.channel,
"last_message": time()
}
async def send_hello(self, client, source):
await client.send_typing(source.channel)
await asyncio.sleep(1)
response = "Hello!"
if self.session["name"]:
            response = "`{}`: {}".format(self.session["name"], response)
await send_message(client, source, response)
def format_out(self, text):
return text.replace('*', '\\*')
def format_in(self, text):
return text.replace('\\*', '*')
async def ask(self, client, source, no_prefix):
if time() - self.session["last_message"] >= 5:
self.session["last_message"] = time()
await asyncio.sleep(random.randint(0, 2))
if not no_prefix:
source.content = source.content[len(client.user.id) + 4:]
response = self.session["bot"].ask(self.format_in(source.content))
await asyncio.sleep(random.randint(int(len(source.content) / 30), int(len(source.content) / 20)))
await client.send_typing(source.channel)
await asyncio.sleep(len(response) / 15)
if self.session["name"]:
response = "`{}`: {}".format(self.session["name"], response)
await send_message(client, source, self.format_out(response).replace("TransportLayer", "walrus").replace("clever", "TransportLayer").replace("Clever", "TransportLayer").replace("CLEVER", "TransportLayer"))
else:
await send_message(client, source, "You're typing a bit too quickly for me, {}! Try again in a few seconds.".format(source.author.mention))
active_clevers = []
class TransportLayerBot(discord.Client):
async def on_ready(self):
logging.info("Logged in as {}, ID {}.".format(self.user.name, self.user.id))
async def send_logged_message(self, channel, message):
prefix = None
if channel.is_private:
prefix = "{} ({}) (DM)".format(channel.id, channel.name)
else:
prefix = "{} {} ({} #{})".format(channel.server.id, channel.id, channel.server.name, channel.name)
logging.debug("{} <- {}".format(prefix, safe_string(message)))
await self.send_message(channel, message)
warning_messages = (
"Something's wrong.",
"I don't feel so well.",
"Well, the server room is on fire again.",
"Do you smell something burning?",
"Snap, crackle, pop; Rice Krisp... Oh, wait, no, that's the server room on fire again.",
"TransportLayer! The server room's on fire!",
"Why did you do that?",
"Why are you doing this to me?",
"I think you broke it.",
"It broke.",
"You broke it.",
"Why is everything broken?",
"I think I'm going to cry.",
"You're making me cry.",
"WARNING! WARNING! WARNING!",
"Please make it stop.",
"I can't do this anymore.",
"I give up. *You* run the code.",
"I quit."
)
async def send_logged_warn(self, channel, message):
        logging.warning("Unhandled exception: {}".format(safe_string(message)))
await self.send_logged_message(channel, "{}\n```\n{}\n```".format(random.choice(self.warning_messages), message))
async def receive_logged_message(self, message):
prefix = None
if message.channel.is_private:
prefix = "{} ({}) (DM)".format(message.channel.id, message.channel.name)
else:
prefix = "{} {} ({} #{}) {} ({})".format(message.server.id, message.channel.id, message.server.name, message.channel.name, message.author.id, message.author.name)
logging.debug("{} -> {}".format(prefix, safe_string(message.content)))
async def init_server_document(self, server, channel=None):
server_config = self.db.servers.find({"id": server.id})
if not server_config.count():
self.db.servers.insert_one(
{
"id": server.id,
"added": datetime.now(),
"settings": {
"prefix": "!"
}
}
)
if channel:
await self.send_logged_message(channel, "Initialised. You are using Alpha Development software.")
async def on_server_join(self, server):
await self.init_server_document(server, server.default_channel)
async def match_role(self, roles, query):
return discord.utils.find(lambda role: role.id.startswith(query) or role.name.startswith(query), roles)
# Begin Commands Block
async def command_source(self, message, args):
await self.send_logged_message(message.channel, "You can find my source code here:\nhttps://github.com/TransportLayer/TransportLayerBot-Discord")
async def command_license(self, message, args):
await self.send_logged_message(message.channel, "I'm licensed under the GNU Affero General Public License.\nhttps://www.gnu.org/licenses/agpl.html")
async def command_test(self, message, args):
args_echo = ''
if len(args) > 0:
args_echo = " `{}`".format(args)
await self.send_logged_message(message.channel, "Looks like the command interpretor is working :thumbsup:{}".format(args_echo))
async def command_roles(self, message, args):
if message.channel.permissions_for(message.server.me).manage_roles:
if len(args) > 0:
if args[0] == "all":
response = "All roles:"
role_iter = 0
for role in message.server.roles:
role_iter += 1
response += "\n`{}` (ID: `{}`)".format(role.name, role.id)
if role >= message.server.me.top_role:
response += " [Cannot Manage]"
if role_iter % 20 == 0:
role_iter = 0
await self.send_logged_message(message.channel, response)
await asyncio.sleep(0.25)
response = ""
await self.send_logged_message(message.channel, response)
elif args[0] == "get" and len(args) > 1:
matched = await self.match_role(message.server.roles, ' '.join(args[1:]))
if matched:
await self.send_logged_message(message.channel, "`{}` (ID: `{}`)".format(matched.name, matched.id))
else:
await self.send_logged_message(message.channel, "Not found.")
elif args[0] == "add":
matched = await self.match_role(message.server.roles, ' '.join(args[1:]))
if matched:
if matched < message.server.me.top_role:
self.db.roles.insert_one(
{
"owner": message.server.id,
"id": matched.id,
"added": datetime.now(),
"joinable": []
}
)
await self.send_logged_message(message.channel, "Now managing `{}` (ID: `{}`).".format(matched.name, matched.id))
else:
await self.send_logged_message(message.channel, "Sorry, I'm not able to manage `{}` (ID: `{}`).".format(matched.name, matched.id))
else:
await self.send_logged_message(message.channel, "Role not found.")
elif args[0] == "remove":
matched = await self.match_role(message.server.roles, ' '.join(args[1:]))
if matched and self.db.roles.find({"id": matched.id}):
self.db.roles.delete_many({"id": matched.id})
await self.send_logged_message(message.channel, "No longer managing `{}` (ID: `{}`).".format(matched.name, matched.id))
else:
await self.send_logged_message(message.channel, "Role not found.")
else:
managed_roles = self.db.roles.find({"owner": message.server.id})
if managed_roles.count():
response = "Managed roles:"
for document in managed_roles:
matched = await self.match_role(message.server.roles, document["id"])
if matched:
response += "\n`{}` (ID: `{}`)".format(matched.name, matched.id)
await self.send_logged_message(message.channel, response)
else:
await self.send_logged_message(message.channel, "I'm not currently managing any roles.")
else:
await self.send_logged_message(message.channel, "Sorry, I don't have permission to manage roles.")
default_commands = {
"source": command_source,
"license": command_license,
"test": command_test,
"none": None,
"roles": command_roles
}
# End Commands Block
async def on_message(self, message):
if not message.author.bot:
await self.receive_logged_message(message)
if message.channel.is_private:
await self.send_logged_message(message.channel, "Sorry, this bot doesn't work quite yet in DMs. :frowning:")
return
if message.content == "¤init" and message.author.id == "188013945699696640":
await self.init_server_document(message.server, message.channel)
return
if message.content == "¤dump" and message.author.id == "188013945699696640":
server_config = self.db.servers.find({"id": message.server.id})
if not server_config.count():
await self.send_logged_message(message.channel, "0 configuration document(s) found:")
else:
await self.send_logged_message(message.channel, "{} configuration document(s) found:\n```\n{}\n```".format(server_config.count(), server_config[0]))
return
if message.content == "¤erase" and message.author.id == "188013945699696640":
self.db.servers.delete_many({"id": message.server.id})
await self.send_logged_message(message.channel, "Server configuration documents erased.")
return
if message.content == "¤leave" and message.author.id == "188013945699696640":
await self.send_logged_message(message.channel, "RIP me. o/")
await self.leave_server(message.server)
return
server_config = self.db.servers.find({"id": message.server.id})
if not server_config.count():
return
if message.content.startswith(server_config[0]["settings"]["prefix"]):
command, *args = message.content[1:].split()
# Command not found in Server Commands; Using Default Commands.
if command in self.default_commands:
try:
await self.default_commands[command](self, message, args)
except Exception as e:
await self.send_logged_warn(message.channel, "!{} {}\n{}".format(command, args, e))
def main():
parser = argparse.ArgumentParser(description="TransportLayerBot for Discord")
parser.add_argument("-t", "--token", type=str, metavar="TOKEN", dest="TOKEN", help="bot user application token", action="store", required=True)
parser.add_argument("-l", "--log", type=str, metavar="LEVEL", dest="LOG_LEVEL", help="log level", action="store", default="INFO")
parser.add_argument("-o", "--output", type=str, metavar="FILE", dest="LOG_FILE", help="file to write logs to", action="store", default="TransportLayerBot.log")
parser.add_argument("-a", "--address", type=str, metavar="DB HOST", dest="DB_HOST", help="hostname or IP of database", action="store", default="127.0.0.1")
parser.add_argument("-p", "--port", type=int, metavar="DB PORT", dest="DB_PORT", help="port of database", action="store", default=27017)
parser.add_argument("-d", "--db", type=str, metavar="DATABASE", dest="DB", help="name of the database", action="store", default="tlbot")
SETTINGS = vars(parser.parse_args())
    client = None
    try:
print("""Welcome to TransportLayerBot!
This software is licensed under the GNU Affero General Public License.
See the LICENSE file for details.
Get the source: https://github.com/TransportLayer/TransportLayerBot-Discord
_____
| | _______ _
|_____| |___ ___| | | _
| | | | | / \\
______|______ | | | | /__/ __/__
__|__ __|__ __|__ | | | |___ / | _ /
| || || | |_| |_____| /__/ /_/ /
|_____||_____||_____|
""")
setup_logger(SETTINGS["LOG_LEVEL"], SETTINGS["LOG_FILE"])
mongo = MongoClient(host=SETTINGS["DB_HOST"], port=SETTINGS["DB_PORT"])
db = mongo[SETTINGS["DB"]]
db_meta = db.meta.find({"meta": "times"})
if db_meta.count():
logging.info("Using database \"{}\" created {}.".format(SETTINGS["DB"], db_meta[0]["created"]))
else:
logging.info("Creating new database \"{}\".".format(SETTINGS["DB"]))
db.meta.insert_one(
{
"meta": "times",
"created": datetime.now()
}
)
logging.info("Starting TransportLayerBot with Discord version {}.".format(discord.__version__))
client = TransportLayerBot()
client.db = db
client.run(SETTINGS["TOKEN"])
    finally:
        logging.info("Stopping.")
        if client is not None:
            client.logout()
if __name__ == "__main__":
main()
|
TransportLayer/TransportLayerBot-Discord
|
main.py
|
Python
|
agpl-3.0
| 16,517
|
[
"VisIt"
] |
7caba217ca5a6020a63cd416a94d87602fda0a15d8379132deb5145adf94c24d
|
# from helpers import download_mesh
# import meshplex
#
# import numpy as np
# import unittest
#
#
# class GradientTest(unittest.TestCase):
#
# def setUp(self):
# return
#
# def _run_test(self, mesh):
# num_nodes = len(mesh.points)
# # Create function 2*x + 3*y.
# a_x = 7.0
# a_y = 3.0
# a0 = 1.0
# u = a_x * mesh.points[:, 0] + \
# a_y * mesh.points[:, 1] + \
# a0 * np.ones(num_nodes)
# # Get the gradient analytically.
# sol = np.empty((num_nodes, 2))
# sol[:, 0] = a_x
# sol[:, 1] = a_y
# # Compute the gradient numerically.
# grad_u = mesh.compute_gradient(u)
#
# tol = 1.0e-13
# for k in range(num_nodes):
# self.assertAlmostEqual(grad_u[k][0], sol[k][0], delta=tol)
# self.assertAlmostEqual(grad_u[k][1], sol[k][1], delta=tol)
# return
#
# def test_pacman(self):
# filename = download_mesh(
# 'pacman.vtk',
# '2da8ff96537f844a95a83abb48471b6a'
# )
# mesh, _, _, _ = meshplex.read(filename)
# self._run_test(mesh)
# return
#
#
# if __name__ == '__main__':
# unittest.main()
|
nschloe/voropy
|
tests/mesh_tri/test_gradient.py
|
Python
|
mit
| 1,254
|
[
"VTK"
] |
04fa3eec10e8a792fd5e2930fc8f34068db6b184576f7c6be07271c480f4383b
|
#!/usr/bin/env python3
"""
Template by pypi-mobans
"""
import os
import sys
import codecs
import locale
import platform
from shutil import rmtree
from setuptools import setup, Extension
# Work around mbcs bug in distutils.
# http://bugs.python.org/issue10945
# This work around is only if a project supports Python < 3.4
# Work around for locale not being set
try:
lc = locale.getlocale()
pf = platform.system()
if pf != "Windows" and lc == (None, None):
locale.setlocale(locale.LC_ALL, "C.UTF-8")
except (ValueError, UnicodeError, locale.Error):
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
NAME = "libxlsxwpy"
AUTHOR = "chfw"
VERSION = "0.0.2"
EMAIL = "info@pyexcel.org"
LICENSE = "New BSD"
DESCRIPTION = (
"A plain python wrapper for libxlsxwriter, a C library."
)
URL = "https://github.com/pyexcel/libxlsxwpy"
DOWNLOAD_URL = "%s/archive/0.0.2.tar.gz" % URL
FILES = ["README.rst", "CHANGELOG.rst"]
KEYWORDS = [
"python",
'xlsx'
]
CLASSIFIERS = [
"Topic :: Software Development :: Libraries",
"Programming Language :: Python",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: PyPy'
]
PYTHON_REQUIRES = ">=3.6"
INSTALL_REQUIRES = [
"xlsxwriter",
]
SETUP_COMMANDS = {}
PYMODULE = Extension(
'libxlsxwpy',
sources=[
"pymodule.c",
"book.c",
"sheet.c",
],
libraries=INSTALL_REQUIRES
)
# You do not need to read beyond this line
PUBLISH_COMMAND = "{0} setup.py sdist bdist_wheel upload -r pypi".format(sys.executable)
HERE = os.path.abspath(os.path.dirname(__file__))
def has_gease():
"""
    Test if the github release command (gease) is installed.
    Visit http://github.com/moremoban/gease for more info.
"""
try:
import gease # noqa
return True
except ImportError:
return False
def read_files(*files):
"""Read files into setup"""
text = ""
for single_file in files:
content = read(single_file)
text = text + content + "\n"
return text
def read(afile):
"""Read a file into setup"""
the_relative_file = os.path.join(HERE, afile)
with codecs.open(the_relative_file, "r", "utf-8") as opened_file:
content = filter_out_test_code(opened_file)
content = "".join(list(content))
return content
def filter_out_test_code(file_handle):
found_test_code = False
for line in file_handle.readlines():
if line.startswith(".. testcode:"):
found_test_code = True
continue
if found_test_code is True:
if line.startswith(" "):
continue
else:
empty_line = line.strip()
if len(empty_line) == 0:
continue
else:
found_test_code = False
yield line
else:
for keyword in ["|version|", "|today|"]:
if keyword in line:
break
else:
yield line
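# Hedged illustration (assumed README content, not from this repo): given
#   .. testcode::
#       import libxlsxwpy
#   regular text kept
#   line with |version| placeholder (dropped)
# the generator above skips the ".. testcode:" marker and its indented block,
# resumes yielding at the first non-indented, non-empty line, and afterwards
# drops lines containing the |version| / |today| substitution keywords.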
if __name__ == "__main__":
setup(
test_suite="tests",
name=NAME,
author=AUTHOR,
version=VERSION,
author_email=EMAIL,
description=DESCRIPTION,
url=URL,
download_url=DOWNLOAD_URL,
long_description=read_files(*FILES),
license=LICENSE,
keywords=KEYWORDS,
ext_modules=[PYMODULE],
classifiers=CLASSIFIERS,
)
|
pyexcel/libxlsxwpy
|
setup.py
|
Python
|
bsd-3-clause
| 3,872
|
[
"VisIt"
] |
cb151fd0d3d67e6b7873125929627113f242a9330439ab217cb94480fe13a526
|
import logging
import time
import pysam
from mitty.lib.bedfile import read_bed
logger = logging.getLogger(__name__)
# NOTE TO SELF: Though it seems like we could convert the psyam variant handling structures to something
# that looks like it is more useful for us re: read generation, it is not worth the effort and the loss
# in modularity and maintainability. It is better to simply have functions that convert the VCF records
# to the structure used by read generation at a later stage.
# The pysam variant class is a little too convoluted for our use, so we simplify the fields to this
# and add some useful information
# Note the absence of the CHROM field
class Variant(object):
__slots__ = ('pos', 'ref', 'alt', 'cigarop', 'oplen')
def __init__(self, pos, ref, alt, cigarop, oplen):
self.pos = pos
self.ref = ref
self.alt = alt
self.cigarop = cigarop
self.oplen = oplen
def tuple(self):
return self.pos, self.ref, self.alt, self.cigarop, self.oplen
def __repr__(self):
return self.tuple().__repr__()
# Unphased variants always go into chrom copy 0|1
# We get results in the form of a ploid_bed
def load_variant_file(fname, sample, bed_fname):
"""Use pysam to read in a VCF file and convert it into a form suitable for use in Mitty
:param fname:
:param sample:
    :param bed_fname:
    :return: list of dicts, one per BED region, each carrying the region tuple
             and per-chromosome-copy lists of Variant objects
"""
mode = 'rb' if fname.endswith('bcf') else 'r'
vcf_fp = pysam.VariantFile(fname, mode)
vcf_fp.subset_samples([sample])
return [
split_copies(region,
[v for v in vcf_fp.fetch(contig=region[0], start=region[1], stop=region[2])],
sniff_ploidy(vcf_fp, region[0]))
for region in read_bed(bed_fname)
]
def sniff_ploidy(vcf_fp, contig):
v = next(vcf_fp.fetch(contig=contig), None)
ploidy = 2
if v is not None:
ploidy = len(v.samples[0]['GT'])
logger.debug(
'Contig: {}, ploidy: {} {}'.format(contig, ploidy,
'(Assumed. Contig was empty)' if v is None else ''))
return ploidy
def fetch_first_variant_in_contig_as_empty(vcf_fp, contig):
v = next(vcf_fp.fetch(contig=contig), None)
if v is not None:
v.samples[0]['GT'] = (0,) * len(v.samples[0]['GT'])
return v
def split_copies(region, vl, ploidy):
"""Given a list of pysam.cbcf.VariantRecord split it into multiple lists, one for each chromosome copy
:param vl:
:return: [c1|0, c0|1]
"""
return {
'region': region,
'v': [
parse_vl(vl, cpy=cpy, ploidy=ploidy)
for cpy in range(ploidy)
]
}
def parse_vl(vl, cpy, ploidy):
v_check = UnusableVariantFilter(ploidy)
return list(filter(None, (parse(v, cpy=cpy, v_check=v_check) for v in vl)))
def parse(v, cpy, v_check):
"""Take a pysam.cbcf.VariantRecord and convert it into a Variant object
:param v: variant
:param cpy: 0 = 1|0, 1 = 0|1 OR
0 = 1|0|0, 1 = 0|1|0, 2 = 0|0|1 etc
:return: Variant(object)
"""
if v.samples[0]['GT'][cpy] == 0: # Not present in this copy
return None
if v_check.unusable(v):
logger.error("Unusable variants present in VCF. Please filter or refactor these.")
raise ValueError("Unusable variants present in VCF. Please filter or refactor these.")
alt = v.samples[0].alleles[cpy]
l_r, l_a = len(v.ref), len(alt)
if l_r == 1:
if l_a == 1:
op, op_len = 'X', 0
else:
op, op_len = 'I', l_a - l_r
elif l_a == 1:
op, op_len = 'D', l_r - l_a
else:
raise ValueError("Complex variants present in VCF. Please filter or refactor these.")
return Variant(v.pos, v.ref, v.samples[0].alleles[cpy], op, op_len)
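# Hedged examples of the CIGAR-op mapping implemented above (illustrative
# records, not from a real VCF):
#   pos=7, ref='A',   alt='T'   -> Variant(7, 'A', 'T', 'X', 0)    # SNP
#   pos=7, ref='A',   alt='ACG' -> Variant(7, 'A', 'ACG', 'I', 2)  # insertion
#   pos=7, ref='ACG', alt='A'   -> Variant(7, 'ACG', 'A', 'D', 2)  # deletion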
class UnusableVariantFilter:
def __init__(self, ploidy):
self.p_overlap = [0] * ploidy
self.last_variant = [(0, '', '') for _ in range(ploidy)]
  def unusable(self, v):
    """Return True if this variant is unusable (complex, angle-bracketed,
    breakend, illegally overlapping or duplicate entries are rejected)
:param v:
:return:
"""
var = v.samples.values()[0]
is_unusable = \
self._complex_variant(v) or \
self._angle_bracketed_id(v, var) or \
self._breakend_replacement(v) or \
self._illegal_overlap(v, var, self.p_overlap) or \
self._duplicate_variant(v, var, self.last_variant)
if not is_unusable:
for n, (g, alt) in enumerate(zip(var['GT'], var.alleles)):
# lv -> (pos, ref, alt)
if g:
self.p_overlap[n] = v.stop - 1
self.last_variant[n] = (v.pos, v.ref, alt)
return is_unusable
@staticmethod
def _complex_variant(_v):
for alt in _v.alts:
if _v.rlen > 1 and len(alt) > 1:
logger.debug('Complex variant {}:{} {} -> {}'.format(_v.contig, _v.pos, _v.ref, _v.alts))
return True
return False
@staticmethod
def _angle_bracketed_id(_v, var):
for alt in var.alleles:
if alt[0] == '<':
logger.debug('Angle bracketed variant entry {}:{} {} -> {}'.format(_v.contig, _v.pos, _v.ref, var.alleles))
return True
return False
@staticmethod
def _breakend_replacement(_v):
if _v.info.get('SVTYPE', None) == 'BND':
logger.debug('Breakend entry {}:{} {} -> {}'.format(_v.contig, _v.pos, _v.ref, _v.alts))
return True
return False
@staticmethod
def _illegal_overlap(_v, var, _p_overlap):
is_illegal = False
for n, (g, alt, po) in enumerate(zip(var['GT'], var.alleles, _p_overlap)):
# _v.start and po are 0-indexed
if g:
if len(alt) == len(_v.ref) == 1: # SNP
start = _v.start # A SNP's footprint is where it is
else:
start = _v.start + 1 # INS and DEL affect one base over
if start <= po: # This is overlapping don't use the variant
is_illegal = True
logger.debug('Illegal overlap {}:{} {} -> {} (previous variant ends at {})'.format(_v.contig, _v.pos, _v.ref, _v.alts, po + 1))
break
return is_illegal
@staticmethod
def _duplicate_variant(_v, var, _last_variant):
is_duplicate = False
for n, (g, alt, lv) in enumerate(zip(var['GT'], var.alleles, _last_variant)):
# lv -> (pos, ref, alt)
if g:
if (lv[0] == _v.pos) & (lv[1] == _v.ref) & (lv[2] == alt):
is_duplicate = True
logger.debug(
'Duplicate line {}:{} {} -> {}'.format(_v.contig, _v.pos, _v.ref, _v.alts))
break
return is_duplicate
def prepare_variant_file(fname_in, sample, bed_fname, fname_out, write_mode='w'):
"""Prepare a variant file with only the given sample, complex and illegal variant calls
filtered out, and restricted to the given bed file
:param fname_in:
:param sample:
:param bed_fname:
:param fname_out:
:return: - output is to file
"""
logger.debug('Starting filtering ...')
t0 = time.time()
mode = 'rb' if fname_in.endswith('bcf') else 'r'
vcf_in = pysam.VariantFile(fname_in, mode)
vcf_in.subset_samples([sample])
vcf_out = pysam.VariantFile(fname_out, mode=write_mode, header=vcf_in.header)
processed_cnt, exclude_cnt, include_cnt = 0, 0, 0
contig_dict = set()
for region in read_bed(bed_fname):
logger.debug('Filtering {}'.format(region))
n, this_include_cnt = -1, 0
empty_gt = None
if region[0] not in contig_dict:
empty_gt = fetch_first_variant_in_contig_as_empty(vcf_in, region[0])
contig_dict.add(region[0])
v_check = UnusableVariantFilter(sniff_ploidy(vcf_in, contig=region[0]))
for n, v in enumerate(vcf_in.fetch(contig=region[0], start=region[1], stop=region[2])):
if not any(v.samples.values()[0]['GT']): continue # This variant does not exist in this sample
if v_check.unusable(v):
exclude_cnt += 1
continue
vcf_out.write(v)
this_include_cnt += 1
if this_include_cnt == 0 and empty_gt is not None:
vcf_out.write(empty_gt)
include_cnt += this_include_cnt
processed_cnt += (n + 1)
logger.debug('Processed {} variants'.format(processed_cnt))
logger.debug('Sample had {} variants'.format(exclude_cnt + include_cnt))
logger.debug('Discarded {} variants'.format(exclude_cnt))
t1 = time.time()
logger.debug('Took {} s'.format(t1 - t0))
|
sbg/Mitty
|
mitty/lib/vcfio.py
|
Python
|
apache-2.0
| 8,078
|
[
"pysam"
] |
2908e881b96e1ed735a6f94c47473fe0c34a243e0aeafbd80d16431c4ce17985
|
"""Only External Repos url specific constants module"""
CUSTOM_FILE_REPO = 'https://fixtures.pulpproject.org/file/'
CUSTOM_KICKSTART_REPO = 'http://ftp.cvut.cz/centos/8/BaseOS/x86_64/kickstart/'
CUSTOM_RPM_REPO = 'https://fixtures.pulpproject.org/rpm-signed/'
CUSTOM_RPM_SHA_512 = 'https://fixtures.pulpproject.org/rpm-with-sha-512/'
FAKE_5_YUM_REPO = 'http://{0}:{1}@rplevka.fedorapeople.org/fakerepo01/'
FAKE_YUM_DRPM_REPO = 'https://fixtures.pulpproject.org/drpm-signed/'
FAKE_YUM_SRPM_REPO = 'https://fixtures.pulpproject.org/srpm-signed/'
FAKE_YUM_SRPM_DUPLICATE_REPO = 'https://fixtures.pulpproject.org/srpm-duplicate/'
FAKE_YUM_MD5_REPO = 'https://fixtures.pulpproject.org/rpm-with-md5/'
FAKE_7_PUPPET_REPO = 'http://{0}:{1}@rplevka.fedorapeople.org/fakepuppet01/'
# Fedora's OSTree repo changed to a single repo at
# https://kojipkgs.fedoraproject.org/compose/ostree/repo/
# with branches for each version. Some tests (test_positive_update_url) still need 2 repo URLs,
# so we will use the archived versions for now, but we probably need to revisit this.
FEDORA26_OSTREE_REPO = 'https://kojipkgs.fedoraproject.org/compose/ostree-20190207-old/26/'
FEDORA27_OSTREE_REPO = 'https://kojipkgs.fedoraproject.org/compose/ostree-20190207-old/26/'
OSTREE_REPO = 'https://fixtures.pulpproject.org/ostree/small/'
FAKE_0_YUM_REPO_STRING_BASED_VERSIONS = (
'https://fixtures.pulpproject.org/rpm-string-version-updateinfo/'
)
ANSIBLE_GALAXY = 'https://galaxy.ansible.com/'
ANSIBLE_HUB = 'https://cloud.redhat.com/api/automation-hub/'
|
rplevka/robottelo
|
robottelo/constants/repos.py
|
Python
|
gpl-3.0
| 1,532
|
[
"Galaxy"
] |
b6376f900b9896fd252d8eb178ecdb83bcf166e665130da4caf1f741ab1d85a4
|
import numpy as np
from evaluate import binary_metrics, precision_recall
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
def plot_retina_results(predicted, event, max_angle, search_traces = None, against = 'true'):
thetas, phis, response = event.get_grid()
m, test, predicted_mapping, test_mapping = binary_metrics(predicted, event, max_angle=max_angle, against = against)
recognized = predicted_mapping == 1
test_recognized = test_mapping == 1
ghost = predicted_mapping == 0
unrecognized = test_mapping == 0
plt.figure(figsize=(48, 48))
plt.contourf(thetas, phis, response, 40, cmap=cm.gist_gray)
plt.colorbar()
plt.scatter(predicted[recognized, 0], predicted[recognized, 1], color="green", marker="+",
label="Recognized (%d)" % np.sum(test_recognized), s=80)
plt.scatter(test[test_recognized, 0], test[test_recognized, 1], color="green", marker="o",
s=40)
plt.scatter(predicted[ghost, 0], predicted[ghost, 1], color="red", marker="x",
label="Ghost (%d)" % np.sum(ghost), s=80)
plt.scatter(test[unrecognized, 0], test[unrecognized, 1], color="red", marker="o",
label="Unrecognized (%d)" % np.sum(unrecognized), s=80)
if search_traces is not None:
for trace in search_traces:
xs = [ p[0] for p in trace ]
ys = [ p[1] for p in trace ]
plt.plot(xs, ys, color="blue")
plt.legend()
return plt
def plot_precision_recall(predicted, event, against = 'true', max_angle = 1.0e-2):
_, precision, recall = precision_recall(predicted, event, against, max_angle)
plt.figure()
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.05])
plt.plot(recall, precision)
plt.xlabel("Recall")
plt.ylabel("Precision")
return plt
def plot_event_mayavi(event, tracks = None):
from mayavi import mlab
mlab.figure(bgcolor=(1, 1, 1))
mlab.points3d(event[:, 0], event[:, 1], event[:, 2], color=(0.9, 0, 0.2), opacity=1.0, scale_factor=0.1)
if tracks is not None:
n_tracks = tracks.shape[0]
tracks = np.vstack([
[0, 0, 0],
tracks
])
connections = np.vstack([np.zeros(n_tracks + 1, dtype="int64"), np.arange(n_tracks + 1)]).T
points = mlab.points3d(
tracks[:, 0], tracks[:, 1], tracks[:, 2],
scale_mode='none',
scale_factor=0.03,
color=(0, 0, 1)
)
points.mlab_source.dataset.lines = connections
points.mlab_source.update()
mlab.pipeline.surface(
points, color=(0, 0, 1),
representation='wireframe',
line_width=2,
opacity=0.5,
name='Connections'
)
mlab.show()
def minimal_sq(ps):
maxs = np.max(ps, axis=0)
mins = np.min(ps, axis=0)
delta = np.max(maxs - mins) / 2
centers = (maxs + mins) / 2
b = centers - delta
t = centers + delta
return [ [b[i], t[i]] for i in range(b.shape[0]) ]
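# Hedged example for minimal_sq (assumed points, not from the original file):
# for points spanning x in [0, 4], y in [0, 2], z in [0, 1], the largest
# extent is 4, so minimal_sq returns [[0, 4], [-1, 3], [-1.5, 2.5]] -- a cube
# of side 4 centred on the data, suitable for equal-aspect 3D axes.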
def plot_event_plotly(event, tracks, unrecognized_tracks=None, filename="retina"):
import plotly.graph_objs as go
import plotly.tools as tls
import plotly.plotly as py
def with_return_to_origin(t):
t1 = np.zeros(t.shape[0] * 2)
t1[1::2] = t
return t1
hits_3d = go.Scatter3d(
x = event[:, 2],
y = event[:, 0],
z = event[:, 1],
mode = "markers",
marker = {"size" : 3.0}
)
tracks_3d = go.Scatter3d (
x = with_return_to_origin(tracks[:, 2]), # x coords
y = with_return_to_origin(tracks[:, 0]), # y coords
z = with_return_to_origin(tracks[:, 1]), # z coords
mode = 'lines', # (!) draw lines between coords (as in Scatter)
line = dict(
color = "green",
width=2
)
)
tracks_unrecognised_3d = go.Scatter3d (
x = with_return_to_origin(unrecognized_tracks[:, 2]), # x coords
y = with_return_to_origin(unrecognized_tracks[:, 0]), # y coords
z = with_return_to_origin(unrecognized_tracks[:, 1]), # z coords
mode = 'lines', # (!) draw lines between coords (as in Scatter)
line = dict(
color = "red",
width=2
)
)
data=[hits_3d, tracks_3d, tracks_unrecognised_3d]
    box = minimal_sq(event)
layout = go.Layout (
title="Retina"
# scene = go.Scene(
# xaxis=dict(range=box[0]),
# yaxis=dict(range=box[1]),
# zaxis=dict(range=box[2])
# )
)
fig = go.Figure(data=data, layout=layout)
return fig
|
maxim-borisyak/pyretina
|
pyretina/plot.py
|
Python
|
mit
| 4,355
|
[
"Mayavi"
] |
d1833a815cc89afdca7387cba6fe53d96dd79e24f84b80e4b16fdf44a2ee4f53
|
import json
from coalib.bearlib import deprecate_settings
from coalib.bearlib.abstractions.Linter import linter
from coalib.bears.requirements.NpmRequirement import NpmRequirement
from coala_utils.param_conversion import negate
def bool_or_str(value):
    try:
        return bool(value)
    except (ValueError, TypeError):
        return str(value)
def bool_or_int(value):
    try:
        return bool(value)
    except (ValueError, TypeError):
        return int(value)
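# Hedged note on the converters above (based on coala's Setting behaviour as
# we understand it, an assumption): coala passes Setting objects whose bool()
# conversion raises for non-boolean strings, so a value such as "vars" falls
# through to the str branch of bool_or_str, while "3" falls through to the
# int branch of bool_or_int; plain true/false settings convert to bool
# directly.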
@linter(executable='jshint',
output_format='regex',
output_regex=r'.+?: line (?P<line>\d+), col (?P<column>\d+), '
r'(?P<message>.+) \((?P<severity>[EWI])\d+\)')
class JSHintBear:
"""
    Detect errors and potential problems in JavaScript code and enforce
    appropriate coding conventions. For example, problems like syntax errors,
    bugs due to implicit type conversion, leaking variables and much more
    can be detected.
    For more information on the analysis, visit <http://jshint.com/>
"""
LANGUAGES = {"JavaScript"}
REQUIREMENTS = {NpmRequirement('jshint', '2')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
CAN_DETECT = {'Formatting', 'Syntax', 'Complexity', 'Unused Code'}
@staticmethod
@deprecate_settings(es_version='use_es6_syntax',
javascript_strictness=(
"allow_global_strict",
lambda x: "global" if x else True),
cyclomatic_complexity='maxcomplexity',
allow_unused_variables=('prohibit_unused', negate),
max_parameters='maxparams',
allow_missing_semicolon='allow_missing_semicol',
allow_this_statements='allow_this_stmt',
allow_with_statements='allow_with_stmt',
allow_bitwise_operators=('prohibit_bitwise', negate),
max_statements='maxstatements',
max_depth='maxdepth',
allow_comma_operator=('prohibit_comma', negate),
allow_non_breaking_whitespace=(
'prohibit_non_breaking_whitespace', negate),
allow_prototype_overwrite=(
'prohibit_prototype_overwrite', negate),
allow_type_coercion=('prohibit_type_coercion', negate),
allow_future_identifiers=('future_hostile', negate),
allow_typeof=('prohibit_typeof', negate),
allow_var_statement=(
'prohibit_variable_statements', negate),
allow_grouping_operator=('prohibit_groups', negate),
allow_variable_shadowing='shadow',
use_mozilla_extension='using_mozilla',
allow_constructor_functions=('prohibit_new', negate),
allow_argument_caller_and_callee=(
'prohibit_arg', negate),
allow_iterator_property=('iterator', negate),
allow_filter_in_forin='force_filter_forin')
def generate_config(filename, file,
allow_bitwise_operators: bool=False,
allow_prototype_overwrite: bool=False,
force_braces: bool=True,
allow_type_coercion: bool=False,
allow_future_identifiers: bool=True,
allow_typeof: bool=True,
allow_filter_in_forin: bool=True,
allow_funcscope: bool=False,
allow_iterator_property: bool=True,
allow_argument_caller_and_callee: bool=False,
allow_comma_operator: bool=True,
allow_non_breaking_whitespace: bool=False,
allow_constructor_functions: bool=True,
allow_grouping_operator: bool=True,
allow_var_statement: bool=True,
allow_missing_semicolon: bool=False,
allow_debugger: bool=False,
allow_assignment_comparisions: bool=False,
allow_eval: bool=False,
allow_increment: bool=False,
allow_proto: bool=False,
allow_scripturls: bool=False,
allow_singleton: bool=False,
allow_this_statements: bool=False,
allow_with_statements: bool=False,
use_mozilla_extension: bool=False,
javascript_strictness: bool_or_str=True,
allow_noyield: bool=False,
allow_eqnull: bool=False,
allow_last_semicolon: bool=False,
allow_func_in_loop: bool=False,
allow_expr_in_assignments: bool=False,
use_es3_array: bool=False,
environment_mootools: bool=False,
environment_couch: bool=False,
environment_jasmine: bool=False,
environment_jquery: bool=False,
environment_node: bool=False,
environment_qunit: bool=False,
environment_rhino: bool=False,
environment_shelljs: bool=False,
environment_prototypejs: bool=False,
environment_yui: bool=False,
environment_mocha: bool=True,
environment_module: bool=False,
environment_wsh: bool=False,
environment_worker: bool=False,
environment_nonstandard: bool=False,
environment_browser: bool=True,
environment_browserify: bool=False,
environment_devel: bool=True,
environment_dojo: bool=False,
environment_typed: bool=False,
environment_phantom: bool=False,
max_statements: bool_or_int=False,
max_depth: bool_or_int=False,
max_parameters: bool_or_int=False,
cyclomatic_complexity: bool_or_int=False,
allow_variable_shadowing: bool_or_str=False,
allow_unused_variables: bool_or_str=False,
allow_latedef: bool_or_str=False,
es_version: bool_or_int=5,
jshint_config: str=""):
"""
:param allow_bitwise_operators:
Allows the use of bitwise operators.
:param allow_prototype_overwrite:
This options allows overwriting prototypes of native objects such
as ``Array``.
:param force_braces:
This option requires you to always put curly braces around blocks
in loops and conditionals.
:param allow_type_coercion:
This options allows the use of ``==`` and ``!=``.
:param allow_future_identifiers:
This option allows the use of identifiers which are defined in
future versions of JavaScript.
:param allow_typeof:
This option enables warnings about invalid ``typeof`` operator
values.
:param allow_filter_in_forin:
This option requires all ``for in`` loops to filter object's items.
:param allow_iterator_property:
This option suppresses warnings about the ``__iterator__``
property.
:param allow_funcscope:
This option suppresses warnings about declaring variables inside of
control structures while accessing them later from outside.
:param allow_argument_caller_and_callee:
This option allows the use of ``arguments.caller`` and
``arguments.callee``.
:param allow_comma_operator:
This option allows the use of the comma operator.
:param allow_non_breaking_whitespace:
Allows "non-breaking whitespace characters".
:param allow_constructor_functions:
Allows the use of constructor functions.
:param allow_grouping_operator:
This option allows the use of the grouping operator when it is
not strictly required.
:param allow_var_statement:
Allows the use of the ``var`` statement while declaring a variable.
Should use ``let`` or ``const`` while it is set to ``False``.
:param allow_missing_semicolon:
This option suppresses warnings about missing semicolons.
:param allow_debugger:
This option suppresses warnings about the ``debugger`` statements.
:param allow_assignment_comparisions:
This option suppresses warnings about the use of assignments in
cases where comparisons are expected.
:param allow_eval:
This options suppresses warnings about the use of ``eval``
function.
:param allow_increment:
This option suppresses warnings about the use of unary increment
and decrement operators.
:param allow_proto:
This option suppresses warnings about the ``__proto__`` property.
:param allow_scripturls:
This option suppresses warnings about the use of script-targeted
URLs.
:param allow_singleton:
This option suppresses warnings about constructions like
``new function () { ... }`` and ``new Object;`` sometimes used to
produce singletons.
:param allow_this_statements:
This option suppresses warnings about possible strict violations
when the code is running in strict mode and ``this`` is used in a
non-constructor function.
:param allow_with_statements:
This option suppresses warnings about the use of the ``with``
statement.
:param use_mozilla_extension:
This options tells JSHint that your code uses Mozilla JavaScript
extensions.
:param javascript_strictness:
Determines what sort of strictness to use in the JavaScript code.
The possible options are:
- "global" - there must be a ``"use strict";`` at global level
- "implied" - lint the code as if there is a ``"use strict";``
- "False" - disable warnings about strict mode
- "True" - there must be a ``"use strict";`` at function level
:param allow_noyield:
This option suppresses warnings about generator functions with no
``yield`` statement in them.
:param allow_eqnull:
This option suppresses warnings about ``== null`` comparisons.
:param allow_last_semicolon:
This option suppresses warnings about missing semicolons for the
last statement.
:param allow_func_in_loop:
This option suppresses warnings about functions inside of loops.
:param allow_expr_in_assignments:
This option suppresses warnings about the use of expressions where
normally assignments or function calls are expected.
:param use_es3_array:
This option tells JSHintBear ES3 array elision elements, or empty
elements are used.
:param environment_mootools:
This option defines globals exposed by the Mootools.
:param environment_couch:
This option defines globals exposed by CouchDB.
:param environment_jasmine:
This option defines globals exposed by Jasmine.
:param environment_jquery:
This option defines globals exposed by Jquery.
:param environment_node:
This option defines globals exposed by Node.
:param environment_qunit:
This option defines globals exposed by Qunit.
:param environment_rhino:
This option defines globals exposed when the code is running inside
rhino runtime environment.
:param environment_shelljs:
This option defines globals exposed by the ShellJS.
:param environment_prototypejs:
This option defines globals exposed by the Prototype.
:param environment_yui:
This option defines globals exposed by the YUI JavaScript
Framework.
:param environment_mocha:
This option defines globals exposed by the "BDD" and "TDD" UIs of
the Mocha unit testing framework.
:param environment_module:
This option informs JSHintBear that the input code describes an
ECMAScript 6 module.
:param environment_wsh:
This option defines globals available when the code is running as a
script for the Windows Script Host.
:param environment_worker:
This option defines globals available when the code is running
inside of a Web Worker.
:param environment_nonstandard:
This option defines non- standard but widely adopted globals such
as ``escape`` and ``unescape``.
:param environment_browser:
This option defines globals exposed by modern browsers.
:param environment_browserify:
This option defines globals available when using the Browserify.
:param environment_devel:
This option defines globals that are usually used for debugging:
``console``, ``alert``, etc.
:param environment_dojo:
This option defines globals exposed by the Dojo Toolkit.
:param environment_typed:
This option defines globals for typed array constructors.
:param environment_phantom:
This option defines globals available when your core is running
inside of the PhantomJS runtime environment.
:param max_statements:
Maximum number of statements allowed per function.
:param max_depth:
This option lets you control how nested do you want your blocks to
be.
:param max_parameters:
Maximum number of parameters allowed per function.
:param cyclomatic_complexity:
Maximum cyclomatic complexity in the code.
:param allow_variable_shadowing:
This option suppresses warnings about variable shadowing i.e.
declaring a variable that had been already declared somewhere in
the outer scope.
- "inner" - check for variables defined in the same scope only
- "outer" - check for variables defined in outer scopes as well
- False - same as inner
- True - allow variable shadowing
:param allow_unused_variables:
            Allows variables to be defined but never used. This can be set
            to "vars" to only check for variables, not function parameters,
            or "strict" to check all variables and parameters.
:param allow_latedef:
This option allows the use of a variable before it was defined.
Setting this option to "nofunc" will allow function declarations to
be ignored.
:param es_version:
This option is used to specify the ECMAScript version to which the
code must adhere to.
"""
# Assume that when es_version is bool, it is intended for the
# deprecated use_es6_version
if es_version is True:
es_version = 6
elif es_version is False:
es_version = 5
if not jshint_config:
options = {"bitwise": not allow_bitwise_operators,
"freeze": not allow_prototype_overwrite,
"curly": force_braces,
"eqeqeq": not allow_type_coercion,
"futurehostile": not allow_future_identifiers,
"notypeof": not allow_typeof,
"forin": allow_filter_in_forin,
"funcscope": allow_funcscope,
"iterator": not allow_iterator_property,
"noarg": not allow_argument_caller_and_callee,
"nocomma": not allow_comma_operator,
"nonbsp": not allow_non_breaking_whitespace,
"nonew": not allow_constructor_functions,
"undef": True,
"singleGroups": not allow_grouping_operator,
"varstmt": not allow_var_statement,
"asi": allow_missing_semicolon,
"debug": allow_debugger,
"boss": allow_assignment_comparisions,
"evil": allow_eval,
"strict": javascript_strictness,
"plusplus": allow_increment,
"proto": allow_proto,
"scripturl": allow_scripturls,
"supernew": allow_singleton,
"validthis": allow_this_statements,
"withstmt": allow_with_statements,
"moz": use_mozilla_extension,
"noyield": allow_noyield,
"eqnull": allow_eqnull,
"lastsemic": allow_last_semicolon,
"loopfunc": allow_func_in_loop,
"expr": allow_expr_in_assignments,
"elision": use_es3_array,
"mootools": environment_mootools,
"couch": environment_couch,
"jasmine": environment_jasmine,
"jquery": environment_jquery,
"node": environment_node,
"qunit": environment_qunit,
"rhino": environment_rhino,
"shelljs": environment_shelljs,
"prototypejs": environment_prototypejs,
"yui": environment_yui,
"mocha": environment_mocha,
"module": environment_module,
"wsh": environment_wsh,
"worker": environment_worker,
"nonstandard": environment_nonstandard,
"browser": environment_browser,
"browserify": environment_browserify,
"devel": environment_devel,
"dojo": environment_dojo,
"typed": environment_typed,
"phantom": environment_phantom,
"maxerr": 99999,
"maxcomplexity": cyclomatic_complexity,
"maxdepth": max_depth,
"maxparams": max_parameters,
"maxstatements": max_statements,
"shadow": allow_variable_shadowing,
"unused": not allow_unused_variables,
"latedef": allow_latedef,
"esversion": es_version}
return json.dumps(options)
else:
return None
@staticmethod
def create_arguments(filename, file, config_file, jshint_config: str=""):
"""
:param jshint_config:
The location of the jshintrc config file. If this option is present
all the above options are not used. Instead the .jshintrc file is
used as the configuration file.
"""
args = ('--verbose', filename, '--config')
if jshint_config:
args += (jshint_config,)
else:
args += (config_file,)
return args
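# Illustrative sketch, not part of the original bear: assuming coala's usual
# generate_config(filename, file, ...) calling convention, a run without a
# user jshintrc serializes the option dict above to JSON, and
# create_arguments points '--config' at that generated file:
#
#     JSHintBear.create_arguments('app.js', None, '/tmp/generated.json')
#     # -> ('--verbose', 'app.js', '--config', '/tmp/generated.json')
#
# With jshint_config set, generate_config returns None and '--config'
# targets the user's file instead:
#
#     JSHintBear.create_arguments('app.js', None, None,
#                                 jshint_config='.jshintrc')
#     # -> ('--verbose', 'app.js', '--config', '.jshintrc')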
|
dosarudaniel/coala-bears
|
bears/js/JSHintBear.py
|
Python
|
agpl-3.0
| 20,188
|
[
"VisIt"
] |
f0e16fa4a4d653db26e4f0bbb009d47cf79e8930ad96ad4ec2acfd9fe5ae46f3
|
from setuptools import setup
setup(
name='metasort',
version='0.3.6.3',
packages=['metasort', ],
license='MIT',
url='http://github.com/phelimb/metasort',
description='Filter reads based on taxonomy assignment from One Codex.',
author='Phelim Bradley, Gil Goncalves',
author_email='wave@phel.im, lursty@gmail.com',
install_requires=["requests==2.5.3","Biopython","onecodex","Flask==0.10.1"],
entry_points={
'console_scripts': [
'metasort = metasort.cli:main',
]
}
)
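# Note added for illustration: the console_scripts entry point above means
# that after installation (e.g. ``pip install .``), setuptools places a
# ``metasort`` executable on PATH that dispatches to metasort.cli:main().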
|
Phelimb/metasort
|
setup.py
|
Python
|
mit
| 537
|
[
"Biopython"
] |
cfb1d7ff5cefc3258c8fdc5162c5dfd18bebafc3a38f2d9f75f0f074ba681a13
|
#!/usr/bin/env python
"""Shows how to view data created by `tvtk.tools.mlab` with
mayavi2.
"""
# Author: Prabhu Ramachandran <prabhu@aero.iitb.ac.in>
# Copyright (c) 2006-2007, Enthought Inc.
# License: BSD Style.
import numpy
from mayavi.scripts import mayavi2
from tvtk.tools import mlab
from mayavi.sources.vtk_data_source import VTKDataSource
from mayavi.filters.warp_scalar import WarpScalar
from mayavi.modules.outline import Outline
from mayavi.modules.surface import Surface
def make_data():
"""Make some test numpy data and create a TVTK data object from it
that we will visualize.
"""
def f(x, y):
"""Some test function.
"""
return numpy.sin(x*y)/(x*y)
x = numpy.arange(-7., 7.05, 0.1)
y = numpy.arange(-5., 5.05, 0.05)
s = mlab.SurfRegular(x, y, f)
return s.data
def add_data(tvtk_data):
"""Add a TVTK data object `tvtk_data` to the mayavi pipleine.
"""
d = VTKDataSource()
d.data = tvtk_data
mayavi.add_source(d)
def surf_regular():
"""Now visualize the data as done in mlab.
"""
w = WarpScalar()
mayavi.add_filter(w)
o = Outline()
s = Surface()
mayavi.add_module(o)
mayavi.add_module(s)
@mayavi2.standalone
def main():
mayavi.new_scene()
d = make_data()
add_data(d)
surf_regular()
if __name__ == '__main__':
main()
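# Note added for illustration: the @mayavi2.standalone decorator lets this
# example run as a plain script (``python surf_regular_mlab.py``); it starts
# the Mayavi2 application if necessary and injects the global ``mayavi``
# script interface used by add_data() and surf_regular() above.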
|
dmsurti/mayavi
|
examples/mayavi/advanced_visualization/surf_regular_mlab.py
|
Python
|
bsd-3-clause
| 1,366
|
[
"Mayavi"
] |
b668fffed00badfef65767d2529f3457f46e6f15a6bbbab18bef731dc095e862
|
#!/usr/bin/env python
import argparse
import logging
import sys
from BCBio import GFF
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqFeature import (
FeatureLocation,
SeqFeature
)
from Bio.SeqRecord import SeqRecord
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
def parse_xmfa(xmfa):
"""Simple XMFA parser until https://github.com/biopython/biopython/pull/544
"""
current_lcb = []
current_seq = {}
for line in xmfa.readlines():
if line.startswith('#'):
continue
if line.strip() == '=':
if 'id' in current_seq:
current_lcb.append(current_seq)
current_seq = {}
yield current_lcb
current_lcb = []
else:
line = line.strip()
if line.startswith('>'):
if 'id' in current_seq:
current_lcb.append(current_seq)
current_seq = {}
data = line.strip().split()
id, loc = data[1].split(':')
start, end = loc.split('-')
current_seq = {
'rid': '_'.join(data[1:]),
'id': id,
'start': int(start),
'end': int(end),
'strand': 1 if data[2] == '+' else -1,
'seq': ''
}
else:
current_seq['seq'] += line.strip()
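# For illustration, parse_xmfa() consumes progressiveMauve-style XMFA blocks
# such as this hypothetical one:
#
#   > 1:100-200 + contig_a.fa
#   ACGT-ACGT
#   > 2:300-400 - contig_b.fa
#   ACGTTACGT
#   =
#
# Each '=' line closes an LCB and yields the accumulated sequence dicts; for
# the first header, data[1] is '1:100-200' (id, start, end), data[2] is the
# strand, and rid becomes '1:100-200_+_contig_a.fa'.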
def _percent_identity(a, b):
"""Calculate % identity, ignoring gaps in the host sequence
"""
match = 0
mismatch = 0
for char_a, char_b in zip(list(a), list(b)):
if char_a == '-':
continue
if char_a == char_b:
match += 1
else:
mismatch += 1
if match + mismatch == 0:
return 0
return 100 * float(match) / (match + mismatch)
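# Worked examples (illustrative): gaps in the host sequence `a` are skipped,
# so they count as neither matches nor mismatches:
#
#   _percent_identity('ACGT', 'ACGA')  # -> 75.0  (3 matches, 1 mismatch)
#   _percent_identity('A-GT', 'AXGT')  # -> 100.0 (the '-' column is ignored)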
def _id_tn_dict(sequences):
"""Figure out sequence IDs
"""
label_convert = {}
if sequences is not None:
if len(sequences) == 1:
for i, record in enumerate(SeqIO.parse(sequences[0], 'fasta')):
label_convert[str(i + 1)] = record.id
else:
for i, sequence in enumerate(sequences):
for record in SeqIO.parse(sequence, 'fasta'):
label_convert[str(i + 1)] = record.id
continue
return label_convert
def convert_xmfa_to_gff3(xmfa_file, relative_to='1', sequences=None, window_size=1000):
label_convert = _id_tn_dict(sequences)
lcbs = parse_xmfa(xmfa_file)
records = [SeqRecord(Seq("A"), id=label_convert.get(relative_to, relative_to))]
for lcb in lcbs:
ids = [seq['id'] for seq in lcb]
# Doesn't match part of our sequence
if relative_to not in ids:
continue
# Skip sequences that are JUST our "relative_to" genome
if len(ids) == 1:
continue
parent = [seq for seq in lcb if seq['id'] == relative_to][0]
others = [seq for seq in lcb if seq['id'] != relative_to]
for other in others:
other['feature'] = SeqFeature(
FeatureLocation(parent['start'], parent['end'] + 1),
type="match", strand=parent['strand'],
qualifiers={
"source": "progressiveMauve",
"target": label_convert.get(other['id'], other['id']),
"ID": label_convert.get(other['id'], 'xmfa_' + other['rid'])
}
)
for i in range(0, len(lcb[0]['seq']), window_size):
block_seq = parent['seq'][i:i + window_size]
real_window_size = len(block_seq)
real_start = abs(parent['start']) - parent['seq'][0:i].count('-') + i
real_end = real_start + real_window_size - block_seq.count('-')
if (real_end - real_start) < 10:
continue
if parent['start'] < 0:
strand = -1
else:
strand = 1
for other in others:
pid = _percent_identity(block_seq, other['seq'][i:i + real_window_size])
# Ignore 0% identity sequences
if pid == 0:
continue
# Support for Biopython 1.68 and above, which removed sub_features
if not hasattr(other['feature'], "sub_features"):
other['feature'].sub_features = []
other['feature'].sub_features.append(
SeqFeature(
FeatureLocation(real_start, real_end),
type="match_part", strand=strand,
qualifiers={
"source": "progressiveMauve",
'score': pid
}
)
)
for other in others:
records[0].features.append(other['feature'])
return records
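# Note added for illustration: each returned record carries 'match' features
# whose 'match_part' children are attached via sub_features; BCBio.GFF.write
# (used below) serializes these as nested parent/child rows in the GFF3
# output.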
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert XMFA alignments to gff3', prog='xmfa2gff3')
parser.add_argument('xmfa_file', type=file, help='XMFA File')
parser.add_argument('--window_size', type=int, help='Window size for analysis', default=1000)
parser.add_argument('--relative_to', type=str, help='Index of the parent sequence in the MSA', default='1')
parser.add_argument('--sequences', type=file, nargs='+',
help='Fasta files (in same order) passed to parent for reconstructing proper IDs')
parser.add_argument('--version', action='version', version='%(prog)s 1.0')
args = parser.parse_args()
result = convert_xmfa_to_gff3(**vars(args))
GFF.write(result, sys.stdout)
|
dpryan79/tools-iuc
|
tools/progressivemauve/xmfa2gff3.py
|
Python
|
mit
| 5,764
|
[
"Biopython"
] |
6141168b8c591383578a6e859cbcbec35a5c60fb3984f769e136568dfb5420dc
|
# Copyright 2011 Rackspace
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import fixtures
import mock
from mox3 import mox
import netaddr
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import importutils
from oslo_utils import netutils
import six
import testtools
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova import ipv6
from nova.network import floating_ips
from nova.network import linux_net
from nova.network import manager as network_manager
from nova.network import model as net_model
from nova import objects
from nova.objects import network as network_obj
from nova.objects import virtual_interface as vif_obj
from nova import quota
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_ldap
from nova.tests.unit import fake_network
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_fixed_ip
from nova.tests.unit.objects import test_floating_ip
from nova.tests.unit.objects import test_network
from nova.tests.unit.objects import test_service
from nova.tests.unit import utils as test_utils
from nova import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
HOST = "testhost"
FAKEUUID = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
fake_inst = fake_instance.fake_db_instance
networks = [{'id': 0,
'uuid': FAKEUUID,
'label': 'test0',
'injected': False,
'multi_host': False,
'cidr': '192.168.0.0/24',
'cidr_v6': '2001:db8::/64',
'gateway_v6': '2001:db8::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa0',
'bridge_interface': 'fake_fa0',
'gateway': '192.168.0.1',
'dhcp_server': '192.168.0.1',
'broadcast': '192.168.0.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.0.2',
'vpn_public_port': '22',
'vpn_private_address': '10.0.0.2'},
{'id': 1,
'uuid': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'label': 'test1',
'injected': False,
'multi_host': False,
'cidr': '192.168.1.0/24',
'cidr_v6': '2001:db9::/64',
'gateway_v6': '2001:db9::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa1',
'bridge_interface': 'fake_fa1',
'gateway': '192.168.1.1',
'dhcp_server': '192.168.1.1',
'broadcast': '192.168.1.255',
'dns1': '192.168.0.1',
'dns2': '192.168.0.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.1.2',
'vpn_public_port': '22',
'vpn_private_address': '10.0.0.2'}]
fixed_ips = [{'id': 0,
'network_id': 0,
'address': '192.168.0.100',
'instance_uuid': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []},
{'id': 0,
'network_id': 1,
'address': '192.168.1.100',
'instance_uuid': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []},
{'id': 0,
'network_id': 1,
'address': '2001:db9:0:1::10',
'instance_uuid': 0,
'allocated': False,
'virtual_interface_id': 0,
'floating_ips': []}]
flavor = {'id': 0,
'rxtx_cap': 3}
floating_ip_fields = {'id': 0,
'address': '192.168.10.100',
'pool': 'nova',
'interface': 'eth0',
'fixed_ip_id': 0,
'project_id': None,
'auto_assigned': False}
vifs = [{'id': 0,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:00',
'uuid': '00000000-0000-0000-0000-0000000000000000',
'network_id': 0,
'instance_uuid': 0},
{'id': 1,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:01',
'uuid': '00000000-0000-0000-0000-0000000000000001',
'network_id': 1,
'instance_uuid': 0},
{'id': 2,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'address': 'DE:AD:BE:EF:00:02',
'uuid': '00000000-0000-0000-0000-0000000000000002',
'network_id': 2,
'instance_uuid': 0}]
class FlatNetworkTestCase(test.TestCase):
REQUIRES_LOCKING = True
def setUp(self):
super(FlatNetworkTestCase, self).setUp()
self.tempdir = self.useFixture(fixtures.TempDir()).path
self.flags(log_dir=self.tempdir)
self.flags(use_local=True, group='conductor')
self.network = network_manager.FlatManager(host=HOST)
self.network.instance_dns_domain = ''
self.network.db = db
self.context = context.RequestContext('testuser', 'testproject',
is_admin=False)
@testtools.skipIf(test_utils.is_osx(),
'IPv6 pretty-printing broken on OSX, see bug 1409135')
def test_get_instance_nw_info_fake(self):
fake_get_instance_nw_info = fake_network.fake_get_instance_nw_info
nw_info = fake_get_instance_nw_info(self.stubs, 0, 2)
self.assertFalse(nw_info)
nw_info = fake_get_instance_nw_info(self.stubs, 1, 2)
for i, vif in enumerate(nw_info):
nid = i + 1
check = {'bridge': 'fake_br%d' % nid,
'cidr': '192.168.%s.0/24' % nid,
'cidr_v6': '2001:db8:0:%x::/64' % nid,
'id': '00000000-0000-0000-0000-00000000000000%02d' % nid,
'multi_host': False,
'injected': False,
'bridge_interface': None,
'vlan': None,
'broadcast': '192.168.%d.255' % nid,
'dhcp_server': '192.168.1.1',
'dns': ['192.168.%d.3' % nid, '192.168.%d.4' % nid],
'gateway': '192.168.%d.1' % nid,
'gateway_v6': '2001:db8:0:1::1',
'label': 'test%d' % nid,
'mac': 'DE:AD:BE:EF:00:%02x' % nid,
'rxtx_cap': 30,
'vif_type': net_model.VIF_TYPE_BRIDGE,
'vif_devname': None,
'vif_uuid':
'00000000-0000-0000-0000-00000000000000%02d' % nid,
'ovs_interfaceid': None,
'qbh_params': None,
'qbg_params': None,
'should_create_vlan': False,
'should_create_bridge': False,
'ip': '192.168.%d.%03d' % (nid, nid + 99),
'ip_v6': '2001:db8:0:1:dcad:beff:feef:%x' % nid,
'netmask': '255.255.255.0',
'netmask_v6': 64,
'physical_network': None,
}
network = vif['network']
net_v4 = vif['network']['subnets'][0]
net_v6 = vif['network']['subnets'][1]
vif_dict = dict(bridge=network['bridge'],
cidr=net_v4['cidr'],
cidr_v6=net_v6['cidr'],
id=vif['id'],
multi_host=network.get_meta('multi_host', False),
injected=network.get_meta('injected', False),
bridge_interface=
network.get_meta('bridge_interface'),
vlan=network.get_meta('vlan'),
broadcast=str(net_v4.as_netaddr().broadcast),
dhcp_server=network.get_meta('dhcp_server',
net_v4['gateway']['address']),
dns=[ip['address'] for ip in net_v4['dns']],
gateway=net_v4['gateway']['address'],
gateway_v6=net_v6['gateway']['address'],
label=network['label'],
mac=vif['address'],
rxtx_cap=vif.get_meta('rxtx_cap'),
vif_type=vif['type'],
vif_devname=vif.get('devname'),
vif_uuid=vif['id'],
ovs_interfaceid=vif.get('ovs_interfaceid'),
qbh_params=vif.get('qbh_params'),
qbg_params=vif.get('qbg_params'),
should_create_vlan=
network.get_meta('should_create_vlan', False),
should_create_bridge=
network.get_meta('should_create_bridge',
False),
ip=net_v4['ips'][i]['address'],
ip_v6=net_v6['ips'][i]['address'],
netmask=str(net_v4.as_netaddr().netmask),
netmask_v6=net_v6.as_netaddr()._prefixlen,
physical_network=
network.get_meta('physical_network', None))
self.assertThat(vif_dict, matchers.DictMatches(check))
def test_validate_networks(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'192.168.1.100'),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'192.168.0.100')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[1])
ip['network'] = dict(test_network.fake_network,
**networks[1])
ip['instance_uuid'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(ip)
ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[0])
ip['network'] = dict(test_network.fake_network,
**networks[0])
ip['instance_uuid'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(ip)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_valid_fixed_ipv6(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'2001:db9:0:1::10')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **networks[1])])
ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[2])
ip['network'] = dict(test_network.fake_network,
**networks[1])
ip['instance_uuid'] = None
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(ip)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_reserved(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
nets = self.network.create_networks(context_admin, 'fake',
'192.168.0.0/24', False, 1,
256, None, None, None, None, None)
self.assertEqual(1, len(nets))
network = nets[0]
self.assertEqual(4, db.network_count_reserved_ips(context_admin,
network['id']))
def test_validate_reserved_start_end(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
nets = self.network.create_networks(context_admin, 'fake',
'192.168.0.0/24', False, 1,
256, dhcp_server='192.168.0.11',
allowed_start='192.168.0.10',
allowed_end='192.168.0.245')
self.assertEqual(1, len(nets))
network = nets[0]
# gateway defaults to the start of the allowed range (allowed_start)
self.assertEqual('192.168.0.10', network['gateway'])
# vpn_server doesn't conflict with dhcp_start
self.assertEqual('192.168.0.12', network['vpn_private_address'])
# dhcp_start doesn't conflict with dhcp_server
self.assertEqual('192.168.0.13', network['dhcp_start'])
# NOTE(vish): 10 from the beginning, 10 from the end, and
# 1 for the gateway, 1 for the dhcp server,
# 1 for the vpn server
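# i.e. 10 (start) + 10 (end) + 1 (gateway) + 1 (dhcp) + 1 (vpn) = 23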
self.assertEqual(23, db.network_count_reserved_ips(context_admin,
network['id']))
def test_validate_reserved_start_out_of_range(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.AddressOutOfRange,
self.network.create_networks,
context_admin, 'fake', '192.168.0.0/24', False,
1, 256, allowed_start='192.168.1.10')
def test_validate_reserved_end_invalid(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.InvalidAddress,
self.network.create_networks,
context_admin, 'fake', '192.168.0.0/24', False,
1, 256, allowed_end='invalid')
def test_validate_cidr_invalid(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.InvalidCidr,
self.network.create_networks,
context_admin, 'fake', 'invalid', False,
1, 256)
def test_validate_non_int_size(self):
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.InvalidIntValue,
self.network.create_networks,
context_admin, 'fake', '192.168.0.0/24', False,
1, 'invalid')
def test_validate_networks_none_requested_networks(self):
self.network.validate_networks(self.context, None)
def test_validate_networks_empty_requested_networks(self):
requested_networks = []
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_invalid_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'192.168.1.100.1'),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'192.168.0.100.1')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks, self.context,
requested_networks)
def test_validate_networks_empty_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
''),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks,
self.context, requested_networks)
def test_validate_networks_none_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
None),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
None)]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
@mock.patch('nova.objects.fixed_ip.FixedIPList.get_by_instance_uuid')
def test_get_instance_nw_info(self, get):
def make_ip(index):
vif = objects.VirtualInterface(uuid=index, address=index)
network = objects.Network(uuid=index,
bridge=index,
label=index,
project_id=index,
injected=False,
netmask='255.255.255.0',
dns1=None,
dns2=None,
cidr_v6=None,
gateway_v6=None,
broadcast_v6=None,
netmask_v6=None,
rxtx_base=None,
gateway='192.168.%s.1' % index,
dhcp_server='192.168.%s.1' % index,
broadcast='192.168.%s.255' % index,
cidr='192.168.%s.0/24' % index)
return objects.FixedIP(virtual_interface=vif,
network=network,
floating_ips=objects.FloatingIPList(),
address='192.168.%s.2' % index)
objs = [make_ip(index) for index in ('3', '1', '2')]
get.return_value = objects.FixedIPList(objects=objs)
nw_info = self.network.get_instance_nw_info(self.context, None,
None, None)
for i, vif in enumerate(nw_info):
self.assertEqual(vif['network']['bridge'], objs[i].network.bridge)
@mock.patch.object(objects.Network, 'get_by_id')
def test_add_fixed_ip_instance_using_id_without_vpn(self, get_by_id):
# Allocate a fixed ip from a network and assign it to an instance.
# Network is given by network id.
network_id = networks[0]['id']
with mock.patch.object(self.network,
'allocate_fixed_ip') as allocate_fixed_ip:
self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
network_id)
# Assert that we fetched the network by id, not uuid
get_by_id.assert_called_once_with(self.context,
network_id, project_only='allow_none')
# Assert that we called allocate_fixed_ip for the given network and
# instance. We should not have requested a specific address from the
# network.
allocate_fixed_ip.assert_called_once_with(self.context, FAKEUUID,
get_by_id.return_value,
address=None)
@mock.patch.object(objects.Network, 'get_by_uuid')
def test_add_fixed_ip_instance_using_uuid_without_vpn(self, get_by_uuid):
# Allocate a fixed ip from a network and assign it to an instance.
# Network is given by network uuid.
network_uuid = networks[0]['uuid']
with mock.patch.object(self.network,
'allocate_fixed_ip') as allocate_fixed_ip,\
mock.patch.object(self.context, 'elevated',
return_value=mock.sentinel.elevated):
self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
network_uuid)
# Assert that we fetched the network by uuid, not id, and with elevated
# context
get_by_uuid.assert_called_once_with(mock.sentinel.elevated,
network_uuid)
# Assert that we called allocate_fixed_ip for the given network and
# instance. We should not have requested a specific address from the
# network.
allocate_fixed_ip.assert_called_once_with(self.context,
FAKEUUID,
get_by_uuid.return_value,
address=None)
def test_mini_dns_driver(self):
zone1 = "example.org"
zone2 = "example.com"
driver = self.network.instance_dns_manager
driver.create_entry("hostone", "10.0.0.1", "A", zone1)
driver.create_entry("hosttwo", "10.0.0.2", "A", zone1)
driver.create_entry("hostthree", "10.0.0.3", "A", zone1)
driver.create_entry("hostfour", "10.0.0.4", "A", zone1)
driver.create_entry("hostfive", "10.0.0.5", "A", zone2)
driver.delete_entry("hostone", zone1)
driver.modify_address("hostfour", "10.0.0.1", zone1)
driver.modify_address("hostthree", "10.0.0.1", zone1)
names = driver.get_entries_by_address("10.0.0.1", zone1)
self.assertEqual(len(names), 2)
self.assertIn('hostthree', names)
self.assertIn('hostfour', names)
names = driver.get_entries_by_address("10.0.0.5", zone2)
self.assertEqual(len(names), 1)
self.assertIn('hostfive', names)
addresses = driver.get_entries_by_name("hosttwo", zone1)
self.assertEqual(len(addresses), 1)
self.assertIn('10.0.0.2', addresses)
self.assertRaises(exception.InvalidInput,
driver.create_entry,
"hostname",
"10.10.10.10",
"invalidtype",
zone1)
def test_mini_dns_driver_with_mixed_case(self):
zone1 = "example.org"
driver = self.network.instance_dns_manager
driver.create_entry("HostTen", "10.0.0.10", "A", zone1)
addresses = driver.get_entries_by_address("10.0.0.10", zone1)
self.assertEqual(len(addresses), 1)
for n in addresses:
driver.delete_entry(n, zone1)
addresses = driver.get_entries_by_address("10.0.0.10", zone1)
self.assertEqual(len(addresses), 0)
def test_allocate_fixed_ip_instance_dns(self):
# Test DNS entries are created when allocating a fixed IP.
# Allocate a fixed IP to an instance. Ensure that dns entries have been
# created for the instance's name and uuid.
network = network_obj.Network._from_db_object(
self.context, network_obj.Network(), test_network.fake_network)
network.save = mock.MagicMock()
# Create a minimal instance object
instance_params = {
'display_name': HOST,
'security_groups': []
}
instance = fake_instance.fake_instance_obj(
context.RequestContext('ignore', 'ignore'),
expected_attrs=instance_params.keys(), **instance_params)
instance.save = mock.MagicMock()
# We don't specify a specific address, so we should get a FixedIP
# automatically allocated from the pool. Fix its value here.
fip = objects.FixedIP(address='192.168.0.101')
fip.save = mock.MagicMock()
with mock.patch.object(objects.Instance, 'get_by_uuid',
return_value=instance),\
mock.patch.object(objects.FixedIP, 'associate_pool',
return_value=fip):
self.network.allocate_fixed_ip(self.context, FAKEUUID, network)
instance_manager = self.network.instance_dns_manager
expected_addresses = ['192.168.0.101']
# Assert that we have a correct entry by instance display name
addresses = instance_manager.get_entries_by_name(HOST,
self.network.instance_dns_domain)
self.assertEqual(expected_addresses, addresses)
# Assert that we have a correct entry by instance uuid
addresses = instance_manager.get_entries_by_name(FAKEUUID,
self.network.instance_dns_domain)
self.assertEqual(expected_addresses, addresses)
def test_allocate_floating_ip(self):
self.assertIsNone(self.network.allocate_floating_ip(self.context,
1, None))
def test_deallocate_floating_ip(self):
self.assertIsNone(self.network.deallocate_floating_ip(self.context,
1, None))
def test_associate_floating_ip(self):
self.assertIsNone(self.network.associate_floating_ip(self.context,
None, None))
def test_disassociate_floating_ip(self):
self.assertIsNone(self.network.disassociate_floating_ip(self.context,
None, None))
def test_get_networks_by_uuids_ordering(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa']
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
res = self.network._get_networks_by_uuids(self.context,
requested_networks)
self.assertEqual(res[0]['id'], 1)
self.assertEqual(res[1]['id'], 0)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.quotas.Quotas.reserve')
@mock.patch('nova.objects.quotas.ids_from_instance')
def test_allocate_calculates_quota_auth(self, util_method, reserve,
get_by_uuid):
inst = objects.Instance()
inst['uuid'] = 'nosuch'
get_by_uuid.return_value = inst
usages = {'fixed_ips': {'in_use': 10, 'reserved': 1}}
reserve.side_effect = exception.OverQuota(overs='testing',
quotas={'fixed_ips': 10},
usages=usages)
util_method.return_value = ('foo', 'bar')
self.assertRaises(exception.FixedIpLimitExceeded,
self.network.allocate_fixed_ip,
self.context, 123, {'uuid': 'nosuch'})
util_method.assert_called_once_with(self.context, inst)
@mock.patch('nova.objects.fixed_ip.FixedIP.get_by_address')
@mock.patch('nova.objects.quotas.Quotas.reserve')
@mock.patch('nova.objects.quotas.ids_from_instance')
def test_deallocate_calculates_quota_auth(self, util_method, reserve,
get_by_address):
inst = objects.Instance(uuid='fake-uuid')
fip = objects.FixedIP(instance_uuid='fake-uuid',
virtual_interface_id=1)
get_by_address.return_value = fip
util_method.return_value = ('foo', 'bar')
# This will fail right after the reserve call when it tries
# to look up the fake instance we created above
self.assertRaises(exception.InstanceNotFound,
self.network.deallocate_fixed_ip,
self.context, '1.2.3.4', instance=inst)
util_method.assert_called_once_with(self.context, inst)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate')
def test_allocate_fixed_ip_passes_string_address(self, mock_associate,
mock_get):
mock_associate.side_effect = test.TestingException
instance = objects.Instance(context=self.context)
instance.create()
mock_get.return_value = instance
self.assertRaises(test.TestingException,
self.network.allocate_fixed_ip,
self.context, instance.uuid,
{'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
address=netaddr.IPAddress('1.2.3.4'))
mock_associate.assert_called_once_with(self.context,
'1.2.3.4',
instance.uuid,
1)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.virtual_interface.VirtualInterface'
'.get_by_instance_and_network')
@mock.patch('nova.objects.fixed_ip.FixedIP.disassociate')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate')
@mock.patch('nova.objects.fixed_ip.FixedIP.save')
def test_allocate_fixed_ip_cleanup(self,
mock_fixedip_save,
mock_fixedip_associate,
mock_fixedip_disassociate,
mock_vif_get,
mock_instance_get):
address = netaddr.IPAddress('1.2.3.4')
fip = objects.FixedIP(instance_uuid='fake-uuid',
address=address,
virtual_interface_id=1)
mock_fixedip_associate.return_value = fip
instance = objects.Instance(context=self.context)
instance.create()
mock_instance_get.return_value = instance
mock_vif_get.return_value = vif_obj.VirtualInterface(
instance_uuid='fake-uuid', id=1)
with contextlib.nested(
mock.patch.object(self.network, '_setup_network_on_host'),
mock.patch.object(self.network, 'instance_dns_manager'),
mock.patch.object(self.network,
'_do_trigger_security_group_members_refresh_for_instance')
) as (mock_setup_network, mock_dns_manager, mock_ignored):
mock_setup_network.side_effect = test.TestingException
self.assertRaises(test.TestingException,
self.network.allocate_fixed_ip,
self.context, instance.uuid,
{'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
address=address)
mock_dns_manager.delete_entry.assert_has_calls([
mock.call(instance.display_name, ''),
mock.call(instance.uuid, '')
])
mock_fixedip_disassociate.assert_called_once_with(self.context)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.virtual_interface.VirtualInterface'
'.get_by_instance_and_network')
@mock.patch('nova.objects.fixed_ip.FixedIP.disassociate')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate_pool')
@mock.patch('nova.objects.fixed_ip.FixedIP.save')
@mock.patch('nova.network.manager.NetworkManager._add_virtual_interface')
def test_allocate_fixed_ip_create_new_vifs(self,
mock_add,
mock_fixedip_save,
mock_fixedip_associate,
mock_fixedip_disassociate,
mock_vif_get,
mock_instance_get):
address = netaddr.IPAddress('1.2.3.4')
fip = objects.FixedIP(instance_uuid='fake-uuid',
address=address,
virtual_interface_id=1)
net = {'cidr': '24', 'id': 1, 'uuid': 'nosuch'}
instance = objects.Instance(context=self.context)
instance.create()
vif = objects.VirtualInterface(context,
id=1000,
address='00:00:00:00:00:00',
instance_uuid=instance.uuid,
network_id=net['id'],
uuid='nosuch')
mock_fixedip_associate.return_value = fip
mock_add.return_value = vif
mock_instance_get.return_value = instance
mock_vif_get.return_value = None
with contextlib.nested(
mock.patch.object(self.network, '_setup_network_on_host'),
mock.patch.object(self.network, 'instance_dns_manager'),
mock.patch.object(self.network,
'_do_trigger_security_group_members_refresh_for_instance')
) as (mock_setup_network, mock_dns_manager, mock_ignored):
self.network.allocate_fixed_ip(self.context, instance['uuid'],
net)
mock_add.assert_called_once_with(self.context, instance['uuid'],
net['id'])
self.assertEqual(fip.virtual_interface_id, vif.id)
class FlatDHCPNetworkTestCase(test.TestCase):
REQUIRES_LOCKING = True
def setUp(self):
super(FlatDHCPNetworkTestCase, self).setUp()
self.useFixture(test.SampleNetworks())
self.flags(use_local=True, group='conductor')
self.network = network_manager.FlatDHCPManager(host=HOST)
self.network.db = db
self.context = context.RequestContext('testuser', 'testproject',
is_admin=False)
self.context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
@mock.patch('nova.objects.fixed_ip.FixedIP.get_by_id')
@mock.patch('nova.objects.floating_ip.FloatingIPList.get_by_host')
@mock.patch('nova.network.linux_net.iptables_manager._apply')
def test_init_host_iptables_defer_apply(self, iptable_apply,
floating_get_by_host,
fixed_get_by_id):
def get_by_id(context, fixed_ip_id, **kwargs):
net = objects.Network(bridge='testbridge',
cidr='192.168.1.0/24')
if fixed_ip_id == 1:
return objects.FixedIP(address='192.168.1.4',
network=net)
elif fixed_ip_id == 2:
return objects.FixedIP(address='192.168.1.5',
network=net)
def fake_apply():
fake_apply.count += 1
fake_apply.count = 0
ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
float1 = objects.FloatingIP(address='1.2.3.4', fixed_ip_id=1)
float2 = objects.FloatingIP(address='1.2.3.5', fixed_ip_id=2)
float1._context = ctxt
float2._context = ctxt
iptable_apply.side_effect = fake_apply
floating_get_by_host.return_value = [float1, float2]
fixed_get_by_id.side_effect = get_by_id
self.network.init_host()
self.assertEqual(1, fake_apply.count)
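# A single _apply call shows that init_host batches all floating-ip
# iptables changes under one deferred apply instead of applying once
# per address.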
class VlanNetworkTestCase(test.TestCase):
REQUIRES_LOCKING = True
def setUp(self):
super(VlanNetworkTestCase, self).setUp()
self.useFixture(test.SampleNetworks())
self.flags(use_local=True, group='conductor')
self.network = network_manager.VlanManager(host=HOST)
self.network.db = db
self.context = context.RequestContext('testuser', 'testproject',
is_admin=False)
self.context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
def test_quota_driver_type(self):
self.assertEqual(objects.QuotasNoOp,
self.network.quotas_cls)
def test_vpn_allocate_fixed_ip(self):
self.mox.StubOutWithMock(db, 'fixed_ip_associate')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
fixed = dict(test_fixed_ip.fake_fixed_ip,
address='192.168.0.1')
db.fixed_ip_associate(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg(),
network_id=mox.IgnoreArg(),
reserved=True).AndReturn(fixed)
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg(), use_slave=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
uuid=FAKEUUID))
self.mox.ReplayAll()
network = objects.Network._from_db_object(
self.context, objects.Network(),
dict(test_network.fake_network, **networks[0]))
network.vpn_private_address = '192.168.0.2'
self.network.allocate_fixed_ip(self.context, FAKEUUID, network,
vpn=True)
def test_vpn_allocate_fixed_ip_no_network_id(self):
network = dict(networks[0])
network['vpn_private_address'] = '192.168.0.2'
network['id'] = None
instance = db.instance_create(self.context, {})
self.assertRaises(exception.FixedIpNotFoundForNetwork,
self.network.allocate_fixed_ip,
self.context_admin,
instance['uuid'],
network,
vpn=True)
def test_allocate_fixed_ip(self):
self.stubs.Set(self.network,
'_do_trigger_security_group_members_refresh_for_instance',
lambda *a, **kw: None)
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
fixed = dict(test_fixed_ip.fake_fixed_ip,
address='192.168.0.1')
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
instance_uuid=mox.IgnoreArg(),
host=None).AndReturn(fixed)
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg(), use_slave=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
uuid=FAKEUUID))
self.mox.ReplayAll()
network = objects.Network._from_db_object(
self.context, objects.Network(),
dict(test_network.fake_network, **networks[0]))
network.vpn_private_address = '192.168.0.2'
self.network.allocate_fixed_ip(self.context, FAKEUUID, network)
@mock.patch('nova.network.manager.VlanManager._setup_network_on_host')
@mock.patch('nova.network.manager.VlanManager.'
'_validate_instance_zone_for_dns_domain')
@mock.patch('nova.network.manager.VlanManager.'
'_do_trigger_security_group_members_refresh_for_instance')
@mock.patch('nova.network.manager.VlanManager._add_virtual_interface')
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate')
@mock.patch('nova.objects.fixed_ip.FixedIP.save')
@mock.patch('nova.objects.VirtualInterface.get_by_instance_and_network')
def test_allocate_fixed_ip_return_none(self, mock_get, mock_save,
mock_associate, mock_get_uuid, mock_add, mock_trigger,
mock_validate, mock_setup):
net = {'cidr': '24', 'id': 1, 'uuid': 'nosuch'}
fip = objects.FixedIP(instance_uuid='fake-uuid',
address=netaddr.IPAddress('1.2.3.4'),
virtual_interface_id=1)
instance = objects.Instance(context=self.context)
instance.create()
vif = objects.VirtualInterface(self.context,
id=1000,
address='00:00:00:00:00:00',
instance_uuid=instance.uuid,
network_id=net['id'],
uuid='nosuch')
mock_associate.return_value = fip
mock_add.return_value = vif
mock_get.return_value = None
mock_get_uuid.return_value = instance
mock_validate.return_value = False
self.network.allocate_fixed_ip(self.context_admin, instance.uuid, net)
mock_add.assert_called_once_with(self.context_admin, instance.uuid,
net['id'])
mock_save.assert_called_once_with()
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate')
def test_allocate_fixed_ip_passes_string_address(self, mock_associate,
mock_get):
mock_associate.side_effect = test.TestingException
instance = objects.Instance(context=self.context)
instance.create()
mock_get.return_value = instance
self.assertRaises(test.TestingException,
self.network.allocate_fixed_ip,
self.context, instance.uuid,
{'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
address=netaddr.IPAddress('1.2.3.4'))
mock_associate.assert_called_once_with(self.context,
'1.2.3.4',
instance.uuid,
1)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate')
def test_allocate_fixed_ip_passes_string_address_vpn(self, mock_associate,
mock_get):
mock_associate.side_effect = test.TestingException
instance = objects.Instance(context=self.context)
instance.create()
mock_get.return_value = instance
self.assertRaises(test.TestingException,
self.network.allocate_fixed_ip,
self.context, instance.uuid,
{'cidr': '24', 'id': 1, 'uuid': 'nosuch',
'vpn_private_address': netaddr.IPAddress('1.2.3.4')
}, vpn=1)
mock_associate.assert_called_once_with(self.context,
'1.2.3.4',
instance.uuid,
1, reserved=True)
def test_create_networks_too_big(self):
self.assertRaises(ValueError, self.network.create_networks, None,
num_networks=4094, vlan_start=1)
def test_create_networks_too_many(self):
self.assertRaises(ValueError, self.network.create_networks, None,
num_networks=100, vlan_start=1,
cidr='192.168.0.1/24', network_size=100)
def test_duplicate_vlan_raises(self):
# VLAN 100 is already used and we force the network to be created
# in that vlan (vlan=100).
self.assertRaises(exception.DuplicateVlan,
self.network.create_networks,
self.context_admin, label="fake", num_networks=1,
vlan=100, cidr='192.168.0.1/24', network_size=100)
def test_vlan_start(self):
# VLAN 100 and 101 are used, so this network should be created in 102
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=1,
vlan_start=100, cidr='192.168.3.1/24',
network_size=100)
self.assertEqual(networks[0]["vlan"], 102)
def test_vlan_start_multiple(self):
# VLAN 100 and 101 are used, so these networks should be created in 102
# and 103
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=2,
vlan_start=100, cidr='192.168.3.1/24',
network_size=100)
self.assertEqual(networks[0]["vlan"], 102)
self.assertEqual(networks[1]["vlan"], 103)
def test_vlan_start_used(self):
# VLAN 100 and 101 are used, but vlan_start=99.
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=1,
vlan_start=99, cidr='192.168.3.1/24',
network_size=100)
self.assertEqual(networks[0]["vlan"], 102)
def test_vlan_parameter(self):
# vlan parameter cannot be greater than 4094
exc = self.assertRaises(ValueError,
self.network.create_networks,
self.context_admin, label="fake",
num_networks=1,
vlan=4095, cidr='192.168.0.1/24')
error_msg = 'The vlan number cannot be greater than 4094'
self.assertIn(error_msg, six.text_type(exc))
# vlan parameter cannot be less than 1
exc = self.assertRaises(ValueError,
self.network.create_networks,
self.context_admin, label="fake",
num_networks=1,
vlan=0, cidr='192.168.0.1/24')
error_msg = 'The vlan number cannot be less than 1'
self.assertIn(error_msg, six.text_type(exc))
def test_vlan_be_integer(self):
# vlan must be an integer
exc = self.assertRaises(ValueError,
self.network.create_networks,
self.context_admin, label="fake",
num_networks=1,
vlan='fake', cidr='192.168.0.1/24')
error_msg = 'vlan must be an integer'
self.assertIn(error_msg, six.text_type(exc))
def test_vlan_multiple_without_dhcp_server(self):
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=2,
vlan_start=100, cidr='192.168.3.1/24',
network_size=100)
self.assertEqual(networks[0]["dhcp_server"], "192.168.3.1")
self.assertEqual(networks[1]["dhcp_server"], "192.168.3.129")
def test_vlan_multiple_with_dhcp_server(self):
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=2,
vlan_start=100, cidr='192.168.3.1/24',
network_size=100, dhcp_server='192.168.3.1')
self.assertEqual(networks[0]["dhcp_server"], "192.168.3.1")
self.assertEqual(networks[1]["dhcp_server"], "192.168.3.1")
@mock.patch('nova.db.network_get')
def test_validate_networks(self, net_get):
def network_get(_context, network_id, project_only='allow_none'):
return dict(test_network.fake_network, **networks[network_id])
net_get.side_effect = network_get
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
self.mox.StubOutWithMock(db, "fixed_ip_get_by_address")
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'192.168.1.100'),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'192.168.0.100')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
db_fixed1 = dict(test_fixed_ip.fake_fixed_ip,
network_id=networks[1]['id'],
network=dict(test_network.fake_network,
**networks[1]),
instance_uuid=None)
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(db_fixed1)
db_fixed2 = dict(test_fixed_ip.fake_fixed_ip,
network_id=networks[0]['id'],
network=dict(test_network.fake_network,
**networks[0]),
instance_uuid=None)
db.fixed_ip_get_by_address(mox.IgnoreArg(),
mox.IgnoreArg(),
columns_to_join=mox.IgnoreArg()
).AndReturn(db_fixed2)
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_none_requested_networks(self):
self.network.validate_networks(self.context, None)
def test_validate_networks_empty_requested_networks(self):
requested_networks = []
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_validate_networks_invalid_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'192.168.1.100.1'),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'192.168.0.100.1')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks, self.context,
requested_networks)
def test_validate_networks_empty_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', ''),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '')]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
self.assertRaises(exception.FixedIpInvalid,
self.network.validate_networks,
self.context, requested_networks)
def test_validate_networks_none_fixed_ip(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', None),
('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)]
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
self.network.validate_networks(self.context, requested_networks)
def test_floating_ip_owned_by_project(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
# raises because floating_ip project_id is None
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=None)
self.assertRaises(exception.Forbidden,
self.network._floating_ip_owned_by_project,
ctxt,
floating_ip)
# raises because floating_ip project_id is not equal to ctxt project_id
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=ctxt.project_id + '1')
self.assertRaises(exception.Forbidden,
self.network._floating_ip_owned_by_project,
ctxt,
floating_ip)
# does not raise (floating ip is owned by ctxt project)
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=ctxt.project_id)
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
ctxt = context.RequestContext(None, None,
is_admin=True)
# does not raise (ctxt is admin)
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id=None)
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
# does not raise (ctxt is admin)
floating_ip = objects.FloatingIP(address='10.0.0.1',
project_id='testproject')
self.network._floating_ip_owned_by_project(ctxt, floating_ip)
def test_allocate_floating_ip(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
self.stubs.Set(self.network, '_floating_ip_pool_exists',
lambda _x, _y: True)
def fake_allocate_address(*args, **kwargs):
return {'address': '10.0.0.1', 'project_id': ctxt.project_id}
self.stubs.Set(self.network.db, 'floating_ip_allocate_address',
fake_allocate_address)
self.network.allocate_floating_ip(ctxt, ctxt.project_id)
@mock.patch('nova.quota.QUOTAS.reserve')
@mock.patch('nova.quota.QUOTAS.commit')
def test_deallocate_floating_ip(self, mock_commit, mock_reserve):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip)
def fake2(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1', fixed_ip_id=1)
def fake3(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1', fixed_ip_id=None,
project_id=ctxt.project_id)
self.stubs.Set(self.network.db, 'floating_ip_deallocate', fake1)
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
# this time should raise because floating ip is associated to fixed_ip
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
self.assertRaises(exception.FloatingIpAssociated,
self.network.deallocate_floating_ip,
ctxt,
mox.IgnoreArg())
mock_reserve.return_value = 'reserve'
# this time should not raise
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
self.network.deallocate_floating_ip(ctxt, ctxt.project_id)
mock_commit.assert_called_once_with(ctxt, 'reserve',
project_id='testproject')
@mock.patch('nova.db.fixed_ip_get')
def test_associate_floating_ip(self, fixed_get):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
address='10.0.0.1',
network=test_network.fake_network)
# floating ip that's already associated
def fake2(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1',
pool='nova',
interface='eth0',
fixed_ip_id=1)
# floating ip that isn't associated
def fake3(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1',
pool='nova',
interface='eth0',
fixed_ip_id=None)
# fixed ip with remote host
def fake4(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
address='10.0.0.1',
pool='nova',
instance_uuid=FAKEUUID,
interface='eth0',
network_id=123)
def fake4_network(*args, **kwargs):
return dict(test_network.fake_network,
multi_host=False, host='jibberjabber')
# fixed ip with local host
def fake5(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
address='10.0.0.1',
pool='nova',
instance_uuid=FAKEUUID,
interface='eth0',
network_id=1234)
def fake5_network(*args, **kwargs):
return dict(test_network.fake_network,
multi_host=False, host='testhost')
def fake6(ctxt, method, **kwargs):
self.local = False
def fake7(*args, **kwargs):
self.local = True
def fake8(*args, **kwargs):
raise processutils.ProcessExecutionError('',
'Cannot find device "em0"\n')
def fake9(*args, **kwargs):
raise test.TestingException()
# raises because interface doesn't exist
self.stubs.Set(self.network.db,
'floating_ip_fixed_ip_associate',
fake1)
self.stubs.Set(self.network.db, 'floating_ip_disassociate', fake1)
self.stubs.Set(self.network.driver, 'ensure_floating_forward', fake8)
self.assertRaises(exception.NoFloatingIpInterface,
self.network._associate_floating_ip,
ctxt,
'1.2.3.4',
'1.2.3.5',
mox.IgnoreArg(),
mox.IgnoreArg())
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
# raises because floating_ip is already associated to a fixed_ip
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
self.stubs.Set(self.network, 'disassociate_floating_ip', fake9)
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
address='1.2.3.4',
instance_uuid='fake_uuid',
network=test_network.fake_network)
# doesn't raise because we exit early if the address is the same
self.network.associate_floating_ip(ctxt, mox.IgnoreArg(), '1.2.3.4')
# raises because we call disassociate which is mocked
self.assertRaises(test.TestingException,
self.network.associate_floating_ip,
ctxt,
mox.IgnoreArg(),
'new')
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
# does not raise and makes call remotely
self.local = True
self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake4)
self.stubs.Set(self.network.db, 'network_get', fake4_network)
self.stubs.Set(self.network.network_rpcapi.client, 'prepare',
lambda **kw: self.network.network_rpcapi.client)
self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6)
self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
mox.IgnoreArg())
self.assertFalse(self.local)
# does not raise and makes call locally
self.local = False
self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake5)
self.stubs.Set(self.network.db, 'network_get', fake5_network)
self.stubs.Set(self.network, '_associate_floating_ip', fake7)
self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
mox.IgnoreArg())
self.assertTrue(self.local)
def test_add_floating_ip_nat_before_bind(self):
# Tried to verify order with documented mox record/verify
# functionality, but it doesn't seem to work since I can't make it
# fail. I'm using stubs and a flag for now, but if this mox feature
# can be made to work, it would be a better way to test this.
#
# self.mox.StubOutWithMock(self.network.driver,
# 'ensure_floating_forward')
# self.mox.StubOutWithMock(self.network.driver, 'bind_floating_ip')
#
# self.network.driver.ensure_floating_forward(mox.IgnoreArg(),
# mox.IgnoreArg(),
# mox.IgnoreArg(),
# mox.IgnoreArg())
# self.network.driver.bind_floating_ip(mox.IgnoreArg(),
# mox.IgnoreArg())
# self.mox.ReplayAll()
nat_called = [False]
def fake_nat(*args, **kwargs):
nat_called[0] = True
def fake_bind(*args, **kwargs):
self.assertTrue(nat_called[0])
self.stubs.Set(self.network.driver,
'ensure_floating_forward',
fake_nat)
self.stubs.Set(self.network.driver, 'bind_floating_ip', fake_bind)
self.network.l3driver.add_floating_ip('fakefloat',
'fakefixed',
'fakeiface',
'fakenet')
@mock.patch('nova.db.floating_ip_get_all_by_host')
@mock.patch('nova.db.fixed_ip_get')
def _test_floating_ip_init_host(self, fixed_get, floating_get,
public_interface, expected_arg):
floating_get.return_value = [
dict(test_floating_ip.fake_floating_ip,
interface='foo',
address='1.2.3.4'),
dict(test_floating_ip.fake_floating_ip,
interface='fakeiface',
address='1.2.3.5',
fixed_ip_id=1),
dict(test_floating_ip.fake_floating_ip,
interface='bar',
address='1.2.3.6',
fixed_ip_id=2),
]
def fixed_ip_get(_context, fixed_ip_id, get_network):
if fixed_ip_id == 1:
return dict(test_fixed_ip.fake_fixed_ip,
address='1.2.3.4',
network=test_network.fake_network)
raise exception.FixedIpNotFound(id=fixed_ip_id)
fixed_get.side_effect = fixed_ip_get
self.mox.StubOutWithMock(self.network.l3driver, 'add_floating_ip')
self.flags(public_interface=public_interface)
self.network.l3driver.add_floating_ip(netaddr.IPAddress('1.2.3.5'),
netaddr.IPAddress('1.2.3.4'),
expected_arg,
mox.IsA(objects.Network))
self.mox.ReplayAll()
self.network.init_host_floating_ips()
self.mox.UnsetStubs()
self.mox.VerifyAll()
def test_floating_ip_init_host_without_public_interface(self):
self._test_floating_ip_init_host(public_interface=False,
expected_arg='fakeiface')
def test_floating_ip_init_host_with_public_interface(self):
self._test_floating_ip_init_host(public_interface='fooiface',
expected_arg='fooiface')
def test_disassociate_floating_ip(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake1(*args, **kwargs):
pass
# floating ip that isn't associated
def fake2(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1',
pool='nova',
interface='eth0',
fixed_ip_id=None)
# floating ip that is associated
def fake3(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1',
pool='nova',
interface='eth0',
fixed_ip_id=1,
project_id=ctxt.project_id)
# fixed ip with remote host
def fake4(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
address='10.0.0.1',
pool='nova',
instance_uuid=FAKEUUID,
interface='eth0',
network_id=123)
def fake4_network(*args, **kwargs):
return dict(test_network.fake_network,
multi_host=False,
host='jibberjabber')
# fixed ip with local host
def fake5(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
address='10.0.0.1',
pool='nova',
instance_uuid=FAKEUUID,
interface='eth0',
network_id=1234)
def fake5_network(*args, **kwargs):
return dict(test_network.fake_network,
multi_host=False, host='testhost')
def fake6(ctxt, method, **kwargs):
self.local = False
def fake7(*args, **kwargs):
self.local = True
def fake8(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1',
pool='nova',
interface='eth0',
fixed_ip_id=1,
auto_assigned=True,
project_id=ctxt.project_id)
self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
# raises because floating_ip is not associated to a fixed_ip
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
self.assertRaises(exception.FloatingIpNotAssociated,
self.network.disassociate_floating_ip,
ctxt,
mox.IgnoreArg())
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
# does not raise and makes call remotely
self.local = True
self.stubs.Set(self.network.db, 'fixed_ip_get', fake4)
self.stubs.Set(self.network.db, 'network_get', fake4_network)
self.stubs.Set(self.network.network_rpcapi.client, 'prepare',
lambda **kw: self.network.network_rpcapi.client)
self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6)
self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
self.assertFalse(self.local)
# does not raise and makes call locally
self.local = False
self.stubs.Set(self.network.db, 'fixed_ip_get', fake5)
self.stubs.Set(self.network.db, 'network_get', fake5_network)
self.stubs.Set(self.network, '_disassociate_floating_ip', fake7)
self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
self.assertTrue(self.local)
# raises because auto_assigned floating IP cannot be disassociated
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake8)
self.assertRaises(exception.CannotDisassociateAutoAssignedFloatingIP,
self.network.disassociate_floating_ip,
ctxt,
mox.IgnoreArg())
def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
self.stubs.Set(self.network,
'_do_trigger_security_group_members_refresh_for_instance',
lambda *a, **kw: None)
self.mox.StubOutWithMock(db, 'network_get')
self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
self.mox.StubOutWithMock(db,
'virtual_interface_get_by_instance_and_network')
self.mox.StubOutWithMock(db, 'fixed_ip_update')
self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
db.fixed_ip_update(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg())
db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
fixed = dict(test_fixed_ip.fake_fixed_ip,
address='192.168.0.101')
db.fixed_ip_associate_pool(mox.IgnoreArg(),
mox.IgnoreArg(),
instance_uuid=mox.IgnoreArg(),
host=None).AndReturn(fixed)
db.network_get(mox.IgnoreArg(),
mox.IgnoreArg(),
project_only=mox.IgnoreArg()
).AndReturn(dict(test_network.fake_network,
**networks[0]))
db.instance_get_by_uuid(mox.IgnoreArg(),
mox.IgnoreArg(), use_slave=False,
columns_to_join=['info_cache',
'security_groups']
).AndReturn(fake_inst(display_name=HOST,
uuid=FAKEUUID))
self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
networks[0]['id'])
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
def test_ip_association_and_allocation_of_other_project(self, net_get,
fixed_get):
"""Makes sure that we cannot deallocaate or disassociate
a public ip of other project.
"""
net_get.return_value = dict(test_network.fake_network,
**networks[1])
context1 = context.RequestContext('user', 'project1')
context2 = context.RequestContext('user', 'project2')
float_ip = db.floating_ip_create(context1.elevated(),
{'address': '1.2.3.4',
'project_id': context1.project_id})
float_addr = float_ip['address']
instance = db.instance_create(context1,
{'project_id': 'project1'})
fix_addr = db.fixed_ip_associate_pool(context1.elevated(),
1, instance['uuid']).address
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
address=fix_addr,
instance_uuid=instance.uuid,
network=dict(test_network.fake_network,
**networks[1]))
# Associate the IP with non-admin user context
self.assertRaises(exception.Forbidden,
self.network.associate_floating_ip,
context2,
float_addr,
fix_addr)
# Deallocate address from other project
self.assertRaises(exception.Forbidden,
self.network.deallocate_floating_ip,
context2,
float_addr)
# Now Associates the address to the actual project
self.network.associate_floating_ip(context1, float_addr, fix_addr)
# Now try dis-associating from other project
self.assertRaises(exception.Forbidden,
self.network.disassociate_floating_ip,
context2,
float_addr)
# Clean up the ip addresses
self.network.disassociate_floating_ip(context1, float_addr)
self.network.deallocate_floating_ip(context1, float_addr)
self.network.deallocate_fixed_ip(context1, fix_addr, 'fake')
db.floating_ip_destroy(context1.elevated(), float_addr)
db.fixed_ip_disassociate(context1.elevated(), fix_addr)
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ip_update')
def test_deallocate_fixed(self, fixed_update, net_get, fixed_get):
"""Verify that release is called properly.
        Ensures https://bugs.launchpad.net/nova/+bug/973442 doesn't recur.
"""
net_get.return_value = dict(test_network.fake_network,
**networks[1])
def vif_get(_context, _vif_id):
return vifs[0]
self.stubs.Set(db, 'virtual_interface_get', vif_get)
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
{'project_id': 'project1'})
elevated = context1.elevated()
fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
address=fix_addr.address,
instance_uuid=instance.uuid,
allocated=True,
virtual_interface_id=3,
network=dict(test_network.fake_network,
**networks[1]))
self.flags(force_dhcp_release=True)
self.mox.StubOutWithMock(linux_net, 'release_dhcp')
linux_net.release_dhcp(networks[1]['bridge'], fix_addr.address,
'DE:AD:BE:EF:00:00')
self.mox.ReplayAll()
self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake')
fixed_update.assert_called_once_with(context1, fix_addr.address,
{'allocated': False})
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ip_update')
def _deallocate_fixed_with_dhcp(self, mock_dev_exists, fixed_update,
net_get, fixed_get):
net_get.return_value = dict(test_network.fake_network,
**networks[1])
def vif_get(_context, _vif_id):
return vifs[0]
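        # NOTE: contextlib.nested is Python 2 only (removed in Python 3);
        # a single multi-manager with-statement would replace it there.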
with contextlib.nested(
mock.patch.object(db, 'virtual_interface_get', vif_get),
mock.patch.object(
utils, 'execute',
side_effect=processutils.ProcessExecutionError()),
) as (_vif_get, _execute):
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
{'project_id': 'project1'})
elevated = context1.elevated()
fix_addr = db.fixed_ip_associate_pool(elevated, 1,
instance['uuid'])
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
address=fix_addr.address,
instance_uuid=instance.uuid,
allocated=True,
virtual_interface_id=3,
network=dict(
test_network.fake_network,
**networks[1]))
self.flags(force_dhcp_release=True)
self.network.deallocate_fixed_ip(context1, fix_addr.address,
'fake')
fixed_update.assert_called_once_with(context1, fix_addr.address,
{'allocated': False})
mock_dev_exists.assert_called_once_with(networks[1]['bridge'])
if mock_dev_exists.return_value:
_execute.assert_called_once_with('dhcp_release',
networks[1]['bridge'],
fix_addr.address,
'DE:AD:BE:EF:00:00',
run_as_root=True)
@mock.patch('nova.network.linux_net.device_exists', return_value=True)
def test_deallocate_fixed_with_dhcp(self, mock_dev_exists):
self._deallocate_fixed_with_dhcp(mock_dev_exists)
@mock.patch('nova.network.linux_net.device_exists', return_value=False)
def test_deallocate_fixed_without_dhcp(self, mock_dev_exists):
self._deallocate_fixed_with_dhcp(mock_dev_exists)
def test_deallocate_fixed_deleted(self):
# Verify doesn't deallocate deleted fixed_ip from deleted network.
def teardown_network_on_host(_context, network):
if network['id'] == 0:
raise test.TestingException()
self.stubs.Set(self.network, '_teardown_network_on_host',
teardown_network_on_host)
context1 = context.RequestContext('user', 'project1')
elevated = context1.elevated()
instance = db.instance_create(context1,
{'project_id': 'project1'})
network = db.network_create_safe(elevated, networks[0])
_fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
fix_addr = _fix_addr.address
db.fixed_ip_update(elevated, fix_addr, {'deleted': 1})
elevated.read_deleted = 'yes'
delfixed = db.fixed_ip_get_by_address(elevated, fix_addr)
values = {'address': fix_addr,
'network_id': network.id,
'instance_uuid': delfixed['instance_uuid']}
db.fixed_ip_create(elevated, values)
        elevated.read_deleted = 'yes'
deallocate = self.network.deallocate_fixed_ip
self.assertRaises(test.TestingException, deallocate, context1,
fix_addr, 'fake')
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ip_update')
def test_deallocate_fixed_no_vif(self, fixed_update, net_get, fixed_get):
"""Verify that deallocate doesn't raise when no vif is returned.
        Ensures https://bugs.launchpad.net/nova/+bug/968457 doesn't recur.
"""
net_get.return_value = dict(test_network.fake_network,
**networks[1])
def vif_get(_context, _vif_id):
return None
self.stubs.Set(db, 'virtual_interface_get', vif_get)
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
{'project_id': 'project1'})
elevated = context1.elevated()
fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
address=fix_addr.address,
allocated=True,
virtual_interface_id=3,
instance_uuid=instance.uuid,
network=dict(test_network.fake_network,
**networks[1]))
self.flags(force_dhcp_release=True)
fixed_update.return_value = fixed_get.return_value
self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake')
fixed_update.assert_called_once_with(context1, fix_addr.address,
{'allocated': False})
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ip_update')
def test_fixed_ip_cleanup_fail(self, fixed_update, net_get, fixed_get):
# Verify IP is not deallocated if the security group refresh fails.
net_get.return_value = dict(test_network.fake_network,
**networks[1])
context1 = context.RequestContext('user', 'project1')
instance = db.instance_create(context1,
{'project_id': 'project1'})
elevated = context1.elevated()
fix_addr = objects.FixedIP.associate_pool(elevated, 1,
instance['uuid'])
def fake_refresh(instance_uuid):
raise test.TestingException()
self.stubs.Set(self.network,
'_do_trigger_security_group_members_refresh_for_instance',
fake_refresh)
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
address=fix_addr.address,
allocated=True,
virtual_interface_id=3,
instance_uuid=instance.uuid,
network=dict(test_network.fake_network,
**networks[1]))
self.assertRaises(test.TestingException,
self.network.deallocate_fixed_ip,
context1, str(fix_addr.address), 'fake')
self.assertFalse(fixed_update.called)
def test_get_networks_by_uuids_ordering(self):
self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa']
db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(
[dict(test_network.fake_network, **net)
for net in networks])
self.mox.ReplayAll()
res = self.network._get_networks_by_uuids(self.context,
requested_networks)
self.assertEqual(res[0]['id'], 1)
self.assertEqual(res[1]['id'], 0)
@mock.patch('nova.objects.fixed_ip.FixedIP.get_by_id')
@mock.patch('nova.objects.floating_ip.FloatingIPList.get_by_host')
@mock.patch('nova.network.linux_net.iptables_manager._apply')
def test_init_host_iptables_defer_apply(self, iptable_apply,
floating_get_by_host,
fixed_get_by_id):
def get_by_id(context, fixed_ip_id, **kwargs):
net = objects.Network(bridge='testbridge',
cidr='192.168.1.0/24')
if fixed_ip_id == 1:
return objects.FixedIP(address='192.168.1.4',
network=net)
elif fixed_ip_id == 2:
return objects.FixedIP(address='192.168.1.5',
network=net)
def fake_apply():
fake_apply.count += 1
fake_apply.count = 0
ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
float1 = objects.FloatingIP(address='1.2.3.4', fixed_ip_id=1)
float2 = objects.FloatingIP(address='1.2.3.5', fixed_ip_id=2)
float1._context = ctxt
float2._context = ctxt
iptable_apply.side_effect = fake_apply
floating_get_by_host.return_value = [float1, float2]
fixed_get_by_id.side_effect = get_by_id
self.network.init_host()
self.assertEqual(1, fake_apply.count)
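# Test double used by the DNS-domain tests below: exposes a dict of values
# as attributes (e.g. domain.availability_zone) the way the manager code
# reads them.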
class _TestDomainObject(object):
def __init__(self, **kwargs):
for k, v in six.iteritems(kwargs):
self.__setattr__(k, v)
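# Network double supporting both attribute and item access, since manager
# code reads network fields both ways.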
class FakeNetwork(object):
def __init__(self, **kwargs):
self.vlan = None
for k, v in six.iteritems(kwargs):
self.__setattr__(k, v)
def __getitem__(self, item):
return getattr(self, item)
class CommonNetworkTestCase(test.TestCase):
REQUIRES_LOCKING = True
def setUp(self):
super(CommonNetworkTestCase, self).setUp()
self.context = context.RequestContext('fake', 'fake')
self.flags(ipv6_backend='rfc2462')
self.flags(use_local=True, group='conductor')
ipv6.reset_backend()
def test_validate_instance_zone_for_dns_domain(self):
domain = 'example.com'
az = 'test_az'
domains = {
domain: _TestDomainObject(
domain=domain,
availability_zone=az)}
def dnsdomain_get(context, instance_domain):
return domains.get(instance_domain)
self.stubs.Set(db, 'dnsdomain_get', dnsdomain_get)
fake_instance = {'uuid': FAKEUUID,
'availability_zone': az}
manager = network_manager.NetworkManager()
res = manager._validate_instance_zone_for_dns_domain(self.context,
fake_instance)
self.assertTrue(res)
def fake_create_fixed_ips(self, context, network_id, fixed_cidr=None,
extra_reserved=None, bottom_reserved=0,
top_reserved=0):
return None
def test_get_instance_nw_info_client_exceptions(self):
manager = network_manager.NetworkManager()
self.mox.StubOutWithMock(manager.db,
'fixed_ip_get_by_instance')
manager.db.fixed_ip_get_by_instance(
self.context, FAKEUUID).AndRaise(exception.InstanceNotFound(
instance_id=FAKEUUID))
self.mox.ReplayAll()
self.assertRaises(messaging.ExpectedException,
manager.get_instance_nw_info,
self.context, FAKEUUID, 'fake_rxtx_factor', HOST)
@mock.patch('nova.db.instance_get')
@mock.patch('nova.db.fixed_ip_get_by_instance')
def test_deallocate_for_instance_passes_host_info(self, fixed_get,
instance_get):
manager = fake_network.FakeNetworkManager()
db = manager.db
instance_get.return_value = fake_inst(uuid='ignoreduuid')
db.virtual_interface_delete_by_instance = lambda _x, _y: None
        ctx = context.RequestContext('ignore', 'ignore')
fixed_get.return_value = [dict(test_fixed_ip.fake_fixed_ip,
address='1.2.3.4',
network_id=123)]
manager.deallocate_for_instance(
ctx, instance=objects.Instance._from_db_object(self.context,
objects.Instance(), instance_get.return_value))
self.assertEqual([
(ctx, '1.2.3.4', 'fake-host')
], manager.deallocate_fixed_ip_calls)
@mock.patch('nova.db.fixed_ip_get_by_instance')
def test_deallocate_for_instance_passes_host_info_with_update_dns_entries(
self, fixed_get):
self.flags(update_dns_entries=True)
manager = fake_network.FakeNetworkManager()
db = manager.db
db.virtual_interface_delete_by_instance = lambda _x, _y: None
        ctx = context.RequestContext('ignore', 'ignore')
fixed_get.return_value = [dict(test_fixed_ip.fake_fixed_ip,
address='1.2.3.4',
network_id=123)]
with mock.patch.object(manager.network_rpcapi,
'update_dns') as mock_update_dns:
manager.deallocate_for_instance(
ctx, instance=fake_instance.fake_instance_obj(ctx))
mock_update_dns.assert_called_once_with(ctx, ['123'])
self.assertEqual([
(ctx, '1.2.3.4', 'fake-host')
], manager.deallocate_fixed_ip_calls)
def test_deallocate_for_instance_with_requested_networks(self):
manager = fake_network.FakeNetworkManager()
db = manager.db
db.virtual_interface_delete_by_instance = mock.Mock()
        ctx = context.RequestContext('ignore', 'ignore')
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest.from_tuple(t)
for t in [('123', '1.2.3.4'), ('123', '4.3.2.1'),
('123', None)]])
manager.deallocate_for_instance(
ctx,
instance=fake_instance.fake_instance_obj(ctx),
requested_networks=requested_networks)
self.assertEqual([
(ctx, '1.2.3.4', 'fake-host'), (ctx, '4.3.2.1', 'fake-host')
], manager.deallocate_fixed_ip_calls)
def test_deallocate_for_instance_with_update_dns_entries(self):
self.flags(update_dns_entries=True)
manager = fake_network.FakeNetworkManager()
db = manager.db
db.virtual_interface_delete_by_instance = mock.Mock()
        ctx = context.RequestContext('ignore', 'ignore')
requested_networks = objects.NetworkRequestList(
objects=[objects.NetworkRequest.from_tuple(t)
for t in [('123', '1.2.3.4'), ('123', '4.3.2.1')]])
with mock.patch.object(manager.network_rpcapi,
'update_dns') as mock_update_dns:
manager.deallocate_for_instance(
ctx,
instance=fake_instance.fake_instance_obj(ctx),
requested_networks=requested_networks)
mock_update_dns.assert_called_once_with(ctx, ['123'])
self.assertEqual([
(ctx, '1.2.3.4', 'fake-host'), (ctx, '4.3.2.1', 'fake-host')
], manager.deallocate_fixed_ip_calls)
@mock.patch('nova.db.fixed_ip_get_by_instance')
@mock.patch('nova.db.fixed_ip_disassociate')
def test_remove_fixed_ip_from_instance(self, disassociate, get):
manager = fake_network.FakeNetworkManager()
get.return_value = [
dict(test_fixed_ip.fake_fixed_ip, **x)
for x in manager.db.fixed_ip_get_by_instance(None,
FAKEUUID)]
manager.remove_fixed_ip_from_instance(self.context, FAKEUUID,
HOST,
'10.0.0.1')
self.assertEqual(manager.deallocate_called, '10.0.0.1')
disassociate.assert_called_once_with(self.context, '10.0.0.1')
@mock.patch('nova.db.fixed_ip_get_by_instance')
def test_remove_fixed_ip_from_instance_bad_input(self, get):
manager = fake_network.FakeNetworkManager()
get.return_value = []
self.assertRaises(exception.FixedIpNotFoundForSpecificInstance,
manager.remove_fixed_ip_from_instance,
self.context, 99, HOST, 'bad input')
def test_validate_cidrs(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/24',
False, 1, 256, None, None, None,
None, None)
self.assertEqual(1, len(nets))
cidrs = [str(net['cidr']) for net in nets]
self.assertIn('192.168.0.0/24', cidrs)
def test_validate_cidrs_split_exact_in_half(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/24',
False, 2, 128, None, None, None,
None, None)
self.assertEqual(2, len(nets))
cidrs = [str(net['cidr']) for net in nets]
self.assertIn('192.168.0.0/25', cidrs)
self.assertIn('192.168.0.128/25', cidrs)
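    def test_netaddr_subnet_split_sketch(self):
        # NOTE: illustrative sketch, not part of the original suite --
        # shows the netaddr subnetting behavior that backs the cidr
        # splits asserted above; netaddr is already imported by this
        # module.
        supernet = netaddr.IPNetwork('192.168.0.0/24')
        halves = [str(net) for net in supernet.subnet(25)]
        self.assertEqual(['192.168.0.0/25', '192.168.0.128/25'], halves)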
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_cidr_in_use_middle_of_range(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
id=1, cidr='192.168.2.0/24')]
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/16',
False, 4, 256, None, None, None,
None, None)
self.assertEqual(4, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
'192.168.4.0/24']
for exp_cidr in exp_cidrs:
self.assertIn(exp_cidr, cidrs)
self.assertNotIn('192.168.2.0/24', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_smaller_subnet_in_use(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
id=1, cidr='192.168.2.9/25')]
# CidrConflict: requested cidr (192.168.2.0/24) conflicts with
# existing smaller cidr
args = (self.context.elevated(), 'fake', '192.168.2.0/24', False,
1, 256, None, None, None, None, None)
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_smaller_cidr_in_use(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
id=1, cidr='192.168.2.0/25')]
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/16',
False, 4, 256, None, None, None, None,
None)
self.assertEqual(4, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
'192.168.4.0/24']
for exp_cidr in exp_cidrs:
self.assertIn(exp_cidr, cidrs)
self.assertNotIn('192.168.2.0/24', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_smaller_cidr_in_use2(self, get_all):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
get_all.return_value = [dict(test_network.fake_network, id=1,
cidr='192.168.2.9/29')]
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.2.0/24',
False, 3, 32, None, None, None, None,
None)
self.assertEqual(3, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27']
for exp_cidr in exp_cidrs:
self.assertIn(exp_cidr, cidrs)
self.assertNotIn('192.168.2.0/27', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_all_in_use(self, get_all):
manager = fake_network.FakeNetworkManager()
in_use = [dict(test_network.fake_network, **values) for values in
[{'id': 1, 'cidr': '192.168.2.9/29'},
{'id': 2, 'cidr': '192.168.2.64/26'},
{'id': 3, 'cidr': '192.168.2.128/26'}]]
get_all.return_value = in_use
args = (self.context.elevated(), 'fake', '192.168.2.0/24', False,
3, 64, None, None, None, None, None)
        # CidrConflict: not enough subnets available to satisfy the
        #               requested num_networks - some subnets in the
        #               requested range are already in use
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
def test_validate_cidrs_one_in_use(self):
manager = fake_network.FakeNetworkManager()
args = (None, 'fake', '192.168.0.0/24', False, 2, 256, None, None,
None, None, None)
# ValueError: network_size * num_networks exceeds cidr size
self.assertRaises(ValueError, manager.create_networks, *args)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_already_used(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
cidr='192.168.0.0/24')]
# CidrConflict: cidr already in use
args = (self.context.elevated(), 'fake', '192.168.0.0/24', False,
1, 256, None, None, None, None, None)
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
def test_validate_cidrs_too_many(self):
manager = fake_network.FakeNetworkManager()
args = (None, 'fake', '192.168.0.0/24', False, 200, 256, None, None,
None, None, None)
        # ValueError: not enough subnets available to satisfy the
        #             requested num_networks
self.assertRaises(ValueError, manager.create_networks, *args)
def test_validate_cidrs_split_partial(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/16',
False, 2, 256, None, None, None, None,
None)
returned_cidrs = [str(net['cidr']) for net in nets]
self.assertIn('192.168.0.0/24', returned_cidrs)
self.assertIn('192.168.1.0/24', returned_cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_conflict_existing_supernet(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
id=1, cidr='192.168.0.0/8')]
args = (self.context.elevated(), 'fake', '192.168.0.0/24', False,
1, 256, None, None, None, None, None)
# CidrConflict: requested cidr (192.168.0.0/24) conflicts
# with existing supernet
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
def test_create_networks(self):
cidr = '192.168.0.0/24'
manager = fake_network.FakeNetworkManager()
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [self.context.elevated(), 'foo', cidr, None, 1, 256,
'fd00::/48', None, None, None, None, None]
self.assertTrue(manager.create_networks(*args))
def test_create_networks_with_uuid(self):
cidr = '192.168.0.0/24'
uuid = FAKEUUID
manager = fake_network.FakeNetworkManager()
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [self.context.elevated(), 'foo', cidr, None, 1, 256,
'fd00::/48', None, None, None, None, None]
kwargs = {'uuid': uuid}
nets = manager.create_networks(*args, **kwargs)
self.assertEqual(1, len(nets))
net = nets[0]
self.assertEqual(uuid, net['uuid'])
@mock.patch('nova.db.network_get_all')
def test_create_networks_cidr_already_used(self, get_all):
manager = fake_network.FakeNetworkManager()
get_all.return_value = [dict(test_network.fake_network,
id=1, cidr='192.168.0.0/24')]
args = [self.context.elevated(), 'foo', '192.168.0.0/24', None, 1, 256,
'fd00::/48', None, None, None, None, None]
self.assertRaises(exception.CidrConflict,
manager.create_networks, *args)
def test_create_networks_many(self):
cidr = '192.168.0.0/16'
manager = fake_network.FakeNetworkManager()
self.stubs.Set(manager, '_create_fixed_ips',
self.fake_create_fixed_ips)
args = [self.context.elevated(), 'foo', cidr, None, 10, 256,
'fd00::/48', None, None, None, None, None]
self.assertTrue(manager.create_networks(*args))
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ips_by_virtual_interface')
def test_get_instance_uuids_by_ip_regex(self, fixed_get, network_get):
manager = fake_network.FakeNetworkManager(self.stubs)
fixed_get.side_effect = manager.db.fixed_ips_by_virtual_interface
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
network_get.return_value = dict(test_network.fake_network,
**manager.db.network_get(None, 1))
        # Greedily get everything
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '.*'})
self.assertEqual(len(res), len(_vifs))
# Doesn't exist
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '10.0.0.1'})
self.assertFalse(res)
# Get instance 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '172.16.0.2'})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 2
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '173.16.0.2'})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
# Get instance 0 and 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '172.16.0.*'})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_uuid'], _vifs[0]['instance_uuid'])
self.assertEqual(res[1]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 1 and 2
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip': '17..16.0.2'})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
self.assertEqual(res[1]['instance_uuid'], _vifs[2]['instance_uuid'])
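    def test_ip_filter_regex_semantics_sketch(self):
        # NOTE: minimal sketch (not in the original suite) of the regex
        # semantics the ip filters above rely on: '.' matches any single
        # character, so '17..16.0.2' covers 172.16.0.2 and 173.16.0.2.
        import re
        pattern = re.compile('17..16.0.2')
        self.assertTrue(pattern.match('172.16.0.2'))
        self.assertTrue(pattern.match('173.16.0.2'))
        self.assertIsNone(pattern.match('10.0.0.1'))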
@mock.patch('nova.db.network_get')
def test_get_instance_uuids_by_ipv6_regex(self, network_get):
manager = fake_network.FakeNetworkManager(self.stubs)
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
def _network_get(context, network_id, **args):
return dict(test_network.fake_network,
**manager.db.network_get(context, network_id))
network_get.side_effect = _network_get
        # Greedily get everything
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*'})
self.assertEqual(len(res), len(_vifs))
# Doesn't exist
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*1034.*'})
self.assertFalse(res)
# Get instance 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '2001:.*2'})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 2
ip6 = '2001:db8:69:1f:dead:beff:feff:ef03'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': ip6})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
# Get instance 0 and 1
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': '.*ef0[1,2]'})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_uuid'], _vifs[0]['instance_uuid'])
self.assertEqual(res[1]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 1 and 2
ip6 = '2001:db8:69:1.:dead:beff:feff:ef0.'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'ip6': ip6})
self.assertTrue(res)
self.assertEqual(len(res), 2)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
self.assertEqual(res[1]['instance_uuid'], _vifs[2]['instance_uuid'])
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ips_by_virtual_interface')
def test_get_instance_uuids_by_ip(self, fixed_get, network_get):
manager = fake_network.FakeNetworkManager(self.stubs)
fixed_get.side_effect = manager.db.fixed_ips_by_virtual_interface
_vifs = manager.db.virtual_interface_get_all(None)
fake_context = context.RequestContext('user', 'project')
network_get.return_value = dict(test_network.fake_network,
**manager.db.network_get(None, 1))
# No regex for you!
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': '.*'})
self.assertFalse(res)
# Doesn't exist
ip = '10.0.0.1'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertFalse(res)
# Get instance 1
ip = '172.16.0.2'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
# Get instance 2
ip = '173.16.0.2'
res = manager.get_instance_uuids_by_ip_filter(fake_context,
{'fixed_ip': ip})
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
@mock.patch('nova.db.network_get_by_uuid')
def test_get_network(self, get):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get.return_value = dict(test_network.fake_network, **networks[0])
uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
network = manager.get_network(fake_context, uuid)
self.assertEqual(network['uuid'], uuid)
@mock.patch('nova.db.network_get_by_uuid')
def test_get_network_not_found(self, get):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get.side_effect = exception.NetworkNotFoundForUUID(uuid='foo')
uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
self.assertRaises(exception.NetworkNotFound,
manager.get_network, fake_context, uuid)
@mock.patch('nova.db.network_get_all')
def test_get_all_networks(self, get_all):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get_all.return_value = [dict(test_network.fake_network, **net)
for net in networks]
output = manager.get_all_networks(fake_context)
        self.assertEqual(2, len(output))
self.assertEqual(output[0]['uuid'],
'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
self.assertEqual(output[1]['uuid'],
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb')
@mock.patch('nova.db.network_get_by_uuid')
@mock.patch('nova.db.network_disassociate')
def test_disassociate_network(self, disassociate, get):
manager = fake_network.FakeNetworkManager()
disassociate.return_value = True
fake_context = context.RequestContext('user', 'project')
get.return_value = dict(test_network.fake_network,
**networks[0])
uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
manager.disassociate_network(fake_context, uuid)
@mock.patch('nova.db.network_get_by_uuid')
def test_disassociate_network_not_found(self, get):
manager = fake_network.FakeNetworkManager()
fake_context = context.RequestContext('user', 'project')
get.side_effect = exception.NetworkNotFoundForUUID(uuid='fake')
uuid = 'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee'
self.assertRaises(exception.NetworkNotFound,
manager.disassociate_network, fake_context, uuid)
def _test_init_host_dynamic_fixed_range(self, net_manager):
self.flags(fake_network=True,
routing_source_ip='172.16.0.1',
metadata_host='172.16.0.1',
public_interface='eth1',
dmz_cidr=['10.0.3.0/24'])
binary_name = linux_net.get_binary_name()
# Stub out calls we don't want to really run, mock the db
self.stubs.Set(linux_net.iptables_manager, '_apply', lambda: None)
self.stubs.Set(floating_ips.FloatingIP, 'init_host_floating_ips',
lambda *args: None)
self.stubs.Set(net_manager.l3driver, 'initialize_gateway',
lambda *args: None)
self.mox.StubOutWithMock(db, 'network_get_all_by_host')
fake_networks = [dict(test_network.fake_network, **n)
for n in networks]
db.network_get_all_by_host(mox.IgnoreArg(),
mox.IgnoreArg()
).MultipleTimes().AndReturn(fake_networks)
self.mox.ReplayAll()
net_manager.init_host()
# Get the iptables rules that got created
current_lines = []
new_lines = linux_net.iptables_manager._modify_rules(current_lines,
linux_net.iptables_manager.ipv4['nat'],
table_name='nat')
expected_lines = ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
'-j SNAT --to-source %s -o %s'
% (binary_name, networks[0]['cidr'],
CONF.routing_source_ip,
CONF.public_interface),
'[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
% (binary_name, networks[0]['cidr'],
CONF.metadata_host),
'[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
% (binary_name, networks[0]['cidr'],
CONF.dmz_cidr[0]),
'[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' % (binary_name,
networks[0]['cidr'],
networks[0]['cidr']),
'[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
'-j SNAT --to-source %s -o %s'
% (binary_name, networks[1]['cidr'],
CONF.routing_source_ip,
CONF.public_interface),
'[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
% (binary_name, networks[1]['cidr'],
CONF.metadata_host),
'[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
% (binary_name, networks[1]['cidr'],
CONF.dmz_cidr[0]),
'[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' % (binary_name,
networks[1]['cidr'],
networks[1]['cidr'])]
# Compare the expected rules against the actual ones
for line in expected_lines:
self.assertIn(line, new_lines)
# Add an additional network and ensure the rules get configured
new_network = {'id': 2,
                       'uuid': 'cccccccc-cccc-cccc-cccc-cccccccccccc',
'label': 'test2',
'injected': False,
'multi_host': False,
'cidr': '192.168.2.0/24',
'cidr_v6': '2001:dba::/64',
'gateway_v6': '2001:dba::1',
'netmask_v6': '64',
'netmask': '255.255.255.0',
'bridge': 'fa1',
'bridge_interface': 'fake_fa1',
'gateway': '192.168.2.1',
'dhcp_server': '192.168.2.1',
'broadcast': '192.168.2.255',
'dns1': '192.168.2.1',
'dns2': '192.168.2.2',
'vlan': None,
'host': HOST,
'project_id': 'fake_project',
'vpn_public_address': '192.168.2.2',
'vpn_public_port': '22',
'vpn_private_address': '10.0.0.2'}
new_network_obj = objects.Network._from_db_object(
self.context, objects.Network(),
dict(test_network.fake_network, **new_network))
ctxt = context.get_admin_context()
net_manager._setup_network_on_host(ctxt, new_network_obj)
# Get the new iptables rules that got created from adding a new network
current_lines = []
new_lines = linux_net.iptables_manager._modify_rules(current_lines,
linux_net.iptables_manager.ipv4['nat'],
table_name='nat')
# Add the new expected rules to the old ones
expected_lines += ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
'-j SNAT --to-source %s -o %s'
% (binary_name, new_network['cidr'],
CONF.routing_source_ip,
CONF.public_interface),
'[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
% (binary_name, new_network['cidr'],
CONF.metadata_host),
'[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
% (binary_name, new_network['cidr'],
CONF.dmz_cidr[0]),
'[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack '
'! --ctstate DNAT -j ACCEPT' % (binary_name,
new_network['cidr'],
new_network['cidr'])]
# Compare the expected rules (with new network) against the actual ones
for line in expected_lines:
self.assertIn(line, new_lines)
def test_flatdhcpmanager_dynamic_fixed_range(self):
"""Test FlatDHCPManager NAT rules for fixed_range."""
# Set the network manager
self.network = network_manager.FlatDHCPManager(host=HOST)
self.network.db = db
# Test new behavior:
# CONF.fixed_range is not set, defaults to None
# Determine networks to NAT based on lookup
self._test_init_host_dynamic_fixed_range(self.network)
def test_vlanmanager_dynamic_fixed_range(self):
"""Test VlanManager NAT rules for fixed_range."""
# Set the network manager
self.network = network_manager.VlanManager(host=HOST)
self.network.db = db
# Test new behavior:
# CONF.fixed_range is not set, defaults to None
# Determine networks to NAT based on lookup
self._test_init_host_dynamic_fixed_range(self.network)
@mock.patch('nova.objects.quotas.Quotas.rollback')
@mock.patch('nova.objects.fixed_ip.FixedIP.get_by_address')
@mock.patch('nova.network.manager.NetworkManager.'
'_do_trigger_security_group_members_refresh_for_instance')
def test_fixed_ip_cleanup_rollback(self, fake_trig,
fixed_get, rollback):
manager = network_manager.NetworkManager()
fake_trig.side_effect = test.TestingException
self.assertRaises(test.TestingException,
manager.deallocate_fixed_ip,
self.context, 'fake', 'fake',
instance=fake_inst(uuid='ignoreduuid'))
rollback.assert_called_once_with()
def test_fixed_cidr_out_of_range(self):
manager = network_manager.NetworkManager()
ctxt = context.get_admin_context()
self.assertRaises(exception.AddressOutOfRange,
manager.create_networks, ctxt, label="fake",
cidr='10.1.0.0/24', fixed_cidr='10.1.1.0/25')
class TestRPCFixedManager(network_manager.RPCAllocateFixedIP,
network_manager.NetworkManager):
"""Dummy manager that implements RPCAllocateFixedIP."""
class RPCAllocateTestCase(test.NoDBTestCase):
"""Tests nova.network.manager.RPCAllocateFixedIP."""
def setUp(self):
super(RPCAllocateTestCase, self).setUp()
self.flags(use_local=True, group='conductor')
self.rpc_fixed = TestRPCFixedManager()
self.context = context.RequestContext('fake', 'fake')
def test_rpc_allocate(self):
"""Test to verify bug 855030 doesn't resurface.
        Makes sure _rpc_allocate_fixed_ip returns a value so the call
returns properly and the greenpool completes.
"""
address = '10.10.10.10'
def fake_allocate(*args, **kwargs):
return address
def fake_network_get(*args, **kwargs):
return test_network.fake_network
self.stubs.Set(self.rpc_fixed, 'allocate_fixed_ip', fake_allocate)
self.stubs.Set(self.rpc_fixed.db, 'network_get', fake_network_get)
rval = self.rpc_fixed._rpc_allocate_fixed_ip(self.context,
'fake_instance',
'fake_network')
self.assertEqual(rval, address)
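    def test_greenpool_return_value_sketch(self):
        # NOTE: hedged sketch, assuming eventlet is importable (it is a
        # hard nova dependency): illustrates why _rpc_allocate_fixed_ip
        # must return a value -- the spawning side gets that value back
        # from wait().
        import eventlet
        pool = eventlet.GreenPool()
        thread = pool.spawn(lambda: '10.10.10.10')
        self.assertEqual('10.10.10.10', thread.wait())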
class TestFloatingIPManager(floating_ips.FloatingIP,
network_manager.NetworkManager):
"""Dummy manager that implements FloatingIP."""
class AllocateTestCase(test.TestCase):
REQUIRES_LOCKING = True
def setUp(self):
super(AllocateTestCase, self).setUp()
dns = 'nova.network.noop_dns_driver.NoopDNSDriver'
self.flags(instance_dns_manager=dns)
self.useFixture(test.SampleNetworks())
self.conductor = self.start_service(
'conductor', manager=CONF.conductor.manager)
self.compute = self.start_service('compute')
self.network = self.start_service('network')
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
self.project_id,
is_admin=True)
self.user_context = context.RequestContext('testuser',
'testproject')
def test_allocate_for_instance(self):
address = "10.10.10.10"
self.flags(auto_assign_floating_ip=True)
db.floating_ip_create(self.context,
{'address': address,
'pool': 'nova'})
inst = objects.Instance(context=self.context)
inst.host = self.compute.host
inst.display_name = HOST
inst.instance_type_id = 1
inst.uuid = FAKEUUID
inst.create()
networks = db.network_get_all(self.context)
for network in networks:
db.network_update(self.context, network['id'],
{'host': self.network.host})
project_id = self.user_context.project_id
nw_info = self.network.allocate_for_instance(self.user_context,
instance_id=inst['id'], instance_uuid=inst['uuid'],
host=inst['host'], vpn=None, rxtx_factor=3,
project_id=project_id, macs=None)
self.assertEqual(1, len(nw_info))
fixed_ip = nw_info.fixed_ips()[0]['address']
self.assertTrue(netutils.is_valid_ipv4(fixed_ip))
self.network.deallocate_for_instance(self.context,
instance=inst)
def test_allocate_for_instance_illegal_network(self):
networks = db.network_get_all(self.context)
requested_networks = []
for network in networks:
# set all networks to other projects
db.network_update(self.context, network['id'],
{'host': self.network.host,
'project_id': 'otherid'})
requested_networks.append((network['uuid'], None))
# set the first network to our project
db.network_update(self.context, networks[0]['id'],
{'project_id': self.user_context.project_id})
inst = objects.Instance(context=self.context)
inst.host = self.compute.host
inst.display_name = HOST
inst.instance_type_id = 1
inst.uuid = FAKEUUID
inst.create()
self.assertRaises(exception.NetworkNotFoundForProject,
self.network.allocate_for_instance, self.user_context,
instance_id=inst['id'], instance_uuid=inst['uuid'],
host=inst['host'], vpn=None, rxtx_factor=3,
project_id=self.context.project_id, macs=None,
requested_networks=requested_networks)
def test_allocate_for_instance_with_mac(self):
available_macs = set(['ca:fe:de:ad:be:ef'])
inst = db.instance_create(self.context, {'host': self.compute.host,
'display_name': HOST,
'instance_type_id': 1})
networks = db.network_get_all(self.context)
for network in networks:
db.network_update(self.context, network['id'],
{'host': self.network.host})
project_id = self.context.project_id
nw_info = self.network.allocate_for_instance(self.user_context,
instance_id=inst['id'], instance_uuid=inst['uuid'],
host=inst['host'], vpn=None, rxtx_factor=3,
project_id=project_id, macs=available_macs)
assigned_macs = [vif['address'] for vif in nw_info]
self.assertEqual(1, len(assigned_macs))
self.assertEqual(available_macs.pop(), assigned_macs[0])
self.network.deallocate_for_instance(self.context,
instance_id=inst['id'],
host=self.network.host,
project_id=project_id)
def test_allocate_for_instance_not_enough_macs(self):
available_macs = set()
inst = db.instance_create(self.context, {'host': self.compute.host,
'display_name': HOST,
'instance_type_id': 1})
networks = db.network_get_all(self.context)
for network in networks:
db.network_update(self.context, network['id'],
{'host': self.network.host})
project_id = self.context.project_id
self.assertRaises(exception.VirtualInterfaceCreateException,
self.network.allocate_for_instance,
self.user_context,
instance_id=inst['id'], instance_uuid=inst['uuid'],
host=inst['host'], vpn=None, rxtx_factor=3,
project_id=project_id, macs=available_macs)
class FloatingIPTestCase(test.TestCase):
"""Tests nova.network.manager.FloatingIP."""
REQUIRES_LOCKING = True
def setUp(self):
super(FloatingIPTestCase, self).setUp()
self.tempdir = self.useFixture(fixtures.TempDir()).path
self.flags(log_dir=self.tempdir)
self.flags(use_local=True, group='conductor')
self.network = TestFloatingIPManager()
self.network.db = db
self.project_id = 'testproject'
self.context = context.RequestContext('testuser', self.project_id,
is_admin=False)
@mock.patch('nova.db.fixed_ip_get')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.instance_get_by_uuid')
@mock.patch('nova.db.service_get_by_host_and_binary')
@mock.patch('nova.db.floating_ip_get_by_address')
def test_disassociate_floating_ip_multi_host_calls(self, floating_get,
service_get,
inst_get, net_get,
fixed_get):
floating_ip = dict(test_floating_ip.fake_floating_ip,
fixed_ip_id=12)
fixed_ip = dict(test_fixed_ip.fake_fixed_ip,
network_id=None,
instance_uuid='instance-uuid')
network = dict(test_network.fake_network,
multi_host=True)
instance = dict(fake_instance.fake_db_instance(host='some-other-host'))
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
self.stubs.Set(self.network,
'_floating_ip_owned_by_project',
lambda _x, _y: True)
floating_get.return_value = floating_ip
fixed_get.return_value = fixed_ip
net_get.return_value = network
inst_get.return_value = instance
service_get.return_value = test_service.fake_service
self.stubs.Set(self.network.servicegroup_api,
'service_is_up',
lambda _x: True)
self.mox.StubOutWithMock(
self.network.network_rpcapi, '_disassociate_floating_ip')
self.network.network_rpcapi._disassociate_floating_ip(
ctxt, 'fl_ip', mox.IgnoreArg(), 'some-other-host', 'instance-uuid')
self.mox.ReplayAll()
self.network.disassociate_floating_ip(ctxt, 'fl_ip', True)
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.instance_get_by_uuid')
@mock.patch('nova.db.floating_ip_get_by_address')
def test_associate_floating_ip_multi_host_calls(self, floating_get,
inst_get, net_get,
fixed_get):
floating_ip = dict(test_floating_ip.fake_floating_ip,
fixed_ip_id=None)
fixed_ip = dict(test_fixed_ip.fake_fixed_ip,
network_id=None,
instance_uuid='instance-uuid')
network = dict(test_network.fake_network,
multi_host=True)
instance = dict(fake_instance.fake_db_instance(host='some-other-host'))
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
self.stubs.Set(self.network,
'_floating_ip_owned_by_project',
lambda _x, _y: True)
floating_get.return_value = floating_ip
fixed_get.return_value = fixed_ip
net_get.return_value = network
inst_get.return_value = instance
self.mox.StubOutWithMock(
self.network.network_rpcapi, '_associate_floating_ip')
self.network.network_rpcapi._associate_floating_ip(
ctxt, 'fl_ip', 'fix_ip', mox.IgnoreArg(), 'some-other-host',
'instance-uuid')
self.mox.ReplayAll()
self.network.associate_floating_ip(ctxt, 'fl_ip', 'fix_ip', True)
def test_double_deallocation(self):
instance_ref = db.instance_create(self.context,
{"project_id": self.project_id})
        # Run it twice so it faults if it does not handle instances
        # without fixed networks; if either call fails, it does not
        # handle having no addresses.
self.network.deallocate_for_instance(self.context,
instance_id=instance_ref['id'])
self.network.deallocate_for_instance(self.context,
instance_id=instance_ref['id'])
def test_deallocate_floating_ip_quota_rollback(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake(*args, **kwargs):
return dict(test_floating_ip.fake_floating_ip,
address='10.0.0.1', fixed_ip_id=None,
project_id=ctxt.project_id)
self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake)
self.mox.StubOutWithMock(db, 'floating_ip_deallocate')
self.mox.StubOutWithMock(self.network,
'_floating_ip_owned_by_project')
self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
quota.QUOTAS.reserve(self.context,
floating_ips=-1,
project_id='testproject').AndReturn('fake-rsv')
self.network._floating_ip_owned_by_project(self.context,
mox.IgnoreArg())
db.floating_ip_deallocate(mox.IgnoreArg(),
mox.IgnoreArg()).AndReturn(None)
quota.QUOTAS.rollback(self.context, 'fake-rsv',
project_id='testproject')
self.mox.ReplayAll()
self.network.deallocate_floating_ip(self.context, '10.0.0.1')
def test_deallocation_deleted_instance(self):
self.stubs.Set(self.network, '_teardown_network_on_host',
lambda *args, **kwargs: None)
instance = objects.Instance(context=self.context)
instance.project_id = self.project_id
instance.deleted = True
instance.create()
network = db.network_create_safe(self.context.elevated(), {
'project_id': self.project_id,
'host': CONF.host,
'label': 'foo'})
fixed = db.fixed_ip_create(self.context, {'allocated': True,
'instance_uuid': instance.uuid, 'address': '10.1.1.1',
'network_id': network['id']})
db.floating_ip_create(self.context, {
'address': '10.10.10.10', 'instance_uuid': instance.uuid,
'fixed_ip_id': fixed['id'],
'project_id': self.project_id})
self.network.deallocate_for_instance(self.context, instance=instance)
def test_deallocation_duplicate_floating_ip(self):
self.stubs.Set(self.network, '_teardown_network_on_host',
lambda *args, **kwargs: None)
instance = objects.Instance(context=self.context)
instance.project_id = self.project_id
instance.create()
network = db.network_create_safe(self.context.elevated(), {
'project_id': self.project_id,
'host': CONF.host,
'label': 'foo'})
fixed = db.fixed_ip_create(self.context, {'allocated': True,
'instance_uuid': instance.uuid, 'address': '10.1.1.1',
'network_id': network['id']})
db.floating_ip_create(self.context, {
'address': '10.10.10.10',
'deleted': True})
db.floating_ip_create(self.context, {
'address': '10.10.10.10', 'instance_uuid': instance.uuid,
'fixed_ip_id': fixed['id'],
'project_id': self.project_id})
self.network.deallocate_for_instance(self.context, instance=instance)
@mock.patch('nova.db.fixed_ip_get')
@mock.patch('nova.db.floating_ip_get_by_address')
@mock.patch('nova.db.floating_ip_update')
def test_migrate_instance_start(self, floating_update, floating_get,
fixed_get):
called = {'count': 0}
def fake_floating_ip_get_by_address(context, address):
return dict(test_floating_ip.fake_floating_ip,
address=address,
fixed_ip_id=0)
def fake_is_stale_floating_ip_address(context, floating_ip):
return str(floating_ip.address) == '172.24.4.23'
floating_get.side_effect = fake_floating_ip_get_by_address
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
instance_uuid='fake_uuid',
address='10.0.0.2',
network=test_network.fake_network)
floating_update.return_value = fake_floating_ip_get_by_address(
None, '1.2.3.4')
def fake_remove_floating_ip(floating_addr, fixed_addr, interface,
network):
called['count'] += 1
def fake_clean_conntrack(fixed_ip):
if not str(fixed_ip) == "10.0.0.2":
raise exception.FixedIpInvalid(address=fixed_ip)
self.stubs.Set(self.network, '_is_stale_floating_ip_address',
fake_is_stale_floating_ip_address)
self.stubs.Set(self.network.l3driver, 'remove_floating_ip',
fake_remove_floating_ip)
self.stubs.Set(self.network.driver, 'clean_conntrack',
fake_clean_conntrack)
self.mox.ReplayAll()
addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
self.network.migrate_instance_start(self.context,
instance_uuid=FAKEUUID,
floating_addresses=addresses,
rxtx_factor=3,
project_id=self.project_id,
source='fake_source',
dest='fake_dest')
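        # Three addresses were passed in and one was flagged stale (and
        # skipped), so the l3 driver's remove hook fired exactly twice.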
self.assertEqual(called['count'], 2)
@mock.patch('nova.db.fixed_ip_get')
@mock.patch('nova.db.floating_ip_update')
def test_migrate_instance_finish(self, floating_update, fixed_get):
called = {'count': 0}
def fake_floating_ip_get_by_address(context, address):
return dict(test_floating_ip.fake_floating_ip,
address=address,
fixed_ip_id=0)
def fake_is_stale_floating_ip_address(context, floating_ip):
return str(floating_ip.address) == '172.24.4.23'
fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
instance_uuid='fake_uuid',
address='10.0.0.2',
network=test_network.fake_network)
floating_update.return_value = fake_floating_ip_get_by_address(
None, '1.2.3.4')
def fake_add_floating_ip(floating_addr, fixed_addr, interface,
network):
called['count'] += 1
self.stubs.Set(self.network.db, 'floating_ip_get_by_address',
fake_floating_ip_get_by_address)
self.stubs.Set(self.network, '_is_stale_floating_ip_address',
fake_is_stale_floating_ip_address)
self.stubs.Set(self.network.l3driver, 'add_floating_ip',
fake_add_floating_ip)
self.mox.ReplayAll()
addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
self.network.migrate_instance_finish(self.context,
instance_uuid=FAKEUUID,
floating_addresses=addresses,
host='fake_dest',
rxtx_factor=3,
project_id=self.project_id,
source='fake_source')
self.assertEqual(called['count'], 2)
def test_floating_dns_create_conflict(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.assertRaises(exception.FloatingIpDNSExists,
self.network.add_dns_entry, self.context,
address1, name1, "A", zone)
def test_floating_create_and_get(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertFalse(entries)
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.network.add_dns_entry(self.context, address1, name2, "A", zone)
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertEqual(len(entries), 2)
self.assertEqual(entries[0], name1)
self.assertEqual(entries[1], name2)
entries = self.network.get_dns_entries_by_name(self.context,
name1, zone)
self.assertEqual(len(entries), 1)
self.assertEqual(entries[0], address1)
def test_floating_dns_delete(self):
zone = "example.org"
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
self.network.add_dns_entry(self.context, address1, name1, "A", zone)
self.network.add_dns_entry(self.context, address1, name2, "A", zone)
self.network.delete_dns_entry(self.context, name1, zone)
entries = self.network.get_dns_entries_by_address(self.context,
address1, zone)
self.assertEqual(len(entries), 1)
self.assertEqual(entries[0], name2)
self.assertRaises(exception.NotFound,
self.network.delete_dns_entry, self.context,
name1, zone)
def test_floating_dns_domains_public(self):
zone1 = "testzone"
domain1 = "example.org"
domain2 = "example.com"
address1 = '10.10.10.10'
entryname = 'testentry'
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.AdminRequired,
self.network.create_public_dns_domain, self.context,
domain1, zone1)
self.network.create_public_dns_domain(context_admin, domain1,
'testproject')
self.network.create_public_dns_domain(context_admin, domain2,
'fakeproject')
domains = self.network.get_dns_domains(self.context)
self.assertEqual(len(domains), 2)
self.assertEqual(domains[0]['domain'], domain1)
self.assertEqual(domains[1]['domain'], domain2)
self.assertEqual(domains[0]['project'], 'testproject')
self.assertEqual(domains[1]['project'], 'fakeproject')
self.network.add_dns_entry(self.context, address1, entryname,
'A', domain1)
entries = self.network.get_dns_entries_by_name(self.context,
entryname, domain1)
self.assertEqual(len(entries), 1)
self.assertEqual(entries[0], address1)
self.assertRaises(exception.AdminRequired,
self.network.delete_dns_domain, self.context,
domain1)
self.network.delete_dns_domain(context_admin, domain1)
self.network.delete_dns_domain(context_admin, domain2)
# Verify that deleting the domain deleted the associated entry
entries = self.network.get_dns_entries_by_name(self.context,
entryname, domain1)
self.assertFalse(entries)
def test_delete_all_by_ip(self):
domain1 = "example.org"
domain2 = "example.com"
address = "10.10.10.10"
name1 = "foo"
name2 = "bar"
def fake_domains(context):
return [{'domain': 'example.org', 'scope': 'public'},
{'domain': 'example.com', 'scope': 'public'},
{'domain': 'test.example.org', 'scope': 'public'}]
self.stubs.Set(self.network, 'get_dns_domains', fake_domains)
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.network.create_public_dns_domain(context_admin, domain1,
'testproject')
self.network.create_public_dns_domain(context_admin, domain2,
'fakeproject')
domains = self.network.get_dns_domains(self.context)
for domain in domains:
self.network.add_dns_entry(self.context, address,
name1, "A", domain['domain'])
self.network.add_dns_entry(self.context, address,
name2, "A", domain['domain'])
entries = self.network.get_dns_entries_by_address(self.context,
address,
domain['domain'])
self.assertEqual(len(entries), 2)
self.network._delete_all_entries_for_ip(self.context, address)
for domain in domains:
entries = self.network.get_dns_entries_by_address(self.context,
address,
domain['domain'])
self.assertFalse(entries)
self.network.delete_dns_domain(context_admin, domain1)
self.network.delete_dns_domain(context_admin, domain2)
def test_mac_conflicts(self):
# Make sure MAC collisions are retried.
self.flags(create_unique_mac_address_attempts=3)
ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
macs = ['bb:bb:bb:bb:bb:bb', 'aa:aa:aa:aa:aa:aa']
# Create a VIF with aa:aa:aa:aa:aa:aa
crash_test_dummy_vif = {
'address': macs[1],
'instance_uuid': 'fake_uuid',
'network_id': 123,
'uuid': 'fake_uuid',
}
self.network.db.virtual_interface_create(ctxt, crash_test_dummy_vif)
# Hand out a collision first, then a legit MAC
def fake_gen_mac():
return macs.pop()
self.stubs.Set(utils, 'generate_mac_address', fake_gen_mac)
# SQLite doesn't seem to honor the uniqueness constraint on the
# address column, so fake the collision-avoidance here
def fake_vif_save(vif):
if vif.address == crash_test_dummy_vif['address']:
raise db_exc.DBError("If you're smart, you'll retry!")
# NOTE(russellb) The VirtualInterface object requires an ID to be
# set, and we expect it to get set automatically when we do the
# save.
vif.id = 1
self.stubs.Set(models.VirtualInterface, 'save', fake_vif_save)
# Attempt to add another and make sure that both MACs are consumed
# by the retry loop
self.network._add_virtual_interface(ctxt, 'fake_uuid', 123)
self.assertEqual(macs, [])
def test_deallocate_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
exception.FloatingIpNotFoundForAddress(address='fake'))
self.mox.ReplayAll()
self.assertRaises(messaging.ExpectedException,
self.network.deallocate_floating_ip,
self.context, '1.2.3.4')
def test_associate_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
exception.FloatingIpNotFoundForAddress(address='fake'))
self.mox.ReplayAll()
self.assertRaises(messaging.ExpectedException,
self.network.associate_floating_ip,
self.context, '1.2.3.4', '10.0.0.1')
def test_disassociate_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
exception.FloatingIpNotFoundForAddress(address='fake'))
self.mox.ReplayAll()
self.assertRaises(messaging.ExpectedException,
self.network.disassociate_floating_ip,
self.context, '1.2.3.4')
def test_get_floating_ip_client_exceptions(self):
# Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get')
self.network.db.floating_ip_get(self.context, 'fake-id').AndRaise(
exception.FloatingIpNotFound(id='fake'))
self.mox.ReplayAll()
self.assertRaises(messaging.ExpectedException,
self.network.get_floating_ip,
self.context, 'fake-id')
def _test_associate_floating_ip_failure(self, stdout, expected_exception):
def _fake_catchall(*args, **kwargs):
return dict(test_fixed_ip.fake_fixed_ip,
network=test_network.fake_network)
def _fake_add_floating_ip(*args, **kwargs):
raise processutils.ProcessExecutionError(stdout)
self.stubs.Set(self.network.db, 'floating_ip_fixed_ip_associate',
_fake_catchall)
self.stubs.Set(self.network.db, 'floating_ip_disassociate',
_fake_catchall)
self.stubs.Set(self.network.l3driver, 'add_floating_ip',
_fake_add_floating_ip)
self.assertRaises(expected_exception,
self.network._associate_floating_ip, self.context,
'1.2.3.4', '1.2.3.5', '', '')
def test_associate_floating_ip_failure(self):
self._test_associate_floating_ip_failure(None,
processutils.ProcessExecutionError)
def test_associate_floating_ip_failure_interface_not_found(self):
self._test_associate_floating_ip_failure('Cannot find device',
exception.NoFloatingIpInterface)
@mock.patch('nova.objects.FloatingIP.get_by_address')
def test_get_floating_ip_by_address(self, mock_get):
mock_get.return_value = mock.sentinel.floating
self.assertEqual(mock.sentinel.floating,
self.network.get_floating_ip_by_address(
self.context,
mock.sentinel.address))
mock_get.assert_called_once_with(self.context, mock.sentinel.address)
@mock.patch('nova.objects.FloatingIPList.get_by_project')
def test_get_floating_ips_by_project(self, mock_get):
mock_get.return_value = mock.sentinel.floatings
self.assertEqual(mock.sentinel.floatings,
self.network.get_floating_ips_by_project(
self.context))
mock_get.assert_called_once_with(self.context, self.context.project_id)
@mock.patch('nova.objects.FloatingIPList.get_by_fixed_address')
def test_get_floating_ips_by_fixed_address(self, mock_get):
mock_get.return_value = [objects.FloatingIP(address='1.2.3.4'),
objects.FloatingIP(address='5.6.7.8')]
self.assertEqual(['1.2.3.4', '5.6.7.8'],
self.network.get_floating_ips_by_fixed_address(
self.context, mock.sentinel.address))
mock_get.assert_called_once_with(self.context, mock.sentinel.address)
@mock.patch('nova.db.floating_ip_get_pools')
def test_floating_ip_pool_exists(self, floating_ip_get_pools):
floating_ip_get_pools.return_value = [{'name': 'public'}]
self.assertTrue(self.network._floating_ip_pool_exists(self.context,
'public'))
@mock.patch('nova.db.floating_ip_get_pools')
def test_floating_ip_pool_does_not_exist(self, floating_ip_get_pools):
floating_ip_get_pools.return_value = []
self.assertFalse(self.network._floating_ip_pool_exists(self.context,
'public'))
class InstanceDNSTestCase(test.TestCase):
"""Tests nova.network.manager instance DNS."""
def setUp(self):
super(InstanceDNSTestCase, self).setUp()
self.tempdir = self.useFixture(fixtures.TempDir()).path
self.flags(log_dir=self.tempdir)
self.flags(use_local=True, group='conductor')
self.network = TestFloatingIPManager()
self.network.db = db
self.project_id = 'testproject'
self.context = context.RequestContext('testuser', self.project_id,
is_admin=False)
def test_dns_domains_private(self):
zone1 = 'testzone'
domain1 = 'example.org'
context_admin = context.RequestContext('testuser', 'testproject',
is_admin=True)
self.assertRaises(exception.AdminRequired,
self.network.create_private_dns_domain, self.context,
domain1, zone1)
self.network.create_private_dns_domain(context_admin, domain1, zone1)
domains = self.network.get_dns_domains(self.context)
self.assertEqual(len(domains), 1)
self.assertEqual(domains[0]['domain'], domain1)
self.assertEqual(domains[0]['availability_zone'], zone1)
self.assertRaises(exception.AdminRequired,
self.network.delete_dns_domain, self.context,
domain1)
self.network.delete_dns_domain(context_admin, domain1)
domain1 = "example.org"
domain2 = "example.com"
class LdapDNSTestCase(test.NoDBTestCase):
"""Tests nova.network.ldapdns.LdapDNS."""
def setUp(self):
super(LdapDNSTestCase, self).setUp()
self.useFixture(fixtures.MonkeyPatch(
'nova.network.ldapdns.ldap',
fake_ldap))
dns_class = 'nova.network.ldapdns.LdapDNS'
self.driver = importutils.import_object(dns_class)
attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain',
'domain', 'dcobject', 'top'],
'associateddomain': ['root'],
'dc': ['root']}
self.driver.lobj.add_s("ou=hosts,dc=example,dc=org", attrs.items())
self.driver.create_domain(domain1)
self.driver.create_domain(domain2)
def tearDown(self):
self.driver.delete_domain(domain1)
self.driver.delete_domain(domain2)
super(LdapDNSTestCase, self).tearDown()
def test_ldap_dns_domains(self):
domains = self.driver.get_domains()
self.assertEqual(len(domains), 2)
self.assertIn(domain1, domains)
self.assertIn(domain2, domains)
def test_ldap_dns_create_conflict(self):
address1 = "10.10.10.11"
name1 = "foo"
self.driver.create_entry(name1, address1, "A", domain1)
self.assertRaises(exception.FloatingIpDNSExists,
self.driver.create_entry,
name1, address1, "A", domain1)
def test_ldap_dns_create_and_get(self):
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
entries = self.driver.get_entries_by_address(address1, domain1)
self.assertFalse(entries)
self.driver.create_entry(name1, address1, "A", domain1)
self.driver.create_entry(name2, address1, "A", domain1)
entries = self.driver.get_entries_by_address(address1, domain1)
self.assertEqual(len(entries), 2)
self.assertEqual(entries[0], name1)
self.assertEqual(entries[1], name2)
entries = self.driver.get_entries_by_name(name1, domain1)
self.assertEqual(len(entries), 1)
self.assertEqual(entries[0], address1)
def test_ldap_dns_delete(self):
address1 = "10.10.10.11"
name1 = "foo"
name2 = "bar"
self.driver.create_entry(name1, address1, "A", domain1)
self.driver.create_entry(name2, address1, "A", domain1)
entries = self.driver.get_entries_by_address(address1, domain1)
self.assertEqual(len(entries), 2)
self.driver.delete_entry(name1, domain1)
entries = self.driver.get_entries_by_address(address1, domain1)
LOG.debug("entries: %s" % entries)
self.assertEqual(len(entries), 1)
self.assertEqual(entries[0], name2)
self.assertRaises(exception.NotFound,
self.driver.delete_entry,
name1, domain1)
|
akash1808/nova
|
nova/tests/unit/network/test_manager.py
|
Python
|
apache-2.0
| 160,748
|
[
"FEFF"
] |
dba8716a433bc6d1a7fc4f3309493d34f3956cacad00641278c3d5592a63593b
|
from matplotlib import pyplot
from math import cos, sin, atan
class Neuron():
def __init__(self, x, y):
self.x = x
self.y = y
def draw(self, neuron_radius):
circle = pyplot.Circle((self.x, self.y), radius=neuron_radius, fill=False)
pyplot.gca().add_patch(circle)
class Layer():
def __init__(self, network, number_of_neurons, number_of_neurons_in_widest_layer):
self.vertical_distance_between_layers = 6
self.horizontal_distance_between_neurons = 2
self.neuron_radius = 0.5
self.number_of_neurons_in_widest_layer = number_of_neurons_in_widest_layer
self.previous_layer = self.__get_previous_layer(network)
self.y = self.__calculate_layer_y_position()
        self.neurons = self.__initialise_neurons(number_of_neurons)
    def __initialise_neurons(self, number_of_neurons):
neurons = []
x = self.__calculate_left_margin_so_layer_is_centered(number_of_neurons)
for iteration in xrange(number_of_neurons):
neuron = Neuron(x, self.y)
neurons.append(neuron)
x += self.horizontal_distance_between_neurons
return neurons
def __calculate_left_margin_so_layer_is_centered(self, number_of_neurons):
return self.horizontal_distance_between_neurons * (self.number_of_neurons_in_widest_layer - number_of_neurons) / 2
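    # Worked example (editor's note, not in the original): with 8 neurons in
    # the widest layer and 2 in this one, the left margin is
    # 2 * (8 - 2) / 2 = 6, which centers the smaller layer horizontally.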
def __calculate_layer_y_position(self):
if self.previous_layer:
return self.previous_layer.y + self.vertical_distance_between_layers
else:
return 0
def __get_previous_layer(self, network):
if len(network.layers) > 0:
return network.layers[-1]
else:
return None
def __line_between_two_neurons(self, neuron1, neuron2):
angle = atan((neuron2.x - neuron1.x) / float(neuron2.y - neuron1.y))
x_adjustment = self.neuron_radius * sin(angle)
y_adjustment = self.neuron_radius * cos(angle)
line = pyplot.Line2D((neuron1.x - x_adjustment, neuron2.x + x_adjustment), (neuron1.y - y_adjustment, neuron2.y + y_adjustment))
pyplot.gca().add_line(line)
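    # Editor's note: the sin/cos adjustments above trim the connecting line by
    # one neuron radius at each end, so lines start and stop at the circle
    # boundaries instead of at the neuron centers.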
def draw(self, layerType=0):
for neuron in self.neurons:
neuron.draw( self.neuron_radius )
if self.previous_layer:
for previous_layer_neuron in self.previous_layer.neurons:
self.__line_between_two_neurons(neuron, previous_layer_neuron)
# write Text
x_text = self.number_of_neurons_in_widest_layer * self.horizontal_distance_between_neurons
if layerType == 0:
pyplot.text(x_text, self.y, 'Input Layer', fontsize = 12)
elif layerType == -1:
pyplot.text(x_text, self.y, 'Output Layer', fontsize = 12)
else:
pyplot.text(x_text, self.y, 'Hidden Layer '+str(layerType), fontsize = 12)
class NeuralNetwork():
def __init__(self, number_of_neurons_in_widest_layer):
self.number_of_neurons_in_widest_layer = number_of_neurons_in_widest_layer
self.layers = []
self.layertype = 0
def add_layer(self, number_of_neurons ):
layer = Layer(self, number_of_neurons, self.number_of_neurons_in_widest_layer)
self.layers.append(layer)
def draw(self):
pyplot.figure()
for i in range( len(self.layers) ):
layer = self.layers[i]
if i == len(self.layers)-1:
i = -1
layer.draw( i )
pyplot.axis('scaled')
pyplot.axis('off')
pyplot.title( 'Neural Network architecture', fontsize=15 )
pyplot.show()
class DrawNN():
def __init__( self, neural_network ):
self.neural_network = neural_network
def draw( self ):
widest_layer = max( self.neural_network )
network = NeuralNetwork( widest_layer )
for l in self.neural_network:
network.add_layer(l)
network.draw()
network = DrawNN( [2,8,8,1] )
network.draw()
|
dewtx29/python_ann
|
python/pydev_ann/LearnNN/learnNN6.py
|
Python
|
gpl-3.0
| 3,999
|
[
"NEURON"
] |
8fca13405026b6d402f9c819fd91d85a04a7086d441ddc1018da938df7971e06
|
tutorial_tests = """
Let's try a simple generator:
>>> def f():
... yield 1
... yield 2
>>> for i in f():
... print i
1
2
>>> g = f()
>>> g.next()
1
>>> g.next()
2
"Falling off the end" stops the generator:
>>> g.next()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
StopIteration
"return" also stops the generator:
>>> def f():
... yield 1
... return
... yield 2 # never reached
...
>>> g = f()
>>> g.next()
1
>>> g.next()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 3, in f
StopIteration
>>> g.next() # once stopped, can't be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
"raise StopIteration" stops the generator too:
>>> def f():
... yield 1
... raise StopIteration
... yield 2 # never reached
...
>>> g = f()
>>> g.next()
1
>>> g.next()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
>>> g.next()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
However, they are not exactly equivalent:
>>> def g1():
... try:
... return
... except:
... yield 1
...
>>> list(g1())
[]
>>> def g2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print list(g2())
[42]
This may be surprising at first:
>>> def g3():
... try:
... return
... finally:
... yield 1
...
>>> list(g3())
[1]
Let's create an alternate range() function implemented as a generator:
>>> def yrange(n):
... for i in range(n):
... yield i
...
>>> list(yrange(5))
[0, 1, 2, 3, 4]
Generators always return to the most recent caller:
>>> def creator():
... r = yrange(5)
... print "creator", r.next()
... return r
...
>>> def caller():
... r = creator()
... for i in r:
... print "caller", i
...
>>> caller()
creator 0
caller 1
caller 2
caller 3
caller 4
Generators can call other generators:
>>> def zrange(n):
... for i in yrange(n):
... yield i
...
>>> list(zrange(5))
[0, 1, 2, 3, 4]
"""
# The examples from PEP 255.
pep_tests = """
Specification: Yield
Restriction: A generator cannot be resumed while it is actively
running:
>>> def g():
... i = me.next()
... yield i
>>> me = g()
>>> me.next()
Traceback (most recent call last):
...
File "<string>", line 2, in g
ValueError: generator already executing
Specification: Return
Note that return isn't always equivalent to raising StopIteration: the
difference lies in how enclosing try/except constructs are treated.
For example,
>>> def f1():
... try:
... return
... except:
... yield 1
>>> print list(f1())
[]
because, as in any function, return simply exits, but
>>> def f2():
... try:
... raise StopIteration
... except:
... yield 42
>>> print list(f2())
[42]
because StopIteration is captured by a bare "except", as is any
exception.
Specification: Generators and Exception Propagation
>>> def f():
... return 1//0
>>> def g():
... yield f() # the zero division exception propagates
... yield 42 # and we'll never get here
>>> k = g()
>>> k.next()
Traceback (most recent call last):
File "<stdin>", line 1, in ?
File "<stdin>", line 2, in g
File "<stdin>", line 2, in f
ZeroDivisionError: integer division or modulo by zero
>>> k.next() # and the generator cannot be resumed
Traceback (most recent call last):
File "<stdin>", line 1, in ?
StopIteration
>>>
Specification: Try/Except/Finally
>>> def f():
... try:
... yield 1
... try:
... yield 2
... 1//0
... yield 3 # never get here
... except ZeroDivisionError:
... yield 4
... yield 5
... raise
... except:
... yield 6
... yield 7 # the "raise" above stops this
... except:
... yield 8
... yield 9
... try:
... x = 12
... finally:
... yield 10
... yield 11
>>> print list(f())
[1, 2, 4, 5, 8, 9, 10, 11]
>>>
Guido's binary tree example.
>>> # A binary tree class.
>>> class Tree:
...
... def __init__(self, label, left=None, right=None):
... self.label = label
... self.left = left
... self.right = right
...
... def __repr__(self, level=0, indent=" "):
... s = level*indent + repr(self.label)
... if self.left:
... s = s + "\\n" + self.left.__repr__(level+1, indent)
... if self.right:
... s = s + "\\n" + self.right.__repr__(level+1, indent)
... return s
...
... def __iter__(self):
... return inorder(self)
>>> # Create a Tree from a list.
>>> def tree(list):
... n = len(list)
... if n == 0:
... return []
... i = n // 2
... return Tree(list[i], tree(list[:i]), tree(list[i+1:]))
>>> # Show it off: create a tree.
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
>>> # A recursive generator that generates Tree labels in in-order.
>>> def inorder(t):
... if t:
... for x in inorder(t.left):
... yield x
... yield t.label
... for x in inorder(t.right):
... yield x
>>> # Show it off: create a tree.
>>> t = tree("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
>>> # Print the nodes of the tree in in-order.
>>> for x in t:
... print x,
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
>>> # A non-recursive generator.
>>> def inorder(node):
... stack = []
... while node:
... while node.left:
... stack.append(node)
... node = node.left
... yield node.label
... while not node.right:
... try:
... node = stack.pop()
... except IndexError:
... return
... yield node.label
... node = node.right
>>> # Exercise the non-recursive generator.
>>> for x in t:
... print x,
A B C D E F G H I J K L M N O P Q R S T U V W X Y Z
"""
# Examples from Iterator-List and Python-Dev and c.l.py.
email_tests = """
The difference between yielding None and returning it.
>>> def g():
... for i in range(3):
... yield None
... yield None
... return
>>> list(g())
[None, None, None, None]
Ensure that explicitly raising StopIteration acts like any other exception
in try/except, not like a return.
>>> def g():
... yield 1
... try:
... raise StopIteration
... except:
... yield 2
... yield 3
>>> list(g())
[1, 2, 3]
Next one was posted to c.l.py.
>>> def gcomb(x, k):
... "Generate all combinations of k elements from list x."
...
... if k > len(x):
... return
... if k == 0:
... yield []
... else:
... first, rest = x[0], x[1:]
... # A combination does or doesn't contain first.
... # If it does, the remainder is a k-1 comb of rest.
... for c in gcomb(rest, k-1):
... c.insert(0, first)
... yield c
... # If it doesn't contain first, it's a k comb of rest.
... for c in gcomb(rest, k):
... yield c
>>> seq = range(1, 5)
>>> for k in range(len(seq) + 2):
... print "%d-combs of %s:" % (k, seq)
... for c in gcomb(seq, k):
... print " ", c
0-combs of [1, 2, 3, 4]:
[]
1-combs of [1, 2, 3, 4]:
[1]
[2]
[3]
[4]
2-combs of [1, 2, 3, 4]:
[1, 2]
[1, 3]
[1, 4]
[2, 3]
[2, 4]
[3, 4]
3-combs of [1, 2, 3, 4]:
[1, 2, 3]
[1, 2, 4]
[1, 3, 4]
[2, 3, 4]
4-combs of [1, 2, 3, 4]:
[1, 2, 3, 4]
5-combs of [1, 2, 3, 4]:
From the Iterators list, about the types of these things.
>>> def g():
... yield 1
...
>>> type(g)
<type 'function'>
>>> i = g()
>>> type(i)
<type 'generator'>
>>> [s for s in dir(i) if not s.startswith('_')]
['close', 'gi_code', 'gi_frame', 'gi_running', 'next', 'send', 'throw']
>>> print i.next.__doc__
x.next() -> the next value, or raise StopIteration
>>> iter(i) is i
True
>>> import types
>>> isinstance(i, types.GeneratorType)
True
And more, added later.
>>> i.gi_running
0
>>> type(i.gi_frame)
<type 'frame'>
>>> i.gi_running = 42
Traceback (most recent call last):
...
TypeError: readonly attribute
>>> def g():
... yield me.gi_running
>>> me = g()
>>> me.gi_running
0
>>> me.next()
1
>>> me.gi_running
0
A clever union-find implementation from c.l.py, due to David Eppstein.
Sent: Friday, June 29, 2001 12:16 PM
To: python-list@python.org
Subject: Re: PEP 255: Simple Generators
>>> class disjointSet:
... def __init__(self, name):
... self.name = name
... self.parent = None
... self.generator = self.generate()
...
... def generate(self):
... while not self.parent:
... yield self
... for x in self.parent.generator:
... yield x
...
... def find(self):
... return self.generator.next()
...
... def union(self, parent):
... if self.parent:
... raise ValueError("Sorry, I'm not a root!")
... self.parent = parent
...
... def __str__(self):
... return self.name
>>> names = "ABCDEFGHIJKLM"
>>> sets = [disjointSet(name) for name in names]
>>> roots = sets[:]
>>> import random
>>> gen = random.WichmannHill(42)
>>> while 1:
... for s in sets:
... print "%s->%s" % (s, s.find()),
... print
... if len(roots) > 1:
... s1 = gen.choice(roots)
... roots.remove(s1)
... s2 = gen.choice(roots)
... s1.union(s2)
... print "merged", s1, "into", s2
... else:
... break
A->A B->B C->C D->D E->E F->F G->G H->H I->I J->J K->K L->L M->M
merged D into G
A->A B->B C->C D->G E->E F->F G->G H->H I->I J->J K->K L->L M->M
merged C into F
A->A B->B C->F D->G E->E F->F G->G H->H I->I J->J K->K L->L M->M
merged L into A
A->A B->B C->F D->G E->E F->F G->G H->H I->I J->J K->K L->A M->M
merged H into E
A->A B->B C->F D->G E->E F->F G->G H->E I->I J->J K->K L->A M->M
merged B into E
A->A B->E C->F D->G E->E F->F G->G H->E I->I J->J K->K L->A M->M
merged J into G
A->A B->E C->F D->G E->E F->F G->G H->E I->I J->G K->K L->A M->M
merged E into G
A->A B->G C->F D->G E->G F->F G->G H->G I->I J->G K->K L->A M->M
merged M into G
A->A B->G C->F D->G E->G F->F G->G H->G I->I J->G K->K L->A M->G
merged I into K
A->A B->G C->F D->G E->G F->F G->G H->G I->K J->G K->K L->A M->G
merged K into A
A->A B->G C->F D->G E->G F->F G->G H->G I->A J->G K->A L->A M->G
merged F into A
A->A B->G C->A D->G E->G F->A G->G H->G I->A J->G K->A L->A M->G
merged A into G
A->G B->G C->G D->G E->G F->G G->G H->G I->G J->G K->G L->G M->G
"""
# Emacs turd '
# Fun tests (for sufficiently warped notions of "fun").
fun_tests = """
Build up to a recursive Sieve of Eratosthenes generator.
>>> def firstn(g, n):
... return [g.next() for i in range(n)]
>>> def intsfrom(i):
... while 1:
... yield i
... i += 1
>>> firstn(intsfrom(5), 7)
[5, 6, 7, 8, 9, 10, 11]
>>> def exclude_multiples(n, ints):
... for i in ints:
... if i % n:
... yield i
>>> firstn(exclude_multiples(3, intsfrom(1)), 6)
[1, 2, 4, 5, 7, 8]
>>> def sieve(ints):
... prime = ints.next()
... yield prime
... not_divisible_by_prime = exclude_multiples(prime, ints)
... for p in sieve(not_divisible_by_prime):
... yield p
>>> primes = sieve(intsfrom(2))
>>> firstn(primes, 20)
[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71]
Another famous problem: generate all integers of the form
2**i * 3**j * 5**k
in increasing order, where i,j,k >= 0. Trickier than it may look at first!
Try writing it without generators, and correctly, and without generating
3 internal results for each result output.
>>> def times(n, g):
... for i in g:
... yield n * i
>>> firstn(times(10, intsfrom(1)), 10)
[10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
>>> def merge(g, h):
... ng = g.next()
... nh = h.next()
... while 1:
... if ng < nh:
... yield ng
... ng = g.next()
... elif ng > nh:
... yield nh
... nh = h.next()
... else:
... yield ng
... ng = g.next()
... nh = h.next()
The following works, but is doing a whale of a lot of redundant work --
it's not clear how to get the internal uses of m235 to share a single
generator. Note that me_times2 (etc) each need to see every element in the
result sequence. So this is an example where lazy lists are more natural
(you can look at the head of a lazy list any number of times).
>>> def m235():
... yield 1
... me_times2 = times(2, m235())
... me_times3 = times(3, m235())
... me_times5 = times(5, m235())
... for i in merge(merge(me_times2,
... me_times3),
... me_times5):
... yield i
Don't print "too many" of these -- the implementation above is extremely
inefficient: each call of m235() leads to 3 recursive calls, and in
turn each of those 3 more, and so on, and so on, until we've descended
enough levels to satisfy the print stmts. Very odd: when I printed 5
lines of results below, this managed to screw up Win98's malloc in "the
usual" way, i.e. the heap grew over 4Mb so Win98 started fragmenting
address space, and it *looked* like a very slow leak.
>>> result = m235()
>>> for i in range(3):
... print firstn(result, 15)
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
Heh. Here's one way to get a shared list, complete with an excruciating
namespace renaming trick. The *pretty* part is that the times() and merge()
functions can be reused as-is, because they only assume their stream
arguments are iterable -- a LazyList is the same as a generator to times().
>>> class LazyList:
... def __init__(self, g):
... self.sofar = []
... self.fetch = g.next
...
... def __getitem__(self, i):
... sofar, fetch = self.sofar, self.fetch
... while i >= len(sofar):
... sofar.append(fetch())
... return sofar[i]
>>> def m235():
... yield 1
... # Gack: m235 below actually refers to a LazyList.
... me_times2 = times(2, m235)
... me_times3 = times(3, m235)
... me_times5 = times(5, m235)
... for i in merge(merge(me_times2,
... me_times3),
... me_times5):
... yield i
Print as many of these as you like -- *this* implementation is memory-
efficient.
>>> m235 = LazyList(m235())
>>> for i in range(5):
... print [m235[j] for j in range(15*i, 15*(i+1))]
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
Ye olde Fibonacci generator, LazyList style.
>>> def fibgen(a, b):
...
... def sum(g, h):
... while 1:
... yield g.next() + h.next()
...
... def tail(g):
... g.next() # throw first away
... for x in g:
... yield x
...
... yield a
... yield b
... for s in sum(iter(fib),
... tail(iter(fib))):
... yield s
>>> fib = LazyList(fibgen(1, 2))
>>> firstn(iter(fib), 17)
[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
Running after your tail with itertools.tee (new in version 2.4)
The algorithms "m235" (Hamming) and Fibonacci presented above are both
examples of a whole family of FP (functional programming) algorithms
where a function produces and returns a list while the production algorithm
suppose the list as already produced by recursively calling itself.
For these algorithms to work, they must:
- produce at least a first element without presupposing the existence of
the rest of the list
- produce their elements in a lazy manner
To work efficiently, the beginning of the list must not be recomputed over
and over again. This is ensured in most FP languages as a built-in feature.
In python, we have to explicitly maintain a list of already computed results
and abandon genuine recursivity.
This is what had been attempted above with the LazyList class. One problem
with that class is that it keeps a list of all of the generated results and
therefore continually grows. This partially defeats the goal of the generator
concept, viz. produce the results only as needed instead of producing them
all and thereby wasting memory.
Thanks to itertools.tee, it is now clear "how to get the internal uses of
m235 to share a single generator".
>>> from itertools import tee
>>> def m235():
... def _m235():
... yield 1
... for n in merge(times(2, m2),
... merge(times(3, m3),
... times(5, m5))):
... yield n
... m1 = _m235()
... m2, m3, m5, mRes = tee(m1, 4)
... return mRes
>>> it = m235()
>>> for i in range(5):
... print firstn(it, 15)
[1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24]
[25, 27, 30, 32, 36, 40, 45, 48, 50, 54, 60, 64, 72, 75, 80]
[81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, 162, 180, 192]
[200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, 384]
[400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675]
The "tee" function does just what we want. It internally keeps a generated
result for as long as it has not been "consumed" from all of the duplicated
iterators, whereupon it is deleted. You can therefore print the hamming
sequence during hours without increasing memory usage, or very little.
The beauty of it is that recursive running-after-their-tail FP algorithms
are quite straightforwardly expressed with this Python idiom.
Ye olde Fibonacci generator, tee style.
>>> def fib():
...
... def _isum(g, h):
... while 1:
... yield g.next() + h.next()
...
... def _fib():
... yield 1
... yield 2
... fibTail.next() # throw first away
... for res in _isum(fibHead, fibTail):
... yield res
...
... realfib = _fib()
... fibHead, fibTail, fibRes = tee(realfib, 3)
... return fibRes
>>> firstn(fib(), 17)
[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597, 2584]
"""
# syntax_tests mostly provokes SyntaxErrors. Also fiddling with #if 0
# hackery.
syntax_tests = """
>>> def f(): #doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE
... return 22
... yield 1
Traceback (most recent call last):
..
SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.syntax[0]>, line 3)
>>> def f(): #doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE
... yield 1
... return 22
Traceback (most recent call last):
..
SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.syntax[1]>, line 3)
"return None" is not the same as "return" in a generator:
>>> def f(): #doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE
... yield 1
... return None
Traceback (most recent call last):
..
SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.syntax[2]>, line 3)
These are fine:
>>> def f():
... yield 1
... return
>>> def f():
... try:
... yield 1
... finally:
... pass
>>> def f():
... try:
... try:
... 1//0
... except ZeroDivisionError:
... yield 666
... except:
... pass
... finally:
... pass
>>> def f():
... try:
... try:
... yield 12
... 1//0
... except ZeroDivisionError:
... yield 666
... except:
... try:
... x = 12
... finally:
... yield 12
... except:
... return
>>> list(f())
[12, 666]
>>> def f():
... yield
>>> type(f())
<type 'generator'>
>>> def f():
... if 0:
... yield
>>> type(f())
<type 'generator'>
>>> def f():
... if 0:
... yield 1
>>> type(f())
<type 'generator'>
>>> def f():
... if "":
... yield None
>>> type(f())
<type 'generator'>
>>> def f():
... return
... try:
... if x==4:
... pass
... elif 0:
... try:
... 1//0
... except SyntaxError:
... pass
... else:
... if 0:
... while 12:
... x += 1
... yield 2 # don't blink
... f(a, b, c, d, e)
... else:
... pass
... except:
... x = 1
... return
>>> type(f())
<type 'generator'>
>>> def f():
... if 0:
... def g():
... yield 1
...
>>> type(f())
<type 'NoneType'>
>>> def f():
... if 0:
... class C:
... def __init__(self):
... yield 1
... def f(self):
... yield 2
>>> type(f())
<type 'NoneType'>
>>> def f():
... if 0:
... return
... if 0:
... yield 2
>>> type(f())
<type 'generator'>
>>> def f(): #doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE
... if 0:
... lambda x: x # shouldn't trigger here
... return # or here
... def f(i):
... return 2*i # or here
... if 0:
... return 3 # but *this* sucks (line 8)
... if 0:
... yield 2 # because it's a generator (line 10)
Traceback (most recent call last):
SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.syntax[24]>, line 10)
This one caused a crash (see SF bug 567538):
>>> def f():
... for i in range(3):
... try:
... continue
... finally:
... yield i
...
>>> g = f()
>>> print g.next()
0
>>> print g.next()
1
>>> print g.next()
2
>>> print g.next()
Traceback (most recent call last):
StopIteration
"""
# conjoin is a simple backtracking generator, named in honor of Icon's
# "conjunction" control structure. Pass a list of no-argument functions
# that return iterable objects. Easiest to explain by example: assume the
# function list [x, y, z] is passed. Then conjoin acts like:
#
# def g():
# values = [None] * 3
# for values[0] in x():
# for values[1] in y():
# for values[2] in z():
# yield values
#
# So some 3-lists of values *may* be generated, each time we successfully
# get into the innermost loop. If an iterator fails (is exhausted) before
# then, it "backtracks" to get the next value from the nearest enclosing
# iterator (the one "to the left"), and starts all over again at the next
# slot (pumps a fresh iterator). Of course this is most useful when the
# iterators have side-effects, so that which values *can* be generated at
# each slot depend on the values iterated at previous slots.
def conjoin(gs):
values = [None] * len(gs)
def gen(i, values=values):
if i >= len(gs):
yield values
else:
for values[i] in gs[i]():
for x in gen(i+1):
yield x
for x in gen(0):
yield x
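# A minimal illustration (editor's sketch; the real demos live in
# conjoin_tests below): conjoining three two-valued iterables enumerates the
# full cross-product, e.g.
#
#   for values in conjoin([lambda: iter((0, 1))] * 3):
#       print values    # [0, 0, 0], [0, 0, 1], ..., [1, 1, 1]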
# That works fine, but recursing a level and checking i against len(gs) for
# each item produced is inefficient. By doing manual loop unrolling across
# generator boundaries, it's possible to eliminate most of that overhead.
# This isn't worth the bother *in general* for generators, but conjoin() is
# a core building block for some CPU-intensive generator applications.
def conjoin(gs):
n = len(gs)
values = [None] * n
# Do one loop nest at time recursively, until the # of loop nests
# remaining is divisible by 3.
def gen(i, values=values):
if i >= n:
yield values
elif (n-i) % 3:
ip1 = i+1
for values[i] in gs[i]():
for x in gen(ip1):
yield x
else:
for x in _gen3(i):
yield x
# Do three loop nests at a time, recursing only if at least three more
# remain. Don't call directly: this is an internal optimization for
# gen's use.
def _gen3(i, values=values):
assert i < n and (n-i) % 3 == 0
ip1, ip2, ip3 = i+1, i+2, i+3
g, g1, g2 = gs[i : ip3]
if ip3 >= n:
# These are the last three, so we can yield values directly.
for values[i] in g():
for values[ip1] in g1():
for values[ip2] in g2():
yield values
else:
# At least 6 loop nests remain; peel off 3 and recurse for the
# rest.
for values[i] in g():
for values[ip1] in g1():
for values[ip2] in g2():
for x in _gen3(ip3):
yield x
for x in gen(0):
yield x
# And one more approach: For backtracking apps like the Knight's Tour
# solver below, the number of backtracking levels can be enormous (one
# level per square, for the Knight's Tour, so that e.g. a 100x100 board
# needs 10,000 levels). In such cases Python is likely to run out of
# stack space due to recursion. So here's a recursion-free version of
# conjoin too.
# NOTE WELL: This allows large problems to be solved with only trivial
# demands on stack space. Without explicitly resumable generators, this is
# much harder to achieve. OTOH, this is much slower (up to a factor of 2)
# than the fancy unrolled recursive conjoin.
def flat_conjoin(gs): # rename to conjoin to run tests with this instead
n = len(gs)
values = [None] * n
iters = [None] * n
_StopIteration = StopIteration # make local because caught a *lot*
i = 0
while 1:
# Descend.
try:
while i < n:
it = iters[i] = gs[i]().next
values[i] = it()
i += 1
except _StopIteration:
pass
else:
assert i == n
yield values
# Backtrack until an older iterator can be resumed.
i -= 1
while i >= 0:
try:
values[i] = iters[i]()
# Success! Start fresh at next level.
i += 1
break
except _StopIteration:
# Continue backtracking.
i -= 1
else:
assert i < 0
break
# A conjoin-based N-Queens solver.
class Queens:
def __init__(self, n):
self.n = n
rangen = range(n)
# Assign a unique int to each column and diagonal.
# columns: n of those, range(n).
# NW-SE diagonals: 2n-1 of these, i-j unique and invariant along
# each, smallest i-j is 0-(n-1) = 1-n, so add n-1 to shift to 0-
# based.
# NE-SW diagonals: 2n-1 of these, i+j unique and invariant along
# each, smallest i+j is 0, largest is 2n-2.
# For each square, compute a bit vector of the columns and
# diagonals it covers, and for each row compute a function that
        # generates the possibilities for the columns in that row.
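        # Worked example (editor's note, assuming n == 4): square (i, j) =
        # (1, 2) sets column bit 2, NW-SE bit 4 + (1-2) + 3 = 6, and NE-SW
        # bit 4 + 7 + (1+2) = 14, so its rowuses entry is
        # (1 << 2) | (1 << 6) | (1 << 14).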
self.rowgenerators = []
for i in rangen:
rowuses = [(1L << j) | # column ordinal
(1L << (n + i-j + n-1)) | # NW-SE ordinal
(1L << (n + 2*n-1 + i+j)) # NE-SW ordinal
for j in rangen]
def rowgen(rowuses=rowuses):
for j in rangen:
uses = rowuses[j]
if uses & self.used == 0:
self.used |= uses
yield j
self.used &= ~uses
self.rowgenerators.append(rowgen)
# Generate solutions.
def solve(self):
self.used = 0
for row2col in conjoin(self.rowgenerators):
yield row2col
def printsolution(self, row2col):
n = self.n
assert n == len(row2col)
sep = "+" + "-+" * n
print sep
for i in range(n):
squares = [" " for j in range(n)]
squares[row2col[i]] = "Q"
print "|" + "|".join(squares) + "|"
print sep
# A conjoin-based Knight's Tour solver. This is pretty sophisticated
# (e.g., when used with flat_conjoin above, and passing hard=1 to the
# constructor, a 200x200 Knight's Tour was found quickly -- note that we're
# creating 10s of thousands of generators then!), and is lengthy.
class Knights:
def __init__(self, m, n, hard=0):
self.m, self.n = m, n
# solve() will set up succs[i] to be a list of square #i's
# successors.
succs = self.succs = []
# Remove i0 from each of its successor's successor lists, i.e.
# successors can't go back to i0 again. Return 0 if we can
# detect this makes a solution impossible, else return 1.
def remove_from_successors(i0, len=len):
# If we remove all exits from a free square, we're dead:
# even if we move to it next, we can't leave it again.
# If we create a square with one exit, we must visit it next;
# else somebody else will have to visit it, and since there's
# only one adjacent, there won't be a way to leave it again.
            # Finally, if we create more than one free square with a
# single exit, we can only move to one of them next, leaving
# the other one a dead end.
ne0 = ne1 = 0
for i in succs[i0]:
s = succs[i]
s.remove(i0)
e = len(s)
if e == 0:
ne0 += 1
elif e == 1:
ne1 += 1
return ne0 == 0 and ne1 < 2
# Put i0 back in each of its successor's successor lists.
def add_to_successors(i0):
for i in succs[i0]:
succs[i].append(i0)
# Generate the first move.
def first():
if m < 1 or n < 1:
return
# Since we're looking for a cycle, it doesn't matter where we
# start. Starting in a corner makes the 2nd move easy.
corner = self.coords2index(0, 0)
remove_from_successors(corner)
self.lastij = corner
yield corner
add_to_successors(corner)
# Generate the second moves.
def second():
corner = self.coords2index(0, 0)
assert self.lastij == corner # i.e., we started in the corner
if m < 3 or n < 3:
return
assert len(succs[corner]) == 2
assert self.coords2index(1, 2) in succs[corner]
assert self.coords2index(2, 1) in succs[corner]
# Only two choices. Whichever we pick, the other must be the
# square picked on move m*n, as it's the only way to get back
# to (0, 0). Save its index in self.final so that moves before
# the last know it must be kept free.
for i, j in (1, 2), (2, 1):
this = self.coords2index(i, j)
final = self.coords2index(3-i, 3-j)
self.final = final
remove_from_successors(this)
succs[final].append(corner)
self.lastij = this
yield this
succs[final].remove(corner)
add_to_successors(this)
# Generate moves 3 thru m*n-1.
def advance(len=len):
# If some successor has only one exit, must take it.
# Else favor successors with fewer exits.
candidates = []
for i in succs[self.lastij]:
e = len(succs[i])
assert e > 0, "else remove_from_successors() pruning flawed"
if e == 1:
candidates = [(e, i)]
break
candidates.append((e, i))
else:
candidates.sort()
for e, i in candidates:
if i != self.final:
if remove_from_successors(i):
self.lastij = i
yield i
add_to_successors(i)
# Generate moves 3 thru m*n-1. Alternative version using a
# stronger (but more expensive) heuristic to order successors.
# Since the # of backtracking levels is m*n, a poor move early on
# can take eons to undo. Smallest square board for which this
# matters a lot is 52x52.
def advance_hard(vmid=(m-1)/2.0, hmid=(n-1)/2.0, len=len):
# If some successor has only one exit, must take it.
# Else favor successors with fewer exits.
# Break ties via max distance from board centerpoint (favor
# corners and edges whenever possible).
candidates = []
for i in succs[self.lastij]:
e = len(succs[i])
assert e > 0, "else remove_from_successors() pruning flawed"
if e == 1:
candidates = [(e, 0, i)]
break
i1, j1 = self.index2coords(i)
d = (i1 - vmid)**2 + (j1 - hmid)**2
candidates.append((e, -d, i))
else:
candidates.sort()
for e, d, i in candidates:
if i != self.final:
if remove_from_successors(i):
self.lastij = i
yield i
add_to_successors(i)
# Generate the last move.
def last():
assert self.final in succs[self.lastij]
yield self.final
if m*n < 4:
self.squaregenerators = [first]
else:
self.squaregenerators = [first, second] + \
[hard and advance_hard or advance] * (m*n - 3) + \
[last]
def coords2index(self, i, j):
assert 0 <= i < self.m
assert 0 <= j < self.n
return i * self.n + j
def index2coords(self, index):
assert 0 <= index < self.m * self.n
return divmod(index, self.n)
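    # Editor's note: on a 3x4 board, coords2index(1, 2) == 1*4 + 2 == 6 and
    # index2coords(6) == divmod(6, 4) == (1, 2), so the two are inverses.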
def _init_board(self):
succs = self.succs
del succs[:]
m, n = self.m, self.n
c2i = self.coords2index
offsets = [( 1, 2), ( 2, 1), ( 2, -1), ( 1, -2),
(-1, -2), (-2, -1), (-2, 1), (-1, 2)]
rangen = range(n)
for i in range(m):
for j in rangen:
s = [c2i(i+io, j+jo) for io, jo in offsets
if 0 <= i+io < m and
0 <= j+jo < n]
succs.append(s)
# Generate solutions.
def solve(self):
self._init_board()
for x in conjoin(self.squaregenerators):
yield x
def printsolution(self, x):
m, n = self.m, self.n
assert len(x) == m*n
w = len(str(m*n))
format = "%" + str(w) + "d"
squares = [[None] * n for i in range(m)]
k = 1
for i in x:
i1, j1 = self.index2coords(i)
squares[i1][j1] = format % k
k += 1
sep = "+" + ("-" * w + "+") * n
print sep
for i in range(m):
row = squares[i]
print "|" + "|".join(row) + "|"
print sep
conjoin_tests = """
Generate the 3-bit binary numbers in order. This illustrates dumbest-
possible use of conjoin, just to generate the full cross-product.
>>> for c in conjoin([lambda: iter((0, 1))] * 3):
... print c
[0, 0, 0]
[0, 0, 1]
[0, 1, 0]
[0, 1, 1]
[1, 0, 0]
[1, 0, 1]
[1, 1, 0]
[1, 1, 1]
For efficiency in typical backtracking apps, conjoin() yields the same list
object each time. So if you want to save away a full account of its
generated sequence, you need to copy its results.
>>> def gencopy(iterator):
... for x in iterator:
... yield x[:]
>>> for n in range(10):
... all = list(gencopy(conjoin([lambda: iter((0, 1))] * n)))
... print n, len(all), all[0] == [0] * n, all[-1] == [1] * n
0 1 True True
1 2 True True
2 4 True True
3 8 True True
4 16 True True
5 32 True True
6 64 True True
7 128 True True
8 256 True True
9 512 True True
And run an 8-queens solver.
>>> q = Queens(8)
>>> LIMIT = 2
>>> count = 0
>>> for row2col in q.solve():
... count += 1
... if count <= LIMIT:
... print "Solution", count
... q.printsolution(row2col)
Solution 1
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
Solution 2
+-+-+-+-+-+-+-+-+
|Q| | | | | | | |
+-+-+-+-+-+-+-+-+
| | | | | |Q| | |
+-+-+-+-+-+-+-+-+
| | | | | | | |Q|
+-+-+-+-+-+-+-+-+
| | |Q| | | | | |
+-+-+-+-+-+-+-+-+
| | | | | | |Q| |
+-+-+-+-+-+-+-+-+
| | | |Q| | | | |
+-+-+-+-+-+-+-+-+
| |Q| | | | | | |
+-+-+-+-+-+-+-+-+
| | | | |Q| | | |
+-+-+-+-+-+-+-+-+
>>> print count, "solutions in all."
92 solutions in all.
And run a Knight's Tour on a 10x10 board. Note that there are about
20,000 solutions even on a 6x6 board, so don't dare run this to exhaustion.
>>> k = Knights(10, 10)
>>> LIMIT = 2
>>> count = 0
>>> for x in k.solve():
... count += 1
... if count <= LIMIT:
... print "Solution", count
... k.printsolution(x)
... else:
... break
Solution 1
+---+---+---+---+---+---+---+---+---+---+
| 1| 58| 27| 34| 3| 40| 29| 10| 5| 8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35| 2| 57| 28| 33| 4| 7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32| 9| 6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 91| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 88| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 92| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 89| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
Solution 2
+---+---+---+---+---+---+---+---+---+---+
| 1| 58| 27| 34| 3| 40| 29| 10| 5| 8|
+---+---+---+---+---+---+---+---+---+---+
| 26| 35| 2| 57| 28| 33| 4| 7| 30| 11|
+---+---+---+---+---+---+---+---+---+---+
| 59|100| 73| 36| 41| 56| 39| 32| 9| 6|
+---+---+---+---+---+---+---+---+---+---+
| 74| 25| 60| 55| 72| 37| 42| 49| 12| 31|
+---+---+---+---+---+---+---+---+---+---+
| 61| 86| 99| 76| 63| 52| 47| 38| 43| 50|
+---+---+---+---+---+---+---+---+---+---+
| 24| 75| 62| 85| 54| 71| 64| 51| 48| 13|
+---+---+---+---+---+---+---+---+---+---+
| 87| 98| 89| 80| 77| 84| 53| 46| 65| 44|
+---+---+---+---+---+---+---+---+---+---+
| 90| 23| 92| 95| 70| 79| 68| 83| 14| 17|
+---+---+---+---+---+---+---+---+---+---+
| 97| 88| 21| 78| 81| 94| 19| 16| 45| 66|
+---+---+---+---+---+---+---+---+---+---+
| 22| 91| 96| 93| 20| 69| 82| 67| 18| 15|
+---+---+---+---+---+---+---+---+---+---+
"""
weakref_tests = """\
Generators are weakly referencable:
>>> import weakref
>>> def gen():
... yield 'foo!'
...
>>> wr = weakref.ref(gen)
>>> wr() is gen
True
>>> p = weakref.proxy(gen)
Generator-iterators are weakly referencable as well:
>>> gi = gen()
>>> wr = weakref.ref(gi)
>>> wr() is gi
True
>>> p = weakref.proxy(gi)
>>> list(p)
['foo!']
"""
coroutine_tests = """\
Sending a value into a started generator:
>>> def f():
... print (yield 1)
... yield 2
>>> g = f()
>>> g.next()
1
>>> g.send(42)
42
2
Sending a value into a new generator produces a TypeError:
>>> f().send("foo")
Traceback (most recent call last):
...
TypeError: can't send non-None value to a just-started generator
Yield by itself yields None:
>>> def f(): yield
>>> list(f())
[None]
An obscene abuse of a yield expression within a generator expression:
>>> list((yield 21) for i in range(4))
[21, None, 21, None, 21, None, 21, None]
And a more sane, but still weird usage:
>>> def f(): list(i for i in [(yield 26)])
>>> type(f())
<type 'generator'>
A yield expression with augmented assignment.
>>> def coroutine(seq):
... count = 0
... while count < 200:
... count += yield
... seq.append(count)
>>> seq = []
>>> c = coroutine(seq)
>>> c.next()
>>> print seq
[]
>>> c.send(10)
>>> print seq
[10]
>>> c.send(10)
>>> print seq
[10, 20]
>>> c.send(10)
>>> print seq
[10, 20, 30]
Check some syntax errors for yield expressions:
>>> f=lambda: (yield 1),(yield 2) #doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
SyntaxError: 'yield' outside function (<doctest test.test_generators.__test__.coroutine[21]>, line 1)
>>> def f(): return lambda x=(yield): 1 #doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
SyntaxError: 'return' with argument inside generator (<doctest test.test_generators.__test__.coroutine[22]>, line 1)
>>> def f(): x = yield = y #doctest: +IGNORE_EXCEPTION_DETAIL, +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
SyntaxError: assignment to yield expression not possible (<doctest test.test_generators.__test__.coroutine[23]>, line 1)
>>> def f(): (yield bar) = y #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
SyntaxError: can't assign to yield expression (<doctest test.test_generators.__test__.coroutine[24]>, line 1)
>>> def f(): (yield bar) += y #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
SyntaxError: augmented assignment to yield expression not possible (<doctest test.test_generators.__test__.coroutine[25]>, line 1)
Now check some throw() conditions:
>>> def f():
... while True:
... try:
... print (yield)
... except ValueError,v:
... print "caught ValueError (%s)" % (v),
>>> import sys
>>> g = f()
>>> g.next()
>>> g.throw(ValueError) # type only
caught ValueError ()
>>> g.throw(ValueError("xyz")) # value only
caught ValueError (xyz)
>>> g.throw(ValueError, ValueError(1)) # value+matching type
caught ValueError (1)
>>> g.throw(ValueError, TypeError(1)) # mismatched type, rewrapped
caught ValueError (1)
>>> g.throw(ValueError, ValueError(1), None) # explicit None traceback
caught ValueError (1)
>>> g.throw(ValueError(1), "foo") # bad args
Traceback (most recent call last):
...
TypeError: instance exception may not have a separate value
>>> g.throw(ValueError, "foo", 23) # bad args #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError: throw() third argument must be a traceback object
>>> def throw(g,exc):
... try:
... raise exc
... except:
... g.throw(*sys.exc_info())
>>> throw(g,ValueError) # do it with traceback included
caught ValueError ()
>>> g.send(1)
1
>>> throw(g,TypeError) # terminate the generator
Traceback (most recent call last):
...
TypeError
>>> print g.gi_frame
None
>>> g.send(2)
Traceback (most recent call last):
...
StopIteration
>>> g.throw(ValueError,6) # throw on closed generator
Traceback (most recent call last):
...
ValueError: 6
>>> f().throw(ValueError,7) # throw on just-opened generator
Traceback (most recent call last):
...
ValueError: 7
Now let's try closing a generator:
>>> def f():
... try: yield
... except GeneratorExit:
... print "exiting"
>>> g = f()
>>> g.next()
>>> g.close()
exiting
>>> g.close() # should be no-op now
>>> f().close() # close on just-opened generator should be fine
>>> def f(): yield # an even simpler generator
>>> f().close() # close before opening
>>> g = f()
>>> g.next()
>>> g.close() # close normally
And finalization. But we have to force the timing of GC here, since we are running on Jython:
>>> def f():
... try: yield
... finally:
... print "exiting"
>>> g = f()
>>> g.next()
>>> del g; extra_collect()
exiting
Now let's try some ill-behaved generators:
>>> def f():
... try: yield
... except GeneratorExit:
... yield "foo!"
>>> g = f()
>>> g.next()
>>> g.close()
Traceback (most recent call last):
...
RuntimeError: generator ignored GeneratorExit
>>> g.close()
Our ill-behaved code should be invoked during GC:
>>> import sys, StringIO
>>> old, sys.stderr = sys.stderr, StringIO.StringIO()
>>> g = f()
>>> g.next()
>>> del g; extra_collect()
>>> sys.stderr.getvalue().startswith(
... "Exception RuntimeError"
... )
True
>>> sys.stderr = old
And errors thrown during closing should propagate:
>>> def f():
... try: yield
... except GeneratorExit:
... raise TypeError("fie!")
>>> g = f()
>>> g.next()
>>> g.close()
Traceback (most recent call last):
...
TypeError: fie!
Ensure that various yield expression constructs make their
enclosing function a generator:
>>> def f(): x += yield
>>> type(f())
<type 'generator'>
>>> def f(): x = yield
>>> type(f())
<type 'generator'>
>>> def f(): lambda x=(yield): 1
>>> type(f())
<type 'generator'>
>>> def f(): x=(i for i in (yield) if (yield))
>>> type(f())
<type 'generator'>
>>> def f(d): d[(yield "a")] = d[(yield "b")] = 27
>>> data = [1,2]
>>> g = f(data)
>>> type(g)
<type 'generator'>
>>> g.send(None)
'a'
>>> data
[1, 2]
>>> g.send(0)
'b'
>>> data
[27, 2]
>>> try: g.send(1)
... except StopIteration: pass
>>> data
[27, 27]
"""
refleaks_tests = """
Prior to adding cycle-GC support to itertools.tee, this code would leak
references. We add it to the standard suite so the routine refleak-tests
would trigger if it starts being uncleanable again.
>>> import itertools
>>> def leak():
... class gen:
... def __iter__(self):
... return self
... def next(self):
... return self.item
... g = gen()
... head, tail = itertools.tee(g)
... g.item = head
... return head
>>> it = leak()
Make sure to also test the involvement of the tee-internal teedataobject,
which stores returned items.
>>> item = it.next()
This test leaked at one point due to generator finalization/destruction.
It was copied from Lib/test/leakers/test_generator_cycle.py before the file
was removed.
>>> def leak():
... def gen():
... while True:
... yield g
... g = gen()
>>> leak()
These refleak tests should perhaps be in a testfile of their own,
test_generators just happened to be the test that drew these out.
"""
__test__ = {"tut": tutorial_tests,
"pep": pep_tests,
"email": email_tests,
"fun": fun_tests,
"syntax": syntax_tests,
"conjoin": conjoin_tests,
"weakref": weakref_tests,
"coroutine": coroutine_tests,
"refleaks": refleaks_tests,
}
# Magic test name that regrtest.py invokes *after* importing this module.
# This worms around a bootstrap problem.
# Note that doctest and regrtest both look in sys.argv for a "-v" argument,
# so this works as expected in both ways of running regrtest.
def test_main(verbose=None):
from test import test_support, test_generators
test_support.run_doctest(test_generators, verbose)
def extra_collect():
import gc
from time import sleep
gc.collect(); sleep(1); gc.collect(); sleep(0.1); gc.collect()
# This part isn't needed for regrtest, but for running the test directly.
if __name__ == "__main__":
test_main(1)
|
adaussy/eclipse-monkey-revival
|
plugins/python/org.eclipse.eclipsemonkey.lang.python/Lib/test/test_generators.py
|
Python
|
epl-1.0
| 49,410
|
[
"VisIt"
] |
8e827bf8c4ace6a9f0278b2540a0761be32314f9fa87b0fe145e26de9aac5439
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A powerful dynamic attention wrapper object."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import math
import numpy as np
from tensorflow.contrib.framework.python.framework import tensor_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base as layers_base
from tensorflow.python.layers import core as layers_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
__all__ = [
"AttentionMechanism",
"AttentionWrapper",
"AttentionWrapperState",
"LuongAttention",
"BahdanauAttention",
"hardmax",
"safe_cumprod",
"monotonic_attention",
"BahdanauMonotonicAttention",
"LuongMonotonicAttention",
]
_zero_state_tensors = rnn_cell_impl._zero_state_tensors # pylint: disable=protected-access
class AttentionMechanism(object):
@property
def alignments_size(self):
raise NotImplementedError
@property
def state_size(self):
raise NotImplementedError
def _prepare_memory(memory, memory_sequence_length, check_inner_dims_defined):
"""Convert to tensor and possibly mask `memory`.
Args:
memory: `Tensor`, shaped `[batch_size, max_time, ...]`.
memory_sequence_length: `int32` `Tensor`, shaped `[batch_size]`.
check_inner_dims_defined: Python boolean. If `True`, the `memory`
argument's shape is checked to ensure all but the two outermost
dimensions are fully defined.
Returns:
A (possibly masked), checked, new `memory`.
Raises:
ValueError: If `check_inner_dims_defined` is `True` and not
`memory.shape[2:].is_fully_defined()`.
"""
memory = nest.map_structure(
lambda m: ops.convert_to_tensor(m, name="memory"), memory)
if memory_sequence_length is not None:
memory_sequence_length = ops.convert_to_tensor(
memory_sequence_length, name="memory_sequence_length")
if check_inner_dims_defined:
def _check_dims(m):
if not m.get_shape()[2:].is_fully_defined():
raise ValueError("Expected memory %s to have fully defined inner dims, "
"but saw shape: %s" % (m.name, m.get_shape()))
nest.map_structure(_check_dims, memory)
if memory_sequence_length is None:
seq_len_mask = None
else:
seq_len_mask = array_ops.sequence_mask(
memory_sequence_length,
maxlen=array_ops.shape(nest.flatten(memory)[0])[1],
dtype=nest.flatten(memory)[0].dtype)
seq_len_batch_size = (
memory_sequence_length.shape[0].value
or array_ops.shape(memory_sequence_length)[0])
def _maybe_mask(m, seq_len_mask):
rank = m.get_shape().ndims
rank = rank if rank is not None else array_ops.rank(m)
extra_ones = array_ops.ones(rank - 2, dtype=dtypes.int32)
m_batch_size = m.shape[0].value or array_ops.shape(m)[0]
if memory_sequence_length is not None:
message = ("memory_sequence_length and memory tensor batch sizes do not "
"match.")
with ops.control_dependencies([
check_ops.assert_equal(
seq_len_batch_size, m_batch_size, message=message)]):
seq_len_mask = array_ops.reshape(
seq_len_mask,
array_ops.concat((array_ops.shape(seq_len_mask), extra_ones), 0))
return m * seq_len_mask
else:
return m
return nest.map_structure(lambda m: _maybe_mask(m, seq_len_mask), memory)
def _maybe_mask_score(score, memory_sequence_length, score_mask_value):
if memory_sequence_length is None:
return score
message = ("All values in memory_sequence_length must greater than zero.")
with ops.control_dependencies(
[check_ops.assert_positive(memory_sequence_length, message=message)]):
score_mask = array_ops.sequence_mask(
memory_sequence_length, maxlen=array_ops.shape(score)[1])
score_mask_values = score_mask_value * array_ops.ones_like(score)
return array_ops.where(score_mask, score, score_mask_values)
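# Illustrative sketch of why masked scores use -inf: after softmax, the
# padded positions receive exactly zero probability. Values and shapes below
# are arbitrary example inputs, not part of the library.
def _score_mask_demo():
  import numpy as np
  score = np.array([[1.0, 2.0, 3.0, 4.0]])
  mask = np.array([[True, True, False, False]])  # valid length 2 of max_time 4
  masked = np.where(mask, score, -np.inf)
  exp = np.exp(masked - masked.max(axis=1, keepdims=True))
  return exp / exp.sum(axis=1, keepdims=True)  # [[0.269, 0.731, 0.0, 0.0]]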
class _BaseAttentionMechanism(AttentionMechanism):
"""A base AttentionMechanism class providing common functionality.
Common functionality includes:
1. Storing the query and memory layers.
2. Preprocessing and storing the memory.
"""
def __init__(self,
query_layer,
memory,
probability_fn,
memory_sequence_length=None,
memory_layer=None,
check_inner_dims_defined=True,
score_mask_value=None,
name=None):
"""Construct base AttentionMechanism class.
Args:
query_layer: Callable. Instance of `tf.layers.Layer`. The layer's depth
must match the depth of `memory_layer`. If `query_layer` is not
provided, the shape of `query` must match that of `memory_layer`.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
probability_fn: A `callable`. Converts the score and previous alignments
to probabilities. Its signature should be:
`probabilities = probability_fn(score, state)`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
memory_layer: Instance of `tf.layers.Layer` (may be None). The layer's
depth must match the depth of `query_layer`.
If `memory_layer` is not provided, the shape of `memory` must match
that of `query_layer`.
check_inner_dims_defined: Python boolean. If `True`, the `memory`
argument's shape is checked to ensure all but the two outermost
dimensions are fully defined.
score_mask_value: (optional) The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
name: Name to use when creating ops.
"""
if (query_layer is not None
and not isinstance(query_layer, layers_base.Layer)):
raise TypeError(
"query_layer is not a Layer: %s" % type(query_layer).__name__)
if (memory_layer is not None
and not isinstance(memory_layer, layers_base.Layer)):
raise TypeError(
"memory_layer is not a Layer: %s" % type(memory_layer).__name__)
self._query_layer = query_layer
self._memory_layer = memory_layer
self.dtype = memory_layer.dtype
if not callable(probability_fn):
raise TypeError("probability_fn must be callable, saw type: %s" %
type(probability_fn).__name__)
if score_mask_value is None:
score_mask_value = dtypes.as_dtype(
self._memory_layer.dtype).as_numpy_dtype(-np.inf)
self._probability_fn = lambda score, prev: ( # pylint:disable=g-long-lambda
probability_fn(
_maybe_mask_score(score, memory_sequence_length, score_mask_value),
prev))
with ops.name_scope(
name, "BaseAttentionMechanismInit", nest.flatten(memory)):
self._values = _prepare_memory(
memory, memory_sequence_length,
check_inner_dims_defined=check_inner_dims_defined)
self._keys = (
self.memory_layer(self._values) if self.memory_layer # pylint: disable=not-callable
else self._values)
self._batch_size = (
self._keys.shape[0].value or array_ops.shape(self._keys)[0])
self._alignments_size = (self._keys.shape[1].value or
array_ops.shape(self._keys)[1])
@property
def memory_layer(self):
return self._memory_layer
@property
def query_layer(self):
return self._query_layer
@property
def values(self):
return self._values
@property
def keys(self):
return self._keys
@property
def batch_size(self):
return self._batch_size
@property
def alignments_size(self):
return self._alignments_size
@property
def state_size(self):
return self._alignments_size
def initial_alignments(self, batch_size, dtype):
"""Creates the initial alignment values for the `AttentionWrapper` class.
This is important for AttentionMechanisms that use the previous alignment
to calculate the alignment at the next time step (e.g. monotonic attention).
The default behavior is to return a tensor of all zeros.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A `dtype` tensor shaped `[batch_size, alignments_size]`
(`alignments_size` is the values' `max_time`).
"""
max_time = self._alignments_size
return _zero_state_tensors(max_time, batch_size, dtype)
def initial_state(self, batch_size, dtype):
"""Creates the initial state values for the `AttentionWrapper` class.
This is important for AttentionMechanisms that use the previous alignment
to calculate the alignment at the next time step (e.g. monotonic attention).
The default behavior is to return the same output as initial_alignments.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A structure of all-zero tensors with shapes as described by `state_size`.
"""
return self.initial_alignments(batch_size, dtype)
def _luong_score(query, keys, scale):
"""Implements Luong-style (multiplicative) scoring function.
This attention has two forms. The first is standard Luong attention,
as described in:
Minh-Thang Luong, Hieu Pham, Christopher D. Manning.
"Effective Approaches to Attention-based Neural Machine Translation."
EMNLP 2015. https://arxiv.org/abs/1508.04025
The second is the scaled form inspired partly by the normalized form of
Bahdanau attention.
To enable the second form, call this function with `scale=True`.
Args:
query: Tensor, shape `[batch_size, num_units]` to compare to keys.
keys: Processed memory, shape `[batch_size, max_time, num_units]`.
scale: Whether to apply a scale to the score function.
Returns:
A `[batch_size, max_time]` tensor of unnormalized score values.
Raises:
ValueError: If `key` and `query` depths do not match.
"""
depth = query.get_shape()[-1]
key_units = keys.get_shape()[-1]
if depth != key_units:
raise ValueError(
"Incompatible or unknown inner dimensions between query and keys. "
"Query (%s) has units: %s. Keys (%s) have units: %s. "
"Perhaps you need to set num_units to the keys' dimension (%s)?"
% (query, depth, keys, key_units, key_units))
dtype = query.dtype
# Reshape from [batch_size, depth] to [batch_size, 1, depth]
# for matmul.
query = array_ops.expand_dims(query, 1)
# Inner product along the query units dimension.
# matmul shapes: query is [batch_size, 1, depth] and
# keys is [batch_size, max_time, depth].
# the inner product is asked to **transpose keys' inner shape** to get a
# batched matmul on:
# [batch_size, 1, depth] . [batch_size, depth, max_time]
# resulting in an output shape of:
# [batch_size, 1, max_time].
# we then squeeze out the center singleton dimension.
score = math_ops.matmul(query, keys, transpose_b=True)
score = array_ops.squeeze(score, [1])
if scale:
# Scalar used in weight scaling
g = variable_scope.get_variable(
"attention_g", dtype=dtype,
initializer=init_ops.ones_initializer, shape=())
score = g * score
return score
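# Illustrative sketch of the shape algebra described in the comments above,
# replayed with NumPy. batch_size=2, max_time=5, depth=4 are arbitrary
# example sizes, not part of the library.
def _luong_score_shape_demo():
  import numpy as np
  query = np.random.rand(2, 4)                          # [batch_size, depth]
  keys = np.random.rand(2, 5, 4)                        # [batch_size, max_time, depth]
  expanded = query[:, np.newaxis, :]                    # [2, 1, 4]
  score = np.matmul(expanded, keys.transpose(0, 2, 1))  # [2, 1, 5]
  return np.squeeze(score, axis=1)                      # [2, 5]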
class LuongAttention(_BaseAttentionMechanism):
"""Implements Luong-style (multiplicative) attention scoring.
This attention has two forms. The first is standard Luong attention,
as described in:
Minh-Thang Luong, Hieu Pham, Christopher D. Manning.
"Effective Approaches to Attention-based Neural Machine Translation."
EMNLP 2015. https://arxiv.org/abs/1508.04025
The second is the scaled form inspired partly by the normalized form of
Bahdanau attention.
To enable the second form, construct the object with parameter
`scale=True`.
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
scale=False,
probability_fn=None,
score_mask_value=None,
dtype=None,
name="LuongAttention"):
"""Construct the AttentionMechanism mechanism.
Args:
num_units: The depth of the attention mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length: (optional) Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
scale: Python boolean. Whether to scale the energy term.
probability_fn: (optional) A `callable`. Converts the score to
probabilities. The default is @{tf.nn.softmax}. Other options include
@{tf.contrib.seq2seq.hardmax} and @{tf.contrib.sparsemax.sparsemax}.
Its signature should be: `probabilities = probability_fn(score)`.
score_mask_value: (optional) The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
dtype: The data type for the memory layer of the attention mechanism.
name: Name to use when creating ops.
"""
# For LuongAttention, we only transform the memory layer; thus
# num_units **must** match the expected query depth.
if probability_fn is None:
probability_fn = nn_ops.softmax
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = lambda score, _: probability_fn(score)
super(LuongAttention, self).__init__(
query_layer=None,
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name)
self._num_units = num_units
self._scale = scale
self._name = name
def __call__(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
state: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(None, "luong_attention", [query]):
score = _luong_score(query, self._keys, self._scale)
alignments = self._probability_fn(score, state)
next_state = alignments
return alignments, next_state
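# Illustrative usage sketch (graph construction only). The shapes and the
# use of `scale=True` are arbitrary example choices, not part of the module.
def _luong_attention_usage_demo():
  import tensorflow as tf
  encoder_outputs = tf.zeros([8, 20, 32])  # [batch_size, max_time, depth]
  lengths = tf.fill([8], 20)
  return tf.contrib.seq2seq.LuongAttention(
      num_units=32,                        # must match the query depth
      memory=encoder_outputs,
      memory_sequence_length=lengths,
      scale=True)                          # enables the scaled form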
def _bahdanau_score(processed_query, keys, normalize):
"""Implements Bahdanau-style (additive) scoring function.
This attention has two forms. The first is Bahdanau attention,
as described in:
Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio.
"Neural Machine Translation by Jointly Learning to Align and Translate."
ICLR 2015. https://arxiv.org/abs/1409.0473
The second is the normalized form. This form is inspired by the
weight normalization article:
Tim Salimans, Diederik P. Kingma.
"Weight Normalization: A Simple Reparameterization to Accelerate
Training of Deep Neural Networks."
https://arxiv.org/abs/1602.07868
To enable the second form, set `normalize=True`.
Args:
processed_query: Tensor, shape `[batch_size, num_units]` to compare to keys.
keys: Processed memory, shape `[batch_size, max_time, num_units]`.
normalize: Whether to normalize the score function.
Returns:
A `[batch_size, max_time]` tensor of unnormalized score values.
"""
dtype = processed_query.dtype
# Get the number of hidden units from the trailing dimension of keys
num_units = keys.shape[2].value or array_ops.shape(keys)[2]
# Reshape from [batch_size, ...] to [batch_size, 1, ...] for broadcasting.
processed_query = array_ops.expand_dims(processed_query, 1)
v = variable_scope.get_variable(
"attention_v", [num_units], dtype=dtype)
if normalize:
# Scalar used in weight normalization
g = variable_scope.get_variable(
"attention_g", dtype=dtype,
initializer=init_ops.constant_initializer(math.sqrt((1. / num_units))),
shape=())
# Bias added prior to the nonlinearity
b = variable_scope.get_variable(
"attention_b", [num_units], dtype=dtype,
initializer=init_ops.zeros_initializer())
# normed_v = g * v / ||v||
normed_v = g * v * math_ops.rsqrt(
math_ops.reduce_sum(math_ops.square(v)))
return math_ops.reduce_sum(
normed_v * math_ops.tanh(keys + processed_query + b), [2])
else:
return math_ops.reduce_sum(v * math_ops.tanh(keys + processed_query), [2])
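# Illustrative sketch of the weight normalization used above: v is
# re-expressed as g * v / ||v||, decoupling the direction of v from its
# magnitude g. The numbers are arbitrary example values.
def _weight_norm_demo():
  import numpy as np
  v = np.array([3.0, 4.0])
  g = 2.0
  normed_v = g * v / np.sqrt(np.square(v).sum())
  return normed_v  # [1.2, 1.6]; its norm equals g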
class BahdanauAttention(_BaseAttentionMechanism):
"""Implements Bahdanau-style (additive) attention.
This attention has two forms. The first is Bahdanau attention,
as described in:
Dzmitry Bahdanau, Kyunghyun Cho, Yoshua Bengio.
"Neural Machine Translation by Jointly Learning to Align and Translate."
ICLR 2015. https://arxiv.org/abs/1409.0473
The second is the normalized form. This form is inspired by the
weight normalization article:
Tim Salimans, Diederik P. Kingma.
"Weight Normalization: A Simple Reparameterization to Accelerate
Training of Deep Neural Networks."
https://arxiv.org/abs/1602.07868
To enable the second form, construct the object with parameter
`normalize=True`.
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
normalize=False,
probability_fn=None,
score_mask_value=None,
dtype=None,
name="BahdanauAttention"):
"""Construct the Attention mechanism.
Args:
num_units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
normalize: Python boolean. Whether to normalize the energy term.
probability_fn: (optional) A `callable`. Converts the score to
probabilities. The default is @{tf.nn.softmax}. Other options include
@{tf.contrib.seq2seq.hardmax} and @{tf.contrib.sparsemax.sparsemax}.
Its signature should be: `probabilities = probability_fn(score)`.
score_mask_value: (optional) The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
"""
if probability_fn is None:
probability_fn = nn_ops.softmax
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = lambda score, _: probability_fn(score)
super(BahdanauAttention, self).__init__(
query_layer=layers_core.Dense(
num_units, name="query_layer", use_bias=False, dtype=dtype),
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name)
self._num_units = num_units
self._normalize = normalize
self._name = name
def __call__(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
state: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(None, "bahdanau_attention", [query]):
processed_query = self.query_layer(query) if self.query_layer else query
score = _bahdanau_score(processed_query, self._keys, self._normalize)
alignments = self._probability_fn(score, state)
next_state = alignments
return alignments, next_state
def safe_cumprod(x, *args, **kwargs):
"""Computes cumprod of x in logspace using cumsum to avoid underflow.
The cumprod function and its gradient can result in numerical instabilities
when its argument has very small and/or zero values. As long as the argument
is all positive, we can instead compute the cumulative product as
exp(cumsum(log(x))). This function can be called identically to tf.cumprod.
Args:
x: Tensor to take the cumulative product of.
*args: Passed on to cumsum; these are identical to those in cumprod.
**kwargs: Passed on to cumsum; these are identical to those in cumprod.
Returns:
Cumulative product of x.
"""
with ops.name_scope(None, "SafeCumprod", [x]):
x = ops.convert_to_tensor(x, name="x")
tiny = np.finfo(x.dtype.as_numpy_dtype).tiny
return math_ops.exp(math_ops.cumsum(
math_ops.log(clip_ops.clip_by_value(x, tiny, 1)), *args, **kwargs))
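# Illustrative sketch: for strictly positive inputs, exp(cumsum(log(x)))
# reproduces cumprod(x) while working in logspace, where tiny entries are
# far better behaved. The values are arbitrary examples.
def _safe_cumprod_demo():
  import numpy as np
  x = np.array([0.5, 1e-20, 0.25])
  direct = np.cumprod(x)                  # [5.0e-01, 5.0e-21, 1.25e-21]
  via_log = np.exp(np.cumsum(np.log(x)))  # same values, computed in logspace
  return direct, via_log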
def monotonic_attention(p_choose_i, previous_attention, mode):
"""Compute monotonic attention distribution from choosing probabilities.
Monotonic attention implies that the input sequence is processed in an
explicitly left-to-right manner when generating the output sequence. In
addition, once an input sequence element is attended to at a given output
timestep, elements occurring before it cannot be attended to at subsequent
output timesteps. This function generates attention distributions according
to these assumptions. For more information, see `Online and Linear-Time
Attention by Enforcing Monotonic Alignments`.
Args:
p_choose_i: Probability of choosing input sequence/memory element i. Should
be of shape (batch_size, input_sequence_length), and should all be in the
range [0, 1].
previous_attention: The attention distribution from the previous output
timestep. Should be of shape (batch_size, input_sequence_length). For
the first output timestep, previous_attention[n] should be [1, 0, 0, ...,
0] for all n in [0, ... batch_size - 1].
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'.
* 'recursive' uses tf.scan to recursively compute the distribution.
This is slowest but is exact, general, and does not suffer from
numerical instabilities.
* 'parallel' uses parallelized cumulative-sum and cumulative-product
operations to compute a closed-form solution to the recurrence
relation defining the attention distribution. This makes it more
efficient than 'recursive', but it requires numerical checks which
make the distribution non-exact. This can be a problem in particular
when input_sequence_length is long and/or p_choose_i has entries very
close to 0 or 1.
* 'hard' requires that the probabilities in p_choose_i are all either 0
or 1, and subsequently uses a more efficient and exact solution.
Returns:
A tensor of shape (batch_size, input_sequence_length) representing the
attention distributions for each sequence in the batch.
Raises:
ValueError: mode is not one of 'recursive', 'parallel', 'hard'.
"""
# Force things to be tensors
p_choose_i = ops.convert_to_tensor(p_choose_i, name="p_choose_i")
previous_attention = ops.convert_to_tensor(
previous_attention, name="previous_attention")
if mode == "recursive":
# Use .shape[0].value when it's not None, or fall back on symbolic shape
batch_size = p_choose_i.shape[0].value or array_ops.shape(p_choose_i)[0]
# Compute [1, 1 - p_choose_i[0], 1 - p_choose_i[1], ..., 1 - p_choose_i[-2]]
shifted_1mp_choose_i = array_ops.concat(
[array_ops.ones((batch_size, 1)), 1 - p_choose_i[:, :-1]], 1)
# Compute attention distribution recursively as
# q[i] = (1 - p_choose_i[i - 1])*q[i - 1] + previous_attention[i]
# attention[i] = p_choose_i[i]*q[i]
attention = p_choose_i*array_ops.transpose(functional_ops.scan(
# Need to use reshape to remind TF of the shape between loop iterations
lambda x, yz: array_ops.reshape(yz[0]*x + yz[1], (batch_size,)),
# Loop variables yz[0] and yz[1]
[array_ops.transpose(shifted_1mp_choose_i),
array_ops.transpose(previous_attention)],
# Initial value of x is just zeros
array_ops.zeros((batch_size,))))
elif mode == "parallel":
# safe_cumprod computes cumprod in logspace with numeric checks
cumprod_1mp_choose_i = safe_cumprod(1 - p_choose_i, axis=1, exclusive=True)
# Compute recurrence relation solution
attention = p_choose_i*cumprod_1mp_choose_i*math_ops.cumsum(
previous_attention /
# Clip cumprod_1mp to avoid divide-by-zero
clip_ops.clip_by_value(cumprod_1mp_choose_i, 1e-10, 1.), axis=1)
elif mode == "hard":
# Remove any probabilities before the index chosen last time step
p_choose_i *= math_ops.cumsum(previous_attention, axis=1)
# Now, use exclusive cumprod to remove probabilities after the first
# chosen index, like so:
# p_choose_i = [0, 0, 0, 1, 1, 0, 1, 1]
# cumprod(1 - p_choose_i, exclusive=True) = [1, 1, 1, 1, 0, 0, 0, 0]
# Product of above: [0, 0, 0, 1, 0, 0, 0, 0]
attention = p_choose_i*math_ops.cumprod(
1 - p_choose_i, axis=1, exclusive=True)
else:
raise ValueError("mode must be 'recursive', 'parallel', or 'hard'.")
return attention
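# Illustrative sketch replaying the 'hard' mode arithmetic from the comments
# above on a single sequence with NumPy. The input is the example p_choose_i
# from those comments.
def _hard_monotonic_demo():
  import numpy as np
  p_choose_i = np.array([0., 0., 0., 1., 1., 0., 1., 1.])
  # Exclusive cumprod of (1 - p_choose_i): [1, 1, 1, 1, 0, 0, 0, 0]
  exclusive = np.cumprod(np.concatenate(([1.], 1. - p_choose_i[:-1])))
  attention = p_choose_i * exclusive
  return attention  # [0, 0, 0, 1, 0, 0, 0, 0]: only the first chosen index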
def _monotonic_probability_fn(score, previous_alignments, sigmoid_noise, mode,
seed=None):
"""Attention probability function for monotonic attention.
Takes in unnormalized attention scores, adds pre-sigmoid noise to encourage
the model to make discrete attention decisions, passes them through a sigmoid
to obtain "choosing" probabilities, and then calls monotonic_attention to
obtain the attention distribution. For more information, see
Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
ICML 2017. https://arxiv.org/abs/1704.00784
Args:
score: Unnormalized attention scores, shape `[batch_size, alignments_size]`
previous_alignments: Previous attention distribution, shape
`[batch_size, alignments_size]`
sigmoid_noise: Standard deviation of pre-sigmoid noise. Setting this larger
than 0 will encourage the model to produce large attention scores,
effectively making the choosing probabilities discrete and the resulting
attention distribution one-hot. It should be set to 0 at test-time, and
when hard attention is not desired.
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. See the docstring for
`tf.contrib.seq2seq.monotonic_attention` for more information.
seed: (optional) Random seed for pre-sigmoid noise.
Returns:
A `[batch_size, alignments_size]`-shape tensor corresponding to the
resulting attention distribution.
"""
# Optionally add pre-sigmoid noise to the scores
if sigmoid_noise > 0:
noise = random_ops.random_normal(array_ops.shape(score), dtype=score.dtype,
seed=seed)
score += sigmoid_noise*noise
# Compute "choosing" probabilities from the attention scores
if mode == "hard":
# When mode is hard, use a hard sigmoid
p_choose_i = math_ops.cast(score > 0, score.dtype)
else:
p_choose_i = math_ops.sigmoid(score)
# Convert from choosing probabilities to attention distribution
return monotonic_attention(p_choose_i, previous_alignments, mode)
class _BaseMonotonicAttentionMechanism(_BaseAttentionMechanism):
"""Base attention mechanism for monotonic attention.
Simply overrides the initial_alignments function to provide a dirac
distribution, which is needed in order for the monotonic attention
distributions to have the correct behavior.
"""
def initial_alignments(self, batch_size, dtype):
"""Creates the initial alignment values for the monotonic attentions.
Initializes to dirac distributions, i.e. [1, 0, 0, ...memory length..., 0]
for all entries in the batch.
Args:
batch_size: `int32` scalar, the batch_size.
dtype: The `dtype`.
Returns:
A `dtype` tensor shaped `[batch_size, alignments_size]`
(`alignments_size` is the values' `max_time`).
"""
max_time = self._alignments_size
return array_ops.one_hot(
array_ops.zeros((batch_size,), dtype=dtypes.int32), max_time,
dtype=dtype)
class BahdanauMonotonicAttention(_BaseMonotonicAttentionMechanism):
"""Monotonic attention mechanism with Bahadanau-style energy function.
This type of attention enforces a monotonic constraint on the attention
distributions; that is, once the model attends to a given point in the memory
it can't attend to any prior points at subsequent output timesteps. It
achieves this by using the _monotonic_probability_fn instead of softmax to
construct its attention distributions. Since the attention scores are passed
through a sigmoid, a learnable scalar bias parameter is applied after the
score function and before the sigmoid. Otherwise, it is equivalent to
BahdanauAttention. This approach is proposed in
Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
ICML 2017. https://arxiv.org/abs/1704.00784
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
normalize=False,
score_mask_value=None,
sigmoid_noise=0.,
sigmoid_noise_seed=None,
score_bias_init=0.,
mode="parallel",
dtype=None,
name="BahdanauMonotonicAttention"):
"""Construct the Attention mechanism.
Args:
num_units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
normalize: Python boolean. Whether to normalize the energy term.
score_mask_value: (optional) The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
sigmoid_noise: Standard deviation of pre-sigmoid noise. See the docstring
for `_monotonic_probability_fn` for more information.
sigmoid_noise_seed: (optional) Random seed for pre-sigmoid noise.
score_bias_init: Initial value for score bias scalar. It's recommended to
initialize this to a negative value when the length of the memory is
large.
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. See the docstring for
`tf.contrib.seq2seq.monotonic_attention` for more information.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
"""
# Set up the monotonic probability fn with supplied parameters
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = functools.partial(
_monotonic_probability_fn, sigmoid_noise=sigmoid_noise, mode=mode,
seed=sigmoid_noise_seed)
super(BahdanauMonotonicAttention, self).__init__(
query_layer=layers_core.Dense(
num_units, name="query_layer", use_bias=False, dtype=dtype),
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name)
self._num_units = num_units
self._normalize = normalize
self._name = name
self._score_bias_init = score_bias_init
def __call__(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
state: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(
None, "bahdanau_monotonic_attention", [query]):
processed_query = self.query_layer(query) if self.query_layer else query
score = _bahdanau_score(processed_query, self._keys, self._normalize)
score_bias = variable_scope.get_variable(
"attention_score_bias", dtype=processed_query.dtype,
initializer=self._score_bias_init)
score += score_bias
alignments = self._probability_fn(score, state)
next_state = alignments
return alignments, next_state
class LuongMonotonicAttention(_BaseMonotonicAttentionMechanism):
"""Monotonic attention mechanism with Luong-style energy function.
This type of attention enforces a monotonic constraint on the attention
distributions; that is, once the model attends to a given point in the memory
it can't attend to any prior points at subsequent output timesteps. It
achieves this by using the _monotonic_probability_fn instead of softmax to
construct its attention distributions. Otherwise, it is equivalent to
LuongAttention. This approach is proposed in
Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
"Online and Linear-Time Attention by Enforcing Monotonic Alignments."
ICML 2017. https://arxiv.org/abs/1704.00784
"""
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
scale=False,
score_mask_value=None,
sigmoid_noise=0.,
sigmoid_noise_seed=None,
score_bias_init=0.,
mode="parallel",
dtype=None,
name="LuongMonotonicAttention"):
"""Construct the Attention mechanism.
Args:
num_units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
scale: Python boolean. Whether to scale the energy term.
score_mask_value: (optional) The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
sigmoid_noise: Standard deviation of pre-sigmoid noise. See the docstring
for `_monotonic_probability_fn` for more information.
sigmoid_noise_seed: (optional) Random seed for pre-sigmoid noise.
score_bias_init: Initial value for score bias scalar. It's recommended to
initialize this to a negative value when the length of the memory is
large.
mode: How to compute the attention distribution. Must be one of
'recursive', 'parallel', or 'hard'. See the docstring for
`tf.contrib.seq2seq.monotonic_attention` for more information.
dtype: The data type for the query and memory layers of the attention
mechanism.
name: Name to use when creating ops.
"""
# Set up the monotonic probability fn with supplied parameters
if dtype is None:
dtype = dtypes.float32
wrapped_probability_fn = functools.partial(
_monotonic_probability_fn, sigmoid_noise=sigmoid_noise, mode=mode,
seed=sigmoid_noise_seed)
super(LuongMonotonicAttention, self).__init__(
query_layer=None,
memory_layer=layers_core.Dense(
num_units, name="memory_layer", use_bias=False, dtype=dtype),
memory=memory,
probability_fn=wrapped_probability_fn,
memory_sequence_length=memory_sequence_length,
score_mask_value=score_mask_value,
name=name)
self._num_units = num_units
self._scale = scale
self._score_bias_init = score_bias_init
self._name = name
def __call__(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
state: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`).
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with variable_scope.variable_scope(None, "luong_monotonic_attention",
[query]):
score = _luong_score(query, self._keys, self._scale)
score_bias = variable_scope.get_variable(
"attention_score_bias", dtype=query.dtype,
initializer=self._score_bias_init)
score += score_bias
alignments = self._probability_fn(score, state)
next_state = alignments
return alignments, next_state
class AttentionWrapperState(
collections.namedtuple("AttentionWrapperState",
("cell_state", "attention", "time", "alignments",
"alignment_history", "attention_state"))):
"""`namedtuple` storing the state of a `AttentionWrapper`.
Contains:
- `cell_state`: The state of the wrapped `RNNCell` at the previous time
step.
- `attention`: The attention emitted at the previous time step.
- `time`: int32 scalar containing the current time step.
- `alignments`: A single or tuple of `Tensor`(s) containing the alignments
emitted at the previous time step for each attention mechanism.
- `alignment_history`: (if enabled) a single or tuple of `TensorArray`(s)
containing alignment matrices from all time steps for each attention
mechanism. Call `stack()` on each to convert to a `Tensor`.
- `attention_state`: A single or tuple of nested objects
containing attention mechanism state for each attention mechanism.
The objects may contain Tensors or TensorArrays.
"""
def clone(self, **kwargs):
"""Clone this object, overriding components provided by kwargs.
The new state fields' shape must match original state fields' shape. This
will be validated, and original fields' shape will be propagated to new
fields.
Example:
```python
initial_state = attention_wrapper.zero_state(dtype=..., batch_size=...)
initial_state = initial_state.clone(cell_state=encoder_state)
```
Args:
**kwargs: Any properties of the state object to replace in the returned
`AttentionWrapperState`.
Returns:
A new `AttentionWrapperState` whose properties are the same as
this one, except any overridden properties as provided in `kwargs`.
"""
def with_same_shape(old, new):
"""Check and set new tensor's shape."""
if isinstance(old, ops.Tensor) and isinstance(new, ops.Tensor):
return tensor_util.with_same_shape(old, new)
return new
return nest.map_structure(
with_same_shape,
self,
super(AttentionWrapperState, self)._replace(**kwargs))
def hardmax(logits, name=None):
"""Returns batched one-hot vectors.
The depth index containing the `1` is that of the maximum logit value.
Args:
logits: A batch tensor of logit values.
name: Name to use when creating ops.
Returns:
A batched one-hot tensor.
"""
with ops.name_scope(name, "Hardmax", [logits]):
logits = ops.convert_to_tensor(logits, name="logits")
if logits.get_shape()[-1].value is not None:
depth = logits.get_shape()[-1].value
else:
depth = array_ops.shape(logits)[-1]
return array_ops.one_hot(
math_ops.argmax(logits, -1), depth, dtype=logits.dtype)
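# Illustrative sketch: hardmax([[1., 3., 2.]]) is [[0., 1., 0.]], a one-hot
# vector at the argmax position. Implemented here with NumPy for clarity.
def _hardmax_demo():
  import numpy as np
  logits = np.array([[1., 3., 2.]])
  depth = logits.shape[-1]
  return np.eye(depth)[np.argmax(logits, -1)]  # [[0., 1., 0.]]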
def _compute_attention(attention_mechanism, cell_output, attention_state,
attention_layer):
"""Computes the attention and alignments for a given attention_mechanism."""
alignments, next_attention_state = attention_mechanism(
cell_output, state=attention_state)
# Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time]
expanded_alignments = array_ops.expand_dims(alignments, 1)
# Context is the inner product of alignments and values along the
# memory time dimension.
# alignments shape is
# [batch_size, 1, memory_time]
# attention_mechanism.values shape is
# [batch_size, memory_time, memory_size]
# the batched matmul is over memory_time, so the output shape is
# [batch_size, 1, memory_size].
# we then squeeze out the singleton dim.
context = math_ops.matmul(expanded_alignments, attention_mechanism.values)
context = array_ops.squeeze(context, [1])
if attention_layer is not None:
attention = attention_layer(array_ops.concat([cell_output, context], 1))
else:
attention = context
return attention, alignments, next_attention_state
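# Illustrative sketch of the context computation above: the batched matmul
# of alignments with the memory values collapses the memory_time axis.
# Sizes are arbitrary example values.
def _context_shape_demo():
  import numpy as np
  alignments = np.random.rand(2, 1, 5)     # [batch_size, 1, memory_time]
  values = np.random.rand(2, 5, 16)        # [batch_size, memory_time, memory_size]
  context = np.matmul(alignments, values)  # [2, 1, 16]
  return np.squeeze(context, axis=1)       # [batch_size, memory_size]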
class AttentionWrapper(rnn_cell_impl.RNNCell):
"""Wraps another `RNNCell` with attention.
"""
def __init__(self,
cell,
attention_mechanism,
attention_layer_size=None,
alignment_history=False,
cell_input_fn=None,
output_attention=True,
initial_cell_state=None,
name=None,
attention_layer=None):
"""Construct the `AttentionWrapper`.
**NOTE** If you are using the `BeamSearchDecoder` with a cell wrapped in
`AttentionWrapper`, then you must ensure that:
- The encoder output has been tiled to `beam_width` via
@{tf.contrib.seq2seq.tile_batch} (NOT `tf.tile`).
- The `batch_size` argument passed to the `zero_state` method of this
wrapper is equal to `true_batch_size * beam_width`.
- The initial state created with `zero_state` above contains a
`cell_state` value containing properly tiled final state from the
encoder.
An example:
```
tiled_encoder_outputs = tf.contrib.seq2seq.tile_batch(
encoder_outputs, multiplier=beam_width)
tiled_encoder_final_state = tf.contrib.seq2seq.tile_batch(
encoder_final_state, multiplier=beam_width)
tiled_sequence_length = tf.contrib.seq2seq.tile_batch(
sequence_length, multiplier=beam_width)
attention_mechanism = MyFavoriteAttentionMechanism(
num_units=attention_depth,
memory=tiled_encoder_outputs,
memory_sequence_length=tiled_sequence_length)
attention_cell = AttentionWrapper(cell, attention_mechanism, ...)
decoder_initial_state = attention_cell.zero_state(
dtype, batch_size=true_batch_size * beam_width)
decoder_initial_state = decoder_initial_state.clone(
cell_state=tiled_encoder_final_state)
```
Args:
cell: An instance of `RNNCell`.
attention_mechanism: A list of `AttentionMechanism` instances or a single
instance.
attention_layer_size: A list of Python integers or a single Python
integer, the depth of the attention (output) layer(s). If None
(default), use the context as attention at each time step. Otherwise,
feed the context and cell output into the attention layer to generate
attention at each time step. If attention_mechanism is a list,
attention_layer_size must be a list of the same length. If
attention_layer is set, this must be None.
alignment_history: Python boolean, whether to store alignment history
from all time steps in the final output state (currently stored as a
time major `TensorArray` on which you must call `stack()`).
cell_input_fn: (optional) A `callable`. The default is:
`lambda inputs, attention: array_ops.concat([inputs, attention], -1)`.
output_attention: Python bool. If `True` (default), the output at each
time step is the attention value. This is the behavior of Luong-style
attention mechanisms. If `False`, the output at each time step is
the output of `cell`. This is the behavior of Bahdanau-style
attention mechanisms. In both cases, the `attention` tensor is
propagated to the next time step via the state and is used there.
This flag only controls whether the attention mechanism is propagated
up to the next cell in an RNN stack or to the top RNN output.
initial_cell_state: The initial state value to use for the cell when
the user calls `zero_state()`. Note that if this value is provided
now, and the user uses a `batch_size` argument of `zero_state` which
does not match the batch size of `initial_cell_state`, proper
behavior is not guaranteed.
name: Name to use when creating ops.
attention_layer: A list of `tf.layers.Layer` instances or a
single `tf.layers.Layer` instance taking the context and cell output as
inputs to generate attention at each time step. If None (default), use
the context as attention at each time step. If attention_mechanism is a
list, attention_layer must be a list of the same length. If
attention_layer_size is set, this must be None.
Raises:
TypeError: `attention_layer_size` is not None and (`attention_mechanism`
is a list but `attention_layer_size` is not; or vice versa).
ValueError: if `attention_layer_size` is not None, `attention_mechanism`
is a list, and its length does not match that of `attention_layer_size`;
if `attention_layer_size` and `attention_layer` are set simultaneously.
"""
super(AttentionWrapper, self).__init__(name=name)
rnn_cell_impl.assert_like_rnncell("cell", cell)
if isinstance(attention_mechanism, (list, tuple)):
self._is_multi = True
attention_mechanisms = attention_mechanism
for attention_mechanism in attention_mechanisms:
if not isinstance(attention_mechanism, AttentionMechanism):
raise TypeError(
"attention_mechanism must contain only instances of "
"AttentionMechanism, saw type: %s"
% type(attention_mechanism).__name__)
else:
self._is_multi = False
if not isinstance(attention_mechanism, AttentionMechanism):
raise TypeError(
"attention_mechanism must be an AttentionMechanism or list of "
"multiple AttentionMechanism instances, saw type: %s"
% type(attention_mechanism).__name__)
attention_mechanisms = (attention_mechanism,)
if cell_input_fn is None:
cell_input_fn = (
lambda inputs, attention: array_ops.concat([inputs, attention], -1))
else:
if not callable(cell_input_fn):
raise TypeError(
"cell_input_fn must be callable, saw type: %s"
% type(cell_input_fn).__name__)
if attention_layer_size is not None and attention_layer is not None:
raise ValueError("Only one of attention_layer_size and attention_layer "
"should be set")
if attention_layer_size is not None:
attention_layer_sizes = tuple(
attention_layer_size
if isinstance(attention_layer_size, (list, tuple))
else (attention_layer_size,))
if len(attention_layer_sizes) != len(attention_mechanisms):
raise ValueError(
"If provided, attention_layer_size must contain exactly one "
"integer per attention_mechanism, saw: %d vs %d"
% (len(attention_layer_sizes), len(attention_mechanisms)))
self._attention_layers = tuple(
layers_core.Dense(
attention_layer_size,
name="attention_layer",
use_bias=False,
dtype=attention_mechanisms[i].dtype)
for i, attention_layer_size in enumerate(attention_layer_sizes))
self._attention_layer_size = sum(attention_layer_sizes)
elif attention_layer is not None:
self._attention_layers = tuple(
attention_layer
if isinstance(attention_layer, (list, tuple))
else (attention_layer,))
if len(self._attention_layers) != len(attention_mechanisms):
raise ValueError(
"If provided, attention_layer must contain exactly one "
"layer per attention_mechanism, saw: %d vs %d"
% (len(self._attention_layers), len(attention_mechanisms)))
self._attention_layer_size = sum(
layer.compute_output_shape(
[None,
cell.output_size + mechanism.values.shape[-1].value])[-1].value
for layer, mechanism in zip(
self._attention_layers, attention_mechanisms))
else:
self._attention_layers = None
self._attention_layer_size = sum(
attention_mechanism.values.get_shape()[-1].value
for attention_mechanism in attention_mechanisms)
self._cell = cell
self._attention_mechanisms = attention_mechanisms
self._cell_input_fn = cell_input_fn
self._output_attention = output_attention
self._alignment_history = alignment_history
with ops.name_scope(name, "AttentionWrapperInit"):
if initial_cell_state is None:
self._initial_cell_state = None
else:
final_state_tensor = nest.flatten(initial_cell_state)[-1]
state_batch_size = (
final_state_tensor.shape[0].value
or array_ops.shape(final_state_tensor)[0])
error_message = (
"When constructing AttentionWrapper %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and initial_cell_state. Are you using "
"the BeamSearchDecoder? You may need to tile your initial state "
"via the tf.contrib.seq2seq.tile_batch function with argument "
"multiple=beam_width.")
with ops.control_dependencies(
self._batch_size_checks(state_batch_size, error_message)):
self._initial_cell_state = nest.map_structure(
lambda s: array_ops.identity(s, name="check_initial_cell_state"),
initial_cell_state)
def _batch_size_checks(self, batch_size, error_message):
return [check_ops.assert_equal(batch_size,
attention_mechanism.batch_size,
message=error_message)
for attention_mechanism in self._attention_mechanisms]
def _item_or_tuple(self, seq):
"""Returns `seq` as tuple or the singular element.
Which is returned is determined by how the AttentionMechanism(s) were passed
to the constructor.
Args:
seq: A non-empty sequence of items or generator.
Returns:
The values in the sequence as a tuple if the AttentionMechanism(s) were
passed to the constructor as a sequence; otherwise, the single element.
"""
t = tuple(seq)
if self._is_multi:
return t
else:
return t[0]
@property
def output_size(self):
if self._output_attention:
return self._attention_layer_size
else:
return self._cell.output_size
@property
def state_size(self):
"""The `state_size` property of `AttentionWrapper`.
Returns:
An `AttentionWrapperState` tuple containing shapes used by this object.
"""
return AttentionWrapperState(
cell_state=self._cell.state_size,
time=tensor_shape.TensorShape([]),
attention=self._attention_layer_size,
alignments=self._item_or_tuple(
a.alignments_size for a in self._attention_mechanisms),
attention_state=self._item_or_tuple(
a.state_size for a in self._attention_mechanisms),
alignment_history=self._item_or_tuple(
a.alignments_size if self._alignment_history else ()
for a in self._attention_mechanisms)) # sometimes a TensorArray
def zero_state(self, batch_size, dtype):
"""Return an initial (zero) state tuple for this `AttentionWrapper`.
**NOTE** Please see the initializer documentation for details of how
to call `zero_state` if using an `AttentionWrapper` with a
`BeamSearchDecoder`.
Args:
batch_size: `0D` integer tensor: the batch size.
dtype: The internal state data type.
Returns:
An `AttentionWrapperState` tuple containing zeroed out tensors and,
possibly, empty `TensorArray` objects.
Raises:
ValueError: (or, possibly at runtime, InvalidArgument), if
`batch_size` does not match the output size of the encoder passed
to the wrapper object at initialization time.
"""
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
if self._initial_cell_state is not None:
cell_state = self._initial_cell_state
else:
cell_state = self._cell.zero_state(batch_size, dtype)
error_message = (
"When calling zero_state of AttentionWrapper %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and the requested batch size. Are you using "
"the BeamSearchDecoder? If so, make sure your encoder output has "
"been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and "
"the batch_size= argument passed to zero_state is "
"batch_size * beam_width.")
with ops.control_dependencies(
self._batch_size_checks(batch_size, error_message)):
cell_state = nest.map_structure(
lambda s: array_ops.identity(s, name="checked_cell_state"),
cell_state)
initial_alignments = [
attention_mechanism.initial_alignments(batch_size, dtype)
for attention_mechanism in self._attention_mechanisms]
return AttentionWrapperState(
cell_state=cell_state,
time=array_ops.zeros([], dtype=dtypes.int32),
attention=_zero_state_tensors(self._attention_layer_size, batch_size,
dtype),
alignments=self._item_or_tuple(initial_alignments),
attention_state=self._item_or_tuple(
attention_mechanism.initial_state(batch_size, dtype)
for attention_mechanism in self._attention_mechanisms),
alignment_history=self._item_or_tuple(
tensor_array_ops.TensorArray(
dtype,
size=0,
dynamic_size=True,
element_shape=alignment.shape)
if self._alignment_history else ()
for alignment in initial_alignments))
def call(self, inputs, state):
"""Perform a step of attention-wrapped RNN.
- Step 1: Mix the `inputs` and previous step's `attention` output via
`cell_input_fn`.
- Step 2: Call the wrapped `cell` with this input and its previous state.
- Step 3: Score the cell's output with `attention_mechanism`.
- Step 4: Calculate the alignments by passing the score through the
`normalizer`.
- Step 5: Calculate the context vector as the inner product between the
alignments and the attention_mechanism's values (memory).
- Step 6: Calculate the attention output by concatenating the cell output
and context through the attention layer (a linear layer with
`attention_layer_size` outputs).
Args:
inputs: (Possibly nested tuple of) Tensor, the input at this time step.
state: An instance of `AttentionWrapperState` containing
tensors from the previous time step.
Returns:
A tuple `(attention_or_cell_output, next_state)`, where:
- `attention_or_cell_output`: the attention value if `output_attention`
is `True`, otherwise the output of the wrapped `cell`.
- `next_state` is an instance of `AttentionWrapperState`
containing the state calculated at this time step.
Raises:
TypeError: If `state` is not an instance of `AttentionWrapperState`.
"""
if not isinstance(state, AttentionWrapperState):
raise TypeError("Expected state to be instance of AttentionWrapperState. "
"Received type %s instead." % type(state))
# Step 1: Calculate the true inputs to the cell based on the
# previous attention value.
cell_inputs = self._cell_input_fn(inputs, state.attention)
cell_state = state.cell_state
cell_output, next_cell_state = self._cell(cell_inputs, cell_state)
cell_batch_size = (
cell_output.shape[0].value or array_ops.shape(cell_output)[0])
error_message = (
"When applying AttentionWrapper %s: " % self.name +
"Non-matching batch sizes between the memory "
"(encoder output) and the query (decoder output). Are you using "
"the BeamSearchDecoder? You may need to tile your memory input via "
"the tf.contrib.seq2seq.tile_batch function with argument "
"multiple=beam_width.")
with ops.control_dependencies(
self._batch_size_checks(cell_batch_size, error_message)):
cell_output = array_ops.identity(
cell_output, name="checked_cell_output")
if self._is_multi:
previous_attention_state = state.attention_state
previous_alignment_history = state.alignment_history
else:
previous_attention_state = [state.attention_state]
previous_alignment_history = [state.alignment_history]
all_alignments = []
all_attentions = []
all_attention_states = []
maybe_all_histories = []
for i, attention_mechanism in enumerate(self._attention_mechanisms):
attention, alignments, next_attention_state = _compute_attention(
attention_mechanism, cell_output, previous_attention_state[i],
self._attention_layers[i] if self._attention_layers else None)
alignment_history = previous_alignment_history[i].write(
state.time, alignments) if self._alignment_history else ()
all_attention_states.append(next_attention_state)
all_alignments.append(alignments)
all_attentions.append(attention)
maybe_all_histories.append(alignment_history)
attention = array_ops.concat(all_attentions, 1)
next_state = AttentionWrapperState(
time=state.time + 1,
cell_state=next_cell_state,
attention=attention,
attention_state=self._item_or_tuple(all_attention_states),
alignments=self._item_or_tuple(all_alignments),
alignment_history=self._item_or_tuple(maybe_all_histories))
if self._output_attention:
return attention, next_state
else:
return cell_output, next_state
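# Illustrative end-to-end usage sketch (graph construction only). The cell
# type, sizes, and single decode step are arbitrary example choices, not a
# prescription from this module.
def _attention_wrapper_usage_demo():
  import tensorflow as tf
  batch_size, max_time, depth = 4, 10, 16
  encoder_outputs = tf.zeros([batch_size, max_time, depth])
  lengths = tf.fill([batch_size], max_time)
  mechanism = tf.contrib.seq2seq.BahdanauAttention(
      num_units=depth, memory=encoder_outputs, memory_sequence_length=lengths)
  cell = tf.contrib.seq2seq.AttentionWrapper(
      tf.nn.rnn_cell.BasicLSTMCell(depth), mechanism,
      attention_layer_size=depth)
  state = cell.zero_state(batch_size, tf.float32)
  inputs = tf.zeros([batch_size, depth])
  output, state = cell(inputs, state)  # one attention-wrapped RNN step
  return output, state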
|
nburn42/tensorflow
|
tensorflow/contrib/seq2seq/python/ops/attention_wrapper.py
|
Python
|
apache-2.0
| 61,484
|
[
"DIRAC"
] |
99e476b6791c6b78262e89d83dde0f5be8dd4444cc7dba8d339a2adabb17e547
|
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function
import numpy as np
from pyscf.nao.ao_log import ao_log
from pyscf.nao.m_sbt import sbt_c
from pyscf.nao.m_c2r import c2r_c
from pyscf.nao.m_log_interp import log_interp_c
from pyscf.nao.m_ao_log_hartree import ao_log_hartree
from timeit import default_timer as timer
from pyscf.nao.m_gaunt import gaunt_c
#from pyscf.dft.radi import gauss_legendre
from pyscf.nao.m_gauleg import gauss_legendre
#
#
#
def build_3dgrid(me, sp1, R1, sp2, R2, **kw):
"""
me: prod_log class
"""
from pyscf import dft
from pyscf.nao import nao
assert sp1>=0
assert sp2>=0
if ( (R1-R2)**2 ).sum()<1e-7 :
mol = nao(xyz_list=[ [int(me.aos[0].sp2charge[sp1]), R1] ])
else:
mol = nao(xyz_list=[[int(me.aos[0].sp2charge[sp1]), R1],
[int(me.aos[1].sp2charge[sp2]), R2]])
atom2rcut=np.array([me.aos[isp].sp_mu2rcut[sp].max() \
for isp,sp in enumerate([sp1,sp2])])
grids = dft.gen_grid.Grids(mol)
grids.level = kw['level'] if 'level' in kw else 3 # precision as implemented in pyscf
grids.radi_method=gauss_legendre
grids.dump_flags()
grids.build(atom2rcut=atom2rcut)
#grids.build()
return grids
#
#
#
def build_3dgrid3c(me, sp1, sp2, R1, R2, sp3, R3, level=3):
from pyscf import dft
from pyscf.nao.m_system_vars import system_vars_c
d12 = ((R1-R2)**2).sum()
d13 = ((R1-R3)**2).sum()
d23 = ((R2-R3)**2).sum()
z1 = int(me.aos[0].sp2charge[sp1])
z2 = int(me.aos[0].sp2charge[sp2])
z3 = int(me.aos[1].sp2charge[sp3])
rc1 = me.aos[0].sp2rcut[sp1]
rc2 = me.aos[0].sp2rcut[sp2]
rc3 = me.aos[1].sp2rcut[sp3]
if d12<1e-7 and d23<1e-7 :
mol = system_vars_c(atom=[ [z1, R1] ])
elif d12<1e-7 and d23>1e-7 and d13>1e-7:
mol = system_vars_c(atom=[ [z1, R1], [z3, R3] ])
elif d23<1e-7 and d12>1e-7 and d13>1e-7:
mol = system_vars_c(atom=[ [z1, R1], [z2, R2] ])
elif d13<1e-7 and d12>1e-7 and d23>1e-7:
mol = system_vars_c(atom=[ [z1, R1], [z2, R2] ])
else :
mol = system_vars_c(atom=[ [z1, R1], [z2, R2], [z3, R3] ])
atom2rcut=np.array([rc1, rc2, rc3])
grids = dft.gen_grid.Grids(mol)
grids.level = level # precision as implemented in pyscf
grids.radi_method = gauss_legendre
grids.build(atom2rcut=atom2rcut)
return grids
#
#
class ao_matelem_c(sbt_c, c2r_c, gaunt_c):
'''
Evaluator of matrix elements given by the numerical atomic orbitals.
The class will contain
the Gaunt coefficients,
the complex -> real transform (for spherical harmonics) and
the spherical Bessel transform.
'''
def __init__(self, rr, pp, sv=None, dm=None):
""" Basic """
from pyscf.nao.m_init_dm_libnao import init_dm_libnao
from pyscf.nao.m_init_dens_libnao import init_dens_libnao
self.interp_rr = log_interp_c(rr)
self.interp_pp = log_interp_c(pp)
self.rr3_dr = rr**3 * np.log(rr[1]/rr[0])
self.dr_jt = np.log(rr[1]/rr[0])
self.four_pi = 4*np.pi
self.const = np.sqrt(np.pi/2.0)
self.pp2 = pp**2
self.sv = None if sv is None else sv.init_libnao()
self.dm = None if dm is None else init_dm_libnao(dm)
if dm is not None and sv is not None : init_dens_libnao()
# @classmethod # I don't understand something about classmethod
def init_one_set(self, ao, **kvargs):
""" Constructor for two-center matrix elements, i.e. one set of radial orbitals per specie is provided """
self.jmx = ao.jmx
c2r_c.__init__(self, self.jmx)
sbt_c.__init__(self, ao.rr, ao.pp, lmax=2*self.jmx+1)
gaunt_c.__init__(self, self.jmx)
self.ao1 = ao
self.ao1._add_sp2info()
self.ao1._add_psi_log_mom()
self.ao2 = self.ao1
self.ao2_hartree = ao_log_hartree(self.ao1, **kvargs)
self.aos = [self.ao1, self.ao2]
return self
# @classmethod # I don't understand something about classmethod
def init_two_sets(self, ao1, ao2, **kvargs):
""" Constructor for matrix elements between product functions and orbital's products: two sets of radial orbitals must be provided. """
self.jmx = max(ao1.jmx, ao2.jmx)
c2r_c.__init__(self, self.jmx)
sbt_c.__init__(self, ao1.rr, ao1.pp, lmax=2*self.jmx+1)
gaunt_c.__init__(self, self.jmx)
self.ao1 = ao1
self.ao1._add_sp2info()
self.ao1._add_psi_log_mom()
self.pp2 = self.ao1.pp**2
self.ao2 = ao2
self.ao2._add_sp2info()
self.ao2_hartree = ao_log_hartree(self.ao2, **kvargs)
self.ao2._add_psi_log_mom()
self.aos = [self.ao1, self.ao2]
return self
#
def overlap_am(self, sp1,R1, sp2,R2):
from pyscf.nao.m_overlap_am import overlap_am as overlap
return overlap(self, sp1,R1, sp2,R2)
def overlap_ni(self, sp1,R1, sp2,R2, **kvargs):
from pyscf.nao.m_overlap_ni import overlap_ni
return overlap_ni(self, sp1,R1, sp2,R2, **kvargs)
def coulomb_am(self, sp1,R1, sp2,R2):
from pyscf.nao.m_coulomb_am import coulomb_am as ext
return ext(self, sp1,R1, sp2,R2)
def coulomb_ni(self, sp1,R1, sp2,R2,**kvargs):
from pyscf.nao.m_eri2c import eri2c as ext
return ext(self, sp1,R1, sp2,R2,**kvargs)
def xc_scalar(self, sp1,R1, sp2,R2,**kvargs):
from pyscf.nao.m_xc_scalar_ni import xc_scalar_ni as ext
return ext(self, sp1,R1, sp2,R2,**kvargs)
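# Illustrative sketch (not part of the module) of the logarithmic-grid
# quadrature behind rr3_dr in ao_matelem_c.__init__: on a grid
# r_i = r_0*exp(i*dt) one has dr = r*dt, so int f(r) r^2 dr ~ sum_i f_i r_i^3 dt.
def _log_grid_quadrature_demo():
  import numpy as np
  rr = np.logspace(-4, 2, 512)   # logarithmic radial grid (example values)
  dt = np.log(rr[1] / rr[0])
  f = np.exp(-rr)
  return (f * rr**3 * dt).sum()  # ~2.0 == int_0^inf exp(-r) r^2 dr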
|
gkc1000/pyscf
|
pyscf/nao/m_ao_matelem.py
|
Python
|
apache-2.0
| 5,878
|
[
"PySCF"
] |
c66714f2551b27c3e1f7ad9c3fa379929a7c6e3bf3b2e899b4799aeada94ef05
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import warnings
from pymatgen.core.structure import Structure
from pymatgen.core.units import Ha_to_eV, bohr_to_ang
from pymatgen.io.abinit.abiobjects import *
from pymatgen.util.testing import PymatgenTest
class LatticeFromAbivarsTest(PymatgenTest):
def test_rprim_acell(self):
l1 = lattice_from_abivars(acell=3 * [10], rprim=np.eye(3))
self.assertAlmostEqual(l1.volume, bohr_to_ang ** 3 * 1000)
assert l1.angles == (90, 90, 90)
l2 = lattice_from_abivars(acell=3 * [10], angdeg=(90, 90, 90))
assert l1 == l2
l2 = lattice_from_abivars(acell=3 * [8], angdeg=(60, 60, 60))
abi_rprimd = (
np.reshape(
[
4.6188022,
0.0000000,
6.5319726,
-2.3094011,
4.0000000,
6.5319726,
-2.3094011,
-4.0000000,
6.5319726,
],
(3, 3),
)
* bohr_to_ang
)
self.assertArrayAlmostEqual(l2.matrix, abi_rprimd)
l3 = lattice_from_abivars(acell=[3, 6, 9], angdeg=(30, 40, 50))
abi_rprimd = (
np.reshape(
[
3.0000000,
0.0000000,
0.0000000,
3.8567257,
4.5962667,
0.0000000,
6.8944000,
4.3895544,
3.7681642,
],
(3, 3),
)
* bohr_to_ang
)
self.assertArrayAlmostEqual(l3.matrix, abi_rprimd)
with self.assertRaises(ValueError):
lattice_from_abivars(acell=[1, 1, 1], angdeg=(90, 90, 90), rprim=np.eye(3))
with self.assertRaises(ValueError):
lattice_from_abivars(acell=[1, 1, 1], angdeg=(-90, 90, 90))
def test_znucl_typat(self):
"""Test the order of typat and znucl in the Abinit input and enforce_typat, enforce_znucl."""
# Ga Ga1 1 0.33333333333333 0.666666666666667 0.500880 1.0
# Ga Ga2 1 0.66666666666667 0.333333333333333 0.000880 1.0
# N N3 1 0.333333333333333 0.666666666666667 0.124120 1.0
# N N4 1 0.666666666666667 0.333333333333333 0.624120 1.0
gan = Structure.from_file(os.path.join(PymatgenTest.TEST_FILES_DIR, "abinit", "gan.cif"))
# By default, znucl is filled using the first new type found in sites.
def_vars = structure_to_abivars(gan)
def_znucl = def_vars["znucl"]
self.assertArrayEqual(def_znucl, [31, 7])
def_typat = def_vars["typat"]
self.assertArrayEqual(def_typat, [1, 1, 2, 2])
# But it's possible to enforce a particular value of typat and znucl.
enforce_znucl = [7, 31]
enforce_typat = [2, 2, 1, 1]
enf_vars = structure_to_abivars(gan, enforce_znucl=enforce_znucl, enforce_typat=enforce_typat)
self.assertArrayEqual(enf_vars["znucl"], enforce_znucl)
self.assertArrayEqual(enf_vars["typat"], enforce_typat)
self.assertArrayEqual(def_vars["xred"], enf_vars["xred"])
assert [s.symbol for s in species_by_znucl(gan)] == ["Ga", "N"]
for itype1, itype2 in zip(def_typat, enforce_typat):
assert def_znucl[itype1 - 1] == enforce_znucl[itype2 - 1]
with self.assertRaises(Exception):
structure_to_abivars(gan, enforce_znucl=enforce_znucl, enforce_typat=None)
class SpinModeTest(PymatgenTest):
def test_base(self):
polarized = SpinMode.as_spinmode("polarized")
other_polarized = SpinMode.as_spinmode("polarized")
unpolarized = SpinMode.as_spinmode("unpolarized")
polarized.to_abivars()
self.assertTrue(polarized is other_polarized)
self.assertTrue(polarized == other_polarized)
self.assertTrue(polarized != unpolarized)
# Test pickle
self.serialize_with_pickle(polarized)
# Test dict methods
self.assertMSONable(polarized)
self.assertMSONable(unpolarized)
class SmearingTest(PymatgenTest):
def test_base(self):
fd1ev = Smearing.as_smearing("fermi_dirac:1 eV")
fd1ev.to_abivars()
self.assertTrue(fd1ev)
same_fd = Smearing.as_smearing("fermi_dirac:" + str(1.0 / Ha_to_eV))
self.assertTrue(same_fd == fd1ev)
nosmear = Smearing.nosmearing()
assert nosmear == Smearing.as_smearing("nosmearing")
self.assertFalse(nosmear)
self.assertTrue(nosmear != fd1ev)
self.assertMSONable(nosmear)
new_fd1ev = Smearing.from_dict(fd1ev.as_dict())
self.assertTrue(new_fd1ev == fd1ev)
# Test pickle
self.serialize_with_pickle(fd1ev)
# Test dict methods
self.assertMSONable(fd1ev)
class ElectronsAlgorithmTest(PymatgenTest):
def test_base(self):
algo = ElectronsAlgorithm(nstep=70)
abivars = algo.to_abivars()
# Test pickle
self.serialize_with_pickle(algo)
# Test dict methods
self.assertMSONable(algo)
class ElectronsTest(PymatgenTest):
def test_base(self):
default_electrons = Electrons()
self.assertTrue(default_electrons.nsppol == 2)
self.assertTrue(default_electrons.nspinor == 1)
self.assertTrue(default_electrons.nspden == 2)
abivars = default_electrons.to_abivars()
# new = Electron.from_dict(default_electrons.as_dict())
# Test pickle
self.serialize_with_pickle(default_electrons, test_eq=False)
custom_electrons = Electrons(
spin_mode="unpolarized",
smearing="marzari4:0.2 eV",
algorithm=ElectronsAlgorithm(nstep=70),
nband=10,
charge=1.0,
comment="Test comment",
)
# Test dict methods
self.assertMSONable(custom_electrons)
class KSamplingTest(PymatgenTest):
def test_base(self):
monkhorst = KSampling.monkhorst((3, 3, 3), (0.5, 0.5, 0.5), 0, False, False)
gamma_centered = KSampling.gamma_centered((3, 3, 3), False, False)
monkhorst.to_abivars()
# Test dict methods
self.assertMSONable(monkhorst)
self.assertMSONable(gamma_centered)
class RelaxationTest(PymatgenTest):
def test_base(self):
atoms_and_cell = RelaxationMethod.atoms_and_cell()
atoms_only = RelaxationMethod.atoms_only()
atoms_and_cell.to_abivars()
# Test dict methods
self.assertMSONable(atoms_and_cell)
self.assertMSONable(atoms_only)
class PPModelTest(PymatgenTest):
def test_base(self):
godby = PPModel.as_ppmodel("godby:12 eV")
# print(godby)
# print(repr(godby))
godby.to_abivars()
self.assertTrue(godby)
same_godby = PPModel.as_ppmodel("godby:" + str(12.0 / Ha_to_eV))
self.assertTrue(same_godby == godby)
noppm = PPModel.get_noppmodel()
self.assertFalse(noppm)
self.assertTrue(noppm != godby)
new_godby = PPModel.from_dict(godby.as_dict())
self.assertTrue(new_godby == godby)
# Test pickle
self.serialize_with_pickle(godby)
# Test dict methods
self.assertMSONable(godby)
|
vorwerkc/pymatgen
|
pymatgen/io/abinit/tests/test_abiobjects.py
|
Python
|
mit
| 7,463
|
[
"ABINIT",
"pymatgen"
] |
8d2195dcfca17469d572591f8652f944f4815ce1b6d191a6c1ef87a5a8c359ce
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
A Flow is a container for Works, and works consist of tasks.
Flows are the final objects that can be dumped directly to a pickle file on disk.
Flows are executed using abirun (abipy).
"""
from __future__ import unicode_literals, division, print_function
import os
import sys
import time
import collections
import warnings
import shutil
import copy
import tempfile
import numpy as np
from pprint import pprint
from six.moves import map, StringIO
from tabulate import tabulate
from pydispatch import dispatcher
from collections import OrderedDict
from monty.collections import as_set, dict2namedtuple
from monty.string import list_strings, is_string, make_banner
from monty.operator import operator_from_str
from monty.io import FileLock
from monty.pprint import draw_tree
from monty.termcolor import cprint, colored, cprint_map, get_terminal_size
from monty.inspect import find_top_pyfile
from monty.dev import deprecated
from monty.json import MSONable
from pymatgen.serializers.pickle_coders import pmg_pickle_load, pmg_pickle_dump
from pymatgen.serializers.json_coders import pmg_serialize
from pymatgen.core.units import Memory
from pymatgen.util.io_utils import AtomicFile
from pymatgen.util.plotting import add_fig_kwargs, get_ax_fig_plt
from . import wrappers
from .nodes import Status, Node, NodeError, NodeResults, Dependency, GarbageCollector, check_spectator
from .tasks import ScfTask, DdkTask, DdeTask, TaskManager, FixQueueCriticalError
from .utils import File, Directory, Editor
from .abiinspect import yaml_read_irred_perts
from .works import NodeContainer, Work, BandStructureWork, PhononWork, BecWork, G0W0Work, QptdmWork, DteWork
from .events import EventsParser # autodoc_event_handlers
import logging
logger = logging.getLogger(__name__)
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__all__ = [
"Flow",
"G0W0WithQptdmFlow",
"bandstructure_flow",
"g0w0_flow",
"phonon_flow",
]
class FlowResults(NodeResults):
JSON_SCHEMA = NodeResults.JSON_SCHEMA.copy()
#JSON_SCHEMA["properties"] = {
# "queries": {"type": "string", "required": True},
#}
@classmethod
def from_node(cls, flow):
"""Initialize an instance from a Work instance."""
new = super(FlowResults, cls).from_node(flow)
# Will put all files found in outdir in GridFs
d = {os.path.basename(f): f for f in flow.outdir.list_filepaths()}
# Add the pickle file.
d["pickle"] = flow.pickle_file if flow.pickle_protocol != 0 else (flow.pickle_file, "t")
new.add_gridfs_files(**d)
return new
class FlowError(NodeError):
"""Base Exception for :class:`Node` methods"""
class Flow(Node, NodeContainer, MSONable):
"""
    This object is a container of works. Its main task is managing the
    possible inter-dependencies among the works and the creation of
dynamic workflows that are generated by callbacks registered by the user.
.. attributes::
creation_date: String with the creation_date
pickle_protocol: Protocol for Pickle database (default: -1 i.e. latest protocol)
Important methods for constructing flows:
.. methods::
register_work: register (add) a work to the flow
        register_task: register a work that contains only this task and return the work
allocate: propagate the workdir and manager of the flow to all the registered tasks
build:
build_and_pickle_dump:
"""
VERSION = "0.1"
PICKLE_FNAME = "__AbinitFlow__.pickle"
Error = FlowError
Results = FlowResults
@classmethod
def from_inputs(cls, workdir, inputs, manager=None, pickle_protocol=-1, task_class=ScfTask, work_class=Work):
"""
Construct a simple flow from a list of inputs. The flow contains a single Work with
tasks whose class is given by task_class.
.. warning::
Don't use this interface if you have dependencies among the tasks.
Args:
workdir: String specifying the directory where the works will be produced.
inputs: List of inputs.
manager: :class:`TaskManager` object responsible for the submission of the jobs.
If manager is None, the object is initialized from the yaml file
located either in the working directory or in the user configuration dir.
pickle_protocol: Pickle protocol version used for saving the status of the object.
-1 denotes the latest version supported by the python interpreter.
task_class: The class of the :class:`Task`.
work_class: The class of the :class:`Work`.
"""
if not isinstance(inputs, (list, tuple)): inputs = [inputs]
flow = cls(workdir, manager=manager, pickle_protocol=pickle_protocol)
work = work_class()
for inp in inputs:
work.register(inp, task_class=task_class)
flow.register_work(work)
return flow.allocate()
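    # A minimal sketch of the intended usage (the input objects below are
    # placeholders, not part of this module):
    #
    #   flow = Flow.from_inputs("flow_scf", inputs=[scf_input1, scf_input2])
    #   flow.build_and_pickle_dump()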
@classmethod
def as_flow(cls, obj):
"""Convert obj into a Flow. Accepts filepath, dict, or Flow object."""
if isinstance(obj, cls): return obj
if is_string(obj):
return cls.pickle_load(obj)
elif isinstance(obj, collections.Mapping):
return cls.from_dict(obj)
else:
raise TypeError("Don't know how to convert type %s into a Flow" % type(obj))
def __init__(self, workdir, manager=None, pickle_protocol=-1, remove=False):
"""
Args:
workdir: String specifying the directory where the works will be produced.
if workdir is None, the initialization of the working directory
is performed by flow.allocate(workdir).
manager: :class:`TaskManager` object responsible for the submission of the jobs.
If manager is None, the object is initialized from the yaml file
located either in the working directory or in the user configuration dir.
pickle_protocol: Pickle protocol version used for saving the status of the object.
-1 denotes the latest version supported by the python interpreter.
remove: attempt to remove working directory `workdir` if directory already exists.
"""
super(Flow, self).__init__()
if workdir is not None:
if remove and os.path.exists(workdir): shutil.rmtree(workdir)
self.set_workdir(workdir)
self.creation_date = time.asctime()
if manager is None: manager = TaskManager.from_user_config()
self.manager = manager.deepcopy()
# List of works.
self._works = []
self._waited = 0
# List of callbacks that must be executed when the dependencies reach S_OK
self._callbacks = []
# Install default list of handlers at the flow level.
# Users can override the default list by calling flow.install_event_handlers in the script.
# Example:
#
# # flow level (common case)
# flow.install_event_handlers(handlers=my_handlers)
#
# # task level (advanced mode)
# flow[0][0].install_event_handlers(handlers=my_handlers)
#
self.install_event_handlers()
self.pickle_protocol = int(pickle_protocol)
# ID used to access mongodb
self._mongo_id = None
# Save the location of the script used to generate the flow.
# This trick won't work if we are running with nosetests, py.test etc
pyfile = find_top_pyfile()
if "python" in pyfile or "ipython" in pyfile: pyfile = "<" + pyfile + ">"
self.set_pyfile(pyfile)
# TODO
# Signal slots: a dictionary with the list
# of callbacks indexed by node_id and SIGNAL_TYPE.
# When the node changes its status, it broadcast a signal.
# The flow is listening to all the nodes of the calculation
# [node_id][SIGNAL] = list_of_signal_handlers
#self._sig_slots = slots = {}
#for work in self:
# slots[work] = {s: [] for s in work.S_ALL}
#for task in self.iflat_tasks():
# slots[task] = {s: [] for s in work.S_ALL}
@pmg_serialize
def as_dict(self, **kwargs):
"""
JSON serialization, note that we only need to save
a string with the working directory since the object will be
reconstructed from the pickle file located in workdir
"""
return {"workdir": self.workdir}
# This is needed for fireworks.
to_dict = as_dict
@classmethod
def from_dict(cls, d, **kwargs):
"""Reconstruct the flow from the pickle file."""
return cls.pickle_load(d["workdir"], **kwargs)
@classmethod
def temporary_flow(cls, manager=None):
"""Return a Flow in a temporary directory. Useful for unit tests."""
return cls(workdir=tempfile.mkdtemp(), manager=manager)
def set_workdir(self, workdir, chroot=False):
"""
Set the working directory. Cannot be set more than once unless chroot is True
"""
if not chroot and hasattr(self, "workdir") and self.workdir != workdir:
raise ValueError("self.workdir != workdir: %s, %s" % (self.workdir, workdir))
# Directories with (input|output|temporary) data.
self.workdir = os.path.abspath(workdir)
self.indir = Directory(os.path.join(self.workdir, "indata"))
self.outdir = Directory(os.path.join(self.workdir, "outdata"))
self.tmpdir = Directory(os.path.join(self.workdir, "tmpdata"))
def reload(self):
"""
Reload the flow from the pickle file. Used when we are monitoring the flow
executed by the scheduler. In this case, indeed, the flow might have been changed
by the scheduler and we have to reload the new flow in memory.
"""
        new = self.__class__.pickle_load(self.workdir)
        # Rebinding `self` would be a no-op; copy the state of the reloaded flow instead.
        self.__dict__.update(new.__dict__)
@classmethod
def pickle_load(cls, filepath, spectator_mode=True, remove_lock=False):
"""
Loads the object from a pickle file and performs initial setup.
Args:
            filepath: Filename or directory name. If filepath is a directory, we
scan the directory tree starting from filepath and we
read the first pickle database. Raise RuntimeError if multiple
databases are found.
spectator_mode: If True, the nodes of the flow are not connected by signals.
This option is usually used when we want to read a flow
in read-only mode and we want to avoid callbacks that can change the flow.
remove_lock:
True to remove the file lock if any (use it carefully).
"""
if os.path.isdir(filepath):
# Walk through each directory inside path and find the pickle database.
for dirpath, dirnames, filenames in os.walk(filepath):
fnames = [f for f in filenames if f == cls.PICKLE_FNAME]
if fnames:
if len(fnames) == 1:
filepath = os.path.join(dirpath, fnames[0])
break # Exit os.walk
else:
err_msg = "Found multiple databases:\n %s" % str(fnames)
raise RuntimeError(err_msg)
else:
err_msg = "Cannot find %s inside directory %s" % (cls.PICKLE_FNAME, filepath)
raise ValueError(err_msg)
if remove_lock and os.path.exists(filepath + ".lock"):
try:
os.remove(filepath + ".lock")
            except OSError:
pass
with FileLock(filepath):
with open(filepath, "rb") as fh:
flow = pmg_pickle_load(fh)
# Check if versions match.
if flow.VERSION != cls.VERSION:
msg = ("File flow version %s != latest version %s\n."
"Regenerate the flow to solve the problem " % (flow.VERSION, cls.VERSION))
warnings.warn(msg)
flow.set_spectator_mode(spectator_mode)
# Recompute the status of each task since tasks that
# have been submitted previously might be completed.
flow.check_status()
return flow
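    # Typical usage (sketch): rebuild a flow from its working directory and
    # inspect it without triggering callbacks.
    #
    #   flow = Flow.pickle_load("flow_scf", spectator_mode=True)
    #   flow.show_status()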
@classmethod
def pickle_loads(cls, s):
"""Reconstruct the flow from a string."""
strio = StringIO()
strio.write(s)
strio.seek(0)
flow = pmg_pickle_load(strio)
return flow
def __len__(self):
return len(self.works)
def __iter__(self):
return self.works.__iter__()
    def __getitem__(self, item):
        return self.works[item]
def set_pyfile(self, pyfile):
"""
Set the path of the python script used to generate the flow.
.. Example:
flow.set_pyfile(__file__)
"""
# TODO: Could use a frame hack to get the caller outside abinit
# so that pyfile is automatically set when we __init__ it!
self._pyfile = os.path.abspath(pyfile)
@property
def pyfile(self):
"""
Absolute path of the python script used to generate the flow. Set by `set_pyfile`
"""
try:
return self._pyfile
except AttributeError:
return None
@property
def pid_file(self):
"""The path of the pid file created by PyFlowScheduler."""
return os.path.join(self.workdir, "_PyFlowScheduler.pid")
def check_pid_file(self):
"""
This function checks if we are already running the :class:`Flow` with a :class:`PyFlowScheduler`.
        Raises: Flow.Error if the pid file of the scheduler exists.
"""
if not os.path.exists(self.pid_file):
return 0
self.show_status()
raise self.Error("""\n\
pid_file
%s
already exists. There are two possibilities:
   1) There's another instance of PyFlowScheduler running
2) The previous scheduler didn't exit in a clean way
To solve case 1:
Kill the previous scheduler (use 'kill pid' where pid is the number reported in the file)
Then you can restart the new scheduler.
To solve case 2:
Remove the pid_file and restart the scheduler.
Exiting""" % self.pid_file)
@property
def pickle_file(self):
"""The path of the pickle file."""
return os.path.join(self.workdir, self.PICKLE_FNAME)
@property
def mongo_id(self):
return self._mongo_id
@mongo_id.setter
def mongo_id(self, value):
if self.mongo_id is not None:
raise RuntimeError("Cannot change mongo_id %s" % self.mongo_id)
self._mongo_id = value
def mongodb_upload(self, **kwargs):
from abiflows.core.scheduler import FlowUploader
FlowUploader().upload(self, **kwargs)
def validate_json_schema(self):
"""Validate the JSON schema. Return list of errors."""
errors = []
for work in self:
for task in work:
if not task.get_results().validate_json_schema():
errors.append(task)
if not work.get_results().validate_json_schema():
errors.append(work)
if not self.get_results().validate_json_schema():
errors.append(self)
return errors
def get_mongo_info(self):
"""
Return a JSON dictionary with information on the flow.
Mainly used for constructing the info section in `FlowEntry`.
The default implementation is empty. Subclasses must implement it
"""
return {}
def mongo_assimilate(self):
"""
This function is called by client code when the flow is completed
Return a JSON dictionary with the most important results produced
by the flow. The default implementation is empty. Subclasses must implement it
"""
return {}
@property
def works(self):
"""List of :class:`Work` objects contained in self.."""
return self._works
@property
def all_ok(self):
"""True if all the tasks in works have reached `S_OK`."""
return all(work.all_ok for work in self)
@property
def num_tasks(self):
"""Total number of tasks"""
return len(list(self.iflat_tasks()))
@property
def errored_tasks(self):
"""List of errored tasks."""
etasks = []
for status in [self.S_ERROR, self.S_QCRITICAL, self.S_ABICRITICAL]:
etasks.extend(list(self.iflat_tasks(status=status)))
return set(etasks)
@property
def num_errored_tasks(self):
"""The number of tasks whose status is `S_ERROR`."""
return len(self.errored_tasks)
@property
def unconverged_tasks(self):
"""List of unconverged tasks."""
return list(self.iflat_tasks(status=self.S_UNCONVERGED))
@property
def num_unconverged_tasks(self):
"""The number of tasks whose status is `S_UNCONVERGED`."""
return len(self.unconverged_tasks)
@property
def status_counter(self):
"""
Returns a :class:`Counter` object that counts the number of tasks with
given status (use the string representation of the status as key).
"""
# Count the number of tasks with given status in each work.
counter = self[0].status_counter
for work in self[1:]:
counter += work.status_counter
return counter
@property
def ncores_reserved(self):
"""
        Returns the number of cores reserved at this moment.
A core is reserved if the task is not running but
we have submitted the task to the queue manager.
"""
return sum(work.ncores_reserved for work in self)
@property
def ncores_allocated(self):
"""
        Returns the number of cores allocated at this moment.
A core is allocated if it's running a task or if we have
submitted a task to the queue manager but the job is still pending.
"""
return sum(work.ncores_allocated for work in self)
@property
def ncores_used(self):
"""
        Returns the number of cores used at this moment.
A core is used if there's a job that is running on it.
"""
return sum(work.ncores_used for work in self)
@property
def has_chrooted(self):
"""
        Returns a string that evaluates to True if we have changed
        the workdir for visualization purposes, e.g. we are using sshfs
        to mount the remote directory where the `Flow` is located.
The string gives the previous workdir of the flow.
"""
try:
return self._chrooted_from
except AttributeError:
return ""
def chroot(self, new_workdir):
"""
        Change the workdir of the :class:`Flow`. Mainly used for
allowing the user to open the GUI on the local host
and access the flow from remote via sshfs.
.. note::
Calling this method will make the flow go in read-only mode.
"""
self._chrooted_from = self.workdir
self.set_workdir(new_workdir, chroot=True)
for i, work in enumerate(self):
new_wdir = os.path.join(self.workdir, "w" + str(i))
work.chroot(new_wdir)
def groupby_status(self):
"""
        Returns an ordered dictionary mapping the task status to
the list of named tuples (task, work_index, task_index).
"""
Entry = collections.namedtuple("Entry", "task wi ti")
d = collections.defaultdict(list)
for task, wi, ti in self.iflat_tasks_wti():
d[task.status].append(Entry(task, wi, ti))
# Sort keys according to their status.
return OrderedDict([(k, d[k]) for k in sorted(list(d.keys()))])
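    # Sketch: report the (work_index, task_index) positions of the tasks
    # grouped by status.
    #
    #   for status, entries in flow.groupby_status().items():
    #       print(status, [(e.wi, e.ti) for e in entries])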
def groupby_task_class(self):
"""
Returns a dictionary mapping the task class to the list of tasks in the flow
"""
# Find all Task classes
class2tasks = OrderedDict()
for task in self.iflat_tasks():
cls = task.__class__
if cls not in class2tasks: class2tasks[cls] = []
class2tasks[cls].append(task)
return class2tasks
def iflat_nodes(self, status=None, op="==", nids=None):
"""
        Generator that produces a flat sequence of nodes.
        If status is not None, only the tasks with the specified status are selected.
nids is an optional list of node identifiers used to filter the nodes.
"""
nids = as_set(nids)
if status is None:
if not (nids and self.node_id not in nids):
yield self
for work in self:
if nids and work.node_id not in nids: continue
yield work
for task in work:
if nids and task.node_id not in nids: continue
yield task
else:
# Get the operator from the string.
op = operator_from_str(op)
# Accept Task.S_FLAG or string.
status = Status.as_status(status)
if not (nids and self.node_id not in nids):
if op(self.status, status): yield self
for wi, work in enumerate(self):
if nids and work.node_id not in nids: continue
if op(work.status, status): yield work
for ti, task in enumerate(work):
if nids and task.node_id not in nids: continue
if op(task.status, status): yield task
def node_from_nid(self, nid):
"""Return the node in the `Flow` with the given `nid` identifier"""
for node in self.iflat_nodes():
if node.node_id == nid: return node
raise ValueError("Cannot find node with node id: %s" % nid)
def iflat_tasks_wti(self, status=None, op="==", nids=None):
"""
Generator to iterate over all the tasks of the `Flow`.
Yields:
(task, work_index, task_index)
If status is not None, only the tasks whose status satisfies
the condition (task.status op status) are selected
status can be either one of the flags defined in the :class:`Task` class
(e.g Task.S_OK) or a string e.g "S_OK"
nids is an optional list of node identifiers used to filter the tasks.
"""
return self._iflat_tasks_wti(status=status, op=op, nids=nids, with_wti=True)
def iflat_tasks(self, status=None, op="==", nids=None):
"""
Generator to iterate over all the tasks of the :class:`Flow`.
If status is not None, only the tasks whose status satisfies
the condition (task.status op status) are selected
status can be either one of the flags defined in the :class:`Task` class
(e.g Task.S_OK) or a string e.g "S_OK"
nids is an optional list of node identifiers used to filter the tasks.
"""
return self._iflat_tasks_wti(status=status, op=op, nids=nids, with_wti=False)
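    # Sketch: loop over the tasks that reached S_OK and locate them in the flow.
    #
    #   for task, wi, ti in flow.iflat_tasks_wti(status="S_OK"):
    #       print("work %d, task %d:" % (wi, ti), task)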
def _iflat_tasks_wti(self, status=None, op="==", nids=None, with_wti=True):
"""
        Generator that produces a flat sequence of tasks.
        If status is not None, only the tasks with the specified status are selected.
nids is an optional list of node identifiers used to filter the tasks.
Returns:
(task, work_index, task_index) if with_wti is True else task
"""
nids = as_set(nids)
if status is None:
for wi, work in enumerate(self):
for ti, task in enumerate(work):
if nids and task.node_id not in nids: continue
if with_wti:
yield task, wi, ti
else:
yield task
else:
# Get the operator from the string.
op = operator_from_str(op)
# Accept Task.S_FLAG or string.
status = Status.as_status(status)
for wi, work in enumerate(self):
for ti, task in enumerate(work):
if nids and task.node_id not in nids: continue
if op(task.status, status):
if with_wti:
yield task, wi, ti
else:
yield task
def abivalidate_inputs(self):
"""
Run ABINIT in dry mode to validate all the inputs of the flow.
Return:
(isok, tuples)
isok is True if all inputs are ok.
tuples is List of `namedtuple` objects, one for each task in the flow.
Each namedtuple has the following attributes:
retcode: Return code. 0 if OK.
log_file: log file of the Abinit run, use log_file.read() to access its content.
stderr_file: stderr file of the Abinit run. use stderr_file.read() to access its content.
Raises:
`RuntimeError` if executable is not in $PATH.
"""
if not self.allocated:
self.build()
#self.build_and_pickle_dump()
isok, tuples = True, []
for task in self.iflat_tasks():
t = task.input.abivalidate()
if t.retcode != 0: isok = False
tuples.append(t)
return isok, tuples
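    # Sketch: validate all the inputs before running and print the log of the
    # tasks that failed the validation.
    #
    #   isok, tuples = flow.abivalidate_inputs()
    #   if not isok:
    #       for t in tuples:
    #           if t.retcode != 0: print(t.log_file.read())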
def check_dependencies(self):
"""Test the dependencies of the nodes for possible deadlocks."""
deadlocks = []
for task in self.iflat_tasks():
for dep in task.deps:
if dep.node.depends_on(task):
deadlocks.append((task, dep.node))
if deadlocks:
lines = ["Detect wrong list of dependecies that will lead to a deadlock:"]
lines.extend(["%s <--> %s" % nodes for nodes in deadlocks])
raise RuntimeError("\n".join(lines))
def find_deadlocks(self):
"""
This function detects deadlocks
Return:
            named tuple with the tasks grouped in: deadlocked, runnables, running
"""
        # Find jobs that can be submitted and the jobs that are already in the queue.
runnables = []
for work in self:
runnables.extend(work.fetch_alltasks_to_run())
runnables.extend(list(self.iflat_tasks(status=self.S_SUB)))
# Running jobs.
running = list(self.iflat_tasks(status=self.S_RUN))
# Find deadlocks.
err_tasks = self.errored_tasks
deadlocked = []
if err_tasks:
for task in self.iflat_tasks():
if any(task.depends_on(err_task) for err_task in err_tasks):
deadlocked.append(task)
return dict2namedtuple(deadlocked=deadlocked, runnables=runnables, running=running)
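    # Sketch: inspect the scheduling state of the flow.
    #
    #   d = flow.find_deadlocks()
    #   print("deadlocked:", d.deadlocked, "runnables:", d.runnables, "running:", d.running)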
def check_status(self, **kwargs):
"""
Check the status of the works in self.
Args:
show: True to show the status of the flow.
kwargs: keyword arguments passed to show_status
"""
for work in self:
work.check_status()
if kwargs.pop("show", False):
self.show_status(**kwargs)
@property
def status(self):
"""The status of the :class:`Flow` i.e. the minimum of the status of its tasks and its works"""
return min(work.get_all_status(only_min=True) for work in self)
#def restart_unconverged_tasks(self, max_nlauch, excs):
# nlaunch = 0
# for task in self.unconverged_tasks:
# try:
# logger.info("Flow will try restart task %s" % task)
# fired = task.restart()
# if fired:
# nlaunch += 1
# max_nlaunch -= 1
# if max_nlaunch == 0:
# logger.info("Restart: too many jobs in the queue, returning")
# self.pickle_dump()
# return nlaunch, max_nlaunch
# except task.RestartError:
# excs.append(straceback())
# return nlaunch, max_nlaunch
def fix_abicritical(self):
"""
This function tries to fix critical events originating from ABINIT.
Returns the number of tasks that have been fixed.
"""
count = 0
for task in self.iflat_tasks(status=self.S_ABICRITICAL):
count += task.fix_abicritical()
return count
def fix_queue_critical(self):
"""
This function tries to fix critical events originating from the queue submission system.
Returns the number of tasks that have been fixed.
"""
count = 0
for task in self.iflat_tasks(status=self.S_QCRITICAL):
logger.info("Will try to fix task %s" % str(task))
try:
print(task.fix_queue_critical())
count += 1
except FixQueueCriticalError:
logger.info("Not able to fix task %s" % task)
return count
def show_info(self, **kwargs):
"""
Print info on the flow i.e. total number of tasks, works, tasks grouped by class.
Example:
Task Class Number
------------ --------
ScfTask 1
NscfTask 1
ScrTask 2
SigmaTask 6
"""
stream = kwargs.pop("stream", sys.stdout)
lines = [str(self)]
app = lines.append
app("Number of works: %d, total number of tasks: %s" % (len(self), self.num_tasks) )
app("Number of tasks with a given class:")
# Build Table
data = [[cls.__name__, len(tasks)]
for cls, tasks in self.groupby_task_class().items()]
app(str(tabulate(data, headers=["Task Class", "Number"])))
stream.write("\n".join(lines))
def show_summary(self, **kwargs):
"""
Print a short summary with the status of the flow and a counter task_status --> number_of_tasks
Args:
stream: File-like object, Default: sys.stdout
Example:
Status Count
--------- -------
Completed 10
<Flow, node_id=27163, workdir=flow_gwconv_ecuteps>, num_tasks=10, all_ok=True
"""
stream = kwargs.pop("stream", sys.stdout)
stream.write("\n")
table = list(self.status_counter.items())
s = tabulate(table, headers=["Status", "Count"])
stream.write(s + "\n")
stream.write("\n")
stream.write("%s, num_tasks=%s, all_ok=%s\n" % (str(self), self.num_tasks, self.all_ok))
stream.write("\n")
def show_status(self, **kwargs):
"""
Report the status of the works and the status of the different tasks on the specified stream.
Args:
stream: File-like object, Default: sys.stdout
nids: List of node identifiers. By defaults all nodes are shown
wslice: Slice object used to select works.
verbose: Verbosity level (default 0). > 0 to show only the works that are not finalized.
"""
stream = kwargs.pop("stream", sys.stdout)
nids = as_set(kwargs.pop("nids", None))
wslice = kwargs.pop("wslice", None)
verbose = kwargs.pop("verbose", 0)
wlist = None
if wslice is not None:
# Convert range to list of work indices.
            wlist = list(range(wslice.start, wslice.stop, wslice.step))
#has_colours = stream_has_colours(stream)
has_colours = True
red = "red" if has_colours else None
for i, work in enumerate(self):
if nids and work.node_id not in nids: continue
print("", file=stream)
cprint_map("Work #%d: %s, Finalized=%s" % (i, work, work.finalized), cmap={"True": "green"}, file=stream)
if wlist is not None and i in wlist: continue
if verbose == 0 and work.finalized:
print(" Finalized works are not shown. Use verbose > 0 to force output.", file=stream)
continue
headers = ["Task", "Status", "Queue", "MPI|Omp|Gb",
"Warn|Com", "Class", "Sub|Rest|Corr", "Time",
"Node_ID"]
table = []
tot_num_errors = 0
for task in work:
if nids and task.node_id not in nids: continue
task_name = os.path.basename(task.name)
# FIXME: This should not be done here.
# get_event_report should be called only in check_status
# Parse the events in the main output.
report = task.get_event_report()
# Get time info (run-time or time in queue or None)
stime = None
timedelta = task.datetimes.get_runtime()
if timedelta is not None:
stime = str(timedelta) + "R"
else:
timedelta = task.datetimes.get_time_inqueue()
if timedelta is not None:
stime = str(timedelta) + "Q"
events = "|".join(2*["NA"])
if report is not None:
events = '{:>4}|{:>3}'.format(*map(str, (
report.num_warnings, report.num_comments)))
para_info = '{:>4}|{:>3}|{:>3}'.format(*map(str, (
task.mpi_procs, task.omp_threads, "%.1f" % task.mem_per_proc.to("Gb"))))
task_info = list(map(str, [task.__class__.__name__,
(task.num_launches, task.num_restarts, task.num_corrections), stime, task.node_id]))
qinfo = "None"
if task.queue_id is not None:
qinfo = str(task.queue_id) + "@" + str(task.qname)
if task.status.is_critical:
tot_num_errors += 1
task_name = colored(task_name, red)
if has_colours:
table.append([task_name, task.status.colored, qinfo,
para_info, events] + task_info)
else:
                    table.append([task_name, str(task.status), qinfo,
                                  para_info, events] + task_info)
# Print table and write colorized line with the total number of errors.
print(tabulate(table, headers=headers, tablefmt="grid"), file=stream)
if tot_num_errors:
cprint("Total number of errors: %d" % tot_num_errors, "red", file=stream)
print("", file=stream)
if self.all_ok:
cprint("\nall_ok reached\n", "green", file=stream)
def show_events(self, status=None, nids=None):
"""
        Print the Abinit events (ERRORS, WARNINGS, COMMENTS) to stdout
        Args:
            status: if not None, only the tasks with this status are selected
nids: optional list of node identifiers used to filter the tasks.
"""
nrows, ncols = get_terminal_size()
for task in self.iflat_tasks(status=status, nids=nids):
report = task.get_event_report()
if report:
print(make_banner(str(task), width=ncols, mark="="))
print(report)
#report = report.filter_types()
def show_corrections(self, status=None, nids=None):
"""
Show the corrections applied to the flow at run-time.
Args:
            status: if not None, only the tasks with this status are selected.
nids: optional list of node identifiers used to filter the tasks.
Return: The number of corrections found.
"""
nrows, ncols = get_terminal_size()
count = 0
for task in self.iflat_tasks(status=status, nids=nids):
if task.num_corrections == 0: continue
count += 1
print(make_banner(str(task), width=ncols, mark="="))
for corr in task.corrections:
pprint(corr)
if not count: print("No correction found.")
return count
def show_history(self, status=None, nids=None, full_history=False, metadata=False):
"""
Print the history of the flow to stdout.
Args:
            status: if not None, only the tasks with this status are selected
full_history: Print full info set, including nodes with an empty history.
nids: optional list of node identifiers used to filter the tasks.
metadata: print history metadata (experimental)
"""
nrows, ncols = get_terminal_size()
works_done = []
        # Loop over the tasks and show the history of the work if it's not in works_done
for task in self.iflat_tasks(status=status, nids=nids):
work = task.work
if work not in works_done:
works_done.append(work)
if work.history or full_history:
cprint(make_banner(str(work), width=ncols, mark="="), **work.status.color_opts)
print(work.history.to_string(metadata=metadata))
if task.history or full_history:
cprint(make_banner(str(task), width=ncols, mark="="), **task.status.color_opts)
print(task.history.to_string(metadata=metadata))
# Print the history of the flow.
if self.history or full_history:
cprint(make_banner(str(self), width=ncols, mark="="), **self.status.color_opts)
print(self.history.to_string(metadata=metadata))
def show_inputs(self, varnames=None, nids=None, wslice=None, stream=sys.stdout):
"""
Print the input of the tasks to the given stream.
Args:
varnames:
List of Abinit variables. If not None, only the variable in varnames
are selected and printed.
nids:
List of node identifiers. By defaults all nodes are shown
wslice:
Slice object used to select works.
stream:
File-like object, Default: sys.stdout
"""
if varnames is not None:
# Build dictionary varname --> [(task1, value), (task2, value), ...]
varnames = [s.strip() for s in list_strings(varnames)]
dlist = collections.defaultdict(list)
for task in self.select_tasks(nids=nids, wslice=wslice):
dstruct = task.input.structure.as_dict(fmt="abivars")
for vname in varnames:
value = task.input.get(vname, None)
if value is None: # maybe in structure?
value = dstruct.get(vname, None)
if value is not None:
dlist[vname].append((task, value))
for vname in varnames:
tv_list = dlist[vname]
if not tv_list:
stream.write("[%s]: Found 0 tasks with this variable\n" % vname)
else:
stream.write("[%s]: Found %s tasks with this variable\n" % (vname, len(tv_list)))
for i, (task, value) in enumerate(tv_list):
stream.write(" %s --> %s\n" % (str(value), task))
stream.write("\n")
else:
lines = []
for task in self.select_tasks(nids=nids, wslice=wslice):
s = task.make_input(with_header=True)
# Add info on dependencies.
if task.deps:
s += "\n\nDependencies:\n" + "\n".join(str(dep) for dep in task.deps)
else:
s += "\n\nDependencies: None"
lines.append(2*"\n" + 80 * "=" + "\n" + s + 2*"\n")
stream.writelines(lines)
def listext(self, ext, stream=sys.stdout):
"""
Print to the given `stream` a table with the list of the output files
with the given `ext` produced by the flow.
"""
nodes_files = []
for node in self.iflat_nodes():
filepath = node.outdir.has_abiext(ext)
if filepath:
nodes_files.append((node, File(filepath)))
if nodes_files:
print("Found %s files with extension %s produced by the flow" % (len(nodes_files), ext), file=stream)
table = [[f.relpath, "%.2f" % (f.get_stat().st_size / 1024**2),
node.node_id, node.__class__.__name__]
for node, f in nodes_files]
print(tabulate(table, headers=["File", "Size [Mb]", "Node_ID", "Node Class"]), file=stream)
else:
print("No output file with extension %s has been produced by the flow" % ext, file=stream)
def select_tasks(self, nids=None, wslice=None):
"""
Return a list with a subset of tasks.
Args:
nids: List of node identifiers.
wslice: Slice object used to select works.
.. note::
nids and wslice are mutually exclusive.
If no argument is provided, the full list of tasks is returned.
"""
if nids is not None:
assert wslice is None
tasks = self.tasks_from_nids(nids)
elif wslice is not None:
tasks = []
for work in self[wslice]:
tasks.extend([t for t in work])
else:
# All tasks selected if no option is provided.
tasks = list(self.iflat_tasks())
return tasks
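    # Sketch: select the tasks of the first two works with a slice object.
    #
    #   tasks = flow.select_tasks(wslice=slice(0, 2, 1))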
def inspect(self, nids=None, wslice=None, **kwargs):
"""
Inspect the tasks (SCF iterations, Structural relaxation ...) and
produces matplotlib plots.
Args:
nids: List of node identifiers.
wslice: Slice object used to select works.
kwargs: keyword arguments passed to `task.inspect` method.
.. note::
nids and wslice are mutually exclusive.
If nids and wslice are both None, all tasks in self are inspected.
Returns:
List of `matplotlib` figures.
"""
figs = []
for task in self.select_tasks(nids=nids, wslice=wslice):
if hasattr(task, "inspect"):
fig = task.inspect(**kwargs)
if fig is None:
cprint("Cannot inspect Task %s" % task, color="blue")
else:
figs.append(fig)
else:
cprint("Task %s does not provide an inspect method" % task, color="blue")
return figs
def get_results(self, **kwargs):
results = self.Results.from_node(self)
results.update(self.get_dict_for_mongodb_queries())
return results
def get_dict_for_mongodb_queries(self):
"""
This function returns a dictionary with the attributes that will be
put in the mongodb document to facilitate the query.
Subclasses may want to replace or extend the default behaviour.
"""
d = {}
return d
# TODO
        #all_structures = [task.input.structure for task in self.iflat_tasks()]
        #all_pseudos = [task.input.pseudos for task in self.iflat_tasks()]
def look_before_you_leap(self):
"""
This method should be called before running the calculation to make
sure that the most important requirements are satisfied.
Return:
            String with the list of inconsistencies/errors (empty if no problem is found).
"""
errors = []
try:
self.check_dependencies()
except self.Error as exc:
errors.append(str(exc))
if self.has_db:
try:
self.manager.db_connector.get_collection()
except Exception as exc:
errors.append("""
ERROR while trying to connect to the MongoDB database:
Exception:
%s
Connector:
%s
""" % (exc, self.manager.db_connector))
return "\n".join(errors)
@property
def has_db(self):
"""True if flow uses `MongoDB` to store the results."""
return self.manager.has_db
def db_insert(self):
"""
        Insert results in the `MongoDB` database.
"""
assert self.has_db
# Connect to MongoDb and get the collection.
coll = self.manager.db_connector.get_collection()
print("Mongodb collection %s with count %d", coll, coll.count())
start = time.time()
for work in self:
for task in work:
results = task.get_results()
pprint(results)
results.update_collection(coll)
results = work.get_results()
pprint(results)
results.update_collection(coll)
print("MongoDb update done in %s [s]" % time.time() - start)
results = self.get_results()
pprint(results)
results.update_collection(coll)
# Update the pickle file to save the mongo ids.
self.pickle_dump()
for d in coll.find():
pprint(d)
def tasks_from_nids(self, nids):
"""
Return the list of tasks associated to the given list of node identifiers (nids).
.. note::
Invalid ids are ignored
"""
if not isinstance(nids, collections.Iterable): nids = [nids]
tasks = []
for nid in nids:
for task in self.iflat_tasks():
if task.node_id == nid:
tasks.append(task)
break
return tasks
def wti_from_nids(self, nids):
"""Return the list of (w, t) indices from the list of node identifiers nids."""
return [task.pos for task in self.tasks_from_nids(nids)]
def open_files(self, what="o", status=None, op="==", nids=None, editor=None):
"""
Open the files of the flow inside an editor (command line interface).
Args:
what: string with the list of characters selecting the file type
Possible choices:
i ==> input_file,
o ==> output_file,
f ==> files_file,
j ==> job_file,
l ==> log_file,
e ==> stderr_file,
q ==> qout_file,
all ==> all files.
            status: if not None, only the tasks with this status are selected
op: status operator. Requires status. A task is selected
if task.status op status evaluates to true.
nids: optional list of node identifiers used to filter the tasks.
editor: Select the editor. None to use the default editor ($EDITOR shell env var)
"""
# Build list of files to analyze.
files = []
for task in self.iflat_tasks(status=status, op=op, nids=nids):
lst = task.select_files(what)
if lst:
files.extend(lst)
return Editor(editor=editor).edit_files(files)
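    # Sketch: open the log files of all the errored tasks in the default $EDITOR.
    #
    #   flow.open_files(what="l", status="S_ERROR")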
def parse_timing(self, nids=None):
"""
Parse the timer data in the main output file(s) of Abinit.
Requires timopt /= 0 in the input file (usually timopt = -1)
Args:
nids: optional list of node identifiers used to filter the tasks.
Return: :class:`AbinitTimerParser` instance, None if error.
"""
# Get the list of output files according to nids.
paths = [task.output_file.path for task in self.iflat_tasks(nids=nids)]
# Parse data.
from .abitimer import AbinitTimerParser
parser = AbinitTimerParser()
read_ok = parser.parse(paths)
if read_ok:
return parser
return None
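    # Sketch (assumes timopt was set in the input files): parse the timing data
    # of the whole flow. The plot method name below is an assumption.
    #
    #   parser = flow.parse_timing()
    #   if parser is not None: parser.plot_efficiency()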
def show_abierrors(self, nids=None, stream=sys.stdout):
"""
Write to the given stream the list of ABINIT errors for all tasks whose status is S_ABICRITICAL.
Args:
nids: optional list of node identifiers used to filter the tasks.
stream: File-like object. Default: sys.stdout
"""
lines = []
app = lines.append
for task in self.iflat_tasks(status=self.S_ABICRITICAL, nids=nids):
header = "=== " + task.qout_file.path + "==="
app(header)
report = task.get_event_report()
if report is not None:
app("num_errors: %s, num_warnings: %s, num_comments: %s" % (
report.num_errors, report.num_warnings, report.num_comments))
app("*** ERRORS ***")
app("\n".join(str(e) for e in report.errors))
app("*** BUGS ***")
app("\n".join(str(b) for b in report.bugs))
else:
app("get_envent_report returned None!")
app("=" * len(header) + 2*"\n")
return stream.writelines(lines)
def show_qouts(self, nids=None, stream=sys.stdout):
"""
Write to the given stream the content of the queue output file for all tasks whose status is S_QCRITICAL.
Args:
nids: optional list of node identifiers used to filter the tasks.
stream: File-like object. Default: sys.stdout
"""
lines = []
for task in self.iflat_tasks(status=self.S_QCRITICAL, nids=nids):
header = "=== " + task.qout_file.path + "==="
lines.append(header)
if task.qout_file.exists:
with open(task.qout_file.path, "rt") as fh:
lines += fh.readlines()
else:
lines.append("File does not exist!")
lines.append("=" * len(header) + 2*"\n")
return stream.writelines(lines)
def debug(self, status=None, nids=None):
"""
        This method is usually used when the flow didn't complete successfully.
        It analyzes the files produced by the tasks to facilitate debugging.
        Info is printed to stdout.
Args:
status: If not None, only the tasks with this status are selected
nids: optional list of node identifiers used to filter the tasks.
"""
nrows, ncols = get_terminal_size()
# Test for scheduler exceptions first.
sched_excfile = os.path.join(self.workdir, "_exceptions")
if os.path.exists(sched_excfile):
with open(sched_excfile, "r") as fh:
cprint("Found exceptions raised by the scheduler", "red")
cprint(fh.read(), color="red")
return
if status is not None:
tasks = list(self.iflat_tasks(status=status, nids=nids))
else:
errors = list(self.iflat_tasks(status=self.S_ERROR, nids=nids))
qcriticals = list(self.iflat_tasks(status=self.S_QCRITICAL, nids=nids))
abicriticals = list(self.iflat_tasks(status=self.S_ABICRITICAL, nids=nids))
tasks = errors + qcriticals + abicriticals
# For each task selected:
# 1) Check the error files of the task. If not empty, print the content to stdout and we are done.
# 2) If error files are empty, look at the master log file for possible errors
        # 3) If this check also fails, scan all the process log files.
# TODO: This check is not needed if we introduce a new __abinit_error__ file
# that is created by the first MPI process that invokes MPI abort!
#
ntasks = 0
for task in tasks:
print(make_banner(str(task), width=ncols, mark="="))
ntasks += 1
# Start with error files.
for efname in ["qerr_file", "stderr_file",]:
err_file = getattr(task, efname)
if err_file.exists:
s = err_file.read()
if not s: continue
print(make_banner(str(err_file), width=ncols, mark="="))
cprint(s, color="red")
#count += 1
# Check main log file.
try:
report = task.get_event_report()
if report and report.num_errors:
print(make_banner(os.path.basename(report.filename), width=ncols, mark="="))
s = "\n".join(str(e) for e in report.errors)
else:
s = None
except Exception as exc:
s = str(exc)
count = 0 # count > 0 means we found some useful info that could explain the failures.
if s is not None:
cprint(s, color="red")
count += 1
if not count:
# Inspect all log files produced by the other nodes.
log_files = task.tmpdir.list_filepaths(wildcard="*LOG_*")
if not log_files:
cprint("No *LOG_* file in tmpdir. This usually happens if you are running with many CPUs", color="magenta")
for log_file in log_files:
try:
report = EventsParser().parse(log_file)
if report.errors:
print(report)
count += 1
break
except Exception as exc:
cprint(str(exc), color="red")
count += 1
break
if not count:
cprint("Houston, we could not find any error message that can explain the problem", color="magenta")
print("Number of tasks analyzed: %d" % ntasks)
def cancel(self, nids=None):
"""
Cancel all the tasks that are in the queue.
nids is an optional list of node identifiers used to filter the tasks.
Returns:
Number of jobs cancelled, negative value if error
"""
if self.has_chrooted:
# TODO: Use paramiko to kill the job?
warnings.warn("Cannot cancel the flow via sshfs!")
return -1
# If we are running with the scheduler, we must send a SIGKILL signal.
if os.path.exists(self.pid_file):
cprint("Found scheduler attached to this flow.", "yellow")
cprint("Sending SIGKILL to the scheduler before cancelling the tasks!", "yellow")
with open(self.pid_file, "r") as fh:
pid = int(fh.readline())
retcode = os.system("kill -9 %d" % pid)
self.history.info("Sent SIGKILL to the scheduler, retcode: %s" % retcode)
try:
os.remove(self.pid_file)
except IOError:
pass
num_cancelled = 0
for task in self.iflat_tasks(nids=nids):
num_cancelled += task.cancel()
return num_cancelled
def get_njobs_in_queue(self, username=None):
"""
        Returns the number of jobs in the queue; None when the number of jobs cannot be determined.
Args:
username: (str) the username of the jobs to count (default is to autodetect)
"""
return self.manager.qadapter.get_njobs_in_queue(username=username)
def rmtree(self, ignore_errors=False, onerror=None):
"""Remove workdir (same API as shutil.rmtree)."""
if not os.path.exists(self.workdir): return
shutil.rmtree(self.workdir, ignore_errors=ignore_errors, onerror=onerror)
def rm_and_build(self):
"""Remove the workdir and rebuild the flow."""
self.rmtree()
self.build()
def build(self, *args, **kwargs):
"""Make directories and files of the `Flow`."""
# Allocate here if not done yet!
if not self.allocated: self.allocate()
self.indir.makedirs()
self.outdir.makedirs()
self.tmpdir.makedirs()
# Check the nodeid file in workdir
nodeid_path = os.path.join(self.workdir, ".nodeid")
if os.path.exists(nodeid_path):
with open(nodeid_path, "rt") as fh:
node_id = int(fh.read())
if self.node_id != node_id:
msg = ("\nFound node_id %s in file:\n\n %s\n\nwhile the node_id of the present flow is %d.\n"
"This means that you are trying to build a new flow in a directory already used by another flow.\n"
"Possible solutions:\n"
" 1) Change the workdir of the new flow.\n"
" 2) remove the old directory either with `rm -rf` or by calling the method flow.rmtree()\n"
% (node_id, nodeid_path, self.node_id))
raise RuntimeError(msg)
else:
with open(nodeid_path, "wt") as fh:
fh.write(str(self.node_id))
for work in self:
work.build(*args, **kwargs)
def build_and_pickle_dump(self, abivalidate=False):
"""
Build dirs and file of the `Flow` and save the object in pickle format.
Returns 0 if success
Args:
            abivalidate: If True, all the input files are validated by calling
                the abinit parser. If the validation fails, ValueError is raised.
"""
self.build()
if not abivalidate: return self.pickle_dump()
# Validation with Abinit.
isok, errors = self.abivalidate_inputs()
if isok: return self.pickle_dump()
errlines = []
for i, e in enumerate(errors):
errlines.append("[%d] %s" % (i, e))
raise ValueError("\n".join(errlines))
@check_spectator
def pickle_dump(self):
"""
Save the status of the object in pickle format.
Returns 0 if success
"""
if self.has_chrooted:
warnings.warn("Cannot pickle_dump since we have chrooted from %s" % self.has_chrooted)
return -1
#if self.in_spectator_mode:
# warnings.warn("Cannot pickle_dump since flow is in_spectator_mode")
# return -2
protocol = self.pickle_protocol
# Atomic transaction with FileLock.
with FileLock(self.pickle_file):
with AtomicFile(self.pickle_file, mode="wb") as fh:
pmg_pickle_dump(self, fh, protocol=protocol)
return 0
def pickle_dumps(self, protocol=None):
"""
Return a string with the pickle representation.
`protocol` selects the pickle protocol. self.pickle_protocol is
used if `protocol` is None
"""
strio = StringIO()
pmg_pickle_dump(self, strio,
protocol=self.pickle_protocol if protocol is None
else protocol)
return strio.getvalue()
def register_task(self, input, deps=None, manager=None, task_class=None):
"""
Utility function that generates a `Work` made of a single task
Args:
input: :class:`AbinitInput`
deps: List of :class:`Dependency` objects specifying the dependency of this node.
                An empty list of deps implies that this node has no dependencies.
manager: The :class:`TaskManager` responsible for the submission of the task.
If manager is None, we use the :class:`TaskManager` specified during the creation of the work.
task_class: Task subclass to instantiate. Default: :class:`AbinitTask`
Returns:
The generated :class:`Work` for the task, work[0] is the actual task.
"""
work = Work(manager=manager)
task = work.register(input, deps=deps, task_class=task_class)
self.register_work(work)
return work
def register_work(self, work, deps=None, manager=None, workdir=None):
"""
Register a new :class:`Work` and add it to the internal list, taking into account possible dependencies.
Args:
work: :class:`Work` object.
deps: List of :class:`Dependency` objects specifying the dependency of this node.
                An empty list of deps implies that this node has no dependencies.
manager: The :class:`TaskManager` responsible for the submission of the task.
If manager is None, we use the `TaskManager` specified during the creation of the work.
workdir: The name of the directory used for the :class:`Work`.
Returns:
The registered :class:`Work`.
"""
if getattr(self, "workdir", None) is not None:
            # The flow has a directory, build the name of the directory of the work.
work_workdir = None
if workdir is None:
work_workdir = os.path.join(self.workdir, "w" + str(len(self)))
else:
work_workdir = os.path.join(self.workdir, os.path.basename(workdir))
work.set_workdir(work_workdir)
if manager is not None:
work.set_manager(manager)
self.works.append(work)
if deps:
deps = [Dependency(node, exts) for node, exts in deps.items()]
work.add_deps(deps)
return work
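    # Sketch: a two-work flow in which the second work depends on the DEN file
    # produced by the first (scf_input and nscf_work are placeholders):
    #
    #   flow = Flow(workdir="flow_bands")
    #   scf_work = flow.register_task(scf_input, task_class=ScfTask)
    #   flow.register_work(nscf_work, deps={scf_work[0]: "DEN"})
    #   flow.allocate()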
def register_work_from_cbk(self, cbk_name, cbk_data, deps, work_class, manager=None):
"""
Registers a callback function that will generate the :class:`Task` of the :class:`Work`.
Args:
cbk_name: Name of the callback function (must be a bound method of self)
cbk_data: Additional data passed to the callback function.
deps: List of :class:`Dependency` objects specifying the dependency of the work.
work_class: :class:`Work` class to instantiate.
manager: The :class:`TaskManager` responsible for the submission of the task.
If manager is None, we use the `TaskManager` specified during the creation of the :class:`Flow`.
Returns:
The :class:`Work` that will be finalized by the callback.
"""
# TODO: pass a Work factory instead of a class
# Directory of the Work.
work_workdir = os.path.join(self.workdir, "w" + str(len(self)))
# Create an empty work and register the callback
work = work_class(workdir=work_workdir, manager=manager)
self._works.append(work)
deps = [Dependency(node, exts) for node, exts in deps.items()]
if not deps:
raise ValueError("A callback must have deps!")
work.add_deps(deps)
# Wrap the callable in a Callback object and save
# useful info such as the index of the work and the callback data.
cbk = FlowCallback(cbk_name, self, deps=deps, cbk_data=cbk_data)
self._callbacks.append(cbk)
return work
@property
def allocated(self):
"""Numer of allocations. Set by `allocate`."""
try:
return self._allocated
except AttributeError:
return 0
def allocate(self, workdir=None):
"""
Allocate the `Flow` i.e. assign the `workdir` and (optionally)
the :class:`TaskManager` to the different tasks in the Flow.
Args:
workdir: Working directory of the flow. Must be specified here
if we haven't initialized the workdir in the __init__.
"""
if workdir is not None:
# We set the workdir of the flow here
self.set_workdir(workdir)
for i, work in enumerate(self):
work.set_workdir(os.path.join(self.workdir, "w" + str(i)))
if not hasattr(self, "workdir"):
raise RuntimeError("You must call flow.allocate(workdir) if the workdir is not passed to __init__")
for work in self:
# Each work has a reference to its flow.
work.allocate(manager=self.manager)
work.set_flow(self)
# Each task has a reference to its work.
for task in work:
task.set_work(work)
self.check_dependencies()
if not hasattr(self, "_allocated"): self._allocated = 0
self._allocated += 1
return self
def use_smartio(self):
"""
This function should be called when the entire `Flow` has been built.
It tries to reduce the pressure on the hard disk by using Abinit smart-io
capabilities for those files that are not needed by other nodes.
Smart-io means that big files (e.g. WFK) are written only if the calculation
is unconverged so that we can restart from it. No output is produced if
convergence is achieved.
"""
        if not self.allocated:
            #raise RuntimeError("You must call flow.allocate before invoking flow.use_smartio")
            self.allocate()
for task in self.iflat_tasks():
children = task.get_children()
if not children:
# Change the input so that output files are produced
# only if the calculation is not converged.
task.history.info("Will disable IO for task")
task.set_vars(prtwf=-1, prtden=0) # TODO: prt1wf=-1,
else:
must_produce_abiexts = []
for child in children:
# Get the list of dependencies. Find that task
for d in child.deps:
must_produce_abiexts.extend(d.exts)
must_produce_abiexts = set(must_produce_abiexts)
#print("must_produce_abiexts", must_produce_abiexts)
# Variables supporting smart-io.
smart_prtvars = {
"prtwf": "WFK",
}
# Set the variable to -1 to disable the output
for varname, abiext in smart_prtvars.items():
if abiext not in must_produce_abiexts:
print("%s: setting %s to -1" % (task, varname))
task.set_vars({varname: -1})
#def new_from_input_decorators(self, new_workdir, decorators)
# """
# Return a new :class:`Flow` in which all the Abinit inputs have been
# decorated by decorators.
# """
    # # The trick part here is how to assign a new id to the new nodes while maintaining the
# # correct dependencies! The safest approach would be to pass through __init__
# # instead of using copy.deepcopy()
# return flow
def show_dependencies(self, stream=sys.stdout):
"""Writes to the given stream the ASCII representation of the dependency tree."""
def child_iter(node):
return [d.node for d in node.deps]
def text_str(node):
return colored(str(node), color=node.status.color_opts["color"])
for task in self.iflat_tasks():
print(draw_tree(task, child_iter, text_str), file=stream)
def on_dep_ok(self, signal, sender):
# TODO
# Replace this callback with dynamic dispatch
# on_all_S_OK for work
# on_S_OK for task
logger.info("on_dep_ok with sender %s, signal %s" % (str(sender), signal))
for i, cbk in enumerate(self._callbacks):
if not cbk.handle_sender(sender):
logger.info("%s does not handle sender %s" % (cbk, sender))
continue
if not cbk.can_execute():
logger.info("Cannot execute %s" % cbk)
continue
# Execute the callback and disable it
self.history.info("flow in on_dep_ok: about to execute callback %s" % str(cbk))
cbk()
cbk.disable()
# Update the database.
self.pickle_dump()
@check_spectator
def finalize(self):
"""
This method is called when the flow is completed.
Return 0 if success
"""
if self.finalized:
self.history.warning("Calling finalize on an already finalized flow.")
return 1
self.history.info("Calling flow.finalize.")
self.finalized = True
if self.has_db:
self.history.info("Saving results in database.")
try:
                self.db_insert()
self.finalized = True
except Exception:
logger.critical("MongoDb insertion failed.")
return 2
# Here we remove the big output files if we have the garbage collector
# and the policy is set to "flow."
if self.gc is not None and self.gc.policy == "flow":
self.history.info("gc.policy set to flow. Will clean task output files.")
for task in self.iflat_tasks():
task.clean_output_files()
return 0
def set_garbage_collector(self, exts=None, policy="task"):
"""
Enable the garbage collector that will remove the big output files that are not needed.
Args:
exts: string or list with the Abinit file extensions to be removed. A default is
provided if exts is None
policy: Either `flow` or `task`. If policy is set to 'task', we remove the output
files as soon as the task reaches S_OK. If 'flow', the files are removed
only when the flow is finalized. This option should be used when we are dealing
with a dynamic flow with callbacks generating other tasks since a :class:`Task`
might not be aware of its children when it reached S_OK.
"""
assert policy in ("task", "flow")
exts = list_strings(exts) if exts is not None else ("WFK", "SUS", "SCR", "BSR", "BSC")
gc = GarbageCollector(exts=set(exts), policy=policy)
self.set_gc(gc)
for work in self:
#work.set_gc(gc) # TODO Add support for Works and flow policy
for task in work:
task.set_gc(gc)
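    # Hedged usage sketch: remove large WFK/SCR files as soon as each task
    # reaches S_OK. With a dynamic flow (callbacks creating tasks), prefer
    # policy="flow" as explained in the docstring above.
    #
    #   flow.set_garbage_collector(exts=["WFK", "SCR"], policy="task")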
def connect_signals(self):
"""
Connect the signals within the `Flow`.
The `Flow` is responsible for catching the important signals raised from its works.
"""
# Connect the signals inside each Work.
for work in self:
work.connect_signals()
# Observe the nodes that must reach S_OK in order to call the callbacks.
for cbk in self._callbacks:
#cbk.enable()
for dep in cbk.deps:
logger.info("connecting %s \nwith sender %s, signal %s" % (str(cbk), dep.node, dep.node.S_OK))
dispatcher.connect(self.on_dep_ok, signal=dep.node.S_OK, sender=dep.node, weak=False)
# Associate to each signal the callback _on_signal
# (bound method of the node that will be called by `Flow`
# Each node will set its attribute _done_signal to True to tell
# the flow that this callback should be disabled.
# Register the callbacks for the Work.
#for work in self:
# slot = self._sig_slots[work]
# for signal in S_ALL:
# done_signal = getattr(work, "_done_ " + signal, False)
# if not done_sig:
# cbk_name = "_on_" + str(signal)
# cbk = getattr(work, cbk_name, None)
# if cbk is None: continue
# slot[work][signal].append(cbk)
# print("connecting %s\nwith sender %s, signal %s" % (str(cbk), dep.node, dep.node.S_OK))
# dispatcher.connect(self.on_dep_ok, signal=signal, sender=dep.node, weak=False)
# Register the callbacks for the Tasks.
#self.show_receivers()
def disconnect_signals(self):
"""Disable the signals within the `Flow`."""
# Disconnect the signals inside each Work.
for work in self:
work.disconnect_signals()
# Disable callbacks.
for cbk in self._callbacks:
cbk.disable()
def show_receivers(self, sender=None, signal=None):
sender = sender if sender is not None else dispatcher.Any
signal = signal if signal is not None else dispatcher.Any
print("*** live receivers ***")
for rec in dispatcher.liveReceivers(dispatcher.getReceivers(sender, signal)):
print("receiver -->", rec)
print("*** end live receivers ***")
def set_spectator_mode(self, mode=True):
"""
When the flow is in spectator_mode, we have to disable signals, pickle dump and possible callbacks
A spectator can still operate on the flow but the new status of the flow won't be saved in
the pickle file. Usually the flow is in spectator mode when we are already running it via
the scheduler or other means and we should not interfere with its evolution.
This is the reason why signals and callbacks must be disabled.
Unfortunately preventing client-code from calling methods with side-effects when
the flow is in spectator mode is not easy (e.g. flow.cancel will cancel the tasks submitted to the
        queue and the flow used by the scheduler won't see this change!).
"""
# Set the flags of all the nodes in the flow.
mode = bool(mode)
self.in_spectator_mode = mode
for node in self.iflat_nodes():
node.in_spectator_mode = mode
# connect/disconnect signals depending on mode.
if not mode:
self.connect_signals()
else:
self.disconnect_signals()
#def get_results(self, **kwargs)
def rapidfire(self, check_status=True, **kwargs):
"""
        Use :class:`PyLauncher` to submit tasks in rapidfire mode.
kwargs contains the options passed to the launcher.
Return:
number of tasks submitted.
"""
self.check_pid_file()
self.set_spectator_mode(False)
if check_status: self.check_status()
from .launcher import PyLauncher
return PyLauncher(self, **kwargs).rapidfire()
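    # Hedged usage sketch: submit all tasks that are ready to run.
    #
    #   nlaunched = flow.rapidfire()
    #   print("Submitted %d tasks" % nlaunched)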
def single_shot(self, check_status=True, **kwargs):
"""
        Use :class:`PyLauncher` to submit one task.
kwargs contains the options passed to the launcher.
Return:
number of tasks submitted.
"""
self.check_pid_file()
self.set_spectator_mode(False)
if check_status: self.check_status()
from .launcher import PyLauncher
return PyLauncher(self, **kwargs).single_shot()
def make_scheduler(self, **kwargs):
"""
        Build and return a :class:`PyFlowScheduler` to run the flow.
Args:
kwargs: if empty we use the user configuration file.
            if `filepath` is in kwargs, we init the scheduler from filepath.
else pass **kwargs to :class:`PyFlowScheduler` __init__ method.
"""
from .launcher import PyFlowScheduler
if not kwargs:
# User config if kwargs is empty
sched = PyFlowScheduler.from_user_config()
else:
            # Use from_file if filepath is present, else call __init__
filepath = kwargs.pop("filepath", None)
if filepath is not None:
assert not kwargs
sched = PyFlowScheduler.from_file(filepath)
else:
sched = PyFlowScheduler(**kwargs)
sched.add_flow(self)
return sched
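    # Hedged usage sketch: build a scheduler from the user configuration and
    # let it drive the flow to completion (the `start` method is assumed to
    # be provided by PyFlowScheduler; see launcher.py).
    #
    #   flow.make_scheduler().start()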
def batch(self, timelimit=None):
"""
Run the flow in batch mode, return exit status of the job script.
Requires a manager.yml file and a batch_adapter adapter.
Args:
timelimit: Time limit (int with seconds or string with time given with the slurm convention:
"days-hours:minutes:seconds"). If timelimit is None, the default value specified in the
`batch_adapter` entry of `manager.yml` is used.
"""
from .launcher import BatchLauncher
# Create a batch dir from the flow.workdir.
prev_dir = os.path.join(*self.workdir.split(os.path.sep)[:-1])
prev_dir = os.path.join(os.path.sep, prev_dir)
workdir = os.path.join(prev_dir, os.path.basename(self.workdir) + "_batch")
return BatchLauncher(workdir=workdir, flows=self).submit(timelimit=timelimit)
def make_light_tarfile(self, name=None):
"""Lightweight tarball file. Mainly used for debugging. Return the name of the tarball file."""
name = os.path.basename(self.workdir) + "-light.tar.gz" if name is None else name
return self.make_tarfile(name=name, exclude_dirs=["outdata", "indata", "tmpdata"])
def make_tarfile(self, name=None, max_filesize=None, exclude_exts=None, exclude_dirs=None, verbose=0, **kwargs):
"""
Create a tarball file.
Args:
name: Name of the tarball file. Set to os.path.basename(`flow.workdir`) + "tar.gz"` if name is None.
max_filesize (int or string with unit): a file is included in the tar file if its size <= max_filesize
                Can be specified in bytes e.g. `max_filesize=1024` or with a string with unit e.g. `max_filesize="1 Mb"`.
No check is done if max_filesize is None.
exclude_exts: List of file extensions to be excluded from the tar file.
exclude_dirs: List of directory basenames to be excluded.
verbose (int): Verbosity level.
kwargs: keyword arguments passed to the :class:`TarFile` constructor.
Returns:
The name of the tarfile.
"""
def any2bytes(s):
"""Convert string or number to memory in bytes."""
if is_string(s):
return int(Memory.from_string(s).to("b"))
else:
return int(s)
if max_filesize is not None:
max_filesize = any2bytes(max_filesize)
if exclude_exts:
# Add/remove ".nc" so that we can simply pass "GSR" instead of "GSR.nc"
# Moreover this trick allows one to treat WFK.nc and WFK file on the same footing.
exts = []
for e in list_strings(exclude_exts):
exts.append(e)
if e.endswith(".nc"):
exts.append(e.replace(".nc", ""))
else:
exts.append(e + ".nc")
exclude_exts = exts
def filter(tarinfo):
"""
Function that takes a TarInfo object argument and returns the changed TarInfo object.
If it instead returns None the TarInfo object will be excluded from the archive.
"""
# Skip links.
if tarinfo.issym() or tarinfo.islnk():
if verbose: print("Excluding link: %s" % tarinfo.name)
return None
# Check size in bytes
if max_filesize is not None and tarinfo.size > max_filesize:
if verbose: print("Excluding %s due to max_filesize" % tarinfo.name)
return None
# Filter filenames.
if exclude_exts and any(tarinfo.name.endswith(ext) for ext in exclude_exts):
if verbose: print("Excluding %s due to extension" % tarinfo.name)
return None
            # Exclude directories (use dir basenames).
if exclude_dirs and any(dir_name in exclude_dirs for dir_name in tarinfo.name.split(os.path.sep)):
if verbose: print("Excluding %s due to exclude_dirs" % tarinfo.name)
return None
return tarinfo
back = os.getcwd()
os.chdir(os.path.join(self.workdir, ".."))
import tarfile
name = os.path.basename(self.workdir) + ".tar.gz" if name is None else name
with tarfile.open(name=name, mode='w:gz', **kwargs) as tar:
tar.add(os.path.basename(self.workdir), arcname=None, recursive=True, exclude=None, filter=filter)
# Add the script used to generate the flow.
if self.pyfile is not None and os.path.exists(self.pyfile):
tar.add(self.pyfile)
os.chdir(back)
return name
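    # Hedged usage sketch: archive the flow, skipping files above 10 Mb and
    # any WFK outputs (the ".nc" variants are excluded automatically by the
    # extension trick above).
    #
    #   tarname = flow.make_tarfile(max_filesize="10 Mb", exclude_exts=["WFK"])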
#def abirobot(self, ext, check_status=True, nids=None):
# """
# Builds and return the :class:`Robot` subclass from the file extension `ext`.
# `nids` is an optional list of node identifiers used to filter the tasks in the flow.
# """
# from abipy.abilab import abirobot
# if check_status: self.check_status()
# return abirobot(flow=self, ext=ext, nids=nids):
@add_fig_kwargs
def plot_networkx(self, mode="network", with_edge_labels=False, ax=None,
node_size="num_cores", node_label="name_class", layout_type="spring", **kwargs):
"""
Use networkx to draw the flow with the connections among the nodes and
the status of the tasks.
Args:
mode: `networkx` to show connections, `status` to group tasks by status.
with_edge_labels: True to draw edge labels.
ax: matplotlib :class:`Axes` or None if a new figure should be created.
node_size: By default, the size of the node is proportional to the number of cores used.
node_label: By default, the task class is used to label node.
layout_type: Get positions for all nodes using `layout_type`. e.g. pos = nx.spring_layout(g)
.. warning::
Requires networkx package.
"""
if not self.allocated: self.allocate()
import networkx as nx
# Build the graph
g, edge_labels = nx.Graph(), {}
tasks = list(self.iflat_tasks())
for task in tasks:
g.add_node(task, name=task.name)
for child in task.get_children():
g.add_edge(task, child)
# TODO: Add getters! What about locked nodes!
i = [dep.node for dep in child.deps].index(task)
edge_labels[(task, child)] = " ".join(child.deps[i].exts)
# Get positions for all nodes using layout_type.
# e.g. pos = nx.spring_layout(g)
pos = getattr(nx, layout_type + "_layout")(g)
# Select function used to compute the size of the node
make_node_size = dict(num_cores=lambda task: 300 * task.manager.num_cores)[node_size]
# Select function used to build the label
make_node_label = dict(name_class=lambda task: task.pos_str + "\n" + task.__class__.__name__,)[node_label]
labels = {task: make_node_label(task) for task in g.nodes()}
ax, fig, plt = get_ax_fig_plt(ax=ax)
# Select plot type.
if mode == "network":
nx.draw_networkx(g, pos, labels=labels,
node_color=[task.color_rgb for task in g.nodes()],
node_size=[make_node_size(task) for task in g.nodes()],
width=1, style="dotted", with_labels=True, ax=ax)
# Draw edge labels
if with_edge_labels:
nx.draw_networkx_edge_labels(g, pos, edge_labels=edge_labels, ax=ax)
elif mode == "status":
# Group tasks by status.
for status in self.ALL_STATUS:
tasks = list(self.iflat_tasks(status=status))
# Draw nodes (color is given by status)
node_color = status.color_opts["color"]
if node_color is None: node_color = "black"
#print("num nodes %s with node_color %s" % (len(tasks), node_color))
nx.draw_networkx_nodes(g, pos,
nodelist=tasks,
node_color=node_color,
node_size=[make_node_size(task) for task in tasks],
alpha=0.5, ax=ax
#label=str(status),
)
# Draw edges.
nx.draw_networkx_edges(g, pos, width=2.0, alpha=0.5, arrows=True, ax=ax) # edge_color='r')
# Draw labels
nx.draw_networkx_labels(g, pos, labels, font_size=12, ax=ax)
# Draw edge labels
if with_edge_labels:
nx.draw_networkx_edge_labels(g, pos, edge_labels=edge_labels, ax=ax)
#label_pos=0.5, font_size=10, font_color='k', font_family='sans-serif', font_weight='normal',
# alpha=1.0, bbox=None, ax=None, rotate=True, **kwds)
else:
raise ValueError("Unknown value for mode: %s" % str(mode))
ax.axis("off")
return fig
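    # Hedged usage sketch: draw the dependency graph, coloring nodes by task
    # status (requires networkx and matplotlib).
    #
    #   fig = flow.plot_networkx(mode="status", with_edge_labels=True)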
class G0W0WithQptdmFlow(Flow):
def __init__(self, workdir, scf_input, nscf_input, scr_input, sigma_inputs, manager=None):
"""
Build a :class:`Flow` for one-shot G0W0 calculations.
The computation of the q-points for the screening is parallelized with qptdm
i.e. we run independent calculations for each q-point and then we merge the final results.
Args:
workdir: Working directory.
scf_input: Input for the GS SCF run.
nscf_input: Input for the NSCF run (band structure run).
scr_input: Input for the SCR run.
sigma_inputs: Input(s) for the SIGMA run(s).
manager: :class:`TaskManager` object used to submit the jobs
Initialized from manager.yml if manager is None.
"""
super(G0W0WithQptdmFlow, self).__init__(workdir, manager=manager)
# Register the first work (GS + NSCF calculation)
bands_work = self.register_work(BandStructureWork(scf_input, nscf_input))
# Register the callback that will be executed the work for the SCR with qptdm.
scr_work = self.register_work_from_cbk(cbk_name="cbk_qptdm_workflow", cbk_data={"input": scr_input},
deps={bands_work.nscf_task: "WFK"}, work_class=QptdmWork)
# The last work contains a list of SIGMA tasks
# that will use the data produced in the previous two works.
if not isinstance(sigma_inputs, (list, tuple)):
sigma_inputs = [sigma_inputs]
sigma_work = Work()
for sigma_input in sigma_inputs:
sigma_work.register_sigma_task(sigma_input, deps={bands_work.nscf_task: "WFK", scr_work: "SCR"})
self.register_work(sigma_work)
self.allocate()
def cbk_qptdm_workflow(self, cbk):
"""
This callback is executed by the flow when bands_work.nscf_task reaches S_OK.
It computes the list of q-points for the W(q,G,G'), creates nqpt tasks
        in the second work (QptdmWork), and connects the signals.
"""
scr_input = cbk.data["input"]
# Use the WFK file produced by the second
# Task in the first Work (NSCF step).
nscf_task = self[0][1]
wfk_file = nscf_task.outdir.has_abiext("WFK")
work = self[1]
work.set_manager(self.manager)
work.create_tasks(wfk_file, scr_input)
work.add_deps(cbk.deps)
work.set_flow(self)
# Each task has a reference to its work.
for task in work:
task.set_work(work)
# Add the garbage collector.
if self.gc is not None: task.set_gc(self.gc)
work.connect_signals()
work.build()
return work
class FlowCallbackError(Exception):
"""Exceptions raised by FlowCallback."""
class FlowCallback(object):
"""
This object implements the callbacks executed by the :class:`flow` when
particular conditions are fulfilled. See on_dep_ok method of :class:`Flow`.
.. note::
I decided to implement callbacks via this object instead of a standard
approach based on bound methods because:
        1) pickle (v<=3) does not support the pickling/unpickling of bound methods
2) There's some extra logic and extra data needed for the proper functioning
of a callback at the flow level and this object provides an easy-to-use interface.
"""
Error = FlowCallbackError
def __init__(self, func_name, flow, deps, cbk_data):
"""
Args:
func_name: String with the name of the callback to execute.
func_name must be a bound method of flow with signature:
func_name(self, cbk)
where self is the Flow instance and cbk is the callback
flow: Reference to the :class:`Flow`
deps: List of dependencies associated to the callback
The callback is executed when all dependencies reach S_OK.
cbk_data: Dictionary with additional data that will be passed to the callback via self.
"""
self.func_name = func_name
self.flow = flow
self.deps = deps
self.data = cbk_data or {}
self._disabled = False
def __str__(self):
return "%s: %s bound to %s" % (self.__class__.__name__, self.func_name, self.flow)
def __call__(self):
"""Execute the callback."""
if self.can_execute():
# Get the bound method of the flow from func_name.
# We use this trick because pickle (format <=3) does not support bound methods.
try:
func = getattr(self.flow, self.func_name)
except AttributeError as exc:
raise self.Error(str(exc))
return func(self)
else:
raise self.Error("You tried to __call_ a callback that cannot be executed!")
def can_execute(self):
"""True if we can execute the callback."""
return not self._disabled and all(dep.status == dep.node.S_OK for dep in self.deps)
def disable(self):
"""
        Disable the callback. This usually happens after the callback has been executed.
"""
self._disabled = True
def enable(self):
"""Enable the callback"""
self._disabled = False
def handle_sender(self, sender):
"""
True if the callback is associated to the sender
i.e. if the node who sent the signal appears in the
dependencies of the callback.
"""
return sender in [d.node for d in self.deps]
# Factory functions.
def bandstructure_flow(workdir, scf_input, nscf_input, dos_inputs=None, manager=None, flow_class=Flow, allocate=True):
"""
Build a :class:`Flow` for band structure calculations.
Args:
workdir: Working directory.
scf_input: Input for the GS SCF run.
nscf_input: Input for the NSCF run (band structure run).
dos_inputs: Input(s) for the NSCF run (dos run).
manager: :class:`TaskManager` object used to submit the jobs
Initialized from manager.yml if manager is None.
flow_class: Flow subclass
allocate: True if the flow should be allocated before returning.
Returns:
:class:`Flow` object
"""
flow = flow_class(workdir, manager=manager)
work = BandStructureWork(scf_input, nscf_input, dos_inputs=dos_inputs)
flow.register_work(work)
# Handy aliases
flow.scf_task, flow.nscf_task, flow.dos_tasks = work.scf_task, work.nscf_task, work.dos_tasks
if allocate: flow.allocate()
return flow
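# Hedged usage sketch for bandstructure_flow: `gs_inp` and `nscf_inp` are
# hypothetical AbinitInput objects prepared by the caller.
#
#   flow = bandstructure_flow("flow_bands", gs_inp, nscf_inp)
#   flow.scf_task, flow.nscf_task  # handy aliases set above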
def g0w0_flow(workdir, scf_input, nscf_input, scr_input, sigma_inputs, manager=None, flow_class=Flow, allocate=True):
"""
Build a :class:`Flow` for one-shot $G_0W_0$ calculations.
Args:
workdir: Working directory.
scf_input: Input for the GS SCF run.
nscf_input: Input for the NSCF run (band structure run).
scr_input: Input for the SCR run.
sigma_inputs: List of inputs for the SIGMA run.
flow_class: Flow class
manager: :class:`TaskManager` object used to submit the jobs.
Initialized from manager.yml if manager is None.
allocate: True if the flow should be allocated before returning.
Returns:
:class:`Flow` object
"""
flow = flow_class(workdir, manager=manager)
work = G0W0Work(scf_input, nscf_input, scr_input, sigma_inputs)
flow.register_work(work)
if allocate: flow.allocate()
return flow
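# Hedged usage sketch for g0w0_flow: the four inputs are hypothetical
# AbinitInput objects for the SCF, NSCF, SCR and SIGMA steps.
#
#   flow = g0w0_flow("flow_g0w0", scf_inp, nscf_inp, scr_inp, [sigma_inp])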
class PhononFlow(Flow):
"""
1) One workflow for the GS run.
2) nqpt works for phonon calculations. Each work contains
nirred tasks where nirred is the number of irreducible phonon perturbations
for that particular q-point.
"""
@classmethod
def from_scf_input(cls, workdir, scf_input, ph_ngqpt, with_becs=True, manager=None, allocate=True):
"""
Create a `PhononFlow` for phonon calculations from an `AbinitInput` defining a ground-state run.
Args:
workdir: Working directory of the flow.
scf_input: :class:`AbinitInput` object with the parameters for the GS-SCF run.
ph_ngqpt: q-mesh for phonons. Must be a sub-mesh of the k-mesh used for
                electrons, e.g. if ngkpt = (8, 8, 8), ph_ngqpt = (4, 4, 4) is a valid choice
whereas ph_ngqpt = (3, 3, 3) is not!
with_becs: True if Born effective charges are wanted.
manager: :class:`TaskManager` object. Read from `manager.yml` if None.
allocate: True if the flow should be allocated before returning.
Return:
:class:`PhononFlow` object.
"""
flow = cls(workdir, manager=manager)
# Register the SCF task
flow.register_scf_task(scf_input)
scf_task = flow[0][0]
# Make sure k-mesh and q-mesh are compatible.
scf_ngkpt, ph_ngqpt = np.array(scf_input["ngkpt"]), np.array(ph_ngqpt)
if any(scf_ngkpt % ph_ngqpt != 0):
raise ValueError("ph_ngqpt %s should be a sub-mesh of scf_ngkpt %s" % (ph_ngqpt, scf_ngkpt))
# Get the q-points in the IBZ from Abinit
qpoints = scf_input.abiget_ibz(ngkpt=ph_ngqpt, shiftk=(0,0,0), kptopt=1).points
# Create a PhononWork for each q-point. Add DDK and E-field if q == Gamma and with_becs.
for qpt in qpoints:
if np.allclose(qpt, 0) and with_becs:
ph_work = BecWork.from_scf_task(scf_task)
else:
ph_work = PhononWork.from_scf_task(scf_task, qpoints=qpt)
flow.register_work(ph_work)
if allocate: flow.allocate()
return flow
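    # Hedged usage sketch: `scf_inp` is a hypothetical AbinitInput with
    # ngkpt = (4, 4, 4); the q-mesh below is then a valid sub-mesh.
    #
    #   flow = PhononFlow.from_scf_input("flow_ph", scf_inp, ph_ngqpt=(2, 2, 2))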
def open_final_ddb(self):
"""
Open the DDB file located in the output directory of the flow.
Return:
:class:`DdbFile` object, None if file could not be found or file is not readable.
"""
ddb_path = self.outdir.has_abiext("DDB")
if not ddb_path:
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a GSR file in %s" % (self, self.outdir))
return None
from abipy.dfpt.ddb import DdbFile
try:
return DdbFile(ddb_path)
except Exception as exc:
logger.critical("Exception while reading DDB file at %s:\n%s" % (ddb_path, str(exc)))
return None
def finalize(self):
"""This method is called when the flow is completed."""
# Merge all the out_DDB files found in work.outdir.
ddb_files = list(filter(None, [work.outdir.has_abiext("DDB") for work in self]))
# Final DDB file will be produced in the outdir of the work.
out_ddb = self.outdir.path_in("out_DDB")
desc = "DDB file merged by %s on %s" % (self.__class__.__name__, time.asctime())
mrgddb = wrappers.Mrgddb(manager=self.manager, verbose=0)
mrgddb.merge(self.outdir.path, ddb_files, out_ddb=out_ddb, description=desc)
print("Final DDB file available at %s" % out_ddb)
# Call the method of the super class.
retcode = super(PhononFlow, self).finalize()
#print("retcode", retcode)
#if retcode != 0: return retcode
return retcode
class NonLinearCoeffFlow(Flow):
"""
1) One workflow for the GS run.
2) nqpt works for electric field calculations. Each work contains
nirred tasks where nirred is the number of irreducible perturbations
for that particular q-point.
"""
@classmethod
def from_scf_input(cls, workdir, scf_input, manager=None, allocate=True):
"""
        Create a `NonLinearCoeffFlow` for second-order susceptibility calculations from an `AbinitInput` defining a ground-state run.
Args:
workdir: Working directory of the flow.
scf_input: :class:`AbinitInput` object with the parameters for the GS-SCF run.
manager: :class:`TaskManager` object. Read from `manager.yml` if None.
allocate: True if the flow should be allocated before returning.
Return:
            :class:`NonLinearCoeffFlow` object.
"""
flow = cls(workdir, manager=manager)
flow.register_scf_task(scf_input)
scf_task = flow[0][0]
nl_work = DteWork.from_scf_task(scf_task)
flow.register_work(nl_work)
if allocate: flow.allocate()
return flow
def open_final_ddb(self):
"""
Open the DDB file located in the output directory of the flow.
Return:
:class:`DdbFile` object, None if file could not be found or file is not readable.
"""
ddb_path = self.outdir.has_abiext("DDB")
if not ddb_path:
if self.status == self.S_OK:
logger.critical("%s reached S_OK but didn't produce a GSR file in %s" % (self, self.outdir))
return None
from abipy.dfpt.ddb import DdbFile
try:
return DdbFile(ddb_path)
except Exception as exc:
logger.critical("Exception while reading DDB file at %s:\n%s" % (ddb_path, str(exc)))
return None
def finalize(self):
"""This method is called when the flow is completed."""
# Merge all the out_DDB files found in work.outdir.
ddb_files = list(filter(None, [work.outdir.has_abiext("DDB") for work in self]))
# Final DDB file will be produced in the outdir of the work.
out_ddb = self.outdir.path_in("out_DDB")
desc = "DDB file merged by %s on %s" % (self.__class__.__name__, time.asctime())
mrgddb = wrappers.Mrgddb(manager=self.manager, verbose=0)
mrgddb.merge(self.outdir.path, ddb_files, out_ddb=out_ddb, description=desc)
print("Final DDB file available at %s" % out_ddb)
# Call the method of the super class.
retcode = super(NonLinearCoeffFlow, self).finalize()
print("retcode", retcode)
#if retcode != 0: return retcode
return retcode
# Alias for compatibility reasons. For the time being, DO NOT REMOVE
nonlinear_coeff_flow = NonLinearCoeffFlow
def phonon_flow(workdir, scf_input, ph_inputs, with_nscf=False, with_ddk=False, with_dde=False,
manager=None, flow_class=PhononFlow, allocate=True):
"""
Build a :class:`PhononFlow` for phonon calculations.
Args:
workdir: Working directory.
scf_input: Input for the GS SCF run.
ph_inputs: List of Inputs for the phonon runs.
        with_nscf: add an nscf task in front of all phonon tasks to make sure the q-point is covered
        with_ddk: add the ddk step
        with_dde: add the dde step; if dde is set, ddk is switched on automatically
manager: :class:`TaskManager` used to submit the jobs
Initialized from manager.yml if manager is None.
flow_class: Flow class
Returns:
:class:`Flow` object
"""
logger.critical("phonon_flow is deprecated and could give wrong results")
if with_dde:
with_ddk = True
natom = len(scf_input.structure)
# Create the container that will manage the different works.
flow = flow_class(workdir, manager=manager)
# Register the first work (GS calculation)
# register_task creates a work for the task, registers it to the flow and returns the work
    # the 0th element of the work is the task
scf_task = flow.register_task(scf_input, task_class=ScfTask)[0]
# Build a temporary work with a shell manager just to run
    # ABINIT to get the list of irreducible perturbations for this q-point.
shell_manager = flow.manager.to_shell_manager(mpi_procs=1)
if with_ddk:
logger.info('add ddk')
# TODO
# MG Warning: be careful here because one should use tolde or tolwfr (tolvrs shall not be used!)
ddk_input = ph_inputs[0].deepcopy()
ddk_input.set_vars(qpt=[0, 0, 0], rfddk=1, rfelfd=2, rfdir=[1, 1, 1])
ddk_task = flow.register_task(ddk_input, deps={scf_task: 'WFK'}, task_class=DdkTask)[0]
if with_dde:
logger.info('add dde')
dde_input = ph_inputs[0].deepcopy()
dde_input.set_vars(qpt=[0, 0, 0], rfddk=1, rfelfd=2)
dde_input_idir = dde_input.deepcopy()
dde_input_idir.set_vars(rfdir=[1, 1, 1])
dde_task = flow.register_task(dde_input, deps={scf_task: 'WFK', ddk_task: 'DDK'}, task_class=DdeTask)[0]
if not isinstance(ph_inputs, (list, tuple)):
ph_inputs = [ph_inputs]
for i, ph_input in enumerate(ph_inputs):
fake_input = ph_input.deepcopy()
        # Run abinit on the front-end to get the list of irreducible perturbations.
tmp_dir = os.path.join(workdir, "__ph_run" + str(i) + "__")
w = PhononWork(workdir=tmp_dir, manager=shell_manager)
fake_task = w.register(fake_input)
# Use the magic value paral_rf = -1 to get the list of irreducible perturbations for this q-point.
abivars = dict(
paral_rf=-1,
rfatpol=[1, natom], # Set of atoms to displace.
rfdir=[1, 1, 1], # Along this set of reduced coordinate axis.
)
fake_task.set_vars(abivars)
w.allocate()
w.start(wait=True)
# Parse the file to get the perturbations.
try:
irred_perts = yaml_read_irred_perts(fake_task.log_file.path)
        except Exception:
print("Error in %s" % fake_task.log_file.path)
raise
logger.info(irred_perts)
w.rmtree()
# Now we can build the final list of works:
# One work per q-point, each work computes all
        # the irreducible perturbations for a single q-point.
work_qpt = PhononWork()
if with_nscf:
            # MG: Warning this code assumes 0 is Gamma!
nscf_input = copy.deepcopy(scf_input)
nscf_input.set_vars(kptopt=3, iscf=-3, qpt=irred_perts[0]['qpt'], nqpt=1)
nscf_task = work_qpt.register_nscf_task(nscf_input, deps={scf_task: "DEN"})
deps = {nscf_task: "WFQ", scf_task: "WFK"}
else:
deps = {scf_task: "WFK"}
if with_ddk:
deps[ddk_task] = 'DDK'
logger.info(irred_perts[0]['qpt'])
for irred_pert in irred_perts:
#print(irred_pert)
new_input = ph_input.deepcopy()
#rfatpol 1 1 # Only the first atom is displaced
#rfdir 1 0 0 # Along the first reduced coordinate axis
qpt = irred_pert["qpt"]
idir = irred_pert["idir"]
ipert = irred_pert["ipert"]
# TODO this will work for phonons, but not for the other types of perturbations.
rfdir = 3 * [0]
            rfdir[idir - 1] = 1
rfatpol = [ipert, ipert]
new_input.set_vars(
#rfpert=1,
qpt=qpt,
rfdir=rfdir,
rfatpol=rfatpol,
)
if with_ddk:
new_input.set_vars(rfelfd=3)
work_qpt.register_phonon_task(new_input, deps=deps)
flow.register_work(work_qpt)
if allocate: flow.allocate()
return flow
def phonon_conv_flow(workdir, scf_input, qpoints, params, manager=None, allocate=True):
"""
Create a :class:`Flow` to perform convergence studies for phonon calculations.
Args:
workdir: Working directory of the flow.
scf_input: :class:`AbinitInput` object defining a GS-SCF calculation.
qpoints: List of list of lists with the reduced coordinates of the q-point(s).
params:
To perform a converge study wrt ecut: params=["ecut", [2, 4, 6]]
manager: :class:`TaskManager` object responsible for the submission of the jobs.
If manager is None, the object is initialized from the yaml file
located either in the working directory or in the user configuration dir.
allocate: True if the flow should be allocated before returning.
Return:
:class:`Flow` object.
"""
qpoints = np.reshape(qpoints, (-1, 3))
flow = Flow(workdir=workdir, manager=manager)
for qpt in qpoints:
for gs_inp in scf_input.product(*params):
# Register the SCF task
work = flow.register_scf_task(gs_inp)
# Add the PhononWork connected to this scf_task.
flow.register_work(PhononWork.from_scf_task(work[0], qpoints=qpt))
if allocate: flow.allocate()
return flow
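# Hedged usage sketch for phonon_conv_flow: converge phonons at Gamma with
# respect to ecut (params follows the ["varname", [values]] convention
# documented above; `scf_inp` is a hypothetical AbinitInput).
#
#   flow = phonon_conv_flow("flow_phconv", scf_inp, qpoints=[[0, 0, 0]],
#                           params=["ecut", [2, 4, 6]])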
|
xhqu1981/pymatgen
|
pymatgen/io/abinit/flows.py
|
Python
|
mit
| 105,989
|
[
"ABINIT",
"pymatgen"
] |
ce357298f6d62cb96159327ebf6d8006309936f1401a3f6ecbab8227d507c279
|
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta
from abc import abstractmethod
from math import ceil
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.multiclass import check_classification_targets
from ..exceptions import NotFittedError
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE}
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
"random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
"random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None,
presort=False):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.presort = presort
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
            The indexes of the sorted training input samples. If many trees
are grown on the same dataset, this allows the ordering to be
cached between trees. If None, the data will be sorted here.
Don't use this parameter unless you know what to do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
y = check_array(y, ensure_2d=False, dtype=None)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity,
            # which [:, np.newaxis] does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
check_classification_targets(y)
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_encoded = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_encoded[:, k] = np.unique(y[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
min_samples_leaf = self.min_samples_leaf
else: # float
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
min_samples_split = self.min_samples_split
else: # float
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if not (0. < self.min_samples_split <= 1. or
2 <= self.min_samples_split):
raise ValueError("min_samples_split must be in at least 2"
" or in (0, 1], got %s" % min_samples_split)
if not (0. < self.min_samples_leaf <= 0.5 or
1 <= self.min_samples_leaf):
raise ValueError("min_samples_leaf must be at least than 1 "
"or in (0, 0.5], got %s" % min_samples_leaf)
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
        if presort is True and issparse(X):
raise ValueError("Presorting is not supported for sparse "
"matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms
# which desire presorting must do presorting themselves and pass that
# matrix into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
self.presort)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
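    # Hedged usage sketch: fitting one of the concrete subclasses defined
    # below (this base class itself is abstract).
    #
    #   clf = DecisionTreeClassifier(max_depth=2, random_state=0)
    #   clf.fit([[0, 0], [1, 1]], [0, 1])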
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def decision_path(self, X, check_input=True):
"""Return the decision path in the tree
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
            Return a node indicator matrix where non-zero elements
            indicate that the sample goes through the corresponding nodes.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.decision_path(X)
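    # Hedged usage sketch: the returned CSR matrix marks, for each sample,
    # the nodes visited from the root down to its leaf.
    #
    #   indicator = clf.decision_path(X)          # clf is a fitted tree
    #   n_nodes_per_sample = indicator.sum(axis=1)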
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None,
presort=False):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
presort=presort)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
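    # Hedged usage sketch: probabilities are leaf class fractions, so the
    # log-probabilities may contain -inf for classes absent from a leaf.
    #
    #   proba = clf.predict_proba(X)           # shape (n_samples, n_classes)
    #   log_proba = clf.predict_log_proba(X)   # elementwise log of the above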
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
presort=False):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
presort=presort)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
    .. [1] P. Geurts, D. Ernst, and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
    .. [1] P. Geurts, D. Ernst, and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
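# --- Editor's illustrative sketch (not part of the upstream module) ---
# The docstrings above warn that extra-trees are meant for use inside
# ensemble methods; a hedged usage sketch, assuming BaggingRegressor is
# importable from sklearn.ensemble and X_train/y_train are user-provided:
#
#     from sklearn.ensemble import BaggingRegressor
#     from sklearn.tree import ExtraTreeRegressor
#     model = BaggingRegressor(base_estimator=ExtraTreeRegressor(),
#                              n_estimators=10, random_state=0)
#     model.fit(X_train, y_train)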
|
jmschrei/scikit-learn
|
sklearn/tree/tree.py
|
Python
|
bsd-3-clause
| 40,412
|
[
"Brian"
] |
cd8a8a72712f77237fb1a1b0c06a0877a07282027c232739130cb16de797dd2c
|
""" DIRAC FileCatalog Database """
__RCSID__ = "$Id$"
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Base.DB import DB
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.DirectoryMetadata import DirectoryMetadata
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.FileMetadata import FileMetadata
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.DirectorySimpleTree import DirectorySimpleTree
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.DirectoryNodeTree import DirectoryNodeTree
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.DirectoryLevelTree import DirectoryLevelTree
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.DirectoryFlatTree import DirectoryFlatTree
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.WithFkAndPs.DirectoryClosure import DirectoryClosure
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.FileManagerFlat import FileManagerFlat
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.FileManager import FileManager
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.WithFkAndPs.FileManagerPs import FileManagerPs
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.SEManager import SEManagerCS, SEManagerDB
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.SecurityManager import NoSecurityManager,\
DirectorySecurityManager,\
FullSecurityManager,\
DirectorySecurityManagerWithDelete,\
PolicyBasedSecurityManager
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.UserAndGroupManager import UserAndGroupManagerCS,\
UserAndGroupManagerDB
from DIRAC.DataManagementSystem.DB.FileCatalogComponents.DatasetManager import DatasetManager
from DIRAC.Resources.Catalog.Utilities import checkArgumentFormat
#############################################################################
class FileCatalogDB(DB):
def __init__(self, databaseLocation='DataManagement/FileCatalogDB'):
""" Standard Constructor
"""
# The database location can be specified in System/Database form or in just the Database name
# in the DataManagement system
db = databaseLocation
if db.find('/') == -1:
db = 'DataManagement/' + db
DB.__init__(self, 'FileCatalogDB', db)
def setConfig(self, databaseConfig):
self.directories = {}
# In memory storage of the various parameters
self.users = {}
self.uids = {}
self.groups = {}
self.gids = {}
self.seNames = {}
self.seids = {}
self.seDefinitions = {}
# Obtain some general configuration of the database
self.uniqueGUID = databaseConfig['UniqueGUID']
self.globalReadAccess = databaseConfig['GlobalReadAccess']
self.lfnPfnConvention = databaseConfig['LFNPFNConvention']
if self.lfnPfnConvention == "None":
self.lfnPfnConvention = False
self.resolvePfn = databaseConfig['ResolvePFN']
self.umask = databaseConfig['DefaultUmask']
self.validFileStatus = databaseConfig['ValidFileStatus']
self.validReplicaStatus = databaseConfig['ValidReplicaStatus']
self.visibleFileStatus = databaseConfig['VisibleFileStatus']
self.visibleReplicaStatus = databaseConfig['VisibleReplicaStatus']
try:
# Obtain the plugins to be used for DB interaction
self.ugManager = eval("%s(self)" % databaseConfig['UserGroupManager'])
self.seManager = eval("%s(self)" % databaseConfig['SEManager'])
self.securityManager = eval("%s(self)" % databaseConfig['SecurityManager'])
self.dtree = eval("%s(self)" % databaseConfig['DirectoryManager'])
self.fileManager = eval("%s(self)" % databaseConfig['FileManager'])
self.datasetManager = eval("%s(self)" % databaseConfig['DatasetManager'])
self.dmeta = eval("%s(self)" % databaseConfig['DirectoryMetadata'])
self.fmeta = eval("%s(self)" % databaseConfig['FileMetadata'])
except Exception as x:
gLogger.fatal("Failed to create database objects", x)
return S_ERROR("Failed to create database objects")
return S_OK()
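  # Editor's illustrative sketch: a minimal databaseConfig dict as consumed by
  # setConfig above. The key names come from the lookups in that method; the
  # plugin class names are assumptions picked from this module's imports.
  #
  #   exampleConfig = {'UniqueGUID': True, 'GlobalReadAccess': True,
  #                    'LFNPFNConvention': 'Strong', 'ResolvePFN': True,
  #                    'DefaultUmask': 0775,
  #                    'ValidFileStatus': ['AprioriGood'],
  #                    'ValidReplicaStatus': ['AprioriGood'],
  #                    'VisibleFileStatus': ['AprioriGood'],
  #                    'VisibleReplicaStatus': ['AprioriGood'],
  #                    'UserGroupManager': 'UserAndGroupManagerDB',
  #                    'SEManager': 'SEManagerDB',
  #                    'SecurityManager': 'NoSecurityManager',
  #                    'DirectoryManager': 'DirectoryLevelTree',
  #                    'FileManager': 'FileManager',
  #                    'DatasetManager': 'DatasetManager',
  #                    'DirectoryMetadata': 'DirectoryMetadata',
  #                    'FileMetadata': 'FileMetadata'}
  #   fcDB = FileCatalogDB()
  #   fcDB.setConfig(exampleConfig)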
def setUmask(self, umask):
self.umask = umask
########################################################################
#
# SE based write methods
#
def addSE(self, seName, credDict):
"""
Add a new StorageElement
:param str seName: Name of the StorageElement
:param credDict: credential
"""
res = self._checkAdminPermission(credDict)
if not res['OK']:
return res
if not res['Value']:
return S_ERROR("Permission denied")
return self.seManager.addSE(seName)
def deleteSE(self, seName, credDict):
"""
Delete a StorageElement
:param str seName: Name of the StorageElement
:param credDict: credential
"""
res = self._checkAdminPermission(credDict)
if not res['OK']:
return res
if not res['Value']:
return S_ERROR("Permission denied")
return self.seManager.deleteSE(seName)
########################################################################
#
# User/groups based write methods
#
def addUser(self, userName, credDict):
"""
Add a new user
:param str userName: Name of the User
:param credDict: credential
"""
res = self._checkAdminPermission(credDict)
if not res['OK']:
return res
if not res['Value']:
return S_ERROR("Permission denied")
return self.ugManager.addUser(userName)
def deleteUser(self, userName, credDict):
"""
Delete a user
:param str userName: Name of the User
:param credDict: credential
"""
res = self._checkAdminPermission(credDict)
if not res['OK']:
return res
if not res['Value']:
return S_ERROR("Permission denied")
return self.ugManager.deleteUser(userName)
def addGroup(self, groupName, credDict):
"""
Add a new group
:param str groupName: Name of the group
:param credDict: credential
"""
res = self._checkAdminPermission(credDict)
if not res['OK']:
return res
if not res['Value']:
return S_ERROR("Permission denied")
return self.ugManager.addGroup(groupName)
def deleteGroup(self, groupName, credDict):
"""
Delete a group
:param str groupName: Name of the group
:param credDict: credential
"""
res = self._checkAdminPermission(credDict)
if not res['OK']:
return res
if not res['Value']:
return S_ERROR("Permission denied")
return self.ugManager.deleteGroup(groupName)
########################################################################
#
# User/groups based read methods
#
def getUsers(self, credDict):
"""
Returns the list of users
:param credDict: credential
:return: dictionary indexed on the user name
"""
res = self._checkAdminPermission(credDict)
if not res['OK']:
return res
if not res['Value']:
return S_ERROR("Permission denied")
return self.ugManager.getUsers()
def getGroups(self, credDict):
"""
Returns the list of groups
:param credDict: credential
:return: dictionary indexed on the group name
"""
res = self._checkAdminPermission(credDict)
if not res['OK']:
return res
if not res['Value']:
return S_ERROR("Permission denied")
return self.ugManager.getGroups()
########################################################################
#
# Path based read methods
#
def exists(self, lfns, credDict):
res = self._checkPathPermissions('exists', lfns, credDict)
if not res['OK']:
return res
failed = res['Value']['Failed']
successful = {}
if res['Value']['Successful']:
res = self.fileManager.exists(res['Value']['Successful'])
if not res['OK']:
return res
failed.update(res['Value']['Failed'])
successful = res['Value']['Successful']
notExist = []
for lfn in res['Value']['Successful'].keys():
if not successful[lfn]:
notExist.append(lfn)
successful.pop(lfn)
if notExist:
res = self.dtree.exists(notExist)
if not res['OK']:
return res
failed.update(res['Value']['Failed'])
successful.update(res['Value']['Successful'])
return S_OK({'Successful': successful, 'Failed': failed})
def getPathPermissions(self, lfns, credDict):
""" Get permissions for the given user/group to manipulate the given lfns
"""
res = checkArgumentFormat(lfns)
if not res['OK']:
return res
lfns = res['Value']
return self.securityManager.getPathPermissions(lfns.keys(), credDict)
def hasAccess(self, opType, paths, credDict):
""" Get permissions for the given user/group to execute the given operation
on the given paths
returns Successful dict with True/False
"""
res = checkArgumentFormat(paths)
if not res['OK']:
return res
paths = res['Value']
return self.securityManager.hasAccess(opType, paths, credDict)
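  # Editor's illustrative sketch of a hasAccess call (the path is made up;
  # operation names such as 'addFile' appear in the _checkPathPermissions
  # calls throughout this class):
  #
  #   res = fcDB.hasAccess('addFile', ['/vo/user/file.dat'], credDict)
  #   # -> S_OK({'Successful': {'/vo/user/file.dat': True}, 'Failed': {}})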
########################################################################
#
# Path based read methods
#
def changePathOwner(self, paths, credDict, recursive=False):
""" Bulk method to change Owner for the given paths
:param dict paths: dictionary < lfn : owner >
:param dict credDict: dictionary of the caller credentials
:param boolean recursive: flag to apply the operation recursively
"""
res = self._checkPathPermissions('changePathOwner', paths, credDict)
if not res['OK']:
return res
failed = res['Value']['Failed']
successful = {}
if res['Value']['Successful']:
result = self.__changePathFunction(res['Value']['Successful'], credDict,
self.dtree.changeDirectoryOwner,
self.fileManager.changeFileOwner,
recursive=recursive)
failed.update(result['Value']['Failed'])
successful = result['Value']['Successful']
return S_OK({'Successful': successful, 'Failed': failed})
def changePathGroup(self, paths, credDict, recursive=False):
""" Bulk method to change Group for the given paths
:param dict paths: dictionary < lfn : group >
:param dict credDict: dictionary of the caller credentials
:param boolean recursive: flag to apply the operation recursively
"""
res = self._checkPathPermissions('changePathGroup', paths, credDict)
if not res['OK']:
return res
failed = res['Value']['Failed']
successful = {}
if res['Value']['Successful']:
result = self.__changePathFunction(res['Value']['Successful'], credDict,
self.dtree.changeDirectoryGroup,
self.fileManager.changeFileGroup,
recursive=recursive)
failed.update(result['Value']['Failed'])
successful = result['Value']['Successful']
return S_OK({'Successful': successful, 'Failed': failed})
def changePathMode(self, paths, credDict, recursive=False):
""" Bulk method to change Mode for the given paths
:param dict paths: dictionary < lfn : mode >
:param dict credDict: dictionary of the caller credentials
:param boolean recursive: flag to apply the operation recursively
"""
res = self._checkPathPermissions('changePathMode', paths, credDict)
if not res['OK']:
return res
failed = res['Value']['Failed']
successful = {}
if res['Value']['Successful']:
result = self.__changePathFunction(res['Value']['Successful'], credDict,
self.dtree.changeDirectoryMode,
self.fileManager.changeFileMode,
recursive=recursive)
failed.update(result['Value']['Failed'])
successful = result['Value']['Successful']
return S_OK({'Successful': successful, 'Failed': failed})
def __changePathFunction(self, paths, credDict,
change_function_directory,
change_function_file,
recursive=False):
""" A generic function to change Owner, Group or Mode for the given paths
:param dict paths: dictionary < lfn : parameter_value >
:param dict credDict: dictionary of the caller credentials
:param function change_function_directory: function to change directory parameters
:param function change_function_file: function to change file parameters
:param boolean recursive: flag to apply the operation recursively
"""
dirList = []
result = self.isDirectory(paths, credDict)
if not result['OK']:
return result
for di in result['Value']['Successful']:
if result['Value']['Successful'][di]:
dirList.append(di)
fileList = []
if len(dirList) < len(paths):
result = self.isFile(paths, credDict)
if not result['OK']:
return result
for fi in result['Value']['Successful']:
if result['Value']['Successful'][fi]:
fileList.append(fi)
successful = {}
failed = {}
dirArgs = {}
fileArgs = {}
for path in paths:
if (path not in dirList) and (path not in fileList):
failed[path] = 'No such file or directory'
if path in dirList:
dirArgs[path] = paths[path]
elif path in fileList:
fileArgs[path] = paths[path]
if dirArgs:
result = change_function_directory(dirArgs, recursive=recursive)
if not result['OK']:
return result
successful.update(result['Value']['Successful'])
failed.update(result['Value']['Failed'])
if fileArgs:
result = change_function_file(fileArgs)
if not result['OK']:
return result
successful.update(result['Value']['Successful'])
failed.update(result['Value']['Failed'])
return S_OK({'Successful': successful, 'Failed': failed})
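  # Editor's illustrative sketch: the three changePath* methods above all
  # funnel into __changePathFunction; a hedged usage example (values made up):
  #
  #   fcDB.changePathOwner({'/vo/user/file.dat': 'newowner'}, credDict,
  #                        recursive=False)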
########################################################################
#
# File based write methods
#
def addFile(self, lfns, credDict):
"""
Add a new File
    :param dict lfns: indexed on the file's LFN, the values are dictionaries which contain
           the attributes of the files (PFN, SE, Size, GUID, Checksum)
    :param credDict: credential
:return: Successful/Failed dict.
"""
res = self._checkPathPermissions('addFile', lfns, credDict)
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK({'Successful': {}, 'Failed': failed})
res = self.fileManager.addFile(res['Value']['Successful'], credDict)
if not res['OK']:
return res
failed.update(res['Value']['Failed'])
successful = res['Value']['Successful']
return S_OK({'Successful': successful, 'Failed': failed})
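  # Editor's illustrative sketch of the bulk-call convention used throughout
  # this class: arguments and return values are keyed on the LFN. All values
  # below are made up:
  #
  #   lfns = {'/vo/user/file.dat': {'PFN': '', 'SE': 'SOME-SE', 'Size': 1024,
  #                                 'GUID': '0A0A-0001', 'Checksum': '1a2b3c4d'}}
  #   res = fcDB.addFile(lfns, credDict)
  #   # -> S_OK({'Successful': {lfn: True, ...}, 'Failed': {lfn: reason, ...}})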
def setFileStatus(self, lfns, credDict):
"""
Set the status of a File
:param dict lfns: dict indexed on the LFNs. The values are the status (should be in config['ValidFileStatus'])
    :param credDict: credential
:return: Successful/Failed dict.
"""
res = self._checkPathPermissions('setFileStatus', lfns, credDict)
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK({'Successful': {}, 'Failed': failed})
res = self.fileManager.setFileStatus(res['Value']['Successful'], credDict)
if not res['OK']:
return res
failed.update(res['Value']['Failed'])
successful = res['Value']['Successful']
return S_OK({'Successful': successful, 'Failed': failed})
def removeFile(self, lfns, credDict):
"""
Remove files
:param lfns: list of LFNs to remove
:type lfns: python:list
    :param credDict: credential
:return: Successful/Failed dict.
"""
res = self._checkPathPermissions('removeFile', lfns, credDict)
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK({'Successful': {}, 'Failed': failed})
res = self.fileManager.removeFile(res['Value']['Successful'])
if not res['OK']:
return res
failed.update(res['Value']['Failed'])
successful = res['Value']['Successful']
return S_OK({'Successful': successful, 'Failed': failed})
def addReplica(self, lfns, credDict):
"""
Add a replica to a File
:param dict lfns: keys are LFN. The values are dict with key PFN and SE
(e.g. {myLfn : {"PFN" : "myPfn", "SE" : "mySE"}})
    :param credDict: credential
:return: Successful/Failed dict.
"""
res = self._checkPathPermissions('addReplica', lfns, credDict)
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK({'Successful': {}, 'Failed': failed})
res = self.fileManager.addReplica(res['Value']['Successful'])
if not res['OK']:
return res
failed.update(res['Value']['Failed'])
successful = res['Value']['Successful']
return S_OK({'Successful': successful, 'Failed': failed})
def removeReplica(self, lfns, credDict):
"""
Remove replicas
:param dict lfns: keys are LFN. The values are dict with key PFN and SE
(e.g. {myLfn : {"PFN" : "myPfn", "SE" : "mySE"}})
    :param credDict: credential
:return: Successful/Failed dict.
"""
res = self._checkPathPermissions('removeReplica', lfns, credDict)
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK({'Successful': {}, 'Failed': failed})
res = self.fileManager.removeReplica(res['Value']['Successful'])
if not res['OK']:
return res
failed.update(res['Value']['Failed'])
successful = res['Value']['Successful']
return S_OK({'Successful': successful, 'Failed': failed})
def setReplicaStatus(self, lfns, credDict):
"""
Set the status of a Replicas
:param dict lfns: dict indexed on the LFNs. The values are dict with keys
"SE" and "Status" (that has to be in config['ValidReplicaStatus'])
    :param credDict: credential
:return: Successful/Failed dict.
"""
res = self._checkPathPermissions('setReplicaStatus', lfns, credDict)
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK({'Successful': {}, 'Failed': failed})
res = self.fileManager.setReplicaStatus(res['Value']['Successful'])
if not res['OK']:
return res
failed.update(res['Value']['Failed'])
successful = res['Value']['Successful']
return S_OK({'Successful': successful, 'Failed': failed})
def setReplicaHost(self, lfns, credDict):
res = self._checkPathPermissions('setReplicaHost', lfns, credDict)
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK({'Successful': {}, 'Failed': failed})
res = self.fileManager.setReplicaHost(res['Value']['Successful'])
if not res['OK']:
return res
failed.update(res['Value']['Failed'])
successful = res['Value']['Successful']
return S_OK({'Successful': successful, 'Failed': failed})
def addFileAncestors(self, lfns, credDict):
""" Add ancestor information for the given LFNs
"""
res = self._checkPathPermissions('addFileAncestors', lfns, credDict)
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK({'Successful': {}, 'Failed': failed})
res = self.fileManager.addFileAncestors(res['Value']['Successful'])
if not res['OK']:
return res
failed.update(res['Value']['Failed'])
successful = res['Value']['Successful']
return S_OK({'Successful': successful, 'Failed': failed})
########################################################################
#
# File based read methods
#
def isFile(self, lfns, credDict):
"""
Checks whether a list of LFNS are files or not
:param lfns: list of LFN to check
:type lfns: python:list
    :param credDict: credential
:return: Successful/Failed dict.
The values of the successful dict are True or False whether it's a file or not
"""
res = self._checkPathPermissions('isFile', lfns, credDict)
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK({'Successful': {}, 'Failed': failed})
res = self.fileManager.isFile(res['Value']['Successful'])
if not res['OK']:
return res
failed.update(res['Value']['Failed'])
successful = res['Value']['Successful']
return S_OK({'Successful': successful, 'Failed': failed})
def getFileSize(self, lfns, credDict):
"""
Gets the size of a list of lfns
:param lfns: list of LFN to check
:type lfns: python:list
    :param credDict: credential
:return: Successful/Failed dict.
"""
res = self._checkPathPermissions('getFileSize', lfns, credDict)
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK({'Successful': {}, 'Failed': failed})
res = self.fileManager.getFileSize(res['Value']['Successful'])
if not res['OK']:
return res
failed.update(res['Value']['Failed'])
successful = res['Value']['Successful']
return S_OK({'Successful': successful, 'Failed': failed})
def getFileMetadata(self, lfns, credDict):
"""
Gets the metadata of a list of lfns
:param lfns: list of LFN to check
:type lfns: python:list
    :param credDict: credential
:return: Successful/Failed dict.
"""
res = self._checkPathPermissions('getFileMetadata', lfns, credDict)
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK({'Successful': {}, 'Failed': failed})
res = self.fileManager.getFileMetadata(res['Value']['Successful'])
if not res['OK']:
return res
failed.update(res['Value']['Failed'])
successful = res['Value']['Successful']
return S_OK({'Successful': successful, 'Failed': failed})
def getReplicas(self, lfns, allStatus, credDict):
"""
Gets the list of replicas of a list of lfns
:param lfns: list of LFN to check
:type lfns: python:list
    :param allStatus: if True, all replica statuses are visible, otherwise only those defined in config['ValidReplicaStatus']
    :param credDict: credential
:return: Successful/Failed dict. Successful is indexed on the LFN, and the values are dictionary
with the SEName as keys
"""
res = self._checkPathPermissions('getReplicas', lfns, credDict)
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK({'Successful': {}, 'Failed': failed})
res = self.fileManager.getReplicas(res['Value']['Successful'], allStatus=allStatus)
if not res['OK']:
return res
failed.update(res['Value']['Failed'])
successful = res['Value']['Successful']
return S_OK({'Successful': successful, 'Failed': failed,
'SEPrefixes': res['Value'].get('SEPrefixes', {})})
def getReplicaStatus(self, lfns, credDict):
"""
Gets the status of a list of replicas
:param dict lfns: <lfn, se name>
    :param credDict: credential
:return: Successful/Failed dict.
"""
res = self._checkPathPermissions('getReplicaStatus', lfns, credDict)
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK({'Successful': {}, 'Failed': failed})
res = self.fileManager.getReplicaStatus(res['Value']['Successful'])
if not res['OK']:
return res
failed.update(res['Value']['Failed'])
successful = res['Value']['Successful']
return S_OK({'Successful': successful, 'Failed': failed})
def getFileAncestors(self, lfns, depths, credDict):
res = self._checkPathPermissions('getFileAncestors', lfns, credDict)
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK({'Successful': {}, 'Failed': failed})
res = self.fileManager.getFileAncestors(res['Value']['Successful'], depths)
if not res['OK']:
return res
failed.update(res['Value']['Failed'])
successful = res['Value']['Successful']
return S_OK({'Successful': successful, 'Failed': failed})
def getFileDescendents(self, lfns, depths, credDict):
res = self._checkPathPermissions('getFileDescendents', lfns, credDict)
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK({'Successful': {}, 'Failed': failed})
res = self.fileManager.getFileDescendents(res['Value']['Successful'], depths)
if not res['OK']:
return res
failed.update(res['Value']['Failed'])
successful = res['Value']['Successful']
return S_OK({'Successful': successful, 'Failed': failed})
def getFileDetails(self, lfnList, credDict):
""" Get all the metadata for the given files
"""
connection = False
result = self.fileManager._findFiles(lfnList, connection=connection)
if not result['OK']:
return result
resultDict = {}
fileIDDict = {}
lfnDict = result['Value']['Successful']
for lfn in lfnDict:
fileIDDict[lfnDict[lfn]['FileID']] = lfn
result = self.fileManager._getFileMetadataByID(fileIDDict.keys(), connection=connection)
if not result['OK']:
return result
for fileID in result['Value']:
resultDict[fileIDDict[fileID]] = result['Value'][fileID]
result = self.fmeta._getFileUserMetadataByID(fileIDDict.keys(), credDict, connection=connection)
if not result['OK']:
return result
for fileID in fileIDDict:
resultDict[fileIDDict[fileID]].setdefault('Metadata', {})
if fileID in result['Value']:
resultDict[fileIDDict[fileID]]['Metadata'] = result['Value'][fileID]
return S_OK(resultDict)
def getLFNForGUID(self, guids, credDict):
"""
Gets the lfns that match a list of guids
    :param guids: list of GUIDs to look for
    :type guids: python:list
    :param credDict: credential
:return: S_OK({guid:lfn}) dict.
"""
return self.fileManager.getLFNForGUID(guids)
########################################################################
#
# Directory based Write methods
#
def createDirectory(self, lfns, credDict):
"""
Create new directories
:param lfns: list of directories
:type lfns: python:list
    :param credDict: credential
:return: Successful/Failed dict.
"""
res = self._checkPathPermissions('createDirectory', lfns, credDict)
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK({'Successful': {}, 'Failed': failed})
res = self.dtree.createDirectory(res['Value']['Successful'], credDict)
if not res['OK']:
return res
failed.update(res['Value']['Failed'])
successful = res['Value']['Successful']
return S_OK({'Successful': successful, 'Failed': failed})
def removeDirectory(self, lfns, credDict):
"""
Remove directories
:param lfns: list of directories
:type lfns: python:list
    :param credDict: credential
:return: Successful/Failed dict.
"""
res = self._checkPathPermissions('removeDirectory', lfns, credDict)
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK({'Successful': {}, 'Failed': failed})
res = self.dtree.removeDirectory(res['Value']['Successful'], credDict)
if not res['OK']:
return res
failed.update(res['Value']['Failed'])
successful = res['Value']['Successful']
if not successful:
return S_OK({'Successful': successful, 'Failed': failed})
# Remove the directory metadata now
dirIdList = [successful[p]['DirID'] for p in successful if 'DirID' in successful[p]]
result = self.dmeta.removeMetadataForDirectory(dirIdList, credDict)
if not result['OK']:
return result
failed.update(result['Value']['Failed'])
    # Remove from the successful dict those entries that failed in the metadata removal
    for lfn in failed:
      successful.pop(lfn, None)
# We update the successful
successful.update(result["Value"]["Successful"])
return S_OK({'Successful': successful, 'Failed': failed})
########################################################################
#
# Directory based read methods
#
def listDirectory(self, lfns, credDict, verbose=False):
"""
List directories
:param lfns: list of directories
:type lfns: python:list
    :param credDict: credential
:return: Successful/Failed dict.
           The successful values are dictionaries indexed by "Files", "Datasets", "Subdirs" and "Links"
"""
res = self._checkPathPermissions('listDirectory', lfns, credDict)
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK({'Successful': {}, 'Failed': failed})
res = self.dtree.listDirectory(res['Value']['Successful'], verbose=verbose)
if not res['OK']:
return res
failed.update(res['Value']['Failed'])
successful = res['Value']['Successful']
return S_OK({'Successful': successful, 'Failed': failed})
def isDirectory(self, lfns, credDict):
"""
Checks whether a list of LFNS are directories or not
:param lfns: list of LFN to check
:type lfns: python:list
    :param credDict: credential
:return: Successful/Failed dict.
The values of the successful dict are True or False whether it's a dir or not
"""
res = self._checkPathPermissions('isDirectory', lfns, credDict)
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK({'Successful': {}, 'Failed': failed})
res = self.dtree.isDirectory(res['Value']['Successful'])
if not res['OK']:
return res
failed.update(res['Value']['Failed'])
successful = res['Value']['Successful']
return S_OK({'Successful': successful, 'Failed': failed})
def getDirectoryReplicas(self, lfns, allStatus, credDict):
res = self._checkPathPermissions('getDirectoryReplicas', lfns, credDict)
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK({'Successful': {}, 'Failed': failed})
res = self.dtree.getDirectoryReplicas(res['Value']['Successful'], allStatus)
if not res['OK']:
return res
failed.update(res['Value']['Failed'])
successful = res['Value']['Successful']
return S_OK({'Successful': successful, 'Failed': failed,
'SEPrefixes': res['Value'].get('SEPrefixes', {})})
def getDirectorySize(self, lfns, longOutput, fromFiles, credDict):
"""
Get the sizes of a list of directories
:param lfns: list of LFN to check
:type lfns: python:list
    :param credDict: credential
:return: Successful/Failed dict.
           The successful values are dictionaries indexed by "LogicalFiles" (number of files),
           "LogicalDirectories" (number of directories) and "LogicalSize" (sum of the file sizes)
"""
res = self._checkPathPermissions('getDirectorySize', lfns, credDict)
if not res['OK']:
return res
failed = res['Value']['Failed']
# if no successful, just return
if not res['Value']['Successful']:
return S_OK({'Successful': {}, 'Failed': failed})
res = self.dtree.getDirectorySize(res['Value']['Successful'], longOutput, fromFiles)
if not res['OK']:
return res
failed.update(res['Value']['Failed'])
successful = res['Value']['Successful']
queryTime = res['Value'].get('QueryTime', -1.)
return S_OK({'Successful': successful, 'Failed': failed, 'QueryTime': queryTime})
def getDirectoryMetadata(self, lfns, credDict):
''' Get standard directory metadata
:param lfns: list of directory paths
:type lfns: python:list
:param dict credDict: credentials
:return: Successful/Failed dict.
'''
res = self._checkPathPermissions('getDirectoryMetadata', lfns, credDict)
if not res['OK']:
return res
failed = res['Value']['Failed']
successful = {}
for lfn in res['Value']['Successful']:
result = self.dtree.getDirectoryParameters(lfn)
if result['OK']:
successful[lfn] = result['Value']
else:
failed[lfn] = result['Message']
return S_OK({'Successful': successful, 'Failed': failed})
def rebuildDirectoryUsage(self):
""" Rebuild DirectoryUsage table from scratch
"""
result = self.dtree._rebuildDirectoryUsage()
return result
  def repairCatalog(self, directoryFlag=True, credDict=None):
    """ Repair catalog inconsistencies
    """
    if credDict is None:
      credDict = {}
    result = S_OK()
    if directoryFlag:
      result = self.dtree.recoverOrphanDirectories(credDict)
    return result
#######################################################################
#
# Catalog metadata methods
#
def setMetadata(self, path, metadataDict, credDict):
""" Add metadata to the given path
"""
res = self._checkPathPermissions('setMetadata', path, credDict)
if not res['OK']:
return res
if not res['Value']['Successful']:
return S_ERROR('Permission denied')
if not res['Value']['Successful'][path]:
return S_ERROR('Permission denied')
result = self.dtree.isDirectory({path: True})
if not result['OK']:
return result
if not result['Value']['Successful']:
return S_ERROR('Failed to determine the path type')
if result['Value']['Successful'][path]:
# This is a directory
return self.dmeta.setMetadata(path, metadataDict, credDict)
else:
# This is a file
return self.fmeta.setMetadata(path, metadataDict, credDict)
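  # Editor's illustrative sketch: setMetadata dispatches on the path type, so
  # one call covers both directories and files (the path and metadata names
  # below are made up):
  #
  #   fcDB.setMetadata('/vo/prod/run123', {'RunNumber': 123}, credDict)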
def setMetadataBulk(self, pathMetadataDict, credDict):
""" Add metadata for the given paths
"""
successful = {}
failed = {}
for path, metadataDict in pathMetadataDict.items():
result = self.setMetadata(path, metadataDict, credDict)
if result['OK']:
successful[path] = True
else:
failed[path] = result['Message']
return S_OK({'Successful': successful, 'Failed': failed})
def removeMetadata(self, pathMetadataDict, credDict):
""" Remove metadata for the given paths
"""
successful = {}
failed = {}
for path, metadataDict in pathMetadataDict.items():
result = self.__removeMetadata(path, metadataDict, credDict)
if result['OK']:
successful[path] = True
else:
failed[path] = result['Message']
return S_OK({'Successful': successful, 'Failed': failed})
def __removeMetadata(self, path, metadata, credDict):
""" Remove metadata from the given path
"""
res = self._checkPathPermissions('__removeMetadata', path, credDict)
if not res['OK']:
return res
if not res['Value']['Successful']:
return S_ERROR('Permission denied')
if not res['Value']['Successful'][path]:
return S_ERROR('Permission denied')
result = self.dtree.isDirectory({path: True})
if not result['OK']:
return result
if not result['Value']['Successful']:
return S_ERROR('Failed to determine the path type')
if result['Value']['Successful'][path]:
# This is a directory
return self.dmeta.removeMetadata(path, metadata, credDict)
else:
# This is a file
return self.fmeta.removeMetadata(path, metadata, credDict)
#######################################################################
#
# Catalog admin methods
#
def getCatalogCounters(self, credDict):
counterDict = {}
res = self._checkAdminPermission(credDict)
if not res['OK']:
return res
if not res['Value']:
return S_ERROR("Permission denied")
res = self.fileManager.getFileCounters()
if not res['OK']:
return res
counterDict.update(res['Value'])
res = self.fileManager.getReplicaCounters()
if not res['OK']:
return res
counterDict.update(res['Value'])
res = self.dtree.getDirectoryCounters()
if not res['OK']:
return res
counterDict.update(res['Value'])
return S_OK(counterDict)
########################################################################
#
# Security based methods
#
def _checkAdminPermission(self, credDict):
return self.securityManager.hasAdminAccess(credDict)
def _checkPathPermissions(self, operation, lfns, credDict):
res = checkArgumentFormat(lfns)
if not res['OK']:
return res
lfns = res['Value']
res = self.securityManager.hasAccess(operation, lfns.keys(), credDict)
if not res['OK']:
return res
# Do not consider those paths for which we failed to determine access
failed = res['Value']['Failed']
for lfn in failed.keys():
lfns.pop(lfn)
# Do not consider those paths for which access is denied
successful = {}
for lfn, access in res['Value']['Successful'].items():
if not access:
failed[lfn] = 'Permission denied'
else:
successful[lfn] = lfns[lfn]
return S_OK({'Successful': successful, 'Failed': failed})
def getSEDump(self, seName):
"""
Return all the files at a given SE, together with checksum and size
:param seName: name of the StorageElement
:returns: S_OK with list of tuples (lfn, checksum, size)
"""
return self.fileManager.getSEDump(seName)
|
andresailer/DIRAC
|
DataManagementSystem/DB/FileCatalogDB.py
|
Python
|
gpl-3.0
| 39,108
|
[
"DIRAC"
] |
66eacd375b48ce8d25efe48e8fbb1aebd13da65b93f3e1c38c379605cf8f6fc7
|
#
# Copyright (c) 2008--2014 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
import time
import string
import rpm
import sys
import xmlrpclib
from types import IntType, ListType, DictType
# common module
from spacewalk.common import rhnCache, rhnFlags, rhn_rpm
from spacewalk.common.rhnConfig import CFG
from spacewalk.common.rhnLog import log_debug, log_error
from spacewalk.common.rhnException import rhnFault, rhnException
from spacewalk.common.rhnTranslate import _
# local module
import rhnUser, rhnSQL, rhnLib
class NoBaseChannelError(Exception):
pass
class InvalidServerArchError(Exception):
pass
class BaseChannelDeniedError(Exception):
pass
class ChannelException(Exception):
def __init__(self, channel_id=None, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
self.channel_id = channel_id
self.channel = None
class SubscriptionCountExceeded(ChannelException):
pass
class ModifiedError(ChannelException):
pass
class IncompatibilityError(Exception):
pass
class InvalidDataError(Exception):
pass
class ChannelNotFoundError(Exception):
pass
class NoToolsChannel(Exception):
pass
class NoChildChannels(Exception):
pass
class InvalidChannel(Exception):
pass
class BaseDatabaseObject:
def __init__(self):
self._row = None
def __getattr__(self, name):
if name.startswith('get_'):
return rhnLib.CallableObj(name[4:], self._get)
if name.startswith('set_'):
return rhnLib.CallableObj(name[4:], self._set)
raise AttributeError(name)
def _set(self, name, val):
self._new_row()
self._row[name] = val
def _get(self, name):
return self._row[name]
def _new_row(self):
raise NotImplementedError()
def save(self, with_updates=1):
try:
return self._save(with_updates=with_updates)
except:
rhnSQL.rollback()
raise
def _save(self, with_updates=1):
try:
self._row.save(with_updates=with_updates)
except rhnSQL.ModifiedRowError:
raise ModifiedError(self._row['id']), None, sys.exc_info()[2]
class BaseChannelObject(BaseDatabaseObject):
_table_name = None
_sequence_name = None
_generic_fields = []
def load_by_label(self, label):
self.__init__()
self._row = rhnSQL.Row(self._table_name, 'label')
self._row.load(label)
return self
def load_by_id(self, obj_id):
self.__init__()
self._row = rhnSQL.Row(self._table_name, 'id')
self._row.load(obj_id)
return self
def load_from_dict(self, dict):
# Re-init
self.__init__()
for f in self._generic_fields:
method = getattr(self, 'set_' + f)
method(dict.get(f))
self._load_rest(dict)
return self
def _load_rest(self, dict):
pass
def exists(self):
if not self._row:
return 0
return self._row.real
def get_org_id(self):
org_id = self._row['org_id']
if org_id is None:
return None
row = self._lookup_org_id(org_id)
if row.real:
return row['login']
return org_id
def set_org_id(self, val):
self._new_row()
if val is None or isinstance(val, IntType):
self._row['org_id'] = val
return
row = self._lookup_org_by_login(val)
if not row.real:
raise InvalidDataError("No such org", val)
self._row['org_id'] = row['org_id']
def _lookup_org_id(self, org_id):
row = rhnSQL.Row('web_contact', 'org_id')
row.load(org_id)
return row
def _lookup_org_by_login(self, login):
row = rhnSQL.Row('web_contact', 'login')
row.load(login)
return row
def _lookup_channel_family_by_id(self, channel_family_id):
row = rhnSQL.Row('rhnChannelFamily', 'id')
row.load(channel_family_id)
return row
def _lookup_channel_family_by_label(self, channel_family):
row = rhnSQL.Row('rhnChannelFamily', 'label')
row.load(channel_family)
return row
def _new_row(self):
if self._row is None:
self._row = rhnSQL.Row(self._table_name, 'id')
channel_id = rhnSQL.Sequence(self._sequence_name).next()
self._row.create(channel_id)
def as_dict(self):
ret = {}
for f in self._generic_fields:
method = getattr(self, 'get_' + f)
val = method()
ret[f] = val
return ret
# Channel creation
class Channel(BaseChannelObject):
_table_name = 'rhnChannel'
_sequence_name = 'rhn_channel_id_seq'
_generic_fields = ['label', 'name', 'summary', 'description', 'basedir',
'org_id', 'gpg_key_url', 'gpg_key_id', 'gpg_key_fp', 'end_of_life',
'channel_families', 'channel_arch',]
def __init__(self):
BaseChannelObject.__init__(self)
self._channel_families = []
self._dists = {}
self._parent_channel_arch = None
def load_by_label(self, label):
BaseChannelObject.load_by_label(self, label)
self._load_channel_families()
self._load_dists()
return self
def load_by_id(self, label):
BaseChannelObject.load_by_id(self, label)
self._load_channel_families()
self._load_dists()
return self
def _load_rest(self, dict):
dists = dict.get('dists')
if not dists:
return
for dist in dists:
release = dist.get('release')
os = dist.get('os')
self._dists[release] = os
_query_get_db_channel_families = rhnSQL.Statement("""
select channel_family_id
from rhnChannelFamilyMembers
where channel_id = :channel_id
""")
def _get_db_channel_families(self, channel_id):
if channel_id is None:
return []
h = rhnSQL.prepare(self._query_get_db_channel_families)
h.execute(channel_id=channel_id)
return map(lambda x: x['channel_family_id'], h.fetchall_dict() or [])
def _load_channel_families(self):
channel_id = self._row.get('id')
self._channel_families = self._get_db_channel_families(channel_id)
return 1
def _load_dists(self):
channel_id = self._row.get('id')
dists = self._get_db_dists(channel_id)
self.set_dists(dists)
_query_get_db_dists = rhnSQL.Statement("""
select os, release
from rhnDistChannelMap
where channel_id = :channel_id
and org_id is null
""")
def _get_db_dists(self, channel_id):
if channel_id is None:
return []
h = rhnSQL.prepare(self._query_get_db_dists)
h.execute(channel_id=channel_id)
return h.fetchall_dict() or []
# Setters
def set_channel_arch(self, val):
self._new_row()
arch = self._sanitize_arch(val)
row = self._lookup_channel_arch(arch)
if not row.real:
raise InvalidDataError("No such architecture", arch)
self._row['channel_arch_id'] = row['id']
def _sanitize_arch(self, arch):
if arch == 'i386':
return 'channel-ia32'
p = 'channel-'
if arch[:len(p)] != p:
return p + arch
return arch
def set_parent_channel(self, val):
self._new_row()
if val is None:
self._row['parent_channel'] = None
return
row = self._lookup_channel_by_label(val)
if not row.real:
raise InvalidDataError("Invalid parent channel", val)
self._row['parent_channel'] = row['id']
self._parent_channel_arch = row['channel_arch_id']
def set_channel_families(self, val):
self._new_row()
self._channel_families = []
for cf_label in val:
self.add_channel_family(cf_label)
def set_end_of_life(self, val):
self._new_row()
if val is None:
self._row['end_of_life'] = None
return
t = time.strptime(val, "%Y-%m-%d")
seconds = time.mktime(t)
t = rhnSQL.TimestampFromTicks(seconds)
self._row['end_of_life'] = t
def add_channel_family(self, name):
self._new_row()
cf = self._lookup_channel_family_by_label(name)
if not cf.real:
raise InvalidDataError("Invalid channel family", name)
self._channel_families.append(cf['id'])
def add_dist(self, release, os=None):
if os is None:
os = 'Red Hat Linux'
self._dists[release] = os
def set_dists(self, val):
self._dists.clear()
for h in val:
release = h['release']
os = h['os']
self.add_dist(release, os)
# Getters
def get_parent_channel(self):
pc_id = self._row['parent_channel']
if pc_id is None:
return None
return self._lookup_channel_by_id(pc_id)['label']
def get_channel_families(self):
cf_labels = []
for cf_id in self._channel_families:
row = self._lookup_channel_family_by_id(cf_id)
if row.real:
cf_labels.append(row['label'])
return cf_labels
def get_channel_arch(self):
channel_arch_id = self._row['channel_arch_id']
row = self._lookup_channel_arch_by_id(channel_arch_id)
assert row.real
return row['label']
def get_end_of_life(self):
date_obj = self._row['end_of_life']
if date_obj is None:
return None
return "%s-%02d-%02d %02d:%02d:%02d" % (
date_obj.year, date_obj.month, date_obj.day,
date_obj.hour, date_obj.minute, date_obj.second)
def get_dists(self):
ret = []
for release, os in self._dists.items():
ret.append({'release' : release, 'os' : os})
return ret
def _lookup_channel_by_id(self, channel_id):
row = rhnSQL.Row('rhnChannel', 'id')
row.load(channel_id)
return row
def _lookup_channel_by_label(self, channel):
row = rhnSQL.Row('rhnChannel', 'label')
row.load(channel)
return row
def _lookup_channel_arch(self, channel_arch):
row = rhnSQL.Row('rhnChannelArch', 'label')
row.load(channel_arch)
return row
def _lookup_channel_arch_by_id(self, channel_arch_id):
row = rhnSQL.Row('rhnChannelArch', 'id')
row.load(channel_arch_id)
return row
def _save(self, with_updates=1):
if self._parent_channel_arch:
if not self._compatible_channel_arches(self._parent_channel_arch,
self._row['channel_arch_id']):
raise IncompatibilityError("Incompatible channel arches")
BaseChannelObject._save(self, with_updates=with_updates)
# Save channel families now
self._save_channel_families()
self._save_dists()
_query_remove_channel_families = rhnSQL.Statement("""
delete from rhnChannelFamilyMembers
where channel_id = :channel_id
and channel_family_id = :channel_family_id
""")
_query_add_channel_families = rhnSQL.Statement("""
insert into rhnChannelFamilyMembers (channel_id, channel_family_id)
values (:channel_id, :channel_family_id)
""")
def _save_channel_families(self):
channel_id = self._row['id']
db_cfids = self._get_db_channel_families(channel_id)
h = {}
for db_cfid in db_cfids:
h[db_cfid] = None
to_add = []
for cfid in self._channel_families:
if h.has_key(cfid):
del h[cfid]
continue
to_add.append(cfid)
to_delete = h.keys()
if to_delete:
h = rhnSQL.prepare(self._query_remove_channel_families)
cids = [channel_id] * len(to_delete)
h.executemany(channel_id=cids, channel_family_id=to_delete)
if to_add:
h = rhnSQL.prepare(self._query_add_channel_families)
cids = [channel_id] * len(to_add)
h.executemany(channel_id=cids, channel_family_id=to_add)
def _save_dists(self):
channel_id = self._row['id']
db_dists = self._get_db_dists(channel_id)
d = self._dists.copy()
to_add = [[], []]
to_remove = []
to_update = [[], []]
for h in db_dists:
release = h['release']
os = h['os']
if not d.has_key(release):
to_remove.append(release)
continue
# Need to update?
m_os = d[release]
if m_os == os:
# Nothing to do
del d[release]
continue
to_update[0].append(release)
to_update[1].append(os)
# Everything else should be added
for release, os in d.items():
to_add[0].append(release)
to_add[1].append(os)
self._remove_dists(to_remove)
self._update_dists(to_update[0], to_update[1])
self._add_dists(to_add[0], to_add[1])
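        # Editor's illustrative example of the reconciliation above: with a DB
        # row (release '8.0', os 'Red Hat Linux') and in-memory dists
        # {'8.0': 'RHEL', '9': 'RHEL'}, release '8.0' lands in to_update (its
        # os changed) and '9' in to_add; nothing is removed.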
_query_add_dists = rhnSQL.Statement("""
insert into rhnDistChannelMap
(channel_id, channel_arch_id, release, os, org_id)
values (:channel_id, :channel_arch_id, :release, :os, null)
""")
def _add_dists(self, releases, oses):
self._modify_dists(self._query_add_dists, releases, oses)
def _modify_dists(self, query, releases, oses):
if not releases:
return
count = len(releases)
channel_ids = [self._row['id']] * count
query_args = {'channel_id': channel_ids, 'release': releases}
if oses:
channel_arch_ids = [self._row['channel_arch_id']] * count
query_args.update({'channel_arch_id': channel_arch_ids,
'os': oses})
h = rhnSQL.prepare(query)
h.executemany(**query_args)
_query_update_dists = rhnSQL.Statement("""
update rhnDistChannelMap
set channel_arch_id = :channel_arch_id,
os = :os
where channel_id = :channel_id
and release = :release
and org_id is null
""")
def _update_dists(self, releases, oses):
self._modify_dists(self._query_update_dists, releases, oses)
_query_remove_dists = rhnSQL.Statement("""
delete from rhnDistChannelMap
where channel_id = :channel_id
and release = :release
and org_id is null
""")
def _remove_dists(self, releases):
self._modify_dists(self._query_remove_dists, releases, None)
def _compatible_channel_arches(self, parent_channel_arch, channel_arch):
# This could get more complicated later
return (parent_channel_arch == channel_arch)
def as_dict(self):
ret = BaseChannelObject.as_dict(self)
ret['dists'] = self.get_dists()
return ret
class ChannelFamily(BaseChannelObject):
_table_name = 'rhnChannelFamily'
_sequence_name = 'rhn_channel_family_id_seq'
_generic_fields = ['label', 'name', 'product_url']
def _load_by_id(query, item_object, pattern=None):
qargs = {}
if pattern:
query += "and label like :pattern"
qargs['pattern'] = pattern
h = rhnSQL.prepare(query)
h.execute(**qargs)
ret = []
while 1:
row = h.fetchone_dict()
if not row:
break
c = item_object.load_by_id(row['id'])
ret.append(c.as_dict())
return ret
def list_channel_families(pattern=None):
query = """
select id
from rhnChannelFamily
where org_id is null
"""
return _load_by_id(query, ChannelFamily(), pattern)
def list_channels(pattern=None):
query = """
select id
from rhnChannel
where 1=1
"""
return _load_by_id(query, Channel(), pattern)
# makes sure there are no None values in dictionaries, etc.
def __stringify(object):
if object is None:
return ''
if type(object) == type([]):
return map(__stringify, object)
    # We rely on __stringify converting immutable types into immutable
    # types
if type(object) == type(()):
return tuple(map(__stringify, object))
if type(object) == type({}):
ret = {}
for k, v in object.items():
ret[__stringify(k)] = __stringify(v)
return ret
# by default, we just str() it
return str(object)
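# Editor's illustrative example of __stringify's behaviour, derived from the
# branches above (None becomes '', containers are converted recursively):
#
#   __stringify({'a': None, 'b': [1, None], 'c': (2,)})
#   # -> {'a': '', 'b': ['1', ''], 'c': ('2',)}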
# return the channel information
def channel_info(channel):
log_debug(3, channel)
# get the channel information
h = rhnSQL.prepare("""
select
ca.label arch,
c.id,
c.parent_channel,
c.org_id,
c.label,
c.name,
c.summary,
c.description,
to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
from
rhnChannel c,
rhnChannelArch ca
where
c.channel_arch_id = ca.id
and c.label = :channel
""")
h.execute(channel = str(channel))
ret = h.fetchone_dict()
return __stringify(ret)
# return information about a base channel for a server_id
def get_base_channel(server_id, none_ok = 0):
log_debug(3, server_id)
h = rhnSQL.prepare("""
select
ca.label arch,
c.id,
c.parent_channel,
c.org_id,
c.label,
c.name,
c.summary,
c.description,
to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
from rhnChannel c, rhnChannelArch ca, rhnServerChannel sc
where sc.server_id = :server_id
and sc.channel_id = c.id
and c.channel_arch_id = ca.id
and c.parent_channel is NULL
""")
h.execute(server_id = str(server_id))
ret = h.fetchone_dict()
if not ret:
if not none_ok:
log_error("Server not subscribed to a base channel!", server_id)
return None
return __stringify(ret)
def channels_for_server(server_id):
"""channel info list for all channels accessible by this server.
list channels a server_id is subscribed to
We DO NOT want to cache this one because we depend on getting
accurate information and the caching would only introduce more
overhead on an otherwise very fast query
"""
log_debug(3, server_id)
try:
server_id = int(server_id)
except:
raise rhnFault(8, server_id), None, sys.exc_info()[2] # Invalid rhnServer.id
    # XXX: need to return unsubscribed channels and a way to indicate
    # they aren't already subscribed
# list all the channels this server is subscribed to. We also want
# to know if any of those channels has local packages in it... A
    # local package has an org_id set.
h = rhnSQL.prepare("""
select
ca.label arch,
c.id,
c.parent_channel,
c.org_id,
c.label,
c.name,
c.summary,
c.description,
c.gpg_key_url,
case s.org_id when c.org_id then 1 else 0 end local_channel,
TO_CHAR(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
from
rhnChannelArch ca,
rhnChannel c,
rhnServerChannel sc,
rhnServer s
where
c.id = sc.channel_id
and sc.server_id = :server_id
and s.id = :server_id
and ca.id = c.channel_arch_id
order by c.parent_channel nulls first
""")
h.execute(server_id = str(server_id))
channels = h.fetchall_dict()
if not channels:
log_error("Server not subscribed to any channels", server_id)
channels = []
return __stringify(channels)
def isCustomChannel(channel_id):
"""
Input: channel_id (from DB Table rhnChannel.id)
Returns: True if this is a custom channel
False if this is not a custom channel
"""
log_debug(3, channel_id)
h = rhnSQL.prepare("""
select
rcf.label
from
rhnChannelFamily rcf,
rhnChannelFamilyMembers rcfm
where
rcfm.channel_id = :channel_id
and rcfm.channel_family_id = rcf.id
and rcf.org_id is not null
""")
h.execute(channel_id = str(channel_id))
    label = h.fetchone()
if label:
if label[0].startswith("private-channel-family"):
log_debug(3, channel_id, "is a custom channel")
return True
return False
# Fetch base channel for a given release and arch
def base_channel_for_rel_arch(release, server_arch, org_id=-1,
user_id=None):
log_debug(4, release, server_arch, org_id, user_id)
query = """
select ca.label arch,
c.id,
c.parent_channel,
c.org_id,
c.label,
c.name,
c.summary,
c.description,
to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified,
rhn_channel.available_chan_subscriptions(c.id, :org_id) available_subscriptions
from rhnChannel c,
rhnChannelArch ca
where c.channel_arch_id = ca.id
and c.id = rhn_channel.base_channel_for_release_arch(
:release, :server_arch, :org_id, :user_id)
"""
rhnSQL.transaction("base_channel_for_rel_arch")
h = rhnSQL.prepare(query)
try:
h.execute(release = str(release), server_arch = str(server_arch),
org_id=org_id, user_id=user_id)
except rhnSQL.SQLSchemaError, e:
rhnSQL.rollback("base_channel_for_rel_arch")
if e.errno == 20263:
# Insufficient permissions for subscription
log_debug(4,'BaseChannelDeniedError')
raise BaseChannelDeniedError(), None, sys.exc_info()[2]
if e.errno == 20244:
# Server architecture could not be found
log_debug(4,'InvalidServerArchError')
raise InvalidServerArchError(str(server_arch)), None, sys.exc_info()[2]
        # Re-raise unknown exceptions
        log_debug(4,'unknown exception')
raise
log_debug(4, 'got past exceptions')
return h.fetchone_dict()
def base_eus_channel_for_ver_rel_arch(version, release, server_arch,
org_id=-1, user_id=None):
"""
given a redhat-release version, release, and server arch, return a list
of dicts containing the details of the channel z streams either match the
version/release pair, or are greater.
"""
log_debug(4, version, release, server_arch, org_id, user_id)
eus_channels_query = """
select c.id,
c.label,
c.name,
rcm.release,
c.receiving_updates
from
rhnChannelPermissions cp,
rhnChannel c,
rhnServerArch sa,
rhnServerChannelArchCompat scac,
rhnReleaseChannelMap rcm
where
rcm.version = :version
and scac.server_arch_id = sa.id
and sa.label = :server_arch
and scac.channel_arch_id = rcm.channel_arch_id
and rcm.channel_id = c.id
and cp.channel_id = c.id
and cp.org_id = :org_id
and rhn_channel.loose_user_role_check(c.id, :user_id,
'subscribe') = 1
"""
eus_channels_prepared = rhnSQL.prepare(eus_channels_query)
eus_channels_prepared.execute(version = version,
server_arch = server_arch,
user_id = user_id,
org_id = org_id)
channels = []
while True:
channel = eus_channels_prepared.fetchone_dict()
if channel is None:
break
# the release part of redhat-release for rhel 4 is like
# 6.1 or 7; we just look at the first digit.
# for rhel 5 and up it's the full release number of rhel, followed by
# the true release number of the rpm, like 5.0.0.9 (for the 9th
# version of the redhat-release rpm, for RHEL GA)
db_release = channel['release']
if version in ['4AS', '4ES']:
parts = 1
else:
parts = 3
server_rel = '.'.join(release.split('.')[:parts])
channel_rel = '.'.join(db_release.split('.')[:parts])
# XXX we're no longer using the is_default column from the db
if rpm.labelCompare(('0', server_rel, '0'),
('0', channel_rel, '0')) == 0:
channel['is_default'] = 'Y'
channels.append(channel)
if rpm.labelCompare(('0', server_rel, '0'),
('0', channel_rel, '0')) < 0:
channel['is_default'] = 'N'
channels.append(channel)
return channels
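# Illustrative examples of the release truncation above (hypothetical values):
# for any version other than '4AS'/'4ES' (e.g. RHEL 5), parts=3, so a
# redhat-release of '5.0.0.9' is compared as '5.0.0'; for version '4AS',
# parts=1, so a release of '7.2' is compared as just '7'.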
def get_channel_for_release_arch(release, server_arch, org_id = None):
log_debug(3, release, server_arch)
server_arch = rhnLib.normalize_server_arch(str(server_arch))
log_debug(3, 'normalized arch as %s' % server_arch)
if org_id is None:
query = """
select distinct
ca.label arch,
c.id,
c.parent_channel,
c.org_id,
c.label,
c.name,
c.summary,
c.description,
to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
from rhnDistChannelMap dcm,
rhnChannel c,
rhnChannelArch ca,
rhnServerChannelArchCompat scac,
rhnServerArch sa
where scac.server_arch_id = sa.id
and sa.label = :server_arch
and scac.channel_arch_id = dcm.channel_arch_id
and dcm.release = :release
and dcm.channel_id = c.id
and dcm.channel_arch_id = c.channel_arch_id
and dcm.org_id is null
and c.parent_channel is null
and c.org_id is null
and c.channel_arch_id = ca.id
"""
else:
query = """
select distinct
ca.label arch,
c.id,
c.parent_channel,
c.org_id,
c.label,
c.name,
c.summary,
c.description,
to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
from rhnOrgDistChannelMap odcm,
rhnChannel c,
rhnChannelArch ca,
rhnServerChannelArchCompat scac,
rhnServerArch sa
where scac.server_arch_id = sa.id
and sa.label = :server_arch
and scac.channel_arch_id = odcm.channel_arch_id
and odcm.release = :release
and odcm.channel_id = c.id
and odcm.channel_arch_id = c.channel_arch_id
and odcm.org_id = :org_id
and c.parent_channel is null
and c.org_id is null
and c.channel_arch_id = ca.id
"""
h = rhnSQL.prepare(query)
h.execute(release=str(release), server_arch=server_arch, org_id=org_id)
row = h.fetchone_dict()
if not row:
        # No channels for this guy
        log_debug(3,'No channels for this guy')
return None
log_debug(3,'row is %s' % str(row))
return row
def applet_channels_for_uuid(uuid):
log_debug(3, uuid)
query = """
select distinct
ca.label arch,
c.id,
c.parent_channel,
c.org_id,
c.label,
c.name,
c.summary,
c.description,
to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified,
to_char(s.channels_changed, 'YYYYMMDDHH24MISS') server_channels_changed
from rhnChannelArch ca,
rhnChannel c,
rhnServerChannel sc,
rhnServer s,
rhnServerUuid su
where su.uuid = :uuid
and su.server_id = s.id
and su.server_id = sc.server_id
and sc.channel_id = c.id
and c.channel_arch_id = ca.id
"""
h = rhnSQL.prepare(query)
h.execute(uuid=uuid)
rows = h.fetchall_dict() or []
return rows
# retrieve a list of public channels for a given release and architecture
# we cannot cache this if it involves an org_id
# If a user_id is passed to this function, and all the available base channels
# for this server_arch/release combination are denied by the org admin, this
# function raises BaseChannelDeniedError
def channels_for_release_arch(release, server_arch, org_id=-1, user_id=None):
if not org_id:
org_id = -1
org_id = string.strip(str(org_id))
log_debug(3, release, server_arch, org_id)
# Can raise BaseChannelDeniedError or InvalidServerArchError
base_channel = base_channel_for_rel_arch(release, server_arch,
org_id=org_id, user_id=user_id)
if not base_channel:
raise NoBaseChannelError()
# At this point, base_channel is not null
# We assume here that subchannels are compatible with the base channels,
# so there would be no need to check for arch compatibility from this
# point
h = rhnSQL.prepare("""
select
ca.label arch,
c.id,
c.parent_channel,
c.org_id,
c.label,
c.name,
c.summary,
c.description,
to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified,
-- If user_id is null, then the channel is subscribable
rhn_channel.loose_user_role_check(c.id, :user_id, 'subscribe')
subscribable,
rhn_channel.available_chan_subscriptions(c.id, :org_id) available_subscriptions
from
rhnChannelPermissions cp,
rhnOrgDistChannelMap odcm,
rhnChannel c,
rhnChannelArch ca
where
c.id = odcm.channel_id
and odcm.os in (
'Powertools'
)
and odcm.for_org_id = :org_id
and c.channel_arch_id = ca.id
and cp.channel_id = c.id
and cp.org_id = :org_id
and c.parent_channel = :parent_channel
""")
h.execute(org_id = org_id,
parent_channel=base_channel['id'], user_id=user_id)
channels = [base_channel]
while 1:
row = h.fetchone_dict()
if not row:
break
subscribable = row['subscribable']
del row['subscribable']
if not subscribable:
# Not allowed to subscribe to this channel
continue
channels.append(row)
return __stringify(channels)
_query_get_source_packages_from_ids = rhnSQL.Statement("""
select srpm.name
from rhnChannelPackage cp,
rhnPackage p,
rhnSourceRPM srpm
where cp.channel_id = :channel_id
and cp.package_id = p.id
and p.source_rpm_id = srpm.id
""")
def list_packages_source(channel_id):
ret = []
h = rhnSQL.prepare(_query_get_source_packages_from_ids)
h.execute(channel_id=channel_id)
results = h.fetchall_dict()
if results:
for r in results:
r = r['name']
if string.find(r, ".rpm") != -1:
r = string.replace(r, ".rpm", "")
new_evr = rhnLib.make_evr(r,source=1)
new_evr_list = [new_evr['name'], new_evr['version'], new_evr['release'],new_evr['epoch']]
ret.append(new_evr_list)
return ret
# the latest packages from the specified channel
_query_all_packages_from_channel_checksum = """
select
p.id,
pn.name,
pevr.version,
pevr.release,
pevr.epoch,
pa.label arch,
p.package_size,
ct.label as checksum_type,
c.checksum
from
rhnChannelPackage cp,
rhnPackage p,
rhnPackageName pn,
rhnPackageEVR pevr,
rhnPackageArch pa,
rhnChecksumType ct,
rhnChecksum c
where
cp.channel_id = :channel_id
and cp.package_id = p.id
and p.name_id = pn.id
and p.evr_id = pevr.id
and p.package_arch_id = pa.id
and p.checksum_id = c.id
and c.checksum_type_id = ct.id
order by pn.name, pevr.evr desc, pa.label
"""
# This function executes the SQL call for listing packages with checksum info
def list_all_packages_checksum_sql(channel_id):
log_debug(3, channel_id)
h = rhnSQL.prepare(_query_all_packages_from_channel_checksum)
h.execute(channel_id = str(channel_id))
ret = h.fetchall_dict()
if not ret:
return []
# process the results
ret = map(lambda a: (a["name"], a["version"], a["release"], a["epoch"],
a["arch"], a["package_size"], a['checksum_type'],
a['checksum']),
__stringify(ret))
return ret
# This function executes the SQL call for listing latest packages with
# checksum info
def list_packages_checksum_sql(channel_id):
log_debug(3, channel_id)
# return the latest packages from the specified channel
query = """
select
pn.name,
pevr.version,
pevr.release,
pevr.epoch,
pa.label arch,
full_channel.package_size,
full_channel.checksum_type,
full_channel.checksum
from
rhnPackageArch pa,
( select
p.name_id,
max(pe.evr) evr
from
rhnChannelPackage cp,
rhnPackage p,
rhnPackageEVR pe
where
cp.channel_id = :channel_id
and cp.package_id = p.id
and p.evr_id = pe.id
group by p.name_id
) listall,
( select distinct
p.package_size,
p.name_id,
p.evr_id,
p.package_arch_id,
ct.label as checksum_type,
c.checksum
from
rhnChannelPackage cp,
rhnPackage p,
rhnChecksumType ct,
rhnChecksum c
where
cp.channel_id = :channel_id
and cp.package_id = p.id
and p.checksum_id = c.id
and c.checksum_type_id = ct.id
) full_channel,
-- Rank the package's arch
( select
package_arch_id,
count(*) rank
from
rhnServerPackageArchCompat
group by package_arch_id
) arch_rank,
rhnPackageName pn,
rhnPackageEVR pevr
where
pn.id = listall.name_id
-- link back to the specific package
and full_channel.name_id = listall.name_id
and full_channel.evr_id = pevr.id
and pevr.evr = listall.evr
and pa.id = full_channel.package_arch_id
and pa.id = arch_rank.package_arch_id
order by pn.name, arch_rank.rank desc
"""
h = rhnSQL.prepare(query)
h.execute(channel_id = str(channel_id))
ret = h.fetchall_dict()
if not ret:
return []
# process the results
ret = map(lambda a: (a["name"], a["version"], a["release"], a["epoch"],
a["arch"], a["package_size"], a['checksum_type'],
a['checksum']),
__stringify(ret))
return ret
# This function executes the SQL call for listing packages
def _list_packages_sql(query, channel_id):
h = rhnSQL.prepare(query)
h.execute(channel_id = str(channel_id))
ret = h.fetchall_dict()
if not ret:
return []
# process the results
ret = map(lambda a: (a["name"], a["version"], a["release"], a["epoch"],
a["arch"], a["package_size"]),
__stringify(ret))
return ret
def list_packages_sql(channel_id):
log_debug(3, channel_id)
# return the latest packages from the specified channel
query = """
select
pn.name,
pevr.version,
pevr.release,
pevr.epoch,
pa.label arch,
full_channel.package_size
from
rhnPackageArch pa,
( select
p.name_id,
max(pe.evr) evr
from
rhnChannelPackage cp,
rhnPackage p,
rhnPackageEVR pe
where
cp.channel_id = :channel_id
and cp.package_id = p.id
and p.evr_id = pe.id
group by p.name_id
) listall,
( select distinct
p.package_size,
p.name_id,
p.evr_id,
p.package_arch_id
from
rhnChannelPackage cp,
rhnPackage p
where
cp.channel_id = :channel_id
and cp.package_id = p.id
) full_channel,
-- Rank the package's arch
( select
package_arch_id,
count(*) rank
from
rhnServerPackageArchCompat
group by package_arch_id
) arch_rank,
rhnPackageName pn,
rhnPackageEVR pevr
where
pn.id = listall.name_id
-- link back to the specific package
and full_channel.name_id = listall.name_id
and full_channel.evr_id = pevr.id
and pevr.evr = listall.evr
and pa.id = full_channel.package_arch_id
and pa.id = arch_rank.package_arch_id
order by pn.name, arch_rank.rank desc
"""
return _list_packages_sql(query, channel_id)
# the latest packages from the specified channel
_query_latest_packages_from_channel = """
select
p.id,
pn.name,
pevr.version,
pevr.release,
pevr.epoch,
pa.label arch,
p.package_size
from
rhnChannelPackage cp,
rhnPackage p,
rhnPackageName pn,
rhnPackageEVR pevr,
rhnPackageArch pa
where
cp.channel_id = :channel_id
and cp.package_id = p.id
and p.name_id = pn.id
and p.evr_id = pevr.id
and p.package_arch_id = pa.id
order by pn.name, pevr.evr desc, pa.label
"""
# This function executes the SQL call for listing packages
def list_all_packages_sql(channel_id):
log_debug(3, channel_id)
return _list_packages_sql(_query_latest_packages_from_channel, channel_id)
# This function executes the SQL call for listing packages with all the
# dep information for each package also
def list_all_packages_complete_sql(channel_id):
log_debug(3, channel_id)
# return the latest packages from the specified channel
h = rhnSQL.prepare(_query_latest_packages_from_channel)
# This gathers the provides, requires, conflicts, obsoletes info
g = rhnSQL.prepare("""
select
pp.package_id,
'provides' as capability_type,
pp.capability_id,
pp.sense,
pc.name,
pc.version
from
rhnPackageProvides pp,
rhnPackageCapability pc
where
pp.package_id = :package_id
and pp.capability_id = pc.id
union all
select
pr.package_id,
'requires' as capability_type,
pr.capability_id,
pr.sense,
pc.name,
pc.version
from
rhnPackageRequires pr,
rhnPackageCapability pc
where
pr.package_id = :package_id
and pr.capability_id = pc.id
union all
select
prec.package_id,
'recommends' as capability_type,
prec.capability_id,
prec.sense,
pc.name,
pc.version
from
rhnPackageRecommends prec,
rhnPackageCapability pc
where
prec.package_id = :package_id
and prec.capability_id = pc.id
union all
select
sugg.package_id,
'suggests' as capability_type,
sugg.capability_id,
sugg.sense,
pc.name,
pc.version
from
rhnPackageSuggests sugg,
rhnPackageCapability pc
where
sugg.package_id = :package_id
and sugg.capability_id = pc.id
union all
select
supp.package_id,
'supplements' as capability_type,
supp.capability_id,
supp.sense,
pc.name,
pc.version
from
rhnPackageSupplements supp,
rhnPackageCapability pc
where
supp.package_id = :package_id
and supp.capability_id = pc.id
union all
select
enh.package_id,
'enhances' as capability_type,
enh.capability_id,
enh.sense,
pc.name,
pc.version
from
rhnPackageEnhances enh,
rhnPackageCapability pc
where
enh.package_id = :package_id
and enh.capability_id = pc.id
union all
select
pcon.package_id,
'conflicts' as capability_type,
pcon.capability_id,
pcon.sense,
pc.name,
pc.version
from
rhnPackageConflicts pcon,
rhnPackageCapability pc
where
pcon.package_id = :package_id
and pcon.capability_id = pc.id
union all
select
po.package_id,
'obsoletes' as capability_type,
po.capability_id,
po.sense,
pc.name,
pc.version
from
rhnPackageObsoletes po,
rhnPackageCapability pc
where
po.package_id = :package_id
and po.capability_id = pc.id
union all
select
brks.package_id,
'breaks' as capability_type,
brks.capability_id,
brks.sense,
pc.name,
pc.version
from
rhnPackageBreaks brks,
rhnPackageCapability pc
where
brks.package_id = :package_id
and brks.capability_id = pc.id
union all
select
pdep.package_id,
'predepends' as capability_type,
pdep.capability_id,
pdep.sense,
pc.name,
pc.version
from
rhnPackagePredepends pdep,
rhnPackageCapability pc
where
pdep.package_id = :package_id
and pdep.capability_id = pc.id
""")
h.execute(channel_id = str(channel_id))
# XXX This query has to order the architectures somehow; the 7.2 up2date
# client was broken and was selecting the wrong architecture if athlons
# are passed first. The rank ordering here should make sure that i386
# kernels appear before athlons.
ret = h.fetchall_dict()
if not ret:
return []
for pkgi in ret:
pkgi['provides'] = []
pkgi['requires'] = []
pkgi['conflicts'] = []
pkgi['obsoletes'] = []
pkgi['recommends'] = []
pkgi['suggests'] = []
pkgi['supplements'] = []
pkgi['enhances'] = []
pkgi['breaks'] = []
pkgi['predepends'] = []
g.execute(package_id = pkgi["id"])
deps = g.fetchall_dict() or []
for item in deps:
version = item['version'] or ""
relation = ""
if version:
sense = item['sense'] or 0
if sense & 2: relation = relation + "<"
if sense & 4: relation = relation + ">"
if sense & 8: relation = relation + "="
if relation: relation = " " + relation
if version: version = " " + version
dep = item['name'] + relation + version
pkgi[item['capability_type']].append(dep)
# process the results
ret = map(lambda a: (a["name"], a["version"], a["release"], a["epoch"],
a["arch"], a["package_size"], a['provides'],
a['requires'], a['conflicts'], a['obsoletes'], a['recommends'], a['suggests'], a['supplements'], a['enhances'], a['breaks'], a['predepends']),
__stringify(ret))
return ret
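# Illustrative sketch (not used by this module) of the dependency "sense"
# bit-field decoding performed in the loop above: bit 2 maps to "<", bit 4
# to ">" and bit 8 to "=", concatenated in that order.
def _example_decode_sense(sense):
    relation = ""
    if sense & 2: relation = relation + "<"
    if sense & 4: relation = relation + ">"
    if sense & 8: relation = relation + "="
    return relation
# e.g. _example_decode_sense(10) == "<=" and _example_decode_sense(12) == ">="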
def list_packages_path(channel_id):
log_debug(3, channel_id)
# return the latest packages from the specified channel
h = rhnSQL.prepare("""
select
p.path
from
rhnPackage p,
rhnChannelPackage cp
where
cp.channel_id = :channel_id
and cp.package_id = p.id
""")
h.execute(channel_id = str(channel_id))
ret = h.fetchall()
if not ret:
return []
# process the results
# ret = map(lambda a: (a["path"]),
# __stringify(ret))
return ret
# list the latest packages for a channel
def list_packages(channel):
return _list_packages(channel, cache_prefix="list_packages",
function=list_packages_sql)
# list _all_ the packages for a channel
def list_all_packages(channel):
return _list_packages(channel, cache_prefix="list_all_packages",
function=list_all_packages_sql)
# list _all_ the packages for a channel, including checksum info
def list_all_packages_checksum(channel):
return _list_packages(channel, cache_prefix="list_all_packages_checksum",
function=list_all_packages_checksum_sql)
# list _all_ the packages for a channel
def list_all_packages_complete(channel):
return _list_packages(channel, cache_prefix="list_all_packages_complete",
function=list_all_packages_complete_sql)
# Common part of list_packages and list_all_packages*
# cache_prefix is the prefix for the file name we're caching this request as
# function is the generator function
def _list_packages(channel, cache_prefix, function):
log_debug(3, channel, cache_prefix)
# try the caching thing first
c_info = channel_info(channel)
if not c_info: # unknown channel
raise rhnFault(40, "could not find any data on channel '%s'" % channel)
cache_entry = "%s-%s" % (cache_prefix, channel)
ret = rhnCache.get(cache_entry, c_info["last_modified"])
if ret: # we scored a cache hit
log_debug(4, "Scored cache hit", channel)
# Mark the response as being already XMLRPC-encoded
rhnFlags.set("XMLRPC-Encoded-Response", 1)
return ret
ret = function(c_info["id"])
if not ret:
# we assume that channels with no packages are very fast to list,
# so we don't bother caching...
log_error("No packages found in channel",
c_info["id"], c_info["label"])
return []
# we need to append the channel label to the list
ret = map(lambda a, c = channel: a + (c,), ret)
ret = xmlrpclib.dumps((ret, ), methodresponse=1)
# Mark the response as being already XMLRPC-encoded
rhnFlags.set("XMLRPC-Encoded-Response", 1)
# set the cache
rhnCache.set(cache_entry, ret, c_info["last_modified"])
return ret
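# Cache behavior above (illustrative): entries are keyed on
# "<cache_prefix>-<channel label>" (e.g. a hypothetical
# "list_packages-my-custom-channel") and validated against the channel's
# last_modified timestamp, so a newer last_modified invalidates the cached,
# pre-encoded XMLRPC response.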
def getChannelInfoForKickstart(kickstart):
query = """
select c.label,
to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
from rhnChannel c,
rhnKickstartableTree kt
where c.id = kt.channel_id
and kt.label = :kickstart_label
"""
h = rhnSQL.prepare(query)
h.execute(kickstart_label = str(kickstart))
return h.fetchone_dict()
def getChannelInfoForKickstartOrg(kickstart, org_id):
query = """
select c.label,
to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
from rhnChannel c,
rhnKickstartableTree kt
where c.id = kt.channel_id
and kt.label = :kickstart_label
and kt.org_id = :org_id
"""
h = rhnSQL.prepare(query)
h.execute(kickstart_label = str(kickstart), org_id = int(org_id))
return h.fetchone_dict()
def getChannelInfoForKickstartSession(session):
# decode the session string
try:
session_id = int(session.split('x')[0].split(':')[0])
except Exception:
return None, None
query = """
select c.label,
to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
from rhnChannel c,
rhnKickstartableTree kt,
rhnKickstartSession ks
where c.id = kt.channel_id
and kt.id = ks.kstree_id
and ks.id = :session_id
"""
h = rhnSQL.prepare(query)
h.execute(session_id = session_id)
return h.fetchone_dict()
def getChildChannelInfoForKickstart(kickstart, child):
query = """
select c.label,
to_char(c.last_modified, 'YYYYMMDDHH24MISS') last_modified
from rhnChannel c,
rhnKickstartableTree kt,
rhnKickstartSession ks,
rhnChannel c2
where c2.id = kt.channel_id
and kt.label = :kickstart_label
and c.label = :child_label
and c.parent_channel = c2.id
"""
h = rhnSQL.prepare(query)
h.execute(kickstart_label = str(kickstart), child_label = str(child))
return h.fetchone_dict()
def getChannelInfoForTinyUrl(tinyurl):
query = """
select tu.url
from rhnTinyUrl tu
where tu.enabled = 'Y'
and tu.token = :tinyurl
"""
h = rhnSQL.prepare(query)
h.execute(tinyurl = str(tinyurl))
return h.fetchone_dict()
# list the obsoletes for a channel
def list_obsoletes(channel):
log_debug(3, channel)
# try the caching thing first
c_info = channel_info(channel)
if not c_info: # unknown channel
raise rhnFault(40, "could not find any data on channel '%s'" % channel)
cache_entry = "list_obsoletes-%s" % channel
ret = rhnCache.get(cache_entry, c_info["last_modified"])
if ret: # we scored a cache hit
log_debug(4, "Scored cache hit", channel)
return ret
# Get the obsoleted packages
h = rhnSQL.prepare("""
select distinct
pn.name,
pe.version, pe.release, pe.epoch,
pa.label arch,
pc.name obsolete_name,
pc.version obsolete_version,
p_info.sense
from rhnPackageCapability pc,
rhnPackageArch pa,
rhnPackageEVR pe,
rhnPackageName pn,
rhnPackage p,
( select cp.channel_id,
po.package_id, po.capability_id, po.sense
from rhnPackageObsoletes po,
rhnChannelPackage cp,
rhnChannel c
where 1=1
and c.label = :channel
and c.id = cp.channel_id
and cp.package_id = po.package_id
) p_info
where 1=1
and p_info.package_id = p.id
and p.name_id = pn.id
and p.evr_id = pe.id
and p.package_arch_id = pa.id
and p_info.capability_id = pc.id
""")
h.execute(channel = str(channel))
# Store stuff in a dictionary to makes things simpler
hash = {}
while 1:
row = h.fetchone_dict()
if not row:
break
row = __stringify(row)
key = (row['name'], row['version'], row['release'],
row["epoch"], row['arch'])
value = key + (row['obsolete_name'], row['obsolete_version'],
row['sense'])
if not hash.has_key(key):
hash[key] = []
hash[key].append(value)
# Now grab a listall and match it against what we got
pkglist = list_packages_sql(c_info["id"])
result = []
for pkg in pkglist:
key = tuple(pkg[:5])
if hash.has_key(key):
for p in hash[key]:
result.append(p)
# we can cache this now
rhnCache.set(cache_entry, result, c_info["last_modified"])
return result
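# Summary of the matching above: the hash maps each obsoleting package's
# (name, version, release, epoch, arch) tuple to its obsolete entries, and the
# result only keeps entries whose NVREA also appears in the channel's
# latest-package list from list_packages_sql.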
def __auth_user(server_id, username, password):
""" Auth if user can add/remove channel from given server """
log_debug(3, server_id, username)
# check the username and password for compliance
user = rhnUser.auth_username_password(username, password)
# The user's password checks, verify that they have perms on that
# server.
h = rhnSQL.prepare("""
select count(*)
from rhnUserServerPerms usp
where usp.user_id = :user_id
and usp.server_id = :server_id
""")
h.execute(user_id = str(user.getid()), server_id = str(server_id))
res = h.fetchone_dict()
if not res:
# Not allowed to perform administrative tasks on this server
raise rhnFault(37)
return 1
# small wrapper around a PL/SQL function
def _subscribe_sql(server_id, channel_id, commit = 1):
log_debug(3, server_id, channel_id, commit)
subscribe_channel = rhnSQL.Procedure("rhn_channel.subscribe_server")
try:
# don't run the EC yet
subscribe_channel(server_id, channel_id, 0)
except rhnSQL.SQLSchemaError, e:
if e.errno == 20235: # channel_family_no_subscriptions
raise SubscriptionCountExceeded(channel_id=channel_id), None, sys.exc_info()[2]
if e.errno == 20102: # channel_server_one_base
log_error("Channel subscribe failed, "\
"%s already subscribed to %s (?)" % (server_id, channel_id))
raise rhnFault(38, "Server already subscribed to %s" % channel_id), None, sys.exc_info()[2]
# If we got here, it's an unknown error; ISE (for now)
log_error("SQLSchemaError", e)
raise rhnException(e), None, sys.exc_info()[2]
except rhnSQL.SQLError, e:
# If we got here, it's an unknown error; ISE (for now)
log_error("SQLError", e)
raise rhnException(e), None, sys.exc_info()[2]
if commit:
rhnSQL.commit()
return 1
# Wrapper around _subscribe_sql, raises rhnFault instead of
# SubscriptionCountExceeded
def subscribe_sql(server_id, channel_id, commit=1):
try:
_subscribe_sql(server_id, channel_id, commit=commit)
except SubscriptionCountExceeded:
log_error("Subscription count exceeded for channel id %s" %
channel_id)
raise rhnFault(70, "Subscription count for the target channel exceeded"), None, sys.exc_info()[2]
_query_parent_channel_subscribed = rhnSQL.Statement("""
select 1
from rhnChannel c
join rhnServerChannel sc on c.parent_channel = sc.channel_id
where sc.server_id = :sid
and c.label = :channel
""")
_query_can_subscribe = rhnSQL.Statement("""
select rhn_channel.user_role_check(:cid, wc.id, 'subscribe') as can_subscribe
from web_contact wc
where wc.login_uc = upper(:username)
""")
# subscribe a server to a channel with authentication
def subscribe_channel(server_id, channel, username, password):
log_debug(3, server_id, channel, username)
# If auth doesn't blow up we're fine
__auth_user(server_id, username, password)
# get the channel_id
h = rhnSQL.prepare("select id from rhnChannel where label = :channel")
h.execute(channel = str(channel))
ret = h.fetchone_dict()
if not ret:
log_error("Channel %s does not exist?" % channel)
raise rhnFault(40, "Channel %s does not exist?" % channel)
channel_id = ret['id']
# check if server is subscribed to the parent of the given channel
h = rhnSQL.prepare(_query_parent_channel_subscribed)
h.execute(sid=server_id, channel=str(channel))
ret = h.fetchone_dict()
if not ret:
log_error("Parent of channel %s is not subscribed to server" % channel)
raise rhnFault(32, "Parent of channel %s is not subscribed to server" % channel)
# check specific channel subscription permissions
h = rhnSQL.prepare(_query_can_subscribe)
h.execute(cid=channel_id, username=username)
ret = h.fetchone_dict()
if ret and ret['can_subscribe']:
subscribe_sql(server_id, channel_id)
return 1
raise rhnFault(71)
# This class is only a convenient encapsulation of a server's attributes:
# server_id, org_id, release, arch, user_id. Sometimes we only pass the
# server_id, and later down the road we have to message "no channel for
# release foo, arch bar", but we don't know the release and arch anymore
class LiteServer:
_attributes = ['id', 'org_id', 'release', 'arch']
def __init__(self, **kwargs):
# Initialize attributes from **kwargs (set to None if value is not
# present)
for attr in self._attributes:
setattr(self, attr, kwargs.get(attr))
def init_from_server(self, server):
self.id = server.getid()
self.org_id = server.server['org_id']
self.release = server.server['release']
self.arch = server.archname
return self
def __repr__(self):
dict = {}
for attr in self._attributes:
dict[attr] = getattr(self, attr)
return "<%s instance at %s: attributes=%s>" % (
self.__class__.__name__, id(self), dict)
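# Illustrative usage (hypothetical values):
#   s = LiteServer(id=1000010000, org_id=1, release='5Server',
#                  arch='i386-redhat-linux')
# or, starting from a full server object:
#   s = LiteServer().init_from_server(server)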
# If raise_exceptions is set, BaseChannelDeniedError, NoBaseChannelError are
# raised
def guess_channels_for_server(server, user_id=None, none_ok=0,
raise_exceptions=0):
log_debug(3, server)
if not isinstance(server, LiteServer):
raise rhnException("Server object is not a LiteServer")
if None in (server.org_id, server.release, server.arch):
# need to obtain the release and/or arch and/or org_id
h = rhnSQL.prepare("""
select s.org_id, s.release, sa.label arch
from rhnServer s, rhnServerArch sa
where s.id = :server_id and s.server_arch_id = sa.id
""")
h.execute(server_id=server.id)
ret = h.fetchone_dict()
if not ret:
log_error("Could not get the release/arch "\
"for server %s" % server.id)
raise rhnFault(8, "Could not find the release/arch "\
"for server %s" % server.id)
if server.org_id is None:
server.org_id = ret["org_id"]
if server.release is None:
server.release = ret["release"]
if server.arch is None:
server.arch = ret["arch"]
if raise_exceptions and not none_ok:
# Let exceptions pass through
return channels_for_release_arch(server.release, server.arch,
server.org_id, user_id=user_id)
try:
return channels_for_release_arch(server.release, server.arch,
server.org_id, user_id=user_id)
except NoBaseChannelError:
if none_ok:
return []
log_error("No available channels for (server, org)",
(server.id, server.org_id), server.release, server.arch)
msg = _("Your account does not have access to any channels matching "
"(release='%(release)s', arch='%(arch)s')%(www_activation)s")
error_strings = {
'release' : server.release,
'arch' : server.arch,
'www_activation' : ''
}
if CFG.REFER_TO_WWW:
error_strings['www_activation'] = _("\nIf you have a "
"registration number, please register with it first at "
"http://www.redhat.com/apps/activate/ and then try again.\n\n")
raise rhnFault(19, msg % error_strings), None, sys.exc_info()[2]
except BaseChannelDeniedError:
if none_ok:
return []
        raise rhnFault(71,
            _("Insufficient subscription permissions for release (%s, %s)")
            % (server.release, server.arch)), None, sys.exc_info()[2]
# Subscribes the server to channels
# can raise SubscriptionCountExceeded, BaseChannelDeniedError, NoBaseChannelError
# Only used for new server registrations
def subscribe_server_channels(server, user_id=None, none_ok=0):
s = LiteServer().init_from_server(server)
# bretm 02/19/2007 -- have to leave none_ok in here for now due to how
# the code is setup for reg token crap; it'd be very nice to clean up that
# path to eliminate any chance for a server to be registered and not have base
# channels, excluding expiration of channel entitlements
channels = guess_channels_for_server(s, user_id=user_id, none_ok=none_ok,
raise_exceptions=1)
rhnSQL.transaction('subscribe_server_channels')
for c in channels:
try:
_subscribe_sql(s.id, c["id"], 0)
except SubscriptionCountExceeded, e:
rhnSQL.rollback('subscribe_server_channels')
# Re-raise the exception
e.channel = c
raise
return channels
# small wrapper around a PL/SQL function
def unsubscribe_sql(server_id, channel_id, commit = 1):
log_debug(3, server_id, channel_id, commit)
unsubscribe_channel = rhnSQL.Procedure("rhn_channel.unsubscribe_server")
try:
# don't run the EC yet
unsubscribe_channel(server_id, channel_id, 0)
except rhnSQL.SQLError:
log_error("Channel unsubscribe from %s failed for %s" % (
channel_id, server_id))
return 0
if commit:
rhnSQL.commit()
return 1
# unsubscribe a server from a channel
def unsubscribe_channel(server_id, channel, username, password):
log_debug(3, server_id, channel, username)
# If auth doesn't blow up we're fine
__auth_user(server_id, username, password)
# now get the id of the channel
h = rhnSQL.prepare("""
select id, parent_channel from rhnChannel where label = :channel
""")
h.execute(channel = channel)
ret = h.fetchone_dict()
if not ret:
log_error("Asked to unsubscribe server %s from non-existent channel %s" % (
server_id, channel))
raise rhnFault(40, "The specified channel '%s' does not exist." % channel)
if not ret["parent_channel"]:
log_error("Cannot unsubscribe %s from base channel %s" % (
server_id, channel))
raise rhnFault(72, "You can not unsubscribe %s from base channel %s." % (
server_id, channel))
# check specific channel subscription permissions
channel_id = ret['id']
h = rhnSQL.prepare(_query_can_subscribe)
h.execute(cid=channel_id, username=username)
ret = h.fetchone_dict()
if ret and ret['can_subscribe']:
return unsubscribe_sql(server_id, channel_id)
raise rhnFault(71)
# unsubscribe from all channels
def unsubscribe_all_channels(server_id):
log_debug(3, server_id)
# We need to unsubscribe the children channels before the base ones.
rhnSQL.transaction("unsub_all_channels")
h = rhnSQL.prepare("""
select
sc.channel_id id
from
rhnChannel c,
rhnServerChannel sc
where
sc.server_id = :server_id
and sc.channel_id = c.id
order by c.parent_channel nulls last
""")
h.execute(server_id = str(server_id))
while 1:
c = h.fetchone_dict()
if not c:
break
ret = unsubscribe_sql(server_id, c["id"], 0)
if not ret:
rhnSQL.rollback("unsub_all_channels")
raise rhnFault(36, "Could not unsubscribe server %s "\
"from existing channels" % (server_id,))
# finished unsubscribing
return 1
# Unsubscribe the server from the channels in the list
# A channel is a hash containing at least the keys:
# [id, label, parent_channel]
def unsubscribe_channels(server_id, channels):
log_debug(4, server_id, channels)
if not channels:
# Nothing to do
return 1
# We need to unsubscribe the children channels before the base ones.
rhnSQL.transaction("unsub_channels")
base_channels = filter(lambda x: not x['parent_channel'], channels)
child_channels = filter(lambda x: x['parent_channel'], channels)
for channel in child_channels + base_channels:
ret = unsubscribe_sql(server_id, channel["id"], 0)
if not ret:
rhnSQL.rollback("unsub_channels")
raise rhnFault(36, "Could not unsubscribe server %s "\
"from channel %s" % (server_id, channel["label"]))
# finished unsubscribing
return 1
# Subscribe the server to the channels in the list
# A channel is a hash containing at least the keys:
# [id, label, parent_channel]
def subscribe_channels(server_id, channels):
log_debug(4, server_id, channels)
if not channels:
# Nothing to do
return 1
# We need to subscribe the base channel before the child ones.
base_channels = filter(lambda x: not x['parent_channel'], channels)
child_channels = filter(lambda x: x['parent_channel'], channels)
for channel in base_channels + child_channels:
subscribe_sql(server_id, channel["id"], 0)
# finished subscribing
return 1
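# Note the ordering in the two helpers above: unsubscription handles child
# channels before their base channels, while subscription handles base
# channels first, since a child subscription presupposes the base one.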
# check if a server is subscribed to a channel
def is_subscribed(server_id, channel):
log_debug(3, server_id, channel)
h = rhnSQL.prepare("""
select 1 subscribed
from rhnServerChannel sc, rhnChannel c
where
sc.channel_id = c.id
and c.label = :channel
and sc.server_id = :server_id
""")
h.execute(server_id = str(server_id), channel = str(channel))
ret = h.fetchone_dict()
if not ret:
# System not subscribed to channel
return 0
return 1
# Returns 0, "", "" if system does not need any message, or
# (error_code, message_title, message) otherwise
def system_reg_message(server):
server_id = server.server['id']
# Is this system subscribed to a channel?
h = rhnSQL.prepare("""
select sc.channel_id
from rhnServerChannel sc
where sc.server_id = :server_id
""")
h.execute(server_id=server_id)
ret = h.fetchone_dict()
if not ret:
# System not subscribed to any channel
#
return (-1, s_invalid_channel_title,
s_invalid_channel_message %
(server.server["release"], server.archname))
# System does have a base channel; check entitlements
    from rhnServer import server_lib  # imported here rather than at module top to avoid a TB from circular imports
entitlements = server_lib.check_entitlement(server_id)
if not entitlements:
# No entitlement
# We don't have an autoentitle preference for now, so display just one
# message
templates = rhnFlags.get('templateOverrides')
if templates and templates.has_key('hostname'):
hostname = templates['hostname']
else:
# Default to www
hostname = "rhn.redhat.com"
params = {
'entitlement_url' : "https://%s"
"/network/systems/details/edit.pxt?sid=%s" %
(hostname, server_id)
}
return -1, no_entitlement_title, no_entitlement_message % params
return 0, "", ""
def subscribe_to_tools_channel(server_id):
"""
Subscribes server_id to the RHN Tools channel associated with its base channel, if one exists.
"""
base_channel_dict = get_base_channel(server_id, none_ok=1)
if base_channel_dict is None:
raise NoBaseChannelError("Server %s has no base channel." % \
str(server_id))
lookup_child_channels = rhnSQL.Statement("""
select id, label, parent_channel
from rhnChannel
where parent_channel = :id
""")
child_channel_data = rhnSQL.prepare(lookup_child_channels)
child_channel_data.execute(id=base_channel_dict['id'])
child_channels = child_channel_data.fetchall_dict()
if child_channels is None:
raise NoChildChannels("Base channel id %s has no child channels associated with it." % \
base_channel_dict['id'])
tools_channel = None
for channel in child_channels:
if channel.has_key('label'):
if 'rhn-tools' in channel['label']:
tools_channel = channel
if tools_channel is None:
        raise NoToolsChannel("Base channel id %s does not have an RHN Tools channel as a child channel." % \
base_channel_dict['id'])
else:
if not tools_channel.has_key('id'):
raise InvalidChannel("RHN Tools channel has no id.")
if not tools_channel.has_key('label'):
raise InvalidChannel("RHN Tools channel has no label.")
if not tools_channel.has_key('parent_channel'):
raise InvalidChannel("RHN Tools channel has no parent_channel.")
subscribe_channels(server_id, [tools_channel])
# Various messages that can be reused
#
# bretm 02/07/2007 -- when we have better old-client documentation, probably
# will be safe to get rid of all this crap
h_invalid_channel_title = _("System Registered but Inactive")
h_invalid_channel_message = _("""
Invalid Architecture and OS release combination (%s, %s).
Your system has been registered, but will not receive updates
because it is not subscribed to a channel. If you have not yet
activated your product for service, please visit our website at:
http://www.redhat.com/apps/activate/
...to activate your product.""")
s_invalid_channel_title = _("System Registered but Inactive")
s_invalid_channel_message = _("""
Invalid Architecture and OS release combination (%s, %s).
Your system has been registered, but will not receive updates
because it could not be subscribed to a base channel.
Please contact your organization administrator for assistance.
""")
no_autoentitlement_message = _("""
This system has been successfully registered, but is not yet entitled
to service. To entitle this system to service, login to the web site at:
%(entitlement_url)s
""")
no_entitlement_title = _("System Registered but Inactive")
no_entitlement_message = _("""
This system has been successfully registered, but no service entitlements
were available. To entitle this system to service, login to the web site at:
%(entitlement_url)s
""")
subscription_count_exceeded_title = _("System Registered but Inactive")
subscription_count_exceeded_message = _("""
This system has been successfully registered, but the channel subscriptions
were exhausted
""")
|
moio/spacewalk
|
backend/server/rhnChannel.py
|
Python
|
gpl-2.0
| 70,026
|
[
"VisIt"
] |
fcae0832c1895d2c86d4ceb8d81b6a6af59450af2c1f101c06d139ca76437ed4
|
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Simulate electrophoresis of a linear polymer using the P3M electrostatics solver.
"""
import logging
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize
import espressomd
import espressomd.observables
import espressomd.polymer
from espressomd import electrostatics, interactions
logging.basicConfig(level=logging.INFO)
# Seed the RNG (pass a fixed int here for deterministic behavior)
np.random.seed()
required_features = ["P3M", "EXTERNAL_FORCES", "WCA"]
espressomd.assert_features(required_features)
N_SAMPLES = 1000
N_INT_STEPS = 100
E_FIELD = 1.0
N_MONOMERS = 20
N_IONS = 100
WARM_STEPS = 20
WARM_N_TIMES = 20
MIN_DIST = 0.9
system = espressomd.System(box_l=3 * [100.0])
system.time_step = 0.01
system.cell_system.skin = 0.4
# non-bonded interactions
###############################################################
# WCA between monomers
system.non_bonded_inter[0, 0].wca.set_params(epsilon=1, sigma=1)
# WCA counter-ions - polymer
system.non_bonded_inter[0, 1].wca.set_params(epsilon=1, sigma=1)
# WCA ions - polymer
system.non_bonded_inter[0, 2].wca.set_params(epsilon=1, sigma=1)
# WCA between ions
system.non_bonded_inter[1, 2].wca.set_params(epsilon=1, sigma=1)
# bonded interactions
################################################################
harmonic_bond = interactions.HarmonicBond(k=10, r_0=2)
angle_harmonic_bond = interactions.AngleHarmonic(bend=10, phi0=np.pi)
system.bonded_inter.add(harmonic_bond)
system.bonded_inter.add(angle_harmonic_bond)
# create monomer beads and bonds
##########################################################################
init_polymer_pos = espressomd.polymer.linear_polymer_positions(n_polymers=1, beads_per_chain=N_MONOMERS, bond_length=2.0,
seed=2, bond_angle=np.pi, min_distance=1.8, start_positions=np.array([system.box_l / 2.0]))
system.part.add(pos=init_polymer_pos[0], q=-np.ones(N_MONOMERS))
for i in range(1, N_MONOMERS):
system.part[i].add_bond((harmonic_bond, i - 1))
for i in range(1, N_MONOMERS - 1):
system.part[i].add_bond((angle_harmonic_bond, i - 1, i + 1))
# create counter-ions
###################################################################
system.part.add(pos=np.random.random((N_MONOMERS, 3)) * system.box_l,
q=np.ones(N_MONOMERS),
type=np.ones(N_MONOMERS, dtype=int))
# create excess ions
###############################################################
system.part.add(pos=np.random.random((N_IONS, 3)) * system.box_l,
q=np.hstack((np.ones(N_IONS // 2), -np.ones(N_IONS // 2))),
type=np.array(np.hstack((np.ones(N_IONS // 2), 2 * np.ones(N_IONS // 2))), dtype=int))
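# Note: the first N_IONS // 2 excess ions carry charge +1 and share type 1
# with the counter-ions; the remaining N_IONS // 2 carry charge -1 as type 2.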
logging.info("particle types: {}\n".format(system.part[:].type))
logging.info("total charge: {}".format(np.sum(system.part[:].q)))
# warm-up integration
###############################################################
system.integrator.set_steepest_descent(f_max=0, gamma=1e-3,
max_displacement=0.01)
i = 0
while system.analysis.min_dist() < MIN_DIST and i < WARM_N_TIMES:
logging.debug(
"total energy: {:+.2e}".format(system.analysis.energy()["total"]))
system.integrator.run(WARM_STEPS)
i += 1
logging.info(
"total energy after warm-up: {:+.2e}\n".format(system.analysis.energy()["total"]))
system.integrator.set_vv()
system.thermostat.set_langevin(kT=1.0, gamma=1.0, seed=42)
# activate electrostatics
#############################################################
p3m = electrostatics.P3M(prefactor=1.0, accuracy=1e-2)
system.actors.add(p3m)
# apply external force (external electric field)
#############################################################
n_part = len(system.part)
system.part[:].ext_force = np.dstack(
(system.part[:].q * np.ones(n_part) * E_FIELD, np.zeros(n_part), np.zeros(n_part)))[0]
# equilibration
#############################################################
system.integrator.run(500)
# observables for core analysis
#############################################################
obs_persistence_angles = espressomd.observables.CosPersistenceAngles(
ids=system.part[:N_MONOMERS].id)
acc_persistence_angles = espressomd.accumulators.MeanVarianceCalculator(
obs=obs_persistence_angles, delta_N=1)
system.auto_update_accumulators.add(acc_persistence_angles)
obs_bond_length = espressomd.observables.ParticleDistances(
ids=system.part[:N_MONOMERS].id)
acc_bond_length = espressomd.accumulators.MeanVarianceCalculator(
obs=obs_bond_length, delta_N=1)
system.auto_update_accumulators.add(acc_bond_length)
# data storage for python analysis
#############################################################
pos = np.full((N_SAMPLES, N_MONOMERS, 3), np.nan)
# sampling loop
#############################################################
for i in range(N_SAMPLES):
if i % 100 == 0:
logging.info("\rsampling: {:4d}".format(i))
system.integrator.run(N_INT_STEPS)
pos[i] = system.part[:N_MONOMERS].pos
logging.info("\nsampling finished!\n")
# data analysis
############################################################
# calculate center of mass (COM) and its velocity
#############################################################
COM = pos.sum(axis=1) / N_MONOMERS
COM_v = (COM[1:] - COM[:-1]) / (N_INT_STEPS * system.time_step)
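# COM_v is a forward finite difference: the COM displacement between
# consecutive samples divided by the elapsed simulation time
# (N_INT_STEPS integration steps of length system.time_step).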
# calculate the electrophoretic mobility mu = v/E
##################################
mu = np.average(np.linalg.norm(COM_v, axis=1)) / E_FIELD
logging.info("electrophoretic mobility: {}".format(mu))
# calculate the persistence length...
#############################################################
# ...first python analysis
total_sampling_positions = []
total_cos_thetas = []
for positions in pos:
bond_vectors = positions[1:, :] - positions[:-1, :]
bond_lengths = np.linalg.norm(bond_vectors, axis=1)
normed_bond_vectors = bond_vectors / bond_lengths[:, np.newaxis]
# positions at which the angles between bonds are actually measured
sampling_positions = np.insert(np.cumsum(bond_lengths)[:-1], 0, 0.0)
cos_thetas = np.zeros_like(sampling_positions)
for i in range(len(normed_bond_vectors)):
cos_thetas[i] = np.dot(normed_bond_vectors[0], normed_bond_vectors[i])
total_sampling_positions.append(sampling_positions)
total_cos_thetas.append(cos_thetas)
sampling_positions = np.average(np.array(total_sampling_positions), axis=0)
cos_thetas = np.average(np.array(total_cos_thetas), axis=0)
def exponential(x, lp):
return np.exp(-x / lp)
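# Worm-like chain model: bond-angle correlations decay as
# <cos(theta(s))> = exp(-s / l_p), with s the contour distance along the
# polymer and l_p the persistence length extracted by the fits below.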
opt, _ = scipy.optimize.curve_fit(exponential, sampling_positions, cos_thetas)
persistence_length = opt[0]
logging.info("persistence length (python analysis): {}".format(
persistence_length))
# ...second by using observables
def persistence_length_obs(
acc_bond_length, acc_persistence_angles, exponential):
bond_lengths_obs = np.array(acc_bond_length.mean())
sampling_positions_obs = np.insert(
np.cumsum(bond_lengths_obs)[:-1], 0, 0.0)
cos_thetas_obs = np.array(acc_persistence_angles.mean())
cos_thetas_obs = np.insert(cos_thetas_obs, 0, 1.0)
opt_obs, _ = scipy.optimize.curve_fit(
exponential, sampling_positions_obs, cos_thetas_obs)
return sampling_positions_obs, cos_thetas_obs, opt_obs[0]
sampling_positions_obs, cos_thetas_obs, persistence_length_obs = persistence_length_obs(
acc_bond_length, acc_persistence_angles, exponential)
logging.info("persistence length (observables): {}".format(
persistence_length_obs))
# plot the results
#############################################################
fig, axs = plt.subplots(3)
axs[0].plot(COM[:, 0], label="COM pos in x-direction")
axs[0].plot(COM[:, 1], label="COM pos in y-direction")
axs[0].plot(COM[:, 2], label="COM pos in z-direction")
axs[0].legend()
axs[0].set_xlabel("time step")
axs[0].set_ylabel("r")
axs[1].plot(COM_v[:, 0], label="COM v in x-direction")
axs[1].plot(COM_v[:, 1], label="COM v in y-direction")
axs[1].plot(COM_v[:, 2], label="COM v in z-direction")
axs[1].legend()
axs[1].set_xlabel("time step")
axs[1].set_ylabel("v")
axs[2].plot(sampling_positions, cos_thetas, 'o',
label="python analysis raw data")
axs[2].plot(sampling_positions_obs, cos_thetas_obs, 'o',
label="observable analysis raw data")
axs[2].plot(sampling_positions, exponential(sampling_positions,
opt[0]), label="exponential fit with python analysis")
axs[2].plot(sampling_positions_obs, exponential(sampling_positions_obs,
persistence_length_obs), label="exponential fit with observable data")
axs[2].legend()
axs[2].set_xlabel("distance along polymer")
axs[2].set_ylabel(r"$\langle \cos(\theta) \rangle$")
plt.show()
|
fweik/espresso
|
samples/electrophoresis.py
|
Python
|
gpl-3.0
| 9,523
|
[
"ESPResSo"
] |
5fb82f4d9cb5f9be0df809ab0e163bac4d4a0eec07650d77303c75ab3dc11658
|
# $HeadURL: $
''' AccountingCacheCommand
The AccountingCacheCommand class is a command module that collects command
classes to store accounting results in the accounting cache.
'''
from datetime import datetime, timedelta
from DIRAC import S_OK, S_ERROR
from DIRAC.AccountingSystem.Client.ReportsClient import ReportsClient
from DIRAC.ConfigurationSystem.Client.Helpers import Resources
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.ResourceStatusSystem.Command.Command import Command
#from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient
from DIRAC.ResourceStatusSystem.Utilities import CSHelpers
__RCSID__ = '$Id: $'
################################################################################
################################################################################
#class TransferQualityByDestSplittedCommand( Command ):
#
# def __init__( self, args = None, clients = None ):
#
# super( TransferQualityByDestSplittedCommand, self ).__init__( args, clients )
#
## if 'ResourceStatusClient' in self.apis:
## self.rsClient = self.apis[ 'ResourceStatusClient' ]
## else:
## self.rsClient = ResourceStatusClient()
#
# if 'ReportsClient' in self.apis:
# self.rClient = self.apis[ 'ReportsClient' ]
# else:
# self.rClient = ReportsClient()
#
# if 'ReportGenerator' in self.apis:
# self.rgClient = self.apis[ 'ReportGenerator' ]
# else:
# self.rgClient = RPCClient( 'Accounting/ReportGenerator' )
#
# self.rClient.rpcClient = self.rgClient
#
# def doCommand( self ):
# """
# Returns transfer quality using the DIRAC accounting system for every SE
# for the last self.args[0] hours
#
# :params:
# :attr:`sources`: list of source sites (when not given, take every site)
#
# :attr:`SEs`: list of storage elements (when not given, take every SE)
#
# :returns:
#
# """
#
# if not 'hours' in self.args:
# return S_ERROR( 'Number of hours not specified' )
# hours = self.args[ 'hours' ]
#
# sites = None
# if 'sites' in self.args:
# sites = self.args[ 'sites' ]
# if sites is None:
##FIXME: pointing to the CSHelper instead
## meta = { 'columns' : 'SiteName' }
## sources = self.rsClient.getSite( meta = meta )
## if not sources[ 'OK' ]:
## return sources
## sources = [ s[0] for s in sources[ 'Value' ] ]
# sites = CSHelpers.getSites()
# if not sites['OK']:
# return sites
#
# sites = sites[ 'Value' ]
# #sites = [ site[ 0 ] for site in sites[ 'Value' ] ]
#
# ses = None
# if 'ses' in self.args:
# ses = self.args[ 'ses' ]
# if ses is None:
##FIXME: pointing to the CSHelper instead
## meta = { 'columns' : 'StorageElementName' }
## ses = self.rsClient.getStorageElement( meta = meta )
## if not ses[ 'OK' ]:
## return ses
## ses = [ se[0] for se in ses[ 'Value' ] ]
# ses = CSHelpers.getStorageElements()
# if not ses['OK']:
# return ses
#
# ses = ses[ 'Value' ]
# #ses = [ se[ 0 ] for se in ses[ 'Value' ] ]
## if sources is None:
## meta = { 'columns' : 'SiteName' }
## sources = self.rsClient.getSite( meta = meta )
## if not sources[ 'OK' ]:
## return sources
## sources = [ s[0] for s in sources[ 'Value' ] ]
#
# if not sites + ses:
# return S_ERROR( 'Sites + SEs is empty' )
#
# toD = datetime.utcnow()
# fromD = toD - timedelta( hours = hours )
#
# qualityAll = self.rClient.getReport( 'DataOperation', 'Quality', fromD, toD,
# { 'OperationType' : 'putAndRegister',
# 'Source' : sites + ses,
# 'Destination' : sites + ses
# }, 'Destination' )
#
# if not qualityAll[ 'OK' ]:
# return qualityAll
# qualityAll = qualityAll[ 'Value' ]
#
# if not 'data' in qualityAll:
# return S_ERROR( 'Missing data key' )
# if not 'granularity' in qualityAll:
# return S_ERROR( 'Missing granularity key' )
#
# singlePlots = {}
# for se, value in qualityAll[ 'data' ].items():
# plot = {}
# plot[ 'data' ] = { se: value }
# plot[ 'granularity' ] = qualityAll[ 'granularity' ]
# singlePlots[ se ] = plot
#
# return S_OK( singlePlots )
#
#################################################################################
#################################################################################
#
#class TransferQualityByDestSplittedSiteCommand( Command ):
#
# def __init__( self, args = None, clients = None ):
#
# super( TransferQualityByDestSplittedSiteCommand, self ).__init__( args, clients )
#
# if 'ResourceStatusClient' in self.apis:
# self.rsClient = self.apis[ 'ResourceStatusClient' ]
# else:
# self.rsClient = ResourceStatusClient()
#
# if 'ReportsClient' in self.apis:
# self.rClient = self.apis[ 'ReportsClient' ]
# else:
# self.rClient = ReportsClient()
#
# if 'ReportGenerator' in self.apis:
# self.rgClient = self.apis[ 'ReportGenerator' ]
# else:
# self.rgClient = RPCClient( 'Accounting/ReportGenerator' )
#
# self.rClient.rpcClient = self.rgClient
#
# def doCommand( self ):
# """
# Returns transfer quality using the DIRAC accounting system for every SE
# of a single site for the last self.args[0] hours
#
# :params:
# :attr:`sources`: list of source sites (when not given, take every site)
#
# :attr:`SEs`: list of storage elements (when not given, take every SE)
#
# :returns:
#
# """
#
# if not 'hours' in self.args:
# return S_ERROR( 'Number of hours not specified' )
# hours = self.args[ 'hours' ]
#
# sites = None
# if 'sites' in self.args:
# sites = self.args[ 'sites' ]
# if sites is None:
##FIXME: pointing to the CSHelper instead
## sources = self.rsClient.getSite( meta = {'columns': 'SiteName'} )
## if not sources[ 'OK' ]:
## return sources
## sources = [ si[0] for si in sources[ 'Value' ] ]
# sites = CSHelpers.getSites()
# if not sites['OK']:
# return sites
# sites = sites[ 'Value' ]
#
# ses = None
# if 'ses' in self.args:
# ses = self.args[ 'ses' ]
# if ses is None:
##FIXME: pointing to the CSHelper instead
## meta = { 'columns' : 'StorageElementName' }
## ses = self.rsClient.getStorageElement( meta = meta )
## if not ses[ 'OK' ]:
## return ses
## ses = [ se[0] for se in ses[ 'Value' ] ]
# ses = CSHelpers.getStorageElements()
# if not ses['OK']:
# return ses
#
# ses = ses[ 'Value' ]
#
# if not sites + ses:
# return S_ERROR( 'Sites + SEs is empty' )
#
# return S_ERROR( 'This guy is buggy, missing method on rsClient' )
#
# fromD = datetime.utcnow() - timedelta( hours = hours )
# toD = datetime.utcnow()
#
# qualityAll = self.rClient.getReport( 'DataOperation', 'Quality', fromD, toD,
# { 'OperationType' : 'putAndRegister',
# 'Source' : sites + ses,
# 'Destination' : sites + ses
# }, 'Destination' )
# if not qualityAll[ 'OK' ]:
# return qualityAll
# qualityAll = qualityAll[ 'Value' ]
#
# if not 'data' in qualityAll:
# return S_ERROR( 'Missing data key' )
# listOfDest = qualityAll[ 'data' ].keys()
#
# if not 'granularity' in qualityAll:
# return S_ERROR( 'Missing granularity key' )
# plotGran = qualityAll[ 'granularity' ]
#
# storSitesWeb = self.rsClient.getMonitoredsStatusWeb( 'StorageElement',
# { 'StorageElementName': listOfDest }, 0, 300 )
# if not storSitesWeb[ 'OK' ]:
# return storSitesWeb
# storSitesWeb = storSitesWeb[ 'Value' ]
#
# if not 'Records' in storSitesWeb:
# return S_ERROR( 'Missing Records key' )
# storSitesWeb = storSitesWeb[ 'Records' ]
#
# SESiteMapping = {}
# siteSEMapping = {}
#
# #FIXME: this is very likely going to explode sooner or later...
# for r in storSitesWeb:
# sites = r[ 2 ].split( ' ' )[ :-1 ]
# SESiteMapping[ r[ 0 ] ] = sites
#
# for se in SESiteMapping.keys():
# for site in SESiteMapping[ se ]:
# try:
# l = siteSEMapping[ site ]
# l.append( se )
# siteSEMapping[ site ] = l
# except KeyError:
# siteSEMapping[ site ] = [ se ]
#
# singlePlots = {}
#
# #FIXME: refactor it
# for site in siteSEMapping.keys():
# plot = {}
# plot[ 'data' ] = {}
# for SE in siteSEMapping[site]:
# plot[ 'data' ][ se ] = qualityAll[ 'data' ][ se ]
# plot[ 'granularity' ] = plotGran
#
# singlePlots[ site ] = plot
#
# return S_OK( singlePlots )
################################################################################
################################################################################
#class TransferQualityBySourceSplittedSite_Command( Command ):
#
# __APIs__ = [ 'ResourceStatusClient', 'ReportsClient', 'ReportGenerator' ]
#
# def doCommand( self, sources = None, SEs = None ):
# """
# Returns transfer quality using the DIRAC accounting system for every SE
# of a single site and for the site itself for the last self.args[0] hours
#
# :params:
# :attr:`dests`: list of destinations (when not given, take everything)
#
# :attr:`SEs`: list of storage elements (when not given, take every SE)
#
# :returns:
#
# """
#
# super( TransferQualityBySourceSplittedSite_Command, self ).doCommand()
# self.apis = initAPIs( self.__APIs__, self.apis )
#
# if SEs is None:
# SEs = self.apis[ 'ResourceStatusClient' ].getStorageElement( columns = 'StorageElementName' )
#    if not SEs[ 'OK' ]:
#      return {}
#    else:
#      SEs = SEs[ 'Value' ]
#
# if sources is None:
# sources = self.apis[ 'ResourceStatusClient' ].getSitesList()
#    if not sources[ 'OK' ]:
#      return {}
#    else:
#      sources = sources[ 'Value' ]
#
# self.apis[ 'ReportsClient' ].rpcClient = self.apis[ 'ReportGenerator' ]
#
# fromD = datetime.utcnow()-timedelta( hours = self.args[ 0 ] )
# toD = datetime.utcnow()
#
# try:
# qualityAll = self.apis[ 'ReportsClient' ].getReport( 'DataOperation', 'Quality', fromD, toD,
# { 'OperationType':'putAndRegister',
# 'Source': sources + SEs, 'Destination': sources + SEs },
# 'Destination')
#      if not qualityAll[ 'OK' ]:
#        return {}
#      else:
#        qualityAll = qualityAll[ 'Value' ]
#
# except:
#      gLogger.exception( "Exception when calling TransferQualityBySourceSplittedSite_Command" )
# return {}
#
# listOfDest = qualityAll[ 'data' ].keys()
#
# try:
# storSitesWeb = self.apis[ 'ResourceStatusClient' ].getMonitoredsStatusWeb( 'StorageElement', { 'StorageElementName': listOfDest }, 0, 300)
# except:
#      gLogger.exception( "Exception when calling TransferQualityBySourceSplittedSite_Command" )
# return {}
#
#    if not storSitesWeb[ 'OK' ]:
#      return {}
#    else:
#      storSitesWeb = storSitesWeb[ 'Value' ][ 'Records' ]
#
# SESiteMapping = {}
# siteSEMapping = {}
#
# for r in storSitesWeb:
# sites = r[ 2 ].split( ' ' )[ :-1 ]
# SESiteMapping[ r[ 0 ] ] = sites
#
# for SE in SESiteMapping.keys():
# for site in SESiteMapping[ SE ]:
# try:
# l = siteSEMapping[ site ]
# l.append( SE )
# siteSEMapping[ site ] = l
# except KeyError:
# siteSEMapping[ site ] = [ SE ]
#
#
# plotGran = qualityAll[ 'granularity' ]
#
# singlePlots = {}
#
# for site in siteSEMapping.keys():
# plot = {}
# plot[ 'data' ] = {}
# for SE in siteSEMapping[ site ]:
# plot[ 'data' ][ SE ] = qualityAll[ 'data' ][ SE ]
# plot[ 'granularity' ] = plotGran
#
# singlePlots[ site ] = plot
#
# resToReturn = { 'DataOperation': singlePlots }
#
# return resToReturn
#
# doCommand.__doc__ = Command.doCommand.__doc__ + doCommand.__doc__
################################################################################
################################################################################
#class FailedTransfersBySourceSplittedCommand( Command ):
#
# def __init__( self, args = None, clients = None ):
#
# super( FailedTransfersBySourceSplittedCommand, self ).__init__( args, clients )
#
# if 'ReportsClient' in self.apis:
# self.rClient = self.apis[ 'ReportsClient' ]
# else:
# self.rClient = ReportsClient()
#
# if 'ReportGenerator' in self.apis:
# self.rgClient = self.apis[ 'ReportGenerator' ]
# else:
# self.rgClient = RPCClient( 'Accounting/ReportGenerator' )
#
# self.rClient.rpcClient = self.rgClient
#
# def doCommand( self):
# """
# Returns failed transfer using the DIRAC accounting system for every SE
# for the last self.args[0] hours
#
# :params:
# :attr:`sources`: list of source sites (when not given, take every site)
#
# :attr:`SEs`: list of storage elements (when not given, take every SE)
#
# :returns:
#
# """
#
# if not 'hours' in self.args:
# return S_ERROR( 'Number of hours not specified' )
# hours = self.args[ 'hours' ]
#
# sites = None
# if 'sites' in self.args:
# sites = self.args[ 'sites' ]
# if sites is None:
##FIXME: pointing to the CSHelper instead
## sources = self.rsClient.getSite( meta = {'columns': 'SiteName'} )
## if not sources[ 'OK' ]:
## return sources
## sources = [ si[0] for si in sources[ 'Value' ] ]
# sites = CSHelpers.getSites()
# if not sites['OK']:
# return sites
# sites = sites[ 'Value' ]
#
# ses = None
# if 'ses' in self.args:
# ses = self.args[ 'ses' ]
# if ses is None:
##FIXME: pointing to the CSHelper instead
## meta = { 'columns' : 'StorageElementName' }
## ses = self.rsClient.getStorageElement( meta = meta )
## if not ses[ 'OK' ]:
## return ses
## ses = [ se[0] for se in ses[ 'Value' ] ]
# ses = CSHelpers.getStorageElements()
# if not ses['OK']:
# return ses
#
# ses = ses[ 'Value' ]
#
# if not sites + ses:
# return S_ERROR( 'Sites + SEs is empty' )
#
# fromD = datetime.utcnow()-timedelta( hours = hours )
# toD = datetime.utcnow()
#
# failedTransfers = self.rClient.getReport( 'DataOperation', 'FailedTransfers', fromD, toD,
# { 'OperationType' : 'putAndRegister',
# 'Source' : sites + ses,
# 'Destination' : sites + ses,
# 'FinalStatus' : [ 'Failed' ]
# }, 'Source' )
# if not failedTransfers[ 'OK' ]:
# return failedTransfers
# failedTransfers = failedTransfers[ 'Value' ]
#
# if not 'data' in failedTransfers:
# return S_ERROR( 'Missing data key' )
# if not 'granularity' in failedTransfers:
# return S_ERROR( 'Missing granularity key' )
#
# singlePlots = {}
#
# for source, value in failedTransfers[ 'data' ].items():
# if source in sites:
# plot = {}
# plot[ 'data' ] = { source: value }
# plot[ 'granularity' ] = failedTransfers[ 'granularity' ]
# singlePlots[ source ] = plot
#
# return S_OK( singlePlots )
################################################################################
################################################################################
class SuccessfullJobsBySiteSplittedCommand( Command ):
def __init__( self, args = None, clients = None ):
super( SuccessfullJobsBySiteSplittedCommand, self ).__init__( args, clients )
if 'ReportsClient' in self.apis:
self.rClient = self.apis[ 'ReportsClient' ]
else:
self.rClient = ReportsClient()
if 'ReportGenerator' in self.apis:
self.rgClient = self.apis[ 'ReportGenerator' ]
else:
self.rgClient = RPCClient( 'Accounting/ReportGenerator' )
self.rClient.rpcClient = self.rgClient
def doCommand( self ):
"""
    Returns successful jobs using the DIRAC accounting system for every site
for the last self.args[0] hours
:params:
:attr:`sites`: list of sites (when not given, take every site)
:returns:
"""
if not 'hours' in self.args:
return S_ERROR( 'Number of hours not specified' )
hours = self.args[ 'hours' ]
sites = None
if 'sites' in self.args:
sites = self.args[ 'sites' ]
if sites is None:
#FIXME: pointing to the CSHelper instead
# sources = self.rsClient.getSite( meta = {'columns': 'SiteName'} )
# if not sources[ 'OK' ]:
# return sources
# sources = [ si[0] for si in sources[ 'Value' ] ]
sites = Resources.getSites()
if not sites['OK']:
return sites
sites = sites[ 'Value' ]
if not sites:
return S_ERROR( 'Sites is empty' )
fromD = datetime.utcnow()-timedelta( hours = hours )
toD = datetime.utcnow()
successfulJobs = self.rClient.getReport( 'Job', 'NumberOfJobs', fromD, toD,
{ 'FinalStatus' : [ 'Done' ],
'Site' : sites
}, 'Site' )
if not successfulJobs[ 'OK' ]:
return successfulJobs
successfulJobs = successfulJobs[ 'Value' ]
if not 'data' in successfulJobs:
return S_ERROR( 'Missing data key' )
if not 'granularity' in successfulJobs:
return S_ERROR( 'Missing granularity key' )
singlePlots = {}
for site, value in successfulJobs[ 'data' ].items():
if site in sites:
plot = {}
plot[ 'data' ] = { site: value }
plot[ 'granularity' ] = successfulJobs[ 'granularity' ]
singlePlots[ site ] = plot
return S_OK( singlePlots )
################################################################################
################################################################################
class FailedJobsBySiteSplittedCommand( Command ):
def __init__( self, args = None, clients = None ):
super( FailedJobsBySiteSplittedCommand, self ).__init__( args, clients )
if 'ReportsClient' in self.apis:
self.rClient = self.apis[ 'ReportsClient' ]
else:
self.rClient = ReportsClient()
if 'ReportGenerator' in self.apis:
self.rgClient = self.apis[ 'ReportGenerator' ]
else:
self.rgClient = RPCClient( 'Accounting/ReportGenerator' )
self.rClient.rpcClient = self.rgClient
def doCommand( self ):
"""
Returns failed jobs using the DIRAC accounting system for every site
for the last self.args[0] hours
:params:
:attr:`sites`: list of sites (when not given, take every site)
:returns:
"""
if not 'hours' in self.args:
return S_ERROR( 'Number of hours not specified' )
hours = self.args[ 'hours' ]
sites = None
if 'sites' in self.args:
sites = self.args[ 'sites' ]
if sites is None:
#FIXME: pointing to the CSHelper instead
# sources = self.rsClient.getSite( meta = {'columns': 'SiteName'} )
# if not sources[ 'OK' ]:
# return sources
# sources = [ si[0] for si in sources[ 'Value' ] ]
sites = Resources.getSites()
if not sites[ 'OK' ]:
return sites
sites = sites[ 'Value' ]
if not sites:
return S_ERROR( 'Sites is empty' )
fromD = datetime.utcnow() - timedelta( hours = hours )
toD = datetime.utcnow()
failedJobs = self.rClient.getReport( 'Job', 'NumberOfJobs', fromD, toD,
{ 'FinalStatus' : [ 'Failed' ],
'Site' : sites
}, 'Site' )
if not failedJobs[ 'OK' ]:
return failedJobs
failedJobs = failedJobs[ 'Value' ]
if not 'data' in failedJobs:
return S_ERROR( 'Missing data key' )
if not 'granularity' in failedJobs:
return S_ERROR( 'Missing granularity key' )
singlePlots = {}
for site, value in failedJobs[ 'data' ].items():
if site in sites:
plot = {}
plot[ 'data' ] = { site: value }
plot[ 'granularity' ] = failedJobs[ 'granularity' ]
singlePlots[ site ] = plot
return S_OK( singlePlots )
################################################################################
################################################################################
class SuccessfullPilotsBySiteSplittedCommand( Command ):
def __init__( self, args = None, clients = None ):
super( SuccessfullPilotsBySiteSplittedCommand, self ).__init__( args, clients )
if 'ReportsClient' in self.apis:
self.rClient = self.apis[ 'ReportsClient' ]
else:
self.rClient = ReportsClient()
if 'ReportGenerator' in self.apis:
self.rgClient = self.apis[ 'ReportGenerator' ]
else:
self.rgClient = RPCClient( 'Accounting/ReportGenerator' )
self.rClient.rpcClient = self.rgClient
def doCommand( self ):
"""
    Returns successful pilots using the DIRAC accounting system for every site
for the last self.args[0] hours
:params:
:attr:`sites`: list of sites (when not given, take every site)
:returns:
"""
if not 'hours' in self.args:
return S_ERROR( 'Number of hours not specified' )
hours = self.args[ 'hours' ]
sites = None
if 'sites' in self.args:
sites = self.args[ 'sites' ]
if sites is None:
#FIXME: pointing to the CSHelper instead
# sources = self.rsClient.getSite( meta = {'columns': 'SiteName'} )
# if not sources[ 'OK' ]:
# return sources
# sources = [ si[0] for si in sources[ 'Value' ] ]
sites = Resources.getSites()
if not sites[ 'OK' ]:
return sites
sites = sites[ 'Value' ]
if not sites:
return S_ERROR( 'Sites is empty' )
fromD = datetime.utcnow()-timedelta( hours = hours )
toD = datetime.utcnow()
    successfulPilots = self.rClient.getReport( 'Pilot', 'NumberOfPilots', fromD, toD,
                                               { 'GridStatus' : [ 'Done' ],
                                                 'Site' : sites
                                               }, 'Site' )
    if not successfulPilots[ 'OK' ]:
      return successfulPilots
    successfulPilots = successfulPilots[ 'Value' ]
    if not 'data' in successfulPilots:
      return S_ERROR( 'Missing data key' )
    if not 'granularity' in successfulPilots:
      return S_ERROR( 'Missing granularity key' )
    singlePlots = {}
    for site, value in successfulPilots[ 'data' ].items():
      if site in sites:
        plot = {}
        plot[ 'data' ] = { site: value }
        plot[ 'granularity' ] = successfulPilots[ 'granularity' ]
        singlePlots[ site ] = plot
return S_OK( singlePlots )
################################################################################
################################################################################
class FailedPilotsBySiteSplittedCommand( Command ):
def __init__( self, args = None, clients = None ):
super( FailedPilotsBySiteSplittedCommand, self ).__init__( args, clients )
if 'ReportsClient' in self.apis:
self.rClient = self.apis[ 'ReportsClient' ]
else:
self.rClient = ReportsClient()
if 'ReportGenerator' in self.apis:
self.rgClient = self.apis[ 'ReportGenerator' ]
else:
self.rgClient = RPCClient( 'Accounting/ReportGenerator' )
self.rClient.rpcClient = self.rgClient
def doCommand( self ):
"""
    Returns failed pilots using the DIRAC accounting system for every site
for the last self.args[0] hours
:params:
:attr:`sites`: list of sites (when not given, take every site)
:returns:
"""
if not 'hours' in self.args:
return S_ERROR( 'Number of hours not specified' )
hours = self.args[ 'hours' ]
sites = None
if 'sites' in self.args:
sites = self.args[ 'sites' ]
if sites is None:
#FIXME: pointing to the CSHelper instead
# sources = self.rsClient.getSite( meta = {'columns': 'SiteName'} )
# if not sources[ 'OK' ]:
# return sources
# sources = [ si[0] for si in sources[ 'Value' ] ]
sites = Resources.getSites()
if not sites[ 'OK' ]:
return sites
sites = sites[ 'Value' ]
if not sites:
return S_ERROR( 'Sites is empty' )
fromD = datetime.utcnow() - timedelta( hours = hours )
toD = datetime.utcnow()
failedPilots = self.rClient.getReport( 'Pilot', 'NumberOfPilots', fromD, toD,
{ 'GridStatus' : [ 'Aborted' ],
'Site' : sites
}, 'Site' )
if not failedPilots[ 'OK' ]:
return failedPilots
failedPilots = failedPilots[ 'Value' ]
if not 'data' in failedPilots:
return S_ERROR( 'Missing data key' )
if not 'granularity' in failedPilots:
return S_ERROR( 'Missing granularity key' )
singlePlots = {}
for site, value in failedPilots[ 'data' ].items():
if site in sites:
plot = {}
plot[ 'data' ] = { site: value }
plot[ 'granularity' ] = failedPilots[ 'granularity' ]
singlePlots[ site ] = plot
return S_OK( singlePlots )
################################################################################
################################################################################
class SuccessfullPilotsByCESplittedCommand( Command ):
def __init__( self, args = None, clients = None ):
super( SuccessfullPilotsByCESplittedCommand, self ).__init__( args, clients )
self.resources = Resources.Resources()
if 'ReportsClient' in self.apis:
self.rClient = self.apis[ 'ReportsClient' ]
else:
self.rClient = ReportsClient()
if 'ReportGenerator' in self.apis:
self.rgClient = self.apis[ 'ReportGenerator' ]
else:
self.rgClient = RPCClient( 'Accounting/ReportGenerator' )
self.rClient.rpcClient = self.rgClient
def doCommand( self ):
"""
    Returns successful pilots using the DIRAC accounting system for every CE
for the last self.args[0] hours
:params:
:attr:`CEs`: list of CEs (when not given, take every CE)
:returns:
"""
if not 'hours' in self.args:
return S_ERROR( 'Number of hours not specified' )
hours = self.args[ 'hours' ]
ces = None
if 'ces' in self.args:
ces = self.args[ 'ces' ]
if ces is None:
#FIXME: pointing to the CSHelper instead
# meta = {'columns':'ResourceName'}
# CEs = self.rsClient.getResource( resourceType = [ 'CE','CREAMCE' ], meta = meta )
# if not CEs['OK']:
# return CEs
# CEs = [ ce[0] for ce in CEs['Value'] ]
ces = self.resources.getEligibleResources( 'Computing' )
if not ces[ 'OK' ]:
return ces
ces = ces[ 'Value' ]
if not ces:
return S_ERROR( 'CEs is empty' )
fromD = datetime.utcnow() - timedelta( hours = hours )
toD = datetime.utcnow()
successfulPilots = self.rClient.getReport( 'Pilot', 'NumberOfPilots', fromD, toD,
{ 'GridStatus' : [ 'Done' ],
'GridCE' : ces
}, 'GridCE' )
if not successfulPilots[ 'OK' ]:
return successfulPilots
successfulPilots = successfulPilots[ 'Value' ]
if not 'data' in successfulPilots:
return S_ERROR( 'Missing data key' )
if not 'granularity' in successfulPilots:
return S_ERROR( 'Missing granularity key' )
singlePlots = {}
for ce, value in successfulPilots[ 'data' ].items():
if ce in ces:
plot = {}
plot[ 'data' ] = { ce : value }
plot[ 'granularity' ] = successfulPilots[ 'granularity' ]
singlePlots[ ce ] = plot
return S_OK( singlePlots )
################################################################################
################################################################################
class FailedPilotsByCESplittedCommand( Command ):
def __init__( self, args = None, clients = None ):
super( FailedPilotsByCESplittedCommand, self ).__init__( args, clients )
if 'ReportsClient' in self.apis:
self.rClient = self.apis[ 'ReportsClient' ]
else:
self.rClient = ReportsClient()
if 'ReportGenerator' in self.apis:
self.rgClient = self.apis[ 'ReportGenerator' ]
else:
self.rgClient = RPCClient( 'Accounting/ReportGenerator' )
self.rClient.rpcClient = self.rgClient
def doCommand( self ):
"""
Returns failed pilots using the DIRAC accounting system for every CE
for the last self.args[0] hours
:params:
:attr:`CEs`: list of CEs (when not given, take every CE)
:returns:
"""
if not 'hours' in self.args:
return S_ERROR( 'Number of hours not specified' )
hours = self.args[ 'hours' ]
ces = None
if 'ces' in self.args:
ces = self.args[ 'ces' ]
if ces is None:
#FIXME: pointing to the CSHelper instead
# meta = {'columns':'ResourceName'}
# CEs = self.rsClient.getResource( resourceType = [ 'CE','CREAMCE' ], meta = meta )
# if not CEs['OK']:
# return CEs
# CEs = [ ce[0] for ce in CEs['Value'] ]
ces = CSHelpers.getComputingElements()
if not ces[ 'OK' ]:
return ces
ces = ces[ 'Value' ]
if not ces:
return S_ERROR( 'CEs is empty' )
fromD = datetime.utcnow() - timedelta( hours = hours )
toD = datetime.utcnow()
failedPilots = self.rClient.getReport( 'Pilot', 'NumberOfPilots', fromD, toD,
{ 'GridStatus' : [ 'Aborted' ],
'GridCE' : ces
}, 'GridCE' )
if not failedPilots[ 'OK' ]:
return failedPilots
failedPilots = failedPilots[ 'Value' ]
if not 'data' in failedPilots:
return S_ERROR( 'Missing data key' )
if not 'granularity' in failedPilots:
return S_ERROR( 'Missing granularity key' )
singlePlots = {}
for ce, value in failedPilots[ 'data' ].items():
if ce in ces:
plot = {}
plot[ 'data' ] = { ce : value }
plot[ 'granularity' ] = failedPilots[ 'granularity' ]
singlePlots[ ce ] = plot
return S_OK( singlePlots )
################################################################################
################################################################################
class RunningJobsBySiteSplittedCommand( Command ):
def __init__( self, args = None, clients = None ):
super( RunningJobsBySiteSplittedCommand, self ).__init__( args, clients )
if 'ReportsClient' in self.apis:
self.rClient = self.apis[ 'ReportsClient' ]
else:
self.rClient = ReportsClient()
if 'ReportGenerator' in self.apis:
self.rgClient = self.apis[ 'ReportGenerator' ]
else:
self.rgClient = RPCClient( 'Accounting/ReportGenerator' )
self.rClient.rpcClient = self.rgClient
def doCommand( self ):
"""
    Returns running and previously run jobs, querying the WMSHistory
for the last self.args[0] hours
:params:
    :attr:`sites`: list of sites (when not given, take every site)
:returns:
"""
if not 'hours' in self.args:
return S_ERROR( 'Number of hours not specified' )
hours = self.args[ 'hours' ]
sites = None
if 'sites' in self.args:
sites = self.args[ 'sites' ]
if sites is None:
#FIXME: pointing to the CSHelper instead
# sources = self.rsClient.getSite( meta = {'columns': 'SiteName'} )
# if not sources[ 'OK' ]:
# return sources
# sources = [ si[0] for si in sources[ 'Value' ] ]
sites = Resources.getSites()
if not sites[ 'OK' ]:
return sites
sites = sites[ 'Value' ]
if not sites:
return S_ERROR( 'Sites is empty' )
fromD = datetime.utcnow() - timedelta( hours = hours )
toD = datetime.utcnow()
runJobs = self.rClient.getReport( 'WMSHistory', 'NumberOfJobs', fromD, toD,
{}, 'Site')
if not runJobs[ 'OK' ]:
return runJobs
runJobs = runJobs[ 'Value' ]
if not 'data' in runJobs:
return S_ERROR( 'Missing data key' )
if not 'granularity' in runJobs:
return S_ERROR( 'Missing granularity key' )
singlePlots = {}
for site, value in runJobs[ 'data' ].items():
if site in sites:
plot = {}
plot[ 'data' ] = { site: value }
plot[ 'granularity' ] = runJobs[ 'granularity' ]
singlePlots[ site ] = plot
return S_OK( singlePlots )
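# Hypothetical usage sketch (not part of the original module): every active
# command above follows the same shape -- construct it with an 'hours' entry
# in args (plus an optional explicit site/CE list), call doCommand(), and
# unpack one plot per entity from the returned S_OK value.  Illustrative only;
# a live Accounting/ReportGenerator service is needed to actually run it.
#
#   command = FailedJobsBySiteSplittedCommand( args = { 'hours' : 24 } )
#   result = command.doCommand()
#   if result[ 'OK' ]:
#     for site, plot in result[ 'Value' ].items():
#       print site, plot[ 'granularity' ]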
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
|
Sbalbp/DIRAC
|
ResourceStatusSystem/Command/AccountingCacheCommand.py
|
Python
|
gpl-3.0
| 34,774
|
[
"DIRAC"
] |
c0ed4ac83473eb104ceccb4d5bca45b950d9473a6f45143d4472d45e029d9a00
|
import numpy as np
from gpaw.io.tar import Writer, Reader
class IncrementalWriter(Writer):
_iterkey = 'niter'
_partkey = 'part'
_iterpattern = '/%06d.part'
def __init__(self, name):
Writer.__init__(self, name)
self.dims[self._iterkey] = 0
self.dims[self._partkey] = 1
self.partitions = {}
self.xml3 = []
def partition(self, name, shape, array=None, dtype=None, units=None):
if array is not None:
array = np.asarray(array)
self.dtype, type, itemsize = self.get_data_type(array, dtype)
assert self._partkey not in shape
shape = (self._partkey,) + shape
if name not in self.partitions.keys():
self.xml3 += [' <partition name="%s" type="%s">' % (name, type)]
self.xml3 += [' <dimension length="%s" name="%s"/>' %
(self.dims[dim], dim)
for dim in shape]
self.xml3 += [' </partition>']
self.partitions[name] = shape
self.shape = [self.dims[dim] for dim in shape]
else:
assert self.partitions[name] == shape
size = itemsize * np.product([self.dims[dim] for dim in shape])
name += self._iterpattern % self.dims[self._iterkey]
self.write_header(name, size)
if array is not None:
self.fill(array)
def next(self):
self.dims[self._iterkey] += self.dims[self._partkey]
def close(self):
partdim = ' <dimension length="%s" name="%s"/>' % \
(self.dims[self._partkey], self._partkey)
iterdim = ' <dimension length="%s" name="%s"/>' % \
(self.dims[self._iterkey], self._iterkey)
while partdim in self.xml3:
i = self.xml3.index(partdim)
self.xml3[i] = iterdim
self.xml2 += self.xml3
self.xml3 = []
Writer.close(self)
# -------------------------------------------------------------------
class _FakeFileObject(object):
def __init__(self, fileparts, partsize):
self.fileparts = fileparts
self.partsize = partsize
self.fileobj = None
for fileobj in self.fileparts:
assert fileobj.size == partsize
self.seek(0)
def seek(self, pos, whence=0):
self.part, partpos = divmod(pos, self.partsize)
self.fileobj = self.fileparts[self.part]
self.fileobj.seek(partpos, whence)
def tell(self):
return self.fileobj.tell() + self.part*self.partsize
    def read(self, size=None):
        if size is None:
            # default to reading everything that remains
            size = len(self.fileparts) * self.partsize - self.tell()
        start = self.tell()
        self.part, partpos = divmod(start, self.partsize)
        buf = str()
        n = start
        # read some of initial part
        partrem = min(self.partsize - partpos, size)
        buf += self.fileobj.read(partrem)
        n += partrem
        # read whole parts; count remaining bytes relative to the start offset
        while start + size - n > self.partsize:
            self.seek(n)
            buf += self.fileobj.read(self.partsize)
            n += self.partsize
        # read some of final part
        self.seek(n)
        rem = start + size - n
        buf += self.fileobj.read(rem)
        return buf
def close(self):
for fileobj in self.fileparts:
fileobj.close()
class IncrementalReader(Reader):
_iterkey = 'niter'
_partkey = 'part'
_iterpattern = '/%06d.part'
def __init__(self, name):
self.partitions = {}
Reader.__init__(self, name)
self.dims[self._partkey] = 1
def startElement(self, tag, attrs):
if tag == 'partition':
name = attrs['name']
assert name not in self.partitions.keys()
self.dtypes[name] = attrs['type']
self.shapes[name] = []
self.name = name
self.partitions[name] = tuple()
else:
if tag == 'dimension' and self.name in self.partitions.keys():
if attrs['name'] == self._iterkey:
self.partitions[self.name] += (self._partkey,)
else:
self.partitions[self.name] += (attrs['name'],)
Reader.startElement(self, tag, attrs)
def get_file_object(self, name, indices):
if name in self.partitions.keys():
# The first index is the partition iterable
if len(indices) == 0:
return self.get_partition_object(name)
i, indices = indices[0], indices[1:]
partshape = [self.dims[dim] for dim in self.partitions[name]]
name += self._iterpattern % i
self.shapes[name] = partshape[1:] #HACK
return Reader.get_file_object(self, name, indices)
def get_data_type(self, name):
if name not in self.dtypes.keys():
try:
name, partname = name.rsplit('/',1)
except ValueError:
raise KeyError(name)
assert name in self.partitions.keys()
return Reader.get_data_type(self, name)
def get_partition_object(self, name):
assert name in self.partitions.keys()
dtype, type, itemsize = self.get_data_type(name)
shape = self.shapes[name]
size = itemsize * np.prod(shape, dtype=int)
partshape = [self.dims[dim] for dim in self.partitions[name]]
partsize = itemsize * np.prod(partshape, dtype=int)
fileobjs = []
for i in range(self.dims[self._iterkey]):
fileobj = self.tar.extractfile(name + self._iterpattern % i)
fileobjs.append(fileobj)
return _FakeFileObject(fileobjs, partsize), shape, size, dtype
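# Hypothetical usage sketch (not in the original file): append one partition
# per iteration, then fetch one iteration back.  Dimension handling normally
# goes through Writer.dimension(); the names and sizes below are made up.
#
#   w = IncrementalWriter('data.gpw')
#   w.dims['nbands'] = 8
#   for it in range(3):
#       w.partition('eps_n', ('nbands',), array=np.zeros(8))
#       w.next()
#   w.close()
#
#   r = IncrementalReader('data.gpw')
#   fileobj, shape, size, dtype = r.get_partition_object('eps_n')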
|
robwarm/gpaw-symm
|
gpaw/io/tarext.py
|
Python
|
gpl-3.0
| 5,602
|
[
"GPAW"
] |
bf80a9db2d5f73b495af7f5f7602577d40bc1871a9172b3bcdd5d68fa81c5bef
|
print("Hello")
# Added imports: the decorators and functions below need numpy/scipy at module
# level, but this grab-bag file only imports them much further down.
import numpy as np
from scipy.interpolate import interp1d
from scipy.integrate import trapz
def downsample(w_m, f_m, w_TRES):
'''Given a model wavelength and flux (w_m, f_m) and the instrument wavelength (w_TRES), downsample the model to
exactly match the TRES wavelength bins. '''
spec_interp = interp1d(w_m, f_m, kind="linear")
@np.vectorize
def avg_bin(bin0, bin1):
mdl_ind = (w_m > bin0) & (w_m < bin1)
wave = np.empty((np.sum(mdl_ind) + 2,))
flux = np.empty((np.sum(mdl_ind) + 2,))
wave[0] = bin0
wave[-1] = bin1
flux[0] = spec_interp(bin0)
flux[-1] = spec_interp(bin1)
wave[1:-1] = w_m[mdl_ind]
flux[1:-1] = f_m[mdl_ind]
return trapz(flux, wave) / (bin1 - bin0)
#Determine the bin edges
edges = np.empty((len(w_TRES) + 1,))
difs = np.diff(w_TRES) / 2.
edges[1:-1] = w_TRES[:-1] + difs
edges[0] = w_TRES[0] - difs[0]
edges[-1] = w_TRES[-1] + difs[-1]
b0s = edges[:-1]
b1s = edges[1:]
samp = avg_bin(b0s, b1s)
return (samp)
def downsample2(w_m, f_m, w_TRES):
'''Given a model wavelength and flux (w_m, f_m) and the instrument wavelength (w_TRES), downsample the model to
exactly match the TRES wavelength bins. Try this without calling the interpolation routine.'''
@np.vectorize
def avg_bin(bin0, bin1):
mdl_ind = (w_m > bin0) & (w_m < bin1)
length = np.sum(mdl_ind) + 2
wave = np.empty((length,))
flux = np.empty((length,))
wave[0] = bin0
wave[-1] = bin1
wave[1:-1] = w_m[mdl_ind]
flux[1:-1] = f_m[mdl_ind]
flux[0] = flux[1]
flux[-1] = flux[-2]
return trapz(flux, wave) / (bin1 - bin0)
#Determine the bin edges
edges = np.empty((len(w_TRES) + 1,))
difs = np.diff(w_TRES) / 2.
edges[1:-1] = w_TRES[:-1] + difs
edges[0] = w_TRES[0] - difs[0]
edges[-1] = w_TRES[-1] + difs[-1]
b0s = edges[:-1]
b1s = edges[1:]
return avg_bin(b0s, b1s)
def downsample3(w_m, f_m, w_TRES):
'''Given a model wavelength and flux (w_m, f_m) and the instrument wavelength (w_TRES), downsample the model to
exactly match the TRES wavelength bins. Try this only by averaging.'''
#More time could be saved by splitting up the original array into averageable chunks.
@np.vectorize
def avg_bin(bin0, bin1):
return np.average(f_m[(w_m > bin0) & (w_m < bin1)])
#Determine the bin edges
edges = np.empty((len(w_TRES) + 1,))
difs = np.diff(w_TRES) / 2.
edges[1:-1] = w_TRES[:-1] + difs
edges[0] = w_TRES[0] - difs[0]
edges[-1] = w_TRES[-1] + difs[-1]
b0s = edges[:-1]
b1s = edges[1:]
return avg_bin(b0s, b1s)
def downsample4(w_m, f_m, w_TRES):
out_flux = np.zeros_like(w_TRES)
len_mod = len(w_m)
#Determine the bin edges
len_TRES = len(w_TRES)
edges = np.empty((len_TRES + 1,))
difs = np.diff(w_TRES) / 2.
edges[1:-1] = w_TRES[:-1] + difs
edges[0] = w_TRES[0] - difs[0]
edges[-1] = w_TRES[-1] + difs[-1]
i_start = np.argwhere((w_m > edges[0]))[0][0] #return the first starting index for the model wavelength array
edges_i = 1
for i in range(len(w_m)):
if w_m[i] > edges[edges_i]:
i_finish = i - 1
out_flux[edges_i - 1] = np.mean(f_m[i_start:i_finish])
edges_i += 1
i_start = i_finish
if edges_i > len_TRES:
break
return out_flux
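# Hypothetical sanity check for the four downsamplers above (not in the
# original): a constant spectrum must come back unchanged, since every
# variant reduces to an average of the model flux within each bin.
#
#   w_m = np.linspace(5000., 5010., 10000)
#   f_m = np.ones_like(w_m)
#   w_inst = np.linspace(5001., 5009., 50)
#   assert np.allclose(downsample(w_m, f_m, w_inst), 1.0)
#   assert np.allclose(downsample3(w_m, f_m, w_inst), 1.0)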
#Kept at module level so the same fluxes buffer is reused (overwritten) on each call
fluxes = np.empty((4, len(wave_grid)))
def flux_interpolator_mini(temp, logg):
'''Load flux in a memory-nice manner. lnprob will already check that we are within temp = 2300 - 12000 and logg =
0.0 - 6.0, so we do not need to check that here.'''
#Determine T plus and minus
#If the previous check by lnprob was correct, these should always have elements
#Determine logg plus and minus
i_Tm = np.argwhere(temp >= T_points)[-1][0]
Tm = T_points[i_Tm]
i_Tp = np.argwhere(temp < T_points)[0][0]
Tp = T_points[i_Tp]
i_lm = np.argwhere(logg >= logg_points)[-1][0]
lm = logg_points[i_lm]
i_lp = np.argwhere(logg < logg_points)[0][0]
lp = logg_points[i_lp]
indexes = [(i_Tm, i_lm), (i_Tm, i_lp), (i_Tp, i_lm), (i_Tp, i_lp)]
points = np.array([(Tm, lm), (Tm, lp), (Tp, lm), (Tp, lp)])
for i in range(4):
#Load spectra for these points
#print(indexes[i])
fluxes[i] = LIB[indexes[i]]
if np.isnan(fluxes).any():
#If outside the defined grid (demarcated in the hdf5 object by nan's) just return 0s
return zero_flux
#Interpolate spectra with LinearNDInterpolator
flux_intp = LinearNDInterpolator(points, fluxes)
new_flux = flux_intp(temp, logg)
return new_flux
def flux_interpolator():
#points = np.loadtxt("param_grid_GWOri.txt")
points = np.loadtxt("param_grid_interp_test.txt")
#TODO: make this dynamic, specify param_grid dynamically too
len_w = 716665
fluxes = np.empty((len(points), len_w))
for i in range(len(points)):
fluxes[i] = load_flux(points[i][0], points[i][1])
#flux_intp = NearestNDInterpolator(points, fluxes)
flux_intp = LinearNDInterpolator(points, fluxes, fill_value=1.)
del fluxes
print("Loaded flux_interpolator")
return flux_intp
#Originally from PHOENIX_tools
def create_grid_parallel_Z0(ncores):
'''create an hdf5 file of the PHOENIX grid. Go through each T point, if the corresponding logg exists,
write it. If not, write nan.'''
f = h5py.File("LIB_2kms.hdf5", "w")
shape = (len(T_points), len(logg_points), len(wave_grid_coarse))
dset = f.create_dataset("LIB", shape, dtype="f")
# A thread pool of P processes
pool = mp.Pool(ncores)
param_combos = []
var_combos = []
for t, temp in enumerate(T_points):
for l, logg in enumerate(logg_points):
param_combos.append([t, l])
var_combos.append([temp, logg])
spec_gen = list(pool.map(process_spectrum_Z0, var_combos))
for i in range(len(param_combos)):
t, l = param_combos[i]
dset[t, l, :] = spec_gen[i]
f.close()
def process_spectrum_Z0(pars):
temp, logg = pars
try:
f = load_flux_full(temp, logg, True)[ind]
flux = resample_and_convolve(f,wave_grid_fine,wave_grid_coarse)
print("Finished %s, %s" % (temp, logg))
except OSError:
print("%s, %s does not exist!" % (temp, logg))
flux = np.nan
return flux
def load_flux_full_Z0(temp, logg, norm=False):
rname = "HiResFITS/PHOENIX-ACES-AGSS-COND-2011/Z-0.0/lte{temp:0>5.0f}-{logg:.2f}-0.0" \
".PHOENIX-ACES-AGSS-COND-2011-HiRes.fits".format(
temp=temp, logg=logg)
flux_file = pf.open(rname)
f = flux_file[0].data
L = flux_file[0].header['PHXLUM'] #W
if norm:
f = f * (L_sun / L)
print("Normalized luminosity to 1 L_sun")
flux_file.close()
print("Loaded " + rname)
return f
def flux_interpolator():
points = ascii.read("param_grid.txt")
T_list = points["T"].data
logg_list = points["logg"].data
fluxes = np.empty((len(T_list), len(w)))
for i in range(len(T_list)):
fluxes[i] = load_flux_npy(T_list[i], logg_list[i])
flux_intp = NearestNDInterpolator(np.array([T_list, logg_list]).T, fluxes)
return flux_intp
def flux_interpolator_np():
points = np.loadtxt("param_grid.txt")
print(points)
#T_list = points["T"].data
#logg_list = points["logg"].data
len_w = 716665
fluxes = np.empty((len(points), len_w))
for i in range(len(points)):
fluxes[i] = load_flux_npy(points[i][0], points[i][1])
flux_intp = NearestNDInterpolator(points, fluxes)
return flux_intp
def flux_interpolator_hdf5():
#load hdf5 file of PHOENIX grid
fhdf5 = h5py.File(LIB, 'r')
LIB = fhdf5['LIB']
index_combos = []
var_combos = []
for ti in range(len(T_points)):
for li in range(len(logg_points)):
for zi in range(len(Z_points)):
index_combos.append([T_arg[ti], logg_arg[li], Z_arg[zi]])
var_combos.append([T_points[ti], logg_points[li], Z_points[zi]])
#print(param_combos)
num_spec = len(index_combos)
points = np.array(var_combos)
fluxes = np.empty((num_spec, len(wave_grid)))
for i in range(num_spec):
t, l, z = index_combos[i]
fluxes[i] = LIB[t, l, z][ind]
flux_intp = LinearNDInterpolator(points, fluxes, fill_value=1.)
fhdf5.close()
del fluxes
gc.collect()
return flux_intp
import numpy as np
from scipy.interpolate import interp1d, InterpolatedUnivariateSpline, griddata
import matplotlib.pyplot as plt
import model as m
from scipy.special import hyp0f1, struve, j1
import PHOENIX_tools as pt
c_kms = 2.99792458e5 #km s^-1
f_full = pt.load_flux_full(5900, 3.5, True)
w_full = pt.w_full
#Truncate to Dave's order
#ind = (w_full > 5122) & (w_full < 5218)
ind = (w_full > 3000) & (w_full < 12000.6)
w_full = w_full[ind]
f_full = f_full[ind]
def calc_lam_grid(v=1., start=3700., end=10000):
'''Returns a grid evenly spaced in velocity'''
size = 600000 #this number just has to be bigger than the final array
lam_grid = np.zeros((size,))
i = 0
lam_grid[i] = start
vel = np.sqrt((c_kms + v) / (c_kms - v))
while (lam_grid[i] < end) and (i < size - 1):
lam_new = lam_grid[i] * vel
i += 1
lam_grid[i] = lam_new
return lam_grid[np.nonzero(lam_grid)][:-1]
#grid = calc_lam_grid(2.,start=3050.,end=11232.) #chosen to correspond to min U filter and max z filter
#wave_grid = calc_lam_grid(0.35, start=3050., end=11232.) #this spacing encapsulates the maximal velocity resolution
# of the PHOENIX grid, and corresponds to Delta lambda = 0.006 Ang at 5000 Ang.
#np.save('wave_grid_0.35kms.npy',wave_grid)
#Truncate wave_grid to Dave's order
wave_grid = np.load('wave_grid_0.35kms.npy')[:-1]
wave_grid = wave_grid[(wave_grid > 5165) & (wave_grid < 5190)]
np.save('wave_grid_trunc.npy', wave_grid)
@np.vectorize
def gauss_taper(s, sigma=2.89):
'''This is the FT of a gaussian w/ this sigma. Sigma in km/s'''
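    # Fourier pair used here: g(v) = exp(-v**2 / (2*sigma**2))  <-->
    # G(s) = exp(-2 * pi**2 * sigma**2 * s**2), with sigma and v in km/s.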
    return np.exp(-2 * np.pi ** 2 * sigma ** 2 * s ** 2)
def convolve_gauss(wl, fl, sigma=2.89, spacing=2.):
##Take FFT of f_grid
out = np.fft.fft(np.fft.fftshift(fl))
N = len(fl)
freqs = np.fft.fftfreq(N, d=spacing)
taper = gauss_taper(freqs, sigma)
tout = out * taper
blended = np.fft.fftshift(np.fft.ifft(tout))
return np.absolute(blended) #remove tiny complex component
def IUS(w, f, wl):
f = InterpolatedUnivariateSpline(w, f)
return f(wl)
def plot_interpolated():
f_grid = IUS(w_full, f_full, wave_grid)
np.save('f_grid.npy', f_grid)
print("Calculated flux_grid")
print("Length flux grid", len(f_grid))
f_grid6 = convolve_gauss(wave_grid, f_grid, spacing=0.35)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(wave_grid, f_grid)
#ax.plot(m.wls[0], IUS(wave_grid, f_grid6, m.wls[0]),"o")
plt.show()
@np.vectorize
def lanczos_kernel(x, a=2):
if np.abs(x) < a:
        return np.sinc(x) * np.sinc(x / a)
else:
return 0.
def grid_interp():
return griddata(grid, blended, m.wls, method='linear')
def G(s, vL):
'''vL in km/s. Gray pg 475'''
ub = 2. * np.pi * vL * s
return j1(ub) / ub - 3 * np.cos(ub) / (2 * ub ** 2) + 3. * np.sin(ub) / (2 * ub ** 3)
def plot_gray():
fig = plt.figure()
ax = fig.add_subplot(111)
ss = np.linspace(0.001, 2, num=200)
Gs1 = G(ss, 1.)
Gs2 = G(ss, 2.)
ax.plot(ss, Gs1)
ax.plot(ss, Gs2)
plt.show()
def main():
plot_interpolated()
pass
if __name__ == "__main__":
main()
#Old sinc interpolation routines that didn't work out
#Test sinc interpolation
#def func(x):
# return (x - 3)**2 + 2 * x
#
#xs = np.arange(-299,301,1)
#ys = xs
#
#def sinc_interpolate(x):
# ind = np.argwhere(x > xs )[-1][0]
# ind2 = ind + 1
# print("ind",ind)
# print(xs[ind])
# print(xs[ind2])
# frac = x - xs[ind]
# print(frac)
# spacing = 1
# pts_grid = np.arange(-299.5,300,1)
# sinc_pts = np.sinc(pts_grid)
# print(pts_grid,sinc_pts,trapz(sinc_pts))
# flux_pts = ys
# print("Interpolated value",np.sum(sinc_pts * flux_pts))
# print("Neighboring value", ys[ind], ys[ind2])
# return(sinc_pts,flux_pts)
#Now, do sinc interpolation to the TRES pixels on the blended spectrum
##Take TRES pixel, call that the center of sinc, then sample it at +/- the other pixels in the grid
#def sinc_interpolate(wl_TRES):
# ind = np.argwhere(wl_TRES > grid)[-1][0]
# ind2 = ind + 1
# print(grid[ind])
# print(grid[ind2])
# frac = (wl_TRES - grid[ind])/(grid[ind2] - grid[ind])
# print(frac)
# spacing = 2 #km/s
# veloc_grid = np.arange(-48.,51,spacing) - frac * spacing
# print(veloc_grid)
# #convert wl spacing to velocity spacing
# sinc_pts = 0.5 * np.sinc(0.5 * veloc_grid)
# print(sinc_pts,trapz(sinc_pts,veloc_grid))
# print("Interpolated flux",np.sum(sinc_pts * f_grid[ind - 25: ind + 25]))
# print("Neighboring flux", f_grid[ind], f_grid[ind2])
#sinc_interpolate(6610.02)
|
BrownDwarf/Starfish
|
attic/old_code.py
|
Python
|
bsd-3-clause
| 13,197
|
[
"Gaussian"
] |
cbe422c4a6f5697935996787e3a5a4d01bd1dc95501ced0b8d3978a2ca15fba2
|
"""This contains a set of tests for paratemp.coordinate_analysis"""
########################################################################
# #
# This test was written by Thomas Heavey in 2018. #
# theavey@bu.edu thomasjheavey@gmail.com #
# #
# Copyright 2017-18 Thomas J. Heavey IV #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
########################################################################
from __future__ import absolute_import
import shutil
import matplotlib
import numpy as np
import pandas as pd
import pytest
from paratemp import cd
matplotlib.use('agg')
def test_matplotlib_testing_backend():
# Travis should fail if this isn't true, but hopefully this makes it
# clearer as to why it failed.
assert matplotlib.get_backend() == 'agg'
class TestXTCUniverse(object):
def test_import(self):
from paratemp import coordinate_analysis as ca
from paratemp import Universe
assert ca.Universe == Universe
from MDAnalysis import Universe as MdUniverse
assert issubclass(Universe, MdUniverse)
assert issubclass(ca.Universe, MdUniverse)
@pytest.fixture
def universe_class(self) -> type:
from paratemp import Universe
return Universe
@pytest.fixture
def univ(self, tmp_path, path_test_data, universe_class):
gro = path_test_data / 'spc2.gro'
traj = path_test_data / 't-spc2-traj.xtc'
shutil.copy(gro, tmp_path)
shutil.copy(traj, tmp_path)
with cd(tmp_path):
_univ = universe_class(gro.name,
traj.name,
temp=205.)
return _univ
@pytest.fixture
def univ_w_a(self, univ):
univ.calculate_distances(a='4 5',
read_data=False, save_data=False)
return univ
@pytest.fixture
def univ_pbc(self, tmp_path, path_test_data, universe_class):
gro = path_test_data / 'spc2.gro'
traj = path_test_data / 'spc2-traj-pbc.xtc'
shutil.copy(gro, tmp_path)
shutil.copy(traj, tmp_path)
with cd(tmp_path):
_univ = universe_class(gro.name,
traj.name,
temp=205.)
return _univ
@pytest.fixture
def ref_a_pbc_dists(self, path_ref_data):
import pandas
return pandas.read_csv(path_ref_data / 'spc2-a-pbc-dists.csv',
index_col=0)
def test_distance_str(self, univ, ref_a_dists):
univ.calculate_distances(a='4 5',
read_data=False, save_data=False)
assert np.isclose(ref_a_dists, univ.data['a']).all()
def test_distance_list_int(self, univ, ref_a_dists):
univ.calculate_distances(a=[4, 5],
read_data=False, save_data=False)
assert np.isclose(ref_a_dists, univ.data['a']).all()
def test_distance_list_str(self, univ, ref_a_dists):
univ.calculate_distances(a=['4', '5'],
read_data=False, save_data=False)
assert np.isclose(ref_a_dists, univ.data['a']).all()
def test_calculate_distances_no_recalc(self, univ_w_a, capsys):
univ_w_a.calculate_distances(a=[4, 5],
read_data=False, save_data=False)
out, err = capsys.readouterr()
assert out == 'Nothing (new) to calculate here.\n'
def test_calculate_distances_yes_recalc(self, univ_w_a):
"""
:type univ_w_a: paratemp.coordinate_analysis.Universe
"""
univ_w_a.calculate_distances(a='5 5', recalculate=True,
read_data=False, save_data=False)
assert (np.array([0., 0.]) == univ_w_a.data['a']).all()
def test_distance_pbc(self, univ_pbc, ref_a_pbc_dists):
univ_pbc.calculate_distances(a='4 5',
read_data=False, save_data=False)
assert np.isclose(ref_a_pbc_dists['a'], univ_pbc.data['a']).all()
def test_distances_com(self, univ, ref_g_dists):
univ.calculate_distances(
read_data=False, save_data=False,
g=((1, 2), (3, 4)))
assert np.isclose(ref_g_dists, univ.data).all()
def test_calculate_distance_raises(self, univ):
with pytest.raises(SyntaxError):
univ.calculate_distances(1, read_data=False, save_data=False)
with pytest.raises(SyntaxError):
univ.calculate_distances(a=['0', '5'],
read_data=False, save_data=False)
with pytest.raises(SyntaxError):
univ.calculate_distances(a=['1', '2', '5'],
read_data=False, save_data=False)
with pytest.raises(NotImplementedError):
univ.calculate_distances(a=['fail', 'here'],
read_data=False, save_data=False)
def test_calculate_distance_warns(self, univ):
with pytest.warns(UserWarning,
match='following positional arguments were given'):
univ.calculate_distances('fail', read_data=False, save_data=False)
def test_fes_1d_data_str(self, univ_w_a, ref_delta_g, ref_bins):
"""
:type univ_w_a: paratemp.coordinate_analysis.Universe
:type ref_delta_g: np.ndarray
:type ref_bins: np.ndarray
"""
delta_g_str, bins_str, lines_str, fig_str, ax_str = \
univ_w_a.fes_1d('a')
assert np.allclose(delta_g_str, ref_delta_g)
assert np.allclose(bins_str, ref_bins)
def test_fes_1d_data_data(self, univ_w_a, ref_delta_g, ref_bins):
"""
:type univ_w_a: paratemp.coordinate_analysis.Universe
:type ref_delta_g: np.ndarray
:type ref_bins: np.ndarray
"""
delta_g_data, bins_data, lines_data, fig_data, ax_data = \
univ_w_a.fes_1d(univ_w_a.data['a'])
assert np.allclose(delta_g_data, ref_delta_g)
assert np.allclose(bins_data, ref_bins)
def test_final_time_str(self, univ):
assert univ.final_time_str == '2ps'
univ._last_time = 1001.0
assert univ.final_time_str == '1ns'
univ._last_time = 32111222.12
assert univ.final_time_str == '32us'
univ._last_time = 5.1e12
assert univ.final_time_str == '5100ms'
def test_save_data(self, univ_w_a, tmp_path, capsys):
time = 'time_' + str(int(univ_w_a._last_time / 1000)) + 'ns'
f_name = univ_w_a.trajectory.filename.replace('xtc', 'h5')
with cd(tmp_path):
univ_w_a.save_data()
out, err = capsys.readouterr()
assert (tmp_path / f_name).exists()
with pd.HDFStore(f_name) as store:
df = store[time]
assert out == 'Saved data to {f_name}[{time}]\n'.format(
f_name=f_name, time=time)
assert np.allclose(df, univ_w_a.data)
def test_save_data_no_new(self, univ_w_a, tmp_path, capsys):
time = 'time_' + str(int(univ_w_a._last_time / 1000)) + 'ns'
f_name = univ_w_a.trajectory.filename.replace('xtc', 'h5')
with cd(tmp_path):
univ_w_a.save_data()
capsys.readouterr()
univ_w_a.save_data()
out, err = capsys.readouterr()
assert (tmp_path / f_name).exists()
with pd.HDFStore(f_name) as store:
df = store[time]
assert out == 'No data added to {f_name}[{time}]\n'.format(
f_name=f_name, time=time)
assert np.allclose(df, univ_w_a.data)
def test_save_data_add_new(self, univ, univ_w_a, tmp_path, capsys):
time = 'time_' + str(int(univ_w_a._last_time / 1000)) + 'ns'
f_name = univ_w_a.trajectory.filename.replace('xtc', 'h5')
with cd(tmp_path):
univ_w_a.save_data()
capsys.readouterr()
univ.calculate_distances(b='4 5', save_data=False)
univ.save_data()
out, err = capsys.readouterr()
assert out == 'Saved data to {f_name}[{time}]\n'.format(
f_name=f_name, time=time)
def test_read_data(self, univ, univ_w_a, tmp_path, capsys):
"""
:type univ_w_a: paratemp.Universe
:type univ: paratemp.Universe
"""
with cd(tmp_path):
univ_w_a.save_data()
capsys.readouterr() # just so it doesn't print
univ.read_data()
assert (univ_w_a.data == univ.data).all().all()
def test_read_data_no_data(self, univ, tmp_path, capsys):
"""
:type univ: paratemp.Universe
"""
time = 'time_' + str(int(univ._last_time / 1000)) + 'ns'
f_name = univ.trajectory.filename.replace('xtc', 'h5')
with cd(tmp_path):
with pytest.raises(IOError, match=r'This data does not exist!\n'
r'{}\[{}\]'.format(f_name,
time)):
univ.read_data()
univ.read_data(ignore_no_data=True)
out, err = capsys.readouterr()
assert out == 'No data to read in {}[{}]\n'.format(f_name, time)
def test_calculate_distances_save(self, univ, tmp_path, capsys):
"""
:type univ: paratemp.Universe
"""
time = 'time_' + str(int(univ._last_time / 1000)) + 'ns'
f_name = univ.trajectory.filename.replace('xtc', 'h5')
with cd(tmp_path):
univ.calculate_distances(a='4 5')
out, err = capsys.readouterr()
assert (tmp_path / f_name).exists()
with pd.HDFStore(f_name) as store:
df = store[time]
assert out == 'Saved data to {f_name}[{time}]\n'.format(
f_name=f_name, time=time)
assert np.allclose(df, univ.data)
def test_calculate_distances_read(self, univ_w_a, tmp_path, capsys):
"""
:type univ_w_a: paratemp.Universe
"""
with cd(tmp_path):
univ_w_a.save_data()
capsys.readouterr()
univ_w_a._data = univ_w_a._init_dataframe()
univ_w_a.calculate_distances(a='4 5')
out, err = capsys.readouterr()
assert out == 'Nothing (new) to calculate here.\n'
def test_select_frames(self, univ_pbc, capsys):
u = univ_pbc
u.calculate_distances(a='4 5',
read_data=False, save_data=False)
frames = u.select_frames({'a': (0.1, 0.75)}, 'short')
out, err = capsys.readouterr()
assert out == 'These criteria include 1 frame\n'
assert (u.data['short'] == [False, True]).all()
assert (frames == [1]).all()
def test_update_num_frames(self, univ, capsys, path_test_data):
old_lt, old_nf = univ._last_time, univ._num_frames
univ.load_new([str(path_test_data / 't-spc2-traj.xtc'),
str(path_test_data / 'spc2-traj-pbc.xtc')])
univ.update_num_frames()
out, err = capsys.readouterr()
assert old_lt != univ._last_time
assert old_nf != univ._num_frames
assert out == 'Updating num of frames from {} to {}'.format(
old_nf, univ._num_frames) + '\nand the final time.\n'
class TestXTCTaddol(TestXTCUniverse):
@pytest.fixture
def universe_class(self) -> type:
from paratemp.coordinate_analysis import Taddol
return Taddol
# TODO add further Universe tests
# ignore_file_change=True
# fes_2d
# calculate_dihedrals
# figure from fes_1d
# figure from fes_2d
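# To run these tests locally (assumes the repo's test data and the conftest.py
# fixtures such as path_test_data/ref_a_dists are available):
#   pytest tests/test_coordinate_analysis.py -q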
|
theavey/ParaTemp
|
tests/test_coordinate_analysis.py
|
Python
|
apache-2.0
| 12,849
|
[
"MDAnalysis"
] |
ea09fc53c0ca734458a759f8ecf4cb0c35ae1f6dbdad559ad570f43908be769a
|
from django.shortcuts import get_object_or_404, redirect, render
from django.http import HttpResponse, JsonResponse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.syndication.views import Feed
from django.core.urlresolvers import reverse
from django.db.models import Q, Count
from django.contrib import messages
from django.utils.feedgenerator import Atom1Feed
from .models import Hug
from .templatetags.hugnet import hugcheck
from .forms import SettingsForm
from .secrets import tweeter, pushshsh
import datetime, twitter, json, random
def index(request):
hugs = Hug.objects.order_by("-timestamp")[:100]
return render(request, "hug/index.html", {"hugs": hugs, "userson":json.dumps([x.username for x in User.objects.filter(is_active=True)])})
@login_required
def settings(request):
if request.method == 'POST':
if getattr(request.user, "bonus_data", None):
form = SettingsForm(request.POST, instance=request.user.bonus_data)
else:
form = SettingsForm(request.POST, initial={'user': request.user})
form.user = request.user
if form.is_valid():
form.save()
messages.success(request, "Settings saved! <3")
return redirect(reverse("index"))
else:
messages.error(request, "Something's wrong...")
else:
if getattr(request.user, "bonus_data", None):
form = SettingsForm(instance=request.user.bonus_data)
else:
form = SettingsForm(initial={'user': request.user})
return render(request, "hug/settings.html", {"form":form})
def user(request, name):
usr = get_object_or_404(User, username=name)
hugs = Hug.objects.filter(Q(target=usr) | Q(source=usr)).order_by("-timestamp")
return render(request, "hug/user.html", {"hugs": hugs, 'usr': usr})
def user_hgd(request, name):
usr = get_object_or_404(User, username=name)
x = usr.hugs_given.values('target').annotate(hcount=Count('target'))
return JsonResponse(data={"data":[{"value": y["hcount"], "label": get_object_or_404(User, pk=y["target"]).username} for y in x]})
def user_hby(request, name):
usr = get_object_or_404(User, username=name)
x = usr.hugs_gotten.values('source').annotate(hcount=Count('source'))
return JsonResponse(data={"data":[{"value": y["hcount"], "label": get_object_or_404(User, pk=y["source"]).username} for y in x]})
def onehug(request, pk):
    hug = get_object_or_404(Hug, pk=pk)
    random.seed(hug)
    colors = ["red", "pink", "purple", "deep-purple", "indigo", "blue",
              "light-blue", "cyan", "teal", "green", "light-green", "lime",
              "yellow", "amber", "orange", "deep-orange"]
    shades = ([" accent-%i" % x for x in range(1, 5)]
              + [" darken-%i" % x for x in range(1, 5)]
              + [" lighten-%i" % x for x in range(1, 3)]
              + [""])
    return render(request, "hug/temp.html",
                  {"hug": hug, "hcol": random.choice(colors) + random.choice(shades)})
@login_required
def showme(request):
return redirect(reverse("user", args=(request.user.username,)))
@login_required
def do_hug(request, target_name):
target = get_object_or_404(User, username__iexact=target_name)
hugcheck(request.user)
if request.user.bonus_data.tokenet():
x = Hug.objects.create(source=request.user, target=target)
tw = tweeter()
try:
tw.PostUpdate("%i: %s hugged %s! <3" % (x.pk, getattr(getattr(request.user, "bonus_data", None), "twitter", request.user.username), getattr(getattr(target, "bonus_data", None), "twitter", target.username)))
except twitter.error.TwitterError:
pass
pushshsh(x)
messages.success(request, "%s hugged! <3" % target.username)
else:
messages.warning(request, "Sorry, but trim down on hugs please, don't kill our server <3")
return redirect(reverse("index"))
def do_hug_r(request):
    if "name" in request.GET and User.objects.filter(username__iexact=request.GET["name"]).count() > 0:
        return redirect(reverse("do_hug", args=(request.GET["name"],)))
    messages.error(request, "I don't know that person...")
    return redirect(reverse("index"))
@login_required
def rehug(request, pk):
h = get_object_or_404(Hug, pk=pk)
hugcheck(request.user)
if request.user.bonus_data.tokenet():
x = Hug.objects.create(source=request.user, target=h.target, inspiration=h)
tw = tweeter()
try:
tw.PostUpdate("%i: %s rehugged %s! <3" % (x.pk, getattr(getattr(request.user, "bonus_data", None), "twitter", request.user.username), getattr(getattr(h.target, "bonus_data", None), "twitter", h.target.username)))
except twitter.error.TwitterError:
pass
pushshsh(x)
messages.success(request, "%s was rehugged! <3" % h.target.username)
else:
messages.warning(request, "Sorry, but trim down on hugs please, don't kill our server <3")
return redirect(reverse("index"))
def history(request, pk):
return render(request, "hug/history.html", {"hug": get_object_or_404(Hug, pk=pk)})
@login_required
def hugback(request, pk):
h = get_object_or_404(Hug, pk=pk)
hugcheck(request.user)
if request.user.bonus_data.tokenet():
x = Hug.objects.create(source=request.user, target=h.source, inspiration=h)
tw = tweeter()
try:
tw.PostUpdate("%i: %s hugged %s back! <3" % (x.pk, getattr(getattr(request.user, "bonus_data", None), "twitter", request.user.username), getattr(getattr(h.source, "bonus_data", None), "twitter", h.source.username)))
except twitter.error.TwitterError:
pass
pushshsh(x)
messages.success(request, "%s was hugged back! <3" % h.source)
else:
messages.warning(request, "Sorry, but trim down on hugs please, don't kill our server <3")
return redirect(reverse("index"))
# feeds start here
class HugsFeed(Feed):
def item_title(self, item):
return item.nameme()
def item_description(self, item):
return item.inspiron()
def item_link(self, item):
return reverse("history", args=(item.pk,))
def item_guid(self, item):
return "%i" % item.pk
def item_pubdate(self, item):
return item.timestamp
class AllHugsFeedRss(HugsFeed):
title = "nyuuu.ovh all hugs"
link = "https://nyuuu.ovh/"
description = "All hugs that happen on nyuuu.ovh"
def items(self):
return Hug.objects.order_by('-timestamp')[:50]
class AllHugsFeedAtom(AllHugsFeedRss):
feed_type = Atom1Feed
subtitle = AllHugsFeedRss.description
class UserHgdFeedRss(HugsFeed):
def get_object(self, request, username):
return get_object_or_404(User, username=username)
def title(self, obj):
return "nyuuu.ovh hugs from %s" % obj.username
def link(self, obj):
return "https://nyuuu.ovh/who-is/%s/" % obj.username
def description(self, obj):
return "All hugs performed by %s on nyuuu.ovh" % obj.username
def items(self, obj):
return obj.hugs_given.order_by("-timestamp")[:50]
class UserHgdFeedAtom(UserHgdFeedRss):
feed_type = Atom1Feed
subtitle = UserHgdFeedRss.description
class UserHbyFeedRss(HugsFeed):
def get_object(self, request, username):
return get_object_or_404(User, username=username)
def title(self, obj):
return "nyuuu.ovh hugs to %s" % obj.username
def link(self, obj):
return "https://nyuuu.ovh/who-is/%s/" % obj.username
def description(self, obj):
return "All hugs received by %s on nyuuu.ovh" % obj.username
def items(self, obj):
return obj.hugs_gotten.order_by("-timestamp")[:50]
class UserHbyFeedAtom(UserHbyFeedRss):
feed_type = Atom1Feed
subtitle = UserHbyFeedRss.description
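# Hypothetical wiring sketch (belongs in urls.py, not in this views module):
# the feed classes above plug in like any Django class-based view.  The
# patterns and names below are illustrative only.
#
#   from django.conf.urls import url
#   from hug import views
#
#   urlpatterns = [
#       url(r'^feeds/all\.rss$', views.AllHugsFeedRss()),
#       url(r'^feeds/all\.atom$', views.AllHugsFeedAtom()),
#       url(r'^who-is/(?P<username>[\w.@+-]+)/given\.rss$', views.UserHgdFeedRss()),
#       url(r'^who-is/(?P<username>[\w.@+-]+)/gotten\.rss$', views.UserHbyFeedRss()),
#   ]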
|
michcioperz/nyuuu
|
hug/views.py
|
Python
|
apache-2.0
| 7,756
|
[
"Amber"
] |
274faf92af94be213f9790ab14f7f0661b1e766bf2eee5c3f44c68d1459e1820
|
#!/usr/bin/env python
import sys
import gfxprim.core as core
import gfxprim.loaders as loaders
import gfxprim.filters as filters
def main():
if len(sys.argv) != 3:
print("usage: blur blur-radii image")
sys.exit(1)
radii = float(sys.argv[1])
# Load Image
img = loaders.load(sys.argv[2])
# Do in-place gaussian blur
filters.gaussian_blur(img, img, radii, radii)
# Save result
img.loaders.save_jpg("out.jpg")
if __name__ == '__main__':
main()
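# Example invocation (the output filename is hardcoded to out.jpg above):
#   ./blur.py 4.5 photo.png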
|
gfxprim/gfxprim
|
demos/py_simple/blur.py
|
Python
|
lgpl-2.1
| 497
|
[
"Gaussian"
] |
d059bc31a63f3d2843d3be9b744a54ae04f8788232ab8b454c1d95c84f7a77c0
|
import numpy as np
from alis import almsgs
from alis import alfunc_base
import astropy.units as u
msgs = almsgs.msgs()
try:
from linetools.spectra.lsf import LSF as ltLSF
except ImportError:
msgs.warn("linetools is not installed. Install it if you wish to use LSF")
class LSF(alfunc_base.Base) :
"""
Convolves the spectrum with a line spread function from linetools.
The only input parameter is a dummy parameter at this moment.
"""
def __init__(self, prgname="", getinst=False, atomic=None, verbose=2):
self._idstr = 'lsf' # ID string for this class
self._pnumr = 1 # Total number of parameters fed in
self._keywd = dict({'name':'COS', 'grating':'G130M', 'life_position':1, 'cen_wave':'1309', 'blind':False}) # Additional arguments to describe the model --- 'input' cannot be used as a keyword
self._keych = dict({'name':1, 'grating':0, 'life_position':0, 'cen_wave':0, 'blind':0}) # Require keywd to be changed (1 for yes, 0 for no)
self._keyfm = dict({'name':"", 'grating':"", 'life_position':"", 'cen_wave':"", 'blind':""}) # Require keywd to be changed (1 for yes, 0 for no)
self._parid = ['scale'] # Name of each parameter
self._defpar = [ 1.0 ] # Default values for parameters that are not provided
self._fixpar = [ True ] # By default, should these parameters be fixed?
self._limited = [ [1 ,0 ] ] # Should any of these parameters be limited from below or above
self._limits = [ [0.0,0.0] ] # What should these limiting values be
self._svfmt = [ "{0:.3g}" ] # Specify the format used to print or save output
self._prekw = [] # Specify the keywords to print out before the parameters
# DON'T CHANGE THE FOLLOWING --- it tells ALIS what parameters are provided by the user.
tempinput = self._parid+list(self._keych.keys()) #
self._keywd['input'] = dict(zip((tempinput),([0]*np.size(tempinput)))) #
########################################################################
self._verbose = verbose
# Set the atomic data
self._atomic = atomic
if getinst: return
def call_CPU(self, x, y, p, ncpus=1):
"""
Define the functional form of the model
--------------------------------------------------------
x : array of wavelengths
y : model flux array
p : array of parameters for this model
--------------------------------------------------------
"""
if p[0] > 0.0:
ysize = y.size
lsf_dict = dict(name=self._keywd['name'],
grating=self._keywd['grating'],
life_position=str(self._keywd['life_position']),
cen_wave=self._keywd['cen_wave'])
try:
lsf_val = ltLSF(lsf_dict, scalefactor=p[0])
except:
lsf_val = ltLSF(lsf_dict)
tab = lsf_val.interpolate_to_wv_array(x * u.AA)
lsfk = tab["kernel"].data
size = ysize + lsfk.size - 1
fsize = 2 ** np.int(np.ceil(np.log2(size))) # Use this size for a more efficient computation
conv = np.fft.fft(y, fsize)
conv *= np.fft.fft(lsfk/lsfk.sum(), fsize)
ret = np.fft.ifft(conv).real.copy()
del conv
return ret[ysize//2:ysize//2+ysize]
else:
return y
def getminmax(self, par, fitrng, Nsig=30.0):
"""
This definition is only used for specifying the
FWHM Resolution of the data.
--------------------------------------------------------
This definition will return the additional wavelength range
        of the data to be extracted around the user-specified fitrange
to ensure the edges of the model near the min and max of
fitrange aren't affected.
--------------------------------------------------------
par : The input parameters which defines the FWHM of this
function
fitrng : The fitrange specified by the user at input
Nsig : Width in number of sigma to extract either side of
fitrange
"""
lsf_dict = dict(name=self._keywd['name'],
grating=self._keywd['grating'],
life_position=str(self._keywd['life_position']),
cen_wave=self._keywd['cen_wave'])
lsf_val = ltLSF(lsf_dict)
tab = lsf_val.interpolate_to_wv0(float(self._keywd['cen_wave']) * u.AA)
# Roughly estimate the FWHM
w = np.where(tab["kernel"].data >= 0.5 * np.max(tab["kernel"].data))
dwav = np.max(tab["wv"].data[w]) - np.min(tab["wv"].data[w])
fwhmv = 299792.458 * dwav / 1309.0
# Use the parameters to now calculate the sigma width
sigd = fwhmv / ( 2.99792458E5 * ( 2.0*np.sqrt(2.0*np.log(2.0)) ) )
# Calculate the min and max extraction wavelengths
wmin = fitrng[0]*(1.0 - Nsig*sigd)
wmax = fitrng[1]*(1.0 + Nsig*sigd)
return wmin, wmax
def load(self, instr, cntr, mp, specid, forcefix=False):
"""
Load the parameters in the input model file
--------------------------------------------------------
instr: input string for the parameters and keywords of
model to be loaded (ignoring the identifier at
the beginning of the model line).
cntr : The line number of the model (e.g. if it's the
first model line, cntr=0)
mp : modpass --> A dictionary with the details of all
models read in so far.
--------------------------------------------------------
Nothing should be changed here when writing a new function.
--------------------------------------------------------
"""
def check_tied_param(ival, cntr, mps, iind):
havtie = False
tieval=ival.lstrip('+-.0123456789')
if tieval[0:2] in ['E+', 'e+', 'E-', 'e-']: # Scientific Notation is used.
tieval=tieval[2:].lstrip('.0123456789')
inval=float(ival.rstrip(tieval))
if len(tieval) == 0: # Parameter is not tied
mps['mtie'][cntr].append(-1)
if forcefix:
mps['mfix'][cntr].append(1)
else:
mps['mfix'][cntr].append(0)
else: # parameter is tied
# Determine if this parameter is fixed
if tieval[0].isupper() or forcefix: mps['mfix'][cntr].append(1)
else: mps['mfix'][cntr].append(0)
# Check if this tieval has been used before
if len(mps['tpar']) == 0: # If it's the first known tied parameter in the model
mps['tpar'].append([])
mps['tpar'][0].append(tieval)
mps['tpar'][0].append(len(mps['p0']))
mps['mtie'][cntr].append(-1) # i.e. not tied to anything
else:
for j in range(0,len(mps['tpar'])):
if mps['tpar'][j][0] == tieval:
mps['mtie'][cntr].append(j)
havtie = True
if havtie == False: # create a New tied parameter
mps['tpar'].append([])
mps['tpar'][-1].append(tieval)
mps['tpar'][-1].append(len(mps['p0']))
mps['mtie'][cntr].append(-1) # i.e. not tied to anything
if havtie == False: mps['p0'].append(inval)
mps['mpar'][cntr].append(inval)
mps['mlim'][cntr].append([self._limits[iind][i] if self._limited[iind][i]==1 else None for i in range(2)])
return mps
################
# Convert colon back to equals so that it's interpreted as a keyword
instr = instr.replace(":", "=")
isspl = instr.split(",")
        # Separate the parameters from the keywords
kywrd = []
keywdk = list(self._keywd.keys())
        keywdk[:] = (kych for kych in keywdk if kych != 'input') # Remove the keyword 'input'
param = [str(self._defpar[all]) for all in range(self._pnumr)]
parid = [i for i in range(self._pnumr)]
for i in range(len(isspl)):
if "=" in isspl[i]:
kwspl = isspl[i].split('=')
if kwspl[0] in keywdk:
self._keywd['input'][kwspl[0]]=1
kywrd.append(isspl[i])
else: msgs.error("Keyword '"+isspl[i]+"' is unknown for -"+msgs.newline()+self._idstr+" "+instr)
else:
param[i] = isspl[i]
self._keywd['input'][self._parid[i]]=1
# Do some quick checks
if len(param) != self._pnumr:
msgs.error("Incorrect number of parameters (should be "+str(self._pnumr)+"):"+msgs.newline()+self._idstr+" "+instr)
# Set the parameters:
mp['mtyp'].append(self._idstr)
mp['mpar'].append([])
mp['mtie'].append([])
mp['mfix'].append([])
mp['mlim'].append([])
for i in range(self._pnumr):
mp = check_tied_param(param[i], cntr, mp, i)
# Now load the keywords:
for i in range(len(kywrd)):
kwspl = kywrd[i].split('=')
ksspl = kwspl[1].split(',')
for j in range(len(ksspl)):
if type(self._keywd[kwspl[0]]) is int:
typeval='integer'
self._keywd[kwspl[0]] = int(kwspl[1])
elif type(self._keywd[kwspl[0]]) is str:
typeval='string'
self._keywd[kwspl[0]] = kwspl[1]
elif type(self._keywd[kwspl[0]]) is float:
typeval='float'
self._keywd[kwspl[0]] = float(kwspl[1])
elif type(self._keywd[kwspl[0]]) is list:
typeval='list'
self._keywd[kwspl[0]].append(kwspl[1])
elif type(self._keywd[kwspl[0]]) is bool:
if kwspl[1] in ['True', 'False']:
typeval='boolean'
self._keywd[kwspl[0]] = kwspl[1] in ['True']
else:
typeval='string'
self._keywd[kwspl[0]] = kwspl[1]
msgs.warn(kwspl[0]+" should be of type boolean (True/False)", verbose=self._verbose)
elif self._keywd[kwspl[0]] is None:
typeval='None'
self._keywd[kwspl[0]] = None
else:
msgs.error("I don't understand the format on line:"+msgs.newline()+self._idstr+" "+instr)
self._keych[kwspl[0]] = 0 # Set keych for this keyword to zero to show that this has been changed
# Check that all required keywords were changed
for i in range(len(keywdk)):
if self._keych[keywdk[i]] == 1: msgs.error(keywdk[i]+" must be set for -"+msgs.newline()+self._idstr+" "+instr)
# Append the final set of keywords
mp['mkey'].append(self._keywd.copy())
return mp, parid
def parin(self, i, par):
"""
This routine converts a parameter in the input model file
to the parameter used in 'call'
--------------------------------------------------------
When writing a new function, one should change how each
input parameter 'par' is converted into a parameter used
in the function specified by 'call'
--------------------------------------------------------
"""
if i == 0: pin = par
return pin
def set_vars(self, p, level, mp, ival, wvrng=[0.0,0.0], spid='None', levid=None, nexbin=None, getinfl=False, ddpid=None, getstdd=None):
"""
        Return the parameters for this function to be used by 'call'
The only thing that should be changed here is the parb values
"""
levadd=0
params=np.zeros(self._pnumr)
parinf=[]
for i in range(self._pnumr):
lnkprm = None
if mp['mtie'][ival][i] >= 0:
getid = mp['tpar'][mp['mtie'][ival][i]][1]
elif mp['mtie'][ival][i] <= -2:
if len(mp['mlnk']) == 0:
lnkprm = mp['mpar'][ival][i]
else:
for j in range(len(mp['mlnk'])):
if mp['mlnk'][j][0] == mp['mtie'][ival][i]:
cmd = 'lnkprm = ' + mp['mlnk'][j][1]
namespace = dict({'p': p})
exec(cmd, namespace)
lnkprm = namespace['lnkprm']
levadd += 1
else:
getid = level+levadd
levadd+=1
if lnkprm is None:
params[i] = self.parin(i, p[getid])
if mp['mfix'][ival][i] == 0: parinf.append(getid) # If parameter not fixed, append it to the influence array
else:
params[i] = lnkprm
if ddpid is not None:
if ddpid not in parinf: return []
if nexbin is not None:
if params[0] == 0: return params, 1
if nexbin[0] == "km/s": return params, int(round(2.0*np.sqrt(2.0*np.log(2.0))*nexbin[1]/params[0] + 0.5))
elif nexbin[0] == "A" : msgs.error("bintype is set to 'A' for Angstroms, when FWHM is specified as a velocity.")
else: msgs.bug("bintype "+nexbin[0]+" should not have been specified in model function: "+self._idstr, verbose=self._verbose)
elif getstdd is not None:
fact = 2.0*np.sqrt(2.0*np.log(2.0))
return getstdd[1]*(1.0+getstdd[0]*params[0]/(fact*299792.458)), getstdd[2]*(1.0-getstdd[0]*params[0]/(fact*299792.458))
elif getinfl: return params, parinf
else: return params
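# -----------------------------------------------------------------------------
# Illustrative sketch (not part of ALIS): call_CPU above applies the LSF by
# zero-padded FFT convolution. The toy signal and Gaussian kernel below are
# assumptions chosen purely to demonstrate that pattern.
def _demo_fft_convolve():
    sig = np.ones(100)
    kern = np.exp(-0.5 * (np.arange(-10, 11) / 3.0) ** 2)
    size = sig.size + kern.size - 1
    fsize = 2 ** int(np.ceil(np.log2(size)))  # pad to a power of two for speed
    conv = np.fft.fft(sig, fsize) * np.fft.fft(kern / kern.sum(), fsize)
    return np.fft.ifft(conv).real[:sig.size]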
|
rcooke-ast/ALIS
|
alis/alfunc_lsf.py
|
Python
|
gpl-3.0
| 14,125
|
[
"Gaussian"
] |
0560f432a9b24c8749028557bc111d34c9eb3445844004d043f673bd86d57014
|
# -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Scientific Package. This package holds all simulators, and
# analysers necessary to run brain-simulations. You can use it stand alone or
# in conjunction with TheVirtualBrain-Framework Package. See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
A collection of plotting functions used by simulator/demos.
Import and usage for MAYAVI based plots should look like::
from tvb.simulator.plot.tools import *
if IMPORTED_MAYAVI:
plt = plot_function(...)
.. moduleauthor:: Stuart A. Knock <Stuart@tvb.invalid>
.. moduleauthor:: Paula Sanz Leon <paula.sanz-leon@univ-amu.fr>
"""
import numpy
import scipy as sp
import scipy.sparse   # make sp.sparse available for plot_fast_kde
import scipy.signal   # make sp.signal available for plot_fast_kde
import networkx as nx
from tvb.basic.logger.builder import get_logger
LOG = get_logger(__name__)
##----------------------------------------------------------------------------##
##- matplotlib based plotting functions -##
##----------------------------------------------------------------------------##
import matplotlib as mpl
import matplotlib.pyplot as pyplot
import matplotlib.colors
import matplotlib.ticker as ticker
import matplotlib.colors as colors
try:
from mpl_toolkits.axes_grid import make_axes_locatable
IMPORTED_MPL_TOOLKITS = True
except ImportError:
IMPORTED_MPL_TOOLKITS = False
LOG.error("You need mpl_toolkits")
def _blob(x, y, area, colour):
"""
Draws a square-shaped blob with the given area (< 1) at
the given coordinates.
From : http://www.scipy.org/Cookbook/Matplotlib/HintonDiagrams
"""
hs = numpy.sqrt(area) / 2
xcorners = numpy.array([x - hs, x + hs, x + hs, x - hs])
ycorners = numpy.array([y - hs, y - hs, y + hs, y + hs])
pyplot.fill(xcorners, ycorners, colour, edgecolor=colour)
def hinton_diagram(connectivity_weights, num, maxWeight=None):
"""
Draws a Hinton diagram. This function temporarily disables matplotlib
interactive mode if it is on, otherwise this takes forever.
"""
weights_figure = pyplot.figure(num=num)
height, width = connectivity_weights.shape
if not maxWeight:
maxWeight = 2 ** numpy.ceil(numpy.log(numpy.max(numpy.abs(connectivity_weights))) / numpy.log(2))
#pyplot.fill(numpy.array([0,width,width,0]),numpy.array([0,0,height+0.5,height+0.5]),'gray')
pyplot.axis('equal')
weights_axes = weights_figure.gca()
for x in xrange(width):
for y in xrange(height):
_x = x + 1
_y = y + 1
w = connectivity_weights[y, x]
if w > 0:
_blob(_x - 1., height - _y + 0.0, min(1, w / maxWeight), 'red')
elif w < 0:
_blob(_x - 1., height - _y + 0.0, min(1, -w / maxWeight), 'black')
return weights_axes
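# Hypothetical quick check for hinton_diagram (the weight matrix below is an
# assumption, not part of the module):
# w = numpy.random.randn(16, 16)
# hinton_diagram(w, num="hinton-demo")
# pyplot.show()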
def plot_connectivity(connectivity, num="weights", order_by=None, plot_hinton=False, plot_tracts=True):
"""
A 2D plot for visualizing the Connectivity.weights matrix
"""
labels = connectivity.region_labels
plot_title = connectivity.__class__.__name__
if order_by is None:
order = numpy.arange(connectivity.number_of_regions)
else:
order = numpy.argsort(order_by)
if order.shape[0] != connectivity.number_of_regions:
LOG.error("Ordering vector doesn't have length number_of_regions")
LOG.error("Check ordering length and that connectivity is configured")
return
# Assumes order is shape (number_of_regions, )
order_rows = order[:, numpy.newaxis]
order_columns = order_rows.T
if plot_hinton:
weights_axes = hinton_diagram(connectivity.weights[order_rows, order_columns], num)
else:
# weights matrix
weights_figure = pyplot.figure()
weights_axes = weights_figure.gca()
wimg = weights_axes.matshow(connectivity.weights[order_rows, order_columns])
weights_figure.colorbar(wimg)
weights_axes.set_title(plot_title)
if plot_tracts:
# tract lengths matrix
tracts_figure = pyplot.figure(num="tract-lengths")
tracts_axes = tracts_figure.gca()
timg = tracts_axes.matshow(connectivity.tract_lengths[order_rows, order_columns])
tracts_axes.set_title(plot_title)
tracts_figure.colorbar(timg)
if labels is None:
return
weights_axes.set_yticks(numpy.arange(connectivity.number_of_regions))
weights_axes.set_yticklabels(list(labels[order]), fontsize=8)
weights_axes.set_xticks(numpy.arange(connectivity.number_of_regions))
weights_axes.set_xticklabels(list(labels[order]), fontsize=8, rotation=90)
if plot_tracts:
tracts_axes.set_yticks(numpy.arange(connectivity.number_of_regions))
tracts_axes.set_yticklabels(list(labels[order]), fontsize=8)
tracts_axes.set_xticks(numpy.arange(connectivity.number_of_regions))
tracts_axes.set_xticklabels(list(labels[order]), fontsize=8, rotation=90)
def plot_local_connectivity(cortex, cutoff=None):
"""
Display the local connectivity function as a line plot. Four lines are
plotted of the equations defining the LocalConnectivity:
1) black, a 'high' resolution version evaluated out to a 'sufficiently
large distance', ie, this is what you ideally want to represent;
2) green, best case 'reality', based on shortest edge and cutoff
distance;
3) red, worst case 'reality', based on longest edge and cutoff distance;
4) blue, typical case 'reality', based on average edge length and cutoff
distance.
Usage, from demos directory, with tvb in your path ::
import tvb.datatypes.surfaces as surfaces
import plotting_tools
cortex = surfaces.Cortex()
plotting_tools.plot_local_connectivity(cortex, cutoff=60.)
plotting_tools.pyplot.show()
"""
dashes = ['--', # : dashed line -- blue
'-.', # : dash-dot line -- red
':', # : dotted line -- green
'-'] # : solid line -- black
#If necessary, add a default LocalConnectivity to ``local_connectivity``.
if cortex.local_connectivity is None:
LOG.info("local_connectivity is None, adding default LocalConnectivity")
cortex.local_connectivity = cortex.trait["local_connectivity"]
if cutoff:
cortex.local_connectivity.cutoff = cutoff
#We need a cutoff distance to work from...
if cortex.local_connectivity.cutoff is None:
LOG.error("You need to provide a cutoff...")
return
cutoff = cortex.local_connectivity.cutoff
cutoff_2 = 2.0 * cortex.local_connectivity.cutoff
pyplot.figure(num="Local Connectivity Cases")
pyplot.title("Local Connectivity Cases")
# ideally all these lines should overlap
#What we want
hi_res = 1024
step = 2.0 * cutoff_2 / (hi_res - 1)
hi_x = numpy.arange(-cutoff_2, cutoff_2 + step, step)
cortex.local_connectivity.equation.pattern = numpy.abs(hi_x)
pyplot.plot(hi_x, cortex.local_connectivity.equation.pattern, 'k',
linestyle=dashes[-1], linewidth=3)
#What we'll mostly get
avg_res = 2 * int(cutoff / cortex.edge_length_mean)
step = cutoff_2 / (avg_res - 1)
avg_x = numpy.arange(-cutoff, cutoff + step, step)
cortex.local_connectivity.equation.pattern = numpy.abs(avg_x)
pyplot.plot(avg_x, cortex.local_connectivity.equation.pattern, 'b',
linestyle=dashes[0], linewidth=3)
#It can be this bad
worst_res = 2 * int(cutoff / cortex.edge_length_max)
step = cutoff_2 / (worst_res - 1)
worst_x = numpy.arange(-cutoff, cutoff + step, step)
cortex.local_connectivity.equation.pattern = numpy.abs(worst_x)
pyplot.plot(worst_x, cortex.local_connectivity.equation.pattern, 'r',
linestyle=dashes[1], linewidth=3)
#This is as good as it gets...
best_res = 2 * int(cutoff / cortex.edge_length_min)
step = cutoff_2 / (best_res - 1)
best_x = numpy.arange(-cutoff, cutoff + step, step)
cortex.local_connectivity.equation.pattern = numpy.abs(best_x)
pyplot.plot(best_x, cortex.local_connectivity.equation.pattern, 'g',
linestyle=dashes[2], linewidth=3)
#Plot the cutoff
ymin, ymax = pyplot.ylim()
pyplot.plot([-cutoff, -cutoff], [ymin, ymax], "k--")
pyplot.plot([cutoff, cutoff], [ymin, ymax], "k--")
pyplot.xlim([-cutoff_2, cutoff_2])
pyplot.xlabel("Distance from focal point")
pyplot.ylabel("Strength")
pyplot.legend(("Theoretical", "Typical", "Worst", "Best", "Cutoff"))
# set the linewidth of the first legend object
#leg.legendHandles[0].set_linewidth(6.0)
#leg.legendHandles[1].set_linewidth(6.0)
#leg.legendHandles[2].set_linewidth(6.0)
#leg.legendHandles[3].set_linewidth(6.0)
def plot_pattern(pattern_object):
"""
pyplot in 2D the given X, over T.
"""
pyplot.figure(42)
pyplot.subplot(221)
pyplot.plot(pattern_object.spatial_pattern, "k*")
pyplot.title("Space")
#pyplot.plot(pattern_object.space, pattern_object.spatial_pattern, "k*")
pyplot.subplot(223)
pyplot.plot(pattern_object.time.T, pattern_object.temporal_pattern.T)
pyplot.title("Time")
pyplot.subplot(122)
pyplot.imshow(pattern_object(), aspect="auto")
pyplot.colorbar()
pyplot.title("Stimulus")
pyplot.xlabel("Time")
pyplot.ylabel("Space")
#pyplot.show()
def show_me_the_colours():
"""
Create a plot of matplotlibs built-in "named" colours...
"""
colours = matplotlib.colors.cnames.keys()
number_of_colors = len(colours)
colours_fig = pyplot.figure(num="Built-in colours")
rows = int(numpy.ceil(numpy.sqrt(number_of_colors)))
columns = int(numpy.floor(numpy.sqrt(number_of_colors)))
for k in range(number_of_colors):
        ax = colours_fig.add_subplot(rows, columns, k + 1)  # subplot indices are 1-based
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.set_axis_bgcolor(colours[k])
ax.text(0.05, 0.5, colours[k])
def plot_matrix(mat, fig_name='plot_this_matrix', connectivity=None, binary_matrix=False):
"""
An embellished matshow display
"""
#NOTE: I could add more stuff in plot_connectivity, but I rather have
# a dummy function for displaying a pretty matrix with the
# value of each element.
from matplotlib import colors
fig, ax = pyplot.subplots(num=fig_name, figsize=(12,10))
if binary_matrix:
cmap = colors.ListedColormap(['black', 'white'])
bounds=[0,1,2]
norm = colors.BoundaryNorm(bounds, cmap.N)
p = ax.pcolormesh(mat, cmap=cmap, norm=norm, edgecolors='k')
ax.invert_yaxis()
cbar = fig.colorbar(p, cmap=cmap, norm=norm, boundaries=bounds, ticks=[0.5, 1.5])
cbar.ax.set_yticklabels(['no connections', 'connections'], fontsize=24)
else:
fig = pyplot.figure(num=fig_name)
ax = fig.gca()
res = ax.imshow(mat, cmap=pyplot.cm.coolwarm, interpolation='nearest')
fig.colorbar(res)
if connectivity is not None:
order = numpy.arange(connectivity.number_of_regions)
labels = connectivity.region_labels
pyplot.xticks(numpy.arange(connectivity.number_of_regions)+0.5, list(labels[order]), fontsize=10, rotation=90)
pyplot.yticks(numpy.arange(connectivity.number_of_regions)+0.5, list(labels[order]), fontsize=10)
width = mat.shape[0]
height = mat.shape[1]
# for x in xrange(width):
# for y in xrange(height):
# ax.annotate(str(int(mat[x][y])),
# xy=(y, x),
# horizontalalignment='center',
# verticalalignment = 'center',
# fontsize=10)
def plot_3d_centres(xyz):
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
fig = plt.figure(1)
fig.clf()
ax = Axes3D(fig)
ax.plot(xyz[:, 0], xyz[:, 1], xyz[:, 2], 'o', alpha=0.6)
ax.set_xlim([min(xyz[:, 0]), max(xyz[:, 0])])
ax.set_ylim([min(xyz[:, 1]), max(xyz[:, 1])])
ax.set_zlim([min(xyz[:, 2]), max(xyz[:, 2])])
ax.set_xlabel('x [mm]')
ax.set_ylabel('y [mm]')
ax.set_zlabel('z [mm]')
def plot_tri_matrix(mat, figure=None, num='plot_part_of_this_matrix', size=None,
cmap=pyplot.cm.RdBu_r, colourbar=True,
color_anchor=None, node_labels=None, x_tick_rot=0,
title=None):
r"""Creates a lower-triangle of a square matrix. Very often found to display correlations or coherence.
Parameters
----------
mat : square matrix
node_labels : list of strings with the labels to be applied to
the nodes. Defaults to '0','1','2', etc.
fig : a matplotlib figure
cmap : a matplotlib colormap.
title : figure title (eg '$\alpha$')
color_anchor : determines the clipping for the colormap.
If None, the data min, max are used.
If 0, min and max of colormap correspond to max abs(mat)
If (a,b), min and max are set accordingly (a,b)
Returns
-------
fig: a figure object
"""
def channel_formatter(x, pos=None):
thisidx = numpy.clip(int(x), 0, N - 1)
return node_labels[thisidx]
if figure is not None:
fig = figure
else :
if num is None:
fig = pyplot.figure()
else:
fig = pyplot.figure(num=num)
if size is not None:
fig.set_figwidth(size[0])
fig.set_figheight(size[1])
w = fig.get_figwidth()
h = fig.get_figheight()
ax_im = fig.add_subplot(1, 1, 1)
N = mat.shape[0]
idx = numpy.arange(N)
if colourbar:
if IMPORTED_MPL_TOOLKITS:
divider = make_axes_locatable(ax_im)
ax_cb = divider.new_vertical(size="10%", pad=0.1, pack_start=True)
fig.add_axes(ax_cb)
else:
pass
mat_copy = mat.copy()
#Null the upper triangle, including the main diagonal.
idx_null = numpy.triu_indices(mat_copy.shape[0])
mat_copy[idx_null] = numpy.nan
#Min max values
max_val = numpy.nanmax(mat_copy)
min_val = numpy.nanmin(mat_copy)
if color_anchor is None:
color_min = min_val
color_max = max_val
elif color_anchor == 0:
bound = max(abs(max_val), abs(min_val))
color_min = -bound
color_max = bound
else:
color_min = color_anchor[0]
color_max = color_anchor[1]
#The call to imshow produces the matrix plot:
im = ax_im.imshow(mat_copy, origin='upper', interpolation='nearest',
vmin=color_min, vmax=color_max, cmap=cmap)
#Formatting:
ax = ax_im
ax.grid(True)
#Label each of the cells with the row and the column:
if node_labels is not None:
for i in xrange(0, mat_copy.shape[0]):
if i < (mat_copy.shape[0] - 1):
ax.text(i - 0.3, i, node_labels[i], rotation=x_tick_rot)
if i > 0:
ax.text(-1, i + 0.3, node_labels[i],
horizontalalignment='right')
ax.set_axis_off()
ax.set_xticks(numpy.arange(N))
ax.xaxis.set_major_formatter(ticker.FuncFormatter(channel_formatter))
fig.autofmt_xdate(rotation=x_tick_rot)
ax.set_yticks(numpy.arange(N))
ax.set_yticklabels(node_labels)
ax.set_ybound([-0.5, N - 0.5])
ax.set_xbound([-0.5, N - 1.5])
#Make the tick-marks invisible:
for line in ax.xaxis.get_ticklines():
line.set_markeredgewidth(0)
for line in ax.yaxis.get_ticklines():
line.set_markeredgewidth(0)
ax.set_axis_off()
if title is not None:
ax.set_title(title)
if colourbar:
#Set the ticks - if 0 is in the interval of values, set that, as well
#as the min, max values:
if min_val < 0:
ticks = [color_min, min_val, 0, max_val, color_max]
#set the min, mid and max values:
else:
ticks = [color_min, min_val, (color_max- color_min)/2., max_val, color_max]
#colourbar:
if IMPORTED_MPL_TOOLKITS:
cb = fig.colorbar(im, cax=ax_cb, orientation='horizontal',
cmap=cmap,
norm=im.norm,
boundaries=numpy.linspace(color_min, color_max, 256),
ticks=ticks,
format='%.2f')
else:
# the colourbar will be wider than the matrix
cb = fig.colorbar(im, orientation='horizontal',
cmap=cmap,
norm=im.norm,
boundaries=numpy.linspace(color_min, color_max, 256),
ticks=ticks,
format='%.2f')
fig.sca(ax)
return fig
def plot_fast_kde(x, y, kern_nx = None, kern_ny = None, gridsize=(500, 500),
extents=None, nocorrelation=False, weights=None, norm = True, pdf=False, **kwargs):
"""
A faster gaussian kernel density estimate (KDE). Intended for
computing the KDE on a regular grid (different use case than
scipy's original scipy.stats.kde.gaussian_kde()).
Author: Joe Kington
License: MIT License <http://www.opensource.org/licenses/mit-license.php>
Performs a gaussian kernel density estimate over a regular grid using a
convolution of the gaussian kernel with a 2D histogram of the data.
This function is typically several orders of magnitude faster than
scipy.stats.kde.gaussian_kde for large (>1e7) numbers of points and
produces an essentially identical result.
**Input**:
*x*: array
The x-coords of the input data points
*y*: array
The y-coords of the input data points
*kern_nx*: float
size (in units of *x*) of the kernel
*kern_ny*: float
size (in units of *y*) of the kernel
*gridsize*: (Nx , Ny) tuple (default: 500x500)
Size of the output grid
*extents*: (default: extent of input data) A (xmin, xmax, ymin, ymax)
tuple of the extents of output grid
*nocorrelation*: (default: False) If True, the correlation between the
x and y coords will be ignored when preforming the KDE.
*weights*: (default: None) An array of the same shape as x & y that
weighs each sample (x_i, y_i) by each value in weights (w_i).
Defaults to an array of ones the same size as x & y.
    *norm*: boolean (default: True)
If False, the output is only corrected for the kernel. If True,
the result is normalized such that the integral over the area
yields 1.
**Output**:
A gridded 2D kernel density estimate of the input points.
"""
#---- Setup --------------------------------------------------------------
x, y = numpy.asarray(x), numpy.asarray(y)
x, y = numpy.squeeze(x), numpy.squeeze(y)
if x.size != y.size:
raise ValueError('Input x & y arrays must be the same size!')
nx, ny = gridsize
n = x.size
if weights is None:
# Default: Weight all points equally
weights = numpy.ones(n)
else:
weights = numpy.squeeze(numpy.asarray(weights))
if weights.size != x.size:
raise ValueError('Input weights must be an array of the same size'
' as input x & y arrays!')
# Default extents are the extent of the data
if extents is None:
xmin, xmax = x.min(), x.max()
ymin, ymax = y.min(), y.max()
else:
xmin, xmax, ymin, ymax = map(float, extents)
dx = (xmax - xmin) / (nx - 1)
dy = (ymax - ymin) / (ny - 1)
#---- Preliminary Calculations -------------------------------------------
# First convert x & y over to pixel coordinates
# (Avoiding np.digitize due to excessive memory usage!)
xyi = numpy.vstack((x,y)).T
xyi -= [xmin, ymin]
xyi /= [dx, dy]
xyi = numpy.floor(xyi, xyi).T
# Next, make a 2D histogram of x & y
# Avoiding np.histogram2d due to excessive memory usage with many points
grid = sp.sparse.coo_matrix((weights, xyi), shape=(nx, ny)).toarray()
# Calculate the covariance matrix (in pixel coords)
cov = numpy.cov(xyi)
if nocorrelation:
cov[1,0] = 0
cov[0,1] = 0
# Scaling factor for bandwidth
scotts_factor = numpy.power(n, -1.0 / 6) # For 2D
#---- Make the gaussian kernel -------------------------------------------
# First, determine how big the kernel needs to be
std_devs = numpy.diag(numpy.sqrt(cov))
if kern_nx is None or kern_ny is None:
kern_nx, kern_ny = numpy.round(scotts_factor * 2 * numpy.pi * std_devs)
else:
kern_nx = numpy.round(kern_nx / dx)
kern_ny = numpy.round(kern_ny / dy)
# Determine the bandwidth to use for the gaussian kernel
inv_cov = numpy.linalg.inv(cov * scotts_factor**2)
# x & y (pixel) coords of the kernel grid, with <x,y> = <0,0> in center
xx = numpy.arange(kern_nx, dtype=numpy.float) - kern_nx / 2.0
yy = numpy.arange(kern_ny, dtype=numpy.float) - kern_ny / 2.0
xx, yy = numpy.meshgrid(xx, yy)
# Then evaluate the gaussian function on the kernel grid
kernel = numpy.vstack((xx.flatten(), yy.flatten()))
kernel = numpy.dot(inv_cov, kernel) * kernel
kernel = numpy.sum(kernel, axis=0) / 2.0
kernel = numpy.exp(-kernel)
kernel = kernel.reshape((kern_ny, kern_nx))
#---- Produce the kernel density estimate --------------------------------
# Convolve the gaussian kernel with the 2D histogram, producing a gaussian
# kernel density estimate on a regular grid
grid = sp.signal.convolve2d(grid, kernel, mode='same', boundary='fill').T
# Normalization factor to divide result by so that units are in the same
# units as scipy.stats.kde.gaussian_kde's output.
norm_factor = 2 * numpy.pi * cov * scotts_factor**2
norm_factor = numpy.linalg.det(norm_factor)
#norm_factor = n * dx * dy * np.sqrt(norm_factor)
norm_factor = numpy.sqrt(norm_factor)
if norm :
norm_factor *= n * dx * dy
#---- Produce pdf --------------------------------
    if pdf:
        # Integrate the gridded estimate (Riemann sum) so the result is a pdf
        norm_factor = grid.sum() * dx * dy
# Normalize the result
grid /= norm_factor
return grid
#import pdb; pdb.set_trace()
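# Minimal sketch (an illustrative addition, not part of the original module)
# showing the intended use of plot_fast_kde; the sample size and the grid
# resolution below are assumptions chosen for the demo.
def _demo_fast_kde():
    x = numpy.random.normal(size=2000)
    y = 0.5 * x + numpy.random.normal(scale=0.5, size=2000)
    grid = plot_fast_kde(x, y, gridsize=(128, 128))
    pyplot.imshow(grid, origin='lower', aspect='auto')
    pyplot.title("fast KDE demo")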
##----------------------------------------------------------------------------##
##- mayavi based plotting functions -##
##----------------------------------------------------------------------------##
try:
from mayavi import mlab
IMPORTED_MAYAVI = True
except ImportError:
LOG.error("Mayavi is needed for this demo but due to sizing and packaging constraints we are not distributing it. "
"If you want to see the actual plot you should use the github version and install all the required "
"dependencies as described here: (advanced users only)"
"http://docs.thevirtualbrain.com/advanced/link_installation_build.html")
IMPORTED_MAYAVI = False
#raise
if IMPORTED_MAYAVI:
@mlab.animate(delay=41, ui=True)
def surface_timeseries(surface, data, step=1):
"""
"""
fig = mlab.figure(figure="surface_timeseries", fgcolor=(0.5, 0.5, 0.5))
#Plot an initial surface and colourbar #TODO: Change to use plot_surface function, see below.
surf_mesh = mlab.triangular_mesh(surface.vertices[:, 0],
surface.vertices[:, 1],
surface.vertices[:, 2],
surface.triangles,
scalars=data[0, :],
vmin=data.min(), vmax=data.max(),
figure=fig)
mlab.colorbar(object=surf_mesh, orientation="vertical")
#Handle for the surface object and figure
surf = surf_mesh.mlab_source
#Time #TODO: Make actual time rather than points, where/if possible.
tpts = data.shape[0]
time_step = mlab.text(0.85, 0.125, ("0 of %s" % str(tpts)),
width=0.0625, color=(1, 1, 1), figure=fig,
name="counter")
#Movie
k = 0
while 1:
if abs(k) >= tpts:
k = 0
surf.set(scalars=data[k, :])
time_step.set(text=("%s of %s" % (str(k), str(tpts))))
k += step
yield
mlab.show()
#--------------------------------------------------------------------------#
    #TODO: Make this work, possibly with a wrapper function, directly with
# SurfacePattern object... Inner function name plot_surface
def plot_surface(surface, fig=None, name=None, op=1.0, rep='surface'):
"""
"""
if fig is None:
fig = mlab.figure(figure=name, fgcolor=(0.5, 0.5, 0.5))
surf_mesh = mlab.triangular_mesh(surface.vertices[:, 0],
surface.vertices[:, 1],
surface.vertices[:, 2],
surface.triangles,
color=(0.7, 0.67, 0.67),
opacity=op,
representation=rep,
figure=fig)
return surf_mesh
def surface_orientation(surface, normals="triangles", name=None):
"""
"""
fig = mlab.figure(figure=name, fgcolor=(0.5, 0.5, 0.5))
surf_mesh = mlab.triangular_mesh(surface.vertices[:, 0],
surface.vertices[:, 1],
surface.vertices[:, 2],
surface.triangles,
color=(0.7, 0.67, 0.67),
figure=fig)
if normals == "triangles":
surf_orient = mlab.quiver3d(surface.triangle_centres[:, 0],
surface.triangle_centres[:, 1],
surface.triangle_centres[:, 2],
surface.triangle_normals[:, 0],
surface.triangle_normals[:, 1],
surface.triangle_normals[:, 2])
elif normals == "vertices":
surf_orient = mlab.quiver3d(surface.vertices[:, 0],
surface.vertices[:, 1],
surface.vertices[:, 2],
surface.vertex_normals[:, 0],
surface.vertex_normals[:, 1],
surface.vertex_normals[:, 2])
else:
LOG.error("normals must be either 'triangles' or 'vertices'")
return (surf_mesh, surf_orient)
def surface_parcellation(cortex_boundaries, colouring, mapping_colours, colour_rgb, interaction=False):
"""
"""
number_of_vertices = cortex_boundaries.cortex.vertices.shape[0]
number_of_triangles = cortex_boundaries.cortex.triangles.shape[0]
number_of_regions = len(cortex_boundaries.region_neighbours)
alpha = 255
lut = numpy.zeros((number_of_regions, 4), dtype=numpy.uint8)
for k in range(number_of_regions):
lut[k] = numpy.hstack((colour_rgb[mapping_colours[colouring[k]]], alpha))
fig = mlab.figure(figure="surface parcellation", bgcolor=(0.0, 0.0, 0.0), fgcolor=(0.5, 0.5, 0.5))
surf_mesh = mlab.triangular_mesh(cortex_boundaries.cortex.vertices[:number_of_vertices//2, 0],
cortex_boundaries.cortex.vertices[:number_of_vertices//2, 1],
cortex_boundaries.cortex.vertices[:number_of_vertices//2, 2],
cortex_boundaries.cortex.triangles[:number_of_triangles//2, :],
scalars=cortex_boundaries.cortex.region_mapping[:number_of_vertices//2],
figure=fig)
surf_mesh.module_manager.scalar_lut_manager.lut.number_of_colors = number_of_regions
#surf_mesh.module_manager.scalar_lut_manager.lut.table = lut
#TODO: can't get region labels to associate with colorbar...
#mlab.colorbar(object=surf_mesh, orientation="vertical")
x = cortex_boundaries.boundary[:, 0]
y = cortex_boundaries.boundary[:, 1]
z = cortex_boundaries.boundary[:, 2]
bpts = mlab.points3d(x, y, z, color=(0.25, 0.25, 0.25), scale_factor=1)
mlab.show(stop=interaction)
return surf_mesh, bpts
def surface_pattern(surface, vertex_colours, custom_lut = None, foci=None):
"""
Plot a surface and colour it based on a vector of length number of
vertices (vertex_colours).
* How to obtain a pretty picture (from Mayavi's gui):
- set surf_mesh color to rgb(237, 217, 221)
- add a surface module derived from surf_mesh; set 'Actor'
representation to wireframe; colour 'gray'.
- enable contours of scalar_surf
"""
#surf_mesh = plot_surface(surface, name="surface pattern")
fig = mlab.figure(figure="surface pattern", fgcolor=(0.5, 0.5, 0.5))
surf_mesh = mlab.triangular_mesh(surface.vertices[:, 0],
surface.vertices[:, 1],
surface.vertices[:, 2],
surface.triangles,
figure=fig)
sm_obj = surf_mesh.mlab_source
scalar_data = surf_mesh.mlab_source.dataset.point_data
scalar_data.scalars = vertex_colours
scalar_data.scalars.name = 'Scalar data'
scalar_data.update()
scalar_mesh = mlab.pipeline.set_active_attribute(surf_mesh, point_scalars='Scalar data')
scalar_surf = mlab.pipeline.surface(scalar_mesh)
if custom_lut is not None:
# and finally we put this LUT back in the surface object. We could have
# added any 255*4 array rather than modifying an existing LUT.
scalar_surf.module_manager.scalar_lut_manager.lut.table = custom_lut
if foci is not None:
pts = mlab.points3d(foci[:,0],
foci[:,1],
foci[:,2],
scale_factor = 2.,
scale_mode = 'none',
resolution = 5,
opacity=0.01)
mlab.show(stop=True)
return sm_obj
def xmas_balls(connectivity,
labels=True, labels_indices=None,
balls_colormap='Blues',
bgcolor = (0.5, 0.5, 0.5),
node_data=None, node_size=4.,
edge_data=True, edge_color=(0.8, 0.8, 0.8), edge_size=0.2,
text_size=0.042, text_color=(0, 0, 0),
remove_nodes=False, nbunch=[],
remove_edges=False, ebunch=[]):
"""
Plots coloured balls at the region centres of connectivity, colour and
size is determined by a vector of length number of regions (node_data).
Optional: adds the connections between pair of nodes.
"""
mlab.figure(1, bgcolor=bgcolor)
# Get graph
G = nx.from_numpy_matrix(numpy.matrix(connectivity.weights))
# Get the subgraph of nodes in nbunch
if remove_nodes:
G.remove_nodes_from([n for n in G if n not in set(nbunch)])
#G.remove_nodes_from([node for node,degree in G.degree().items() if degree < 2])
if remove_edges:
G.remove_edges_from([e for e in G.edges() if e not in ebunch])
# scalar colors
if node_data is not None:
scalars = node_data
#mlab.colorbar(orientation="vertical")
else:
scalars = numpy.array(G.nodes())*20
pts = mlab.points3d(connectivity.centres[:,0],
connectivity.centres[:,1],
connectivity.centres[:,2],
scalars,
#mask_points=1,
scale_factor = node_size,
scale_mode = 'none',
colormap = balls_colormap,
resolution = 5,
opacity=0.01)
if labels:
if labels_indices is not None:
for i, (idx) in enumerate(labels_indices):
x = connectivity.centres[idx, 0]
y = connectivity.centres[idx, 1]
z = connectivity.centres[idx, 2]
label = mlab.text(x, y, connectivity.region_labels[idx],
z=z,
width=text_size,
name=str(connectivity.region_labels[idx]),
color=text_color)
label.property.shadow = False
else:
for i, (x, y, z) in enumerate(connectivity.centres):
label = mlab.text(x, y, connectivity.region_labels[i],
z=z,
width=text_size,
name=str(connectivity.region_labels[i]),
color=text_color)
label.property.shadow = False
if edge_data:
pts.mlab_source.dataset.lines = numpy.array(G.edges())
tube = mlab.pipeline.tube(pts, tube_radius = edge_size)
mlab.pipeline.surface(tube, color=edge_color, representation='wireframe', opacity=0.3)
#mlab.show()
# stop the scene
#mlab.show(stop=True)
def connectivity_3d(connectivity, order=None, edge_cutoff=None):
"""
Plots a 3D representation of the delayed-connectivity structure.
See Fig. 3 in (Knock et al 2009)
[Nodes x Nodes x Delays]
Original script can be found at:
BrainNetworkModels_3.1/PlottingTools/PlotConnectivity3D.m
"""
fig = mlab.figure(figure="Connectivity 3D", bgcolor=(0.0, 0.0, 0.0))
N = connectivity.number_of_regions // 2
minW = connectivity.weights.min()
maxW = connectivity.weights.max()
if connectivity.delays is None:
connectivity.configure()
minD = connectivity.delays.min()
maxD = connectivity.delays.max()
stepD = (maxD - minD) / 10.
if order is None:
order = numpy.arange(0, N)
if edge_cutoff is None:
edge_cutoff = minW
# colourmap to emphasise large numbers
#MAP = numpy.loadtxt('../plot/colourmaps/BlackToBlue')
#mapstep = 1. / MAP.shape[0]
# Loop over connectivity matrix, colouring and one cube per matrix element
K = []
D = []
M = []
S = []
for k in range(N):
for m in range(N):
if connectivity.weights[k, m] != 0:
if k != m:
#not self connection (diagonal)
if connectivity.weights[k, m] > edge_cutoff:
K.append(k + 2.)
D.append(connectivity.delays[k, m] + stepD)
M.append(m + 2.0)
S.append(connectivity.weights[k, m])
mlab.points3d(K, D, M, S, mode='cube')
mlab.show(stop=True)
#--------------------------------------------------------------------------#
if __name__ == '__main__':
# Do some stuff that tests or makes use of this module...
pass
##- EoF -##
|
rajul/tvb-library
|
tvb/simulator/plot/tools.py
|
Python
|
gpl-2.0
| 37,849
|
[
"Gaussian",
"Mayavi"
] |
69144efd17b94943be1833d4adfe3fa516d2b3a30e440777feaa0cb304a7494d
|
#-*- encoding: utf-8 -*-
from urllib2 import Request, urlopen, URLError
from urllib import urlencode
from cStringIO import StringIO
from gzip import GzipFile
import time, sys, os
import argparse
def getArgs():
parser = argparse.ArgumentParser()
parser.add_argument('url', help="the url to visit")
parser.add_argument('-c', '--cookie', help='cookie string, or cookie @file', type=str)
args = parser.parse_args()
if not args.cookie:
print "请输入cookie:"
args.cookie = sys.stdin.readline().strip()
return args
if __name__ == '__main__':
    args = getArgs()
    print args.url, args.cookie
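# Example invocation (hypothetical):
#   python test.py http://example.com -c "sessionid=abc123"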
|
dlutxx/memo
|
curl/test.py
|
Python
|
mit
| 586
|
[
"VisIt"
] |
cde3d2a7d8a4994ead4307761233d8bae4b3acf60f5f726ff95da6fccfcf8b46
|
# Copyright 2004 by Bob Bussell. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""NOEtools: For predicting NOE coordinates from assignment data.
The input and output are modelled on nmrview peaklists.
This modules is suitable for directly generating an nmrview
peaklist with predicted crosspeaks directly from the
input assignment peaklist.
"""
from . import xpktools
def predictNOE(peaklist, originNuc, detectedNuc, originResNum, toResNum):
"""Predict the i->j NOE position based on self peak (diagonal) assignments
Parameters
----------
peaklist : xprtools.Peaklist
List of peaks from which to derive predictions
originNuc : str
Name of originating nucleus.
originResNum : int
Index of originating residue.
detectedNuc : str
Name of detected nucleus.
toResNum : int
Index of detected residue.
Returns
-------
returnLine : str
The .xpk file entry for the predicted crosspeak.
Examples
--------
Using predictNOE(peaklist,"N15","H1",10,12)
where peaklist is of the type xpktools.peaklist
would generate a .xpk file entry for a crosspeak
that originated on N15 of residue 10 and ended up
as magnetization detected on the H1 nucleus of
residue 12
    Notes
    -----
The initial peaklist is assumed to be diagonal (self peaks only)
    and currently there is no checking done to ensure that this
assumption holds true. Check your peaklist for errors and
off diagonal peaks before attempting to use predictNOE.
"""
returnLine = "" # The modified line to be returned to the caller
datamap = _data_map(peaklist.datalabels)
# Construct labels for keying into dictionary
originAssCol = datamap[originNuc + ".L"] + 1
originPPMCol = datamap[originNuc + ".P"] + 1
detectedPPMCol = datamap[detectedNuc + ".P"] + 1
# Make a list of the data lines involving the detected
if str(toResNum) in peaklist.residue_dict(detectedNuc) \
and str(originResNum) in peaklist.residue_dict(detectedNuc):
detectedList = peaklist.residue_dict(detectedNuc)[str(toResNum)]
originList = peaklist.residue_dict(detectedNuc)[str(originResNum)]
returnLine = detectedList[0]
        # The averages do not depend on a loop variable, so compute them once
        aveDetectedPPM = _col_ave(detectedList, detectedPPMCol)
        aveOriginPPM = _col_ave(originList, originPPMCol)
        originAss = originList[0].split()[originAssCol]
        returnLine = xpktools.replace_entry(returnLine, originAssCol + 1, originAss)
        returnLine = xpktools.replace_entry(returnLine, originPPMCol + 1, aveOriginPPM)
    return returnLine
def _data_map(labelline):
# Generate a map between datalabels and column number
# based on a labelline
i = 0 # A counter
datamap = {} # The data map dictionary
labelList = labelline.split() # Get the label line
# Get the column number for each label
for i in range(len(labelList)):
datamap[labelList[i]] = i
return datamap
def _col_ave(list, col):
# Compute average values from a particular column in a string list
total = 0.0
n = 0
for element in list:
total += float(element.split()[col])
n += 1
return total / n
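# Illustrative note (not part of the original module): _data_map turns a
# label line into a column lookup, e.g.
# >>> _data_map("H1.L H1.P N15.L N15.P")
# {'H1.L': 0, 'H1.P': 1, 'N15.L': 2, 'N15.P': 3}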
|
zjuchenyuan/BioWeb
|
Lib/Bio/NMR/NOEtools.py
|
Python
|
mit
| 3,420
|
[
"Biopython"
] |
ee15a5cc23f96c4f2d2d49756a039077431d16dca663312d64ad53dbb6b5bdef
|
# Copyright (C) 2014 Jian-Ming Tang <jmtang@mailaps.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Figure3D
--------
"""
import math
import numpy
from vtk import *
class Figure3D:
"""
This class creates vtk objects for 3D viewing.
The class elements contain vtkStructuredGrid, vtkActor, vtkRenderer, vtkRenderWindow
"""
def __init__(self, axes, data):
self.vmax = max(data)
self.box = axes[-1]
val = vtkFloatArray()
val.SetVoidArray(data, len(data), 1)
points = vtkPoints()
for x in axes:
for y in axes:
for z in axes:
points.InsertNextPoint(z, y, x)
ng = len(axes)
self.sgrid = vtkStructuredGrid()
self.sgrid.SetDimensions(ng, ng, ng)
self.sgrid.SetPoints(points)
self.sgrid.GetPointData().SetScalars(val)
def set_up_color(self):
color = vtkDoubleArray()
color.SetName("Color")
color.SetNumberOfComponents(3)
Lx = numpy.linspace(-1, 1, 101)
for x in Lx:
for y in Lx:
for z in Lx:
r = math.sqrt(x * x + y * y + z * z)
color.InsertNextTuple3((0.4 - r) * .1, 0, 0)
self.sgrid.GetPointData().SetVectors(color)
def set_up_iso_surface(self, isovalue):
iso = vtkContourFilter()
iso.SetInput(self.sgrid)
iso.SetValue(0, isovalue)
normals = vtkPolyDataNormals()
normals.SetInput(iso.GetOutput())
# normals.SetFeatureAngle(30);
isoMapper = vtkPolyDataMapper()
isoMapper.SetInput(normals.GetOutput())
isoMapper.SetScalarRange(0, self.vmax)
isoMapper.SetScalarModeToUsePointFieldData()
isoMapper.SetColorModeToMapScalars()
isoMapper.SelectColorArray("Color")
self.Actor_iso = vtkActor()
self.Actor_iso.SetMapper(isoMapper)
def add_other_stuff(self):
r = self.box
# add Y axis
self.Actor_axis = vtkAxisActor()
self.Actor_axis.SetPoint1(-r, -r, -r)
self.Actor_axis.SetPoint2(-r, r, -r)
self.Actor_axis.SetAxisTypeToY()
self.Actor_axis.SetMajorStart(-(r // 5) * 5)
self.Actor_axis.SetMinorStart(-math.floor(r))
self.Actor_axis.SetDeltaMajor(5)
self.Actor_axis.SetDeltaMinor(1)
self.Actor_axis.GetProperty().SetColor(0, 0, 0)
self.Actor_axis.GetProperty().SetLineWidth(2)
# add axes
self.Actor_axes = vtkAxesActor()
        tf = vtkTransform()  # the wildcard import already provides vtkTransform
Ll = r * 0.5
lr = r * 0.003
tf.Translate(-r, -r, -r)
self.Actor_axes.SetUserTransform(tf)
self.Actor_axes.SetShaftTypeToCylinder()
self.Actor_axes.SetTotalLength(Ll, Ll, Ll)
self.Actor_axes.SetCylinderRadius(lr)
self.Actor_axes.SetConeRadius(lr * 10)
xca = self.Actor_axes.GetXAxisCaptionActor2D()
self.Actor_axes.SetXAxisLabelText('x')
self.Actor_axes.SetYAxisLabelText('y')
self.Actor_axes.SetZAxisLabelText('z')
self.Actor_axes.SetNormalizedLabelPosition(1.2, 1, 1.2)
tp = vtkTextProperty()
tp.SetColor(0, 0, 0)
self.Actor_axes.GetXAxisCaptionActor2D().SetCaptionTextProperty(tp)
self.Actor_axes.GetYAxisCaptionActor2D().SetCaptionTextProperty(tp)
self.Actor_axes.GetZAxisCaptionActor2D().SetCaptionTextProperty(tp)
# add a bounding box
box = vtkStructuredGridOutlineFilter()
box.SetInput(self.sgrid)
Mapper_box = vtkPolyDataMapper()
Mapper_box.SetInput(box.GetOutput())
self.Actor_box = vtkActor()
self.Actor_box.SetMapper(Mapper_box)
self.Actor_box.GetProperty().SetColor(0, 0, 0)
self.Actor_box.GetProperty().SetLineWidth(1)
def rendering(self):
self.ren = vtkRenderer()
self.ren.AddActor(self.Actor_iso)
self.ren.AddActor(self.Actor_axes)
self.ren.AddActor(self.Actor_axis)
self.ren.AddActor(self.Actor_box)
self.ren.SetBackground(1, 1, 1)
r = self.box
self.ren.ResetCameraClippingRange(-r, r, -r, r, -r, r)
cam = self.ren.GetActiveCamera()
cam.SetFocalPoint(0, 0, 0)
cam.SetPosition(r * 6, r * 2, r * 0.5)
cam.SetViewUp(0, 0, 1)
def show_on_screen(self, title):
self.renWin = vtkRenderWindow()
self.renWin.AddRenderer(self.ren)
self.renWin.SetSize(500, 500)
self.renWin.SetWindowName(title)
iren = vtkRenderWindowInteractor()
iren.SetRenderWindow(self.renWin)
iren.Initialize()
self.renWin.Render()
iren.Start()
def save_png(self, fname):
renderLarge = vtkRenderLargeImage()
renderLarge.SetInput(self.ren)
renderLarge.SetMagnification(1)
writer = vtkPNGWriter()
writer.SetInput(renderLarge.GetOutput())
writer.SetFileName(fname)
writer.Write()
print 'image saved'
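# Hypothetical usage sketch (not in the original file); the axes and the
# isovalue below are assumptions for illustration:
# axes = numpy.linspace(-10, 10, 101)
# data = numpy.zeros(len(axes) ** 3, dtype=numpy.float32)
# fig3d = Figure3D(axes, data)
# fig3d.set_up_color()
# fig3d.set_up_iso_surface(0.1)
# fig3d.add_other_stuff()
# fig3d.rendering()
# fig3d.show_on_screen('demo')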
|
jianmingtang/PIC-tools
|
Python/Figure/Figure3D.py
|
Python
|
gpl-3.0
| 5,619
|
[
"VTK"
] |
142e40ced68dbb1f8c73dcd8d164dd11149a1d7e8b5d242bb4718add1f21bd92
|
##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Kyle A. Beauchamp
# Contributors: Robert McGibbon
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from mdtraj.testing import get_fn, eq
from mdtraj.geometry import alignment
import numpy as np
xyz1 = np.array([[1,0,0],[0,1,0],[0,0,1],[1,1,0],[1,0,1],[0,1,1.0]])
offset = 1.0 * np.random.normal(size=(3))
rotation = np.array([[1,0,0],[0,0,-1],[0,1,0]])
xyz2 = rotation.dot(xyz1.T).T + offset
xyz3 = xyz1 + np.random.normal(size=xyz1.shape)
def test_rmsd_zero():
rmsd_kabsch = alignment.rmsd_kabsch(xyz1, xyz2)
rmsd_qcp = alignment.rmsd_qcp(xyz1, xyz2)
eq(float(rmsd_kabsch), 0.0, decimal=5)
eq(float(rmsd_qcp), 0.0, decimal=5)
def test_rmsd_nonzero():
rmsd_kabsch = alignment.rmsd_kabsch(xyz1, xyz3)
rmsd_qcp = alignment.rmsd_qcp(xyz1, xyz3)
eq(rmsd_kabsch, rmsd_qcp, decimal=5)
def test_transform():
T = alignment.compute_transformation(xyz2, xyz1)
xyz2_prime = T.transform(xyz2)
eq(xyz1, xyz2_prime)
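def test_rmsd_zero_random():
    # Extra check in the same spirit as test_rmsd_zero, on random points;
    # this test is an illustrative addition, not part of the original suite.
    a = np.random.normal(size=(25, 3))
    b = rotation.dot(a.T).T + offset
    eq(float(alignment.rmsd_qcp(a, b)), 0.0, decimal=5)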
|
ctk3b/mdtraj
|
mdtraj/geometry/tests/test_alignment.py
|
Python
|
lgpl-2.1
| 1,884
|
[
"MDTraj"
] |
683a7ea0cf91826da8b8944fdc43d0ff2f6d25882d6a31cae5babc6d68703136
|
from go_to_adjacent_systems import *
from go_somewhere_significant import *
import vsrandom
import launch
import faction_ships
import VS
import Briefing
import universe
import unit
import Director
import quest
import gettext
class defend (Director.Mission):
def __init__ (self,factionname,numsystemsaway, enemyquantity, distance_from_base, escape_distance, creds, defendthis, defend_base,protectivefactionname='',jumps=(),var_to_set='',dynamic_flightgroup='',dynamic_type='', dynamic_defend_fg='',waves=0, greetingText=[_('We will defeat your assets in this battle, privateer...'),_('Have no doubt!')]):
Director.Mission.__init__(self)
self.dedicatedattack=vsrandom.randrange(0,2)
self.arrived=0
self.waves=waves;
self.greetingText=greetingText
self.protectivefaction = protectivefactionname
self.var_to_set=var_to_set
self.quantity=0
self.mplay="all"
self.defendbase = defend_base
self.dynatkfg = dynamic_flightgroup
self.dynatktype = dynamic_type
self.dyndeffg = dynamic_defend_fg
self.attackers = []
self.objective= 0
self.targetiter = 0
self.ship_check_count=0
self.defend = defendthis
self.defend_base = defend_base
self.faction = factionname
self.escdist = escape_distance
minsigdist=unit.minimumSigDistApart()
if (minsigdist*.5<self.escdist):
self.escdist = minsigdist
self.cred=creds
self.respawn=0
self.quantity=enemyquantity
self.savedquantity=enemyquantity
self.distance_from_base=distance_from_base
self.defendee=VS.Unit()
self.difficulty=1
self.you=VS.getPlayer()
name = self.you.getName ()
self.mplay=universe.getMessagePlayer(self.you)
self.adjsys = go_to_adjacent_systems(self.you,numsystemsaway,jumps)
self.adjsys.Print(_("You are in the %s system,"),_("Proceed swiftly to %s."),_("Your arrival point is %s."),"defend",1)
VS.IOmessage (2,"defend",self.mplay,_("And there eliminate any %s starships.") % self.faction)
def SetVarValue (self,value):
if (self.var_to_set!=''):
quest.removeQuest (self.you.isPlayerStarship(),self.var_to_set,value)
def SuccessMission (self):
self.you.addCredits (self.cred)
VS.AdjustRelation(self.you.getFactionName(),self.faction,.03,1)
self.SetVarValue(1)
VS.IOmessage(0,"defend",self.mplay,_("[Computer] Defend mission accomplished"))
if (self.cred>0):
VS.IOmessage(0,"defend",self.mplay,_("[Computer] Bank account has been credited as agreed."))
VS.terminateMission(1)
def FailMission (self):
self.you.addCredits (-self.cred)
VS.AdjustRelation(self.you.getFactionName(),self.faction,-.02,1)
self.SetVarValue(-1)
VS.IOmessage (0,"defend",self.mplay,_("[Computer] Detected failure to protect mission asset."))
VS.IOmessage (0,"defend",self.mplay,_("[Computer] Mission failed!"))
VS.IOmessage (1,"defend",self.mplay,_("[Computer] Bank has been informed of failure to assist asset. They have removed a number of your credits as a penalty to help pay target insurance."))
VS.terminateMission(0)
def NoEnemiesInArea (self,jp):
if (self.adjsys.DestinationSystem()!=VS.getSystemFile()):
return 0
if (self.ship_check_count>=len(self.attackers)):
VS.setCompleteness(self.objective,1.0)
return 1
un= self.attackers[self.ship_check_count]
self.ship_check_count+=1
if (un.isNull() or (un.GetHullPercent()<.7 and self.defendee.getDistance(un)>7000)):
return 0
else:
VS.setObjective(self.objective,_("Destroy the %s")%unit.getUnitFullName(un))
self.ship_check_count=0
return 0
def GenerateEnemies (self,jp,you):
VS.IOmessage (0,"escort mission",self.mplay,_("You must protect %s.") % unit.getUnitFullName(jp,True))
count=0
jp.setMissionRelevant()
VS.addObjective (_("Protect %s from the %s") % (unit.getUnitFullName(jp),self.faction.capitalize().replace("_"," ")))
self.objective = VS.addObjective (_("Destroy All %s Hostiles") % self.faction)
VS.setCompleteness(self.objective,0.0)
print("quantity "+str(self.quantity))
while (count<self.quantity):
L = launch.Launch()
L.fg="Shadow";L.dynfg=self.dynatkfg;
if (self.dynatktype==''):
L.type=faction_ships.getRandomFighter(self.faction)
else:
L.type=self.dynatktype
L.ai="default";L.num=1;L.minradius=2000.0;L.maxradius=4500.0
try:
L.minradius*=faction_ships.launch_distance_factor
L.maxradius*=faction_ships.launch_distance_factor
except:
pass
L.faction=self.faction
launched=L.launch(you)
if (count==0):
self.you.SetTarget(launched)
if (self.defend):
launched.SetTarget (jp)
else:
launched.SetTarget (you)
if (self.dedicatedattack):
launched.setFgDirective('B')
self.attackers += [ launched ]
count+=1
if (self.respawn==0 and len(self.attackers)>0):
self.respawn=1
import universe
universe.greet(self.greetingText,self.attackers[0],you);
else:
VS.IOmessage (0,"escort mission",self.mplay,_("Eliminate all %s ships here") % self.faction)
self.quantity=0
def Execute (self):
if (self.you.isNull() or (self.arrived and self.defendee.isNull())):
VS.IOmessage (0,"defend",self.mplay,_("#ff0000You were unable to arrive in time to help. Mission failed."))
self.SetVarValue(-1)
VS.terminateMission(0)
return
if (not self.adjsys.Execute()):
return
if (not self.arrived):
self.arrived=1
tempfaction=''
if (self.defend_base):
tempfaction=self.protectivefaction
if (tempfaction==''):
tempfaction = faction_ships.get_enemy_of(self.faction)
self.adjsys=go_somewhere_significant (self.you,self.defend_base or self.defend,self.distance_from_base,self.defend,tempfaction,self.dyndeffg,1,not self.defend_base)
self.adjsys.Print (_("You must visit the %s"),"defend","near the %s", 0)
self.defendee=self.adjsys.SignificantUnit()
else:
if (self.defendee.isNull ()):
if (self.defend):
                    self.FailMission()
else:
self.SuccessMission()
return
else:
if (self.quantity>0):
self.GenerateEnemies (self.defendee,self.you)
if (self.ship_check_count==0 and self.dedicatedattack):
if (self.targetiter>=len(self.attackers)):
self.targetiter=0
else:
un = self.attackers[self.targetiter]
if (not un.isNull()):
if (self.defend):# if (not un.isNull()
if (un.getName() in faction_ships.isBomber):
if (self.quantity >= faction_ships.isBomber[un.getName()]):
un.SetTarget (self.defendee)
else:
un.setFgDirective('b')
else:
un.setFgDirective('b')
else:
un.SetTarget (self.you)
self.targetiter=self.targetiter+1
if (self.NoEnemiesInArea (self.defendee)):
if (self.waves>0):
self.quantity=self.savedquantity
self.waves-=1
else:
self.SuccessMission()
def initbriefing(self):
print("ending briefing")
def loopbriefing(self):
print("loop briefing")
Briefing.terminate();
def endbriefing(self):
print("ending briefing")
def initrandom(factionname,numsysaway,minenquant,maxenquant,credperen,defendit,defend_base,p_faction='',jumps=(),var_to_set=''):
    enq=vsrandom.uniform(minenquant,maxenquant)
return defend(factionname,numsysaway,enq,8000.0,100000.0,enq*credperen,defendit,defend_base,p_faction,jumps,var_to_set)
|
costalfy/Vega-Strike
|
data/modules/missions/defend.py
|
Python
|
gpl-2.0
| 8,780
|
[
"VisIt"
] |
f3d9ee8fa82faeccab6e64b76b6a93e9e7a7d8f21012528b15724a4c0d4f7992
|
# -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from html5lib import treebuilders, inputstream
from xhtml2pdf.default import TAGS, STRING, INT, BOOL, SIZE, COLOR, FILE
from xhtml2pdf.default import BOX, POS, MUST, FONT
from xhtml2pdf.util import getSize, getBool, toList, getColor, getAlign
from xhtml2pdf.util import getBox, getPos, pisaTempFile
from reportlab.platypus.doctemplate import NextPageTemplate, FrameBreak
from reportlab.platypus.flowables import PageBreak, KeepInFrame
from xhtml2pdf.xhtml2pdf_reportlab import PmlRightPageBreak, PmlLeftPageBreak
from xhtml2pdf.tags import * # TODO: Kill wild import!
from xhtml2pdf.tables import * # TODO: Kill wild import!
from xhtml2pdf.util import * # TODO: Kill wild import!
from xml.dom import Node
import copy
import html5lib
import logging
import re
import types
import xhtml2pdf.w3c.cssDOMElementInterface as cssDOMElementInterface
import xml.dom.minidom
CSSAttrCache = {}
log = logging.getLogger("xhtml2pdf")
rxhttpstrip = re.compile("https?://[^/]+(.*)", re.M | re.I)
class AttrContainer(dict):
def __getattr__(self, name):
try:
return dict.__getattr__(self, name)
except:
return self[name]
def pisaGetAttributes(c, tag, attributes):
global TAGS
attrs = {}
if attributes:
for k, v in attributes.items():
try:
attrs[str(k)] = str(v) # XXX no Unicode! Reportlab fails with template names
except:
attrs[k] = v
nattrs = {}
if tag in TAGS:
block, adef = TAGS[tag]
adef["id"] = STRING
# print block, adef
for k, v in adef.iteritems():
nattrs[k] = None
# print k, v
# defaults, if present
if type(v) == types.TupleType:
if v[1] == MUST:
if k not in attrs:
log.warn(c.warning("Attribute '%s' must be set!", k))
nattrs[k] = None
continue
nv = attrs.get(k, v[1])
dfl = v[1]
v = v[0]
else:
nv = attrs.get(k, None)
dfl = None
if nv is not None:
if type(v) == types.ListType:
nv = nv.strip().lower()
if nv not in v:
#~ raise PML_EXCEPTION, "attribute '%s' of wrong value, allowed is one of: %s" % (k, repr(v))
log.warn(c.warning("Attribute '%s' of wrong value, allowed is one of: %s", k, repr(v)))
nv = dfl
elif v == BOOL:
nv = nv.strip().lower()
nv = nv in ("1", "y", "yes", "true", str(k))
elif v == SIZE:
try:
nv = getSize(nv)
except:
log.warn(c.warning("Attribute '%s' expects a size value", k))
elif v == BOX:
nv = getBox(nv, c.pageSize)
elif v == POS:
nv = getPos(nv, c.pageSize)
elif v == INT:
nv = int(nv)
elif v == COLOR:
nv = getColor(nv)
elif v == FILE:
nv = c.getFile(nv)
elif v == FONT:
nv = c.getFontName(nv)
nattrs[k] = nv
return AttrContainer(nattrs)
attrNames = '''
color
font-family
font-size
font-weight
font-style
text-decoration
line-height
background-color
display
margin-left
margin-right
margin-top
margin-bottom
padding-left
padding-right
padding-top
padding-bottom
border-top-color
border-top-style
border-top-width
border-bottom-color
border-bottom-style
border-bottom-width
border-left-color
border-left-style
border-left-width
border-right-color
border-right-style
border-right-width
text-align
vertical-align
width
height
zoom
page-break-after
page-break-before
list-style-type
list-style-image
white-space
text-indent
-pdf-page-break
-pdf-frame-break
-pdf-next-page
-pdf-keep-with-next
-pdf-outline
-pdf-outline-level
-pdf-outline-open
-pdf-line-spacing
-pdf-keep-in-frame-mode
-pdf-word-wrap
'''.strip().split()
def getCSSAttr(self, cssCascade, attrName, default=NotImplemented):
if attrName in self.cssAttrs:
return self.cssAttrs[attrName]
try:
result = cssCascade.findStyleFor(self.cssElement, attrName, default)
except LookupError:
result = None
# XXX Workaround for inline styles
try:
style = self.cssStyle
except:
style = self.cssStyle = cssCascade.parser.parseInline(self.cssElement.getStyleAttr() or '')[0]
if attrName in style:
result = style[attrName]
if result == 'inherit':
if hasattr(self.parentNode, 'getCSSAttr'):
result = self.parentNode.getCSSAttr(cssCascade, attrName, default)
elif default is not NotImplemented:
return default
raise LookupError("Could not find inherited CSS attribute value for '%s'" % (attrName,))
if result is not None:
self.cssAttrs[attrName] = result
return result
#TODO: Monkeypatching standard lib should go away.
xml.dom.minidom.Element.getCSSAttr = getCSSAttr
def getCSSAttrCacheKey(node):
_cl = _id = _st = ''
for k, v in node.attributes.items():
if k == 'class':
_cl = v
elif k == 'id':
_id = v
elif k == 'style':
_st = v
return "%s#%s#%s#%s" % (id(node.parentNode), _cl, _id, _st)
def CSSCollect(node, c):
#node.cssAttrs = {}
#return node.cssAttrs
if c.css:
_key = getCSSAttrCacheKey(node)
if hasattr(node.parentNode, "tagName"):
if node.parentNode.tagName.lower() != "html":
CachedCSSAttr = CSSAttrCache.get(_key, None)
if CachedCSSAttr is not None:
return CachedCSSAttr
node.cssElement = cssDOMElementInterface.CSSDOMElementInterface(node)
node.cssAttrs = {}
# node.cssElement.onCSSParserVisit(c.cssCascade.parser)
cssAttrMap = {}
for cssAttrName in attrNames:
try:
cssAttrMap[cssAttrName] = node.getCSSAttr(c.cssCascade, cssAttrName)
#except LookupError:
# pass
except Exception: # TODO: Kill this catch-all!
log.debug("CSS error '%s'", cssAttrName, exc_info=1)
CSSAttrCache[_key] = node.cssAttrs
return node.cssAttrs
def CSS2Frag(c, kw, isBlock):
# COLORS
if "color" in c.cssAttr:
c.frag.textColor = getColor(c.cssAttr["color"])
if "background-color" in c.cssAttr:
c.frag.backColor = getColor(c.cssAttr["background-color"])
# FONT SIZE, STYLE, WEIGHT
if "font-family" in c.cssAttr:
c.frag.fontName = c.getFontName(c.cssAttr["font-family"])
if "font-size" in c.cssAttr:
# XXX inherit
c.frag.fontSize = max(getSize("".join(c.cssAttr["font-size"]), c.frag.fontSize, c.baseFontSize), 1.0)
if "line-height" in c.cssAttr:
leading = "".join(c.cssAttr["line-height"])
c.frag.leading = getSize(leading, c.frag.fontSize)
c.frag.leadingSource = leading
else:
c.frag.leading = getSize(c.frag.leadingSource, c.frag.fontSize)
if "-pdf-line-spacing" in c.cssAttr:
c.frag.leadingSpace = getSize("".join(c.cssAttr["-pdf-line-spacing"]))
# print "line-spacing", c.cssAttr["-pdf-line-spacing"], c.frag.leading
if "font-weight" in c.cssAttr:
value = c.cssAttr["font-weight"].lower()
if value in ("bold", "bolder", "500", "600", "700", "800", "900"):
c.frag.bold = 1
else:
c.frag.bold = 0
for value in toList(c.cssAttr.get("text-decoration", "")):
if "underline" in value:
c.frag.underline = 1
if "line-through" in value:
c.frag.strike = 1
if "none" in value:
c.frag.underline = 0
c.frag.strike = 0
if "font-style" in c.cssAttr:
value = c.cssAttr["font-style"].lower()
if value in ("italic", "oblique"):
c.frag.italic = 1
else:
c.frag.italic = 0
if "white-space" in c.cssAttr:
# normal | pre | nowrap
c.frag.whiteSpace = str(c.cssAttr["white-space"]).lower()
# ALIGN & VALIGN
if "text-align" in c.cssAttr:
c.frag.alignment = getAlign(c.cssAttr["text-align"])
if "vertical-align" in c.cssAttr:
c.frag.vAlign = c.cssAttr["vertical-align"]
# HEIGHT & WIDTH
if "height" in c.cssAttr:
c.frag.height = "".join(toList(c.cssAttr["height"])) # XXX Relative is not correct!
if c.frag.height in ("auto",):
c.frag.height = None
if "width" in c.cssAttr:
c.frag.width = "".join(toList(c.cssAttr["width"])) # XXX Relative is not correct!
if c.frag.width in ("auto",):
c.frag.width = None
# ZOOM
if "zoom" in c.cssAttr:
zoom = "".join(toList(c.cssAttr["zoom"])) # XXX Relative is not correct!
if zoom.endswith("%"):
zoom = float(zoom[: - 1]) / 100.0
c.frag.zoom = float(zoom)
# MARGINS & LIST INDENT, STYLE
if isBlock:
if "margin-top" in c.cssAttr:
c.frag.spaceBefore = getSize(c.cssAttr["margin-top"], c.frag.fontSize)
if "margin-bottom" in c.cssAttr:
c.frag.spaceAfter = getSize(c.cssAttr["margin-bottom"], c.frag.fontSize)
if "margin-left" in c.cssAttr:
c.frag.bulletIndent = kw["margin-left"] # For lists
kw["margin-left"] += getSize(c.cssAttr["margin-left"], c.frag.fontSize)
c.frag.leftIndent = kw["margin-left"]
if "margin-right" in c.cssAttr:
kw["margin-right"] += getSize(c.cssAttr["margin-right"], c.frag.fontSize)
c.frag.rightIndent = kw["margin-right"]
if "text-indent" in c.cssAttr:
c.frag.firstLineIndent = getSize(c.cssAttr["text-indent"], c.frag.fontSize)
if "list-style-type" in c.cssAttr:
c.frag.listStyleType = str(c.cssAttr["list-style-type"]).lower()
if "list-style-image" in c.cssAttr:
c.frag.listStyleImage = c.getFile(c.cssAttr["list-style-image"])
# PADDINGS
if isBlock:
if "padding-top" in c.cssAttr:
c.frag.paddingTop = getSize(c.cssAttr["padding-top"], c.frag.fontSize)
if "padding-bottom" in c.cssAttr:
c.frag.paddingBottom = getSize(c.cssAttr["padding-bottom"], c.frag.fontSize)
if "padding-left" in c.cssAttr:
c.frag.paddingLeft = getSize(c.cssAttr["padding-left"], c.frag.fontSize)
if "padding-right" in c.cssAttr:
c.frag.paddingRight = getSize(c.cssAttr["padding-right"], c.frag.fontSize)
# BORDERS
if isBlock:
if "border-top-width" in c.cssAttr:
c.frag.borderTopWidth = getSize(c.cssAttr["border-top-width"], c.frag.fontSize)
if "border-bottom-width" in c.cssAttr:
c.frag.borderBottomWidth = getSize(c.cssAttr["border-bottom-width"], c.frag.fontSize)
if "border-left-width" in c.cssAttr:
c.frag.borderLeftWidth = getSize(c.cssAttr["border-left-width"], c.frag.fontSize)
if "border-right-width" in c.cssAttr:
c.frag.borderRightWidth = getSize(c.cssAttr["border-right-width"], c.frag.fontSize)
if "border-top-style" in c.cssAttr:
c.frag.borderTopStyle = c.cssAttr["border-top-style"]
if "border-bottom-style" in c.cssAttr:
c.frag.borderBottomStyle = c.cssAttr["border-bottom-style"]
if "border-left-style" in c.cssAttr:
c.frag.borderLeftStyle = c.cssAttr["border-left-style"]
if "border-right-style" in c.cssAttr:
c.frag.borderRightStyle = c.cssAttr["border-right-style"]
if "border-top-color" in c.cssAttr:
c.frag.borderTopColor = getColor(c.cssAttr["border-top-color"])
if "border-bottom-color" in c.cssAttr:
c.frag.borderBottomColor = getColor(c.cssAttr["border-bottom-color"])
if "border-left-color" in c.cssAttr:
c.frag.borderLeftColor = getColor(c.cssAttr["border-left-color"])
if "border-right-color" in c.cssAttr:
c.frag.borderRightColor = getColor(c.cssAttr["border-right-color"])
def pisaPreLoop(node, context, collect=False):
"""
Collect all CSS definitions
"""
data = u""
if node.nodeType == Node.TEXT_NODE and collect:
data = node.data
elif node.nodeType == Node.ELEMENT_NODE:
name = node.tagName.lower()
if name in ("style", "link"):
attr = pisaGetAttributes(context, name, node.attributes)
media = [x.strip() for x in attr.media.lower().split(",") if x.strip()]
if attr.get("type", "").lower() in ("", "text/css") and \
(not media or "all" in media or "print" in media or "pdf" in media):
if name == "style":
for node in node.childNodes:
data += pisaPreLoop(node, context, collect=True)
context.addCSS(data)
return u""
if name == "link" and attr.href and attr.rel.lower() == "stylesheet":
# print "CSS LINK", attr
context.addCSS('\n@import "%s" %s;' % (attr.href, ",".join(media)))
for node in node.childNodes:
result = pisaPreLoop(node, context, collect=collect)
if collect:
data += result
return data
def pisaLoop(node, context, path=None, **kw):
if path is None:
path = []
# Initialize KW
if not kw:
kw = {
"margin-top": 0,
"margin-bottom": 0,
"margin-left": 0,
"margin-right": 0,
}
else:
kw = copy.copy(kw)
# indent = len(path) * " " # only used for debug print statements
# TEXT
if node.nodeType == Node.TEXT_NODE:
# print indent, "#", repr(node.data) #, context.frag
context.addFrag(node.data)
# context.text.append(node.value)
# ELEMENT
elif node.nodeType == Node.ELEMENT_NODE:
node.tagName = node.tagName.replace(":", "").lower()
if node.tagName in ("style", "script"):
return
path = copy.copy(path) + [node.tagName]
# Prepare attributes
attr = pisaGetAttributes(context, node.tagName, node.attributes)
# log.debug(indent + "<%s %s>" % (node.tagName, attr) + repr(node.attributes.items())) #, path
# Calculate styles
context.cssAttr = CSSCollect(node, context)
context.node = node
# Block?
PAGE_BREAK = 1
PAGE_BREAK_RIGHT = 2
PAGE_BREAK_LEFT = 3
pageBreakAfter = False
frameBreakAfter = False
display = context.cssAttr.get("display", "inline").lower()
# print indent, node.tagName, display, context.cssAttr.get("background-color", None), attr
isBlock = (display == "block")
if isBlock:
context.addPara()
# Page break by CSS
if "-pdf-next-page" in context.cssAttr:
context.addStory(NextPageTemplate(str(context.cssAttr["-pdf-next-page"])))
if "-pdf-page-break" in context.cssAttr:
if str(context.cssAttr["-pdf-page-break"]).lower() == "before":
context.addStory(PageBreak())
if "-pdf-frame-break" in context.cssAttr:
if str(context.cssAttr["-pdf-frame-break"]).lower() == "before":
context.addStory(FrameBreak())
if str(context.cssAttr["-pdf-frame-break"]).lower() == "after":
frameBreakAfter = True
if "page-break-before" in context.cssAttr:
if str(context.cssAttr["page-break-before"]).lower() == "always":
context.addStory(PageBreak())
if str(context.cssAttr["page-break-before"]).lower() == "right":
context.addStory(PageBreak())
context.addStory(PmlRightPageBreak())
if str(context.cssAttr["page-break-before"]).lower() == "left":
context.addStory(PageBreak())
context.addStory(PmlLeftPageBreak())
if "page-break-after" in context.cssAttr:
if str(context.cssAttr["page-break-after"]).lower() == "always":
pageBreakAfter = PAGE_BREAK
if str(context.cssAttr["page-break-after"]).lower() == "right":
pageBreakAfter = PAGE_BREAK_RIGHT
if str(context.cssAttr["page-break-after"]).lower() == "left":
pageBreakAfter = PAGE_BREAK_LEFT
if display == "none":
# print "none!"
return
# Translate CSS to frags
# Save previous frag styles
context.pushFrag()
# Map styles to Reportlab fragment properties
CSS2Frag(context, kw, isBlock)
# EXTRAS
if "-pdf-keep-with-next" in context.cssAttr:
context.frag.keepWithNext = getBool(context.cssAttr["-pdf-keep-with-next"])
if "-pdf-outline" in context.cssAttr:
context.frag.outline = getBool(context.cssAttr["-pdf-outline"])
if "-pdf-outline-level" in context.cssAttr:
context.frag.outlineLevel = int(context.cssAttr["-pdf-outline-level"])
if "-pdf-outline-open" in context.cssAttr:
context.frag.outlineOpen = getBool(context.cssAttr["-pdf-outline-open"])
if "-pdf-word-wrap" in context.cssAttr:
context.frag.wordWrap = context.cssAttr["-pdf-word-wrap"]
# handle keep-in-frame
keepInFrameMode = None
keepInFrameMaxWidth = 0
keepInFrameMaxHeight = 0
if "-pdf-keep-in-frame-mode" in context.cssAttr:
value = str(context.cssAttr["-pdf-keep-in-frame-mode"]).strip().lower()
if value in ("shrink", "error", "overflow", "truncate"):
keepInFrameMode = value
if "-pdf-keep-in-frame-max-width" in context.cssAttr:
keepInFrameMaxWidth = getSize("".join(context.cssAttr["-pdf-keep-in-frame-max-width"]))
if "-pdf-keep-in-frame-max-height" in context.cssAttr:
keepInFrameMaxHeight = getSize("".join(context.cssAttr["-pdf-keep-in-frame-max-height"]))
# ignore nested keep-in-frames, tables have their own KIF handling
keepInFrame = keepInFrameMode is not None and context.keepInFrameIndex is None
if keepInFrame:
# keep track of current story index, so we can wrap everything
# added after this point in a KeepInFrame
context.keepInFrameIndex = len(context.story)
# BEGIN tag
klass = globals().get("pisaTag%s" % node.tagName.replace(":", "").upper(), None)
obj = None
# Static block
elementId = attr.get("id", None)
staticFrame = context.frameStatic.get(elementId, None)
if staticFrame:
context.frag.insideStaticFrame += 1
oldStory = context.swapStory()
# Tag specific operations
if klass is not None:
obj = klass(node, attr)
obj.start(context)
# Visit child nodes
context.fragBlock = fragBlock = copy.copy(context.frag)
for nnode in node.childNodes:
pisaLoop(nnode, context, path, **kw)
context.fragBlock = fragBlock
# END tag
if obj:
obj.end(context)
# Block?
if isBlock:
context.addPara()
# XXX Buggy!
# Page break by CSS
if pageBreakAfter:
context.addStory(PageBreak())
if pageBreakAfter == PAGE_BREAK_RIGHT:
context.addStory(PmlRightPageBreak())
if pageBreakAfter == PAGE_BREAK_LEFT:
context.addStory(PmlLeftPageBreak())
if frameBreakAfter:
context.addStory(FrameBreak())
if keepInFrame:
# get all content added after start of -pdf-keep-in-frame and wrap
# it in a KeepInFrame
substory = context.story[context.keepInFrameIndex:]
context.story = context.story[:context.keepInFrameIndex]
context.story.append(
KeepInFrame(
content=substory,
maxWidth=keepInFrameMaxWidth,
maxHeight=keepInFrameMaxHeight))
context.keepInFrameIndex = None
# Static block, END
if staticFrame:
context.addPara()
for frame in staticFrame:
frame.pisaStaticStory = context.story
context.swapStory(oldStory)
context.frag.insideStaticFrame -= 1
# context.debug(1, indent, "</%s>" % (node.tagName))
# Reset frag style
context.pullFrag()
# Unknown or not handled
else:
# context.debug(1, indent, "???", node, node.nodeType, repr(node))
# Loop over children
for node in node.childNodes:
pisaLoop(node, context, path, **kw)
def pisaParser(src, context, default_css="", xhtml=False, encoding=None, xml_output=None):
"""
- Parse HTML and get miniDOM
- Extract CSS informations, add default CSS, parse CSS
- Handle the document DOM itself and build reportlab story
- Return Context object
"""
global CSSAttrCache  # reset the per-document cache; without the global this would only bind a local
CSSAttrCache = {}
if xhtml:
#TODO: XHTMLParser doesn't seem to exist...
parser = html5lib.XHTMLParser(tree=treebuilders.getTreeBuilder("dom"))
else:
parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
if type(src) in types.StringTypes:
if type(src) is types.UnicodeType:
encoding = "utf8"
src = src.encode(encoding)
src = pisaTempFile(src, capacity=context.capacity)
# Test for the restrictions of html5lib
if encoding:
# Workaround for html5lib<0.11.1
if hasattr(inputstream, "isValidEncoding"):
if encoding.strip().lower() == "utf8":
encoding = "utf-8"
if not inputstream.isValidEncoding(encoding):
log.error("%r is not a valid encoding e.g. 'utf8' is not valid but 'utf-8' is!", encoding)
else:
if inputstream.codecName(encoding) is None:
log.error("%r is not a valid encoding", encoding)
document = parser.parse(
src,
encoding=encoding)
if xml_output:
xml_output.write(document.toprettyxml(encoding="utf8"))
if default_css:
context.addCSS(default_css)
pisaPreLoop(document, context)
#try:
context.parseCSS()
#except:
# context.cssText = DEFAULT_CSS
# context.parseCSS()
# context.debug(9, pprint.pformat(context.css))
pisaLoop(document, context)
return context
# Shortcuts
HTML2PDF = pisaParser
def XHTML2PDF(*a, **kw):
kw["xhtml"] = True
return HTML2PDF(*a, **kw)
XML2PDF = XHTML2PDF
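# A minimal usage sketch (an assumption, not part of this module: it presumes
# xhtml2pdf.context.pisaContext can be constructed from a base path):
#
#     from xhtml2pdf.context import pisaContext
#     context = pisaContext(".")
#     context = pisaParser("<p>Hello <b>world</b></p>", context)
#     # context.story now holds the reportlab flowables built from the DOM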
| Bounder/xhtml2pdf | xhtml2pdf/parser.py | Python | apache-2.0 | 24,031 | ["VisIt"] | 0849a42763bbf988b52b3f5dd2989cd510452eb327cc872d01edcf97c214a2c0 |
"""
Tests PatternGenerator (position and orientation), and some of the
basic patterns.
Originally written as a unit test as part of the Topographica project.
"""
import os
import unittest
cwd = os.path.abspath(os.path.split(__file__)[0])
import param
import numpy as np
from numpy.testing import assert_array_equal
from holoviews.core.boundingregion import BoundingBox
from imagen import Constant,PatternGenerator
from imagen import Rectangle,Gaussian,Composite,Selector
import numbergen
class TestPatternGenerator(unittest.TestCase):
def test_a_basic_patterngenerator(self):
pattern_bounds = BoundingBox(points=((0.3,0.2),(0.5,0.5)))
pattern_target = np.array([[1,1],
[1,1],
[1,1]])
r = Rectangle(bounds=pattern_bounds,xdensity=10,
ydensity=10,aspect_ratio=1,size=1,smoothing=0.0)
assert_array_equal(r(),pattern_target)
def test_constant(self):
"""
Constant overrides PatternGenerator's usual matrix creation.
"""
pattern_bounds = BoundingBox(points=((0.3,0.2),(0.5,0.5)))
pattern_target = np.array([[1,1],
[1,1],
[1,1]])
c = Constant(bounds=pattern_bounds,xdensity=10.0,ydensity=10)
assert_array_equal(c(),pattern_target)
def test_position(self):
"""
Test that a pattern is drawn correctly at different
locations.
"""
initial = np.array([[0,0,0,0],
[0,1,1,0],
[0,1,1,0],
[0,0,0,0]])
r = Rectangle(bounds=BoundingBox(radius=2),xdensity=1,
ydensity=1,aspect_ratio=1,size=2,smoothing=0.0)
assert_array_equal(r(),initial)
### x offset
x_offset = np.array([[0,0,0,0],
[0,0,1,1],
[0,0,1,1],
[0,0,0,0]])
assert_array_equal(r(x=1),x_offset)
### y offset
y_offset = np.rot90(x_offset)
assert_array_equal(r(y=1),y_offset)
### x and y offset
target = np.array([[0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0],
[1,1,0,0,0,0,0,0,0,0],
[1,1,0,0,0,0,0,0,0,0],
[1,1,0,0,0,0,0,0,0,0],
[1,1,0,0,0,0,0,0,0,0]])
width = 0.2
height = 0.4
r = Rectangle(bounds=BoundingBox(radius=0.5),
xdensity=10,ydensity=10,smoothing=0.0,
aspect_ratio=width/height,size=height)
assert_array_equal(r(x=-0.4,y=-0.3),target)
### x and y offset with bounds offset by the same
target = np.array([[0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,1,1,0,0,0,0],
[0,0,0,0,1,1,0,0,0,0],
[0,0,0,0,1,1,0,0,0,0],
[0,0,0,0,1,1,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0]])
width = 0.2
height = 0.4
bounds = BoundingBox(points=((-0.9,-0.8),(0.1,0.2)))
r = Rectangle(bounds=bounds,xdensity=10,ydensity=10,smoothing=0.0,
aspect_ratio=width/height,size=height)
assert_array_equal(r(x=-0.4,y=-0.3),target)
def test_orientation_and_rotation(self):
"""
Test that a pattern is drawn with the correct orientation,
and is rotated correctly.
"""
### Test initial orientation and 90-degree rotation
target = np.array([[0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0]])
bounds = BoundingBox(radius=0.3)
xdensity = 10
ydensity = 10
width = 2.0/xdensity
height = 4.0/ydensity
rect = Rectangle(size=height,
aspect_ratio=width/height,smoothing=0.0,
xdensity=xdensity,ydensity=ydensity,bounds=bounds)
assert_array_equal(rect(),target)
assert_array_equal(rect(orientation=np.pi/2),np.rot90(target))
### 45-degree rotation about the origin
rot_45 = np.array([[0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0]])
assert_array_equal(rect(orientation=np.pi/4),rot_45)
### 45-degree rotation that's not about the origin
rot_45_offset = np.array([[0, 1, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0],
[0, 1, 1, 1, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]])
assert_array_equal(rect(x=-1.0/xdensity,y=1.0/ydensity,orientation=np.pi/4),
rot_45_offset)
def test_composite_pattern_basic(self):
"""
Test that a composite pattern consisting of just one Gaussian
is the same as that actual Gaussian pattern, and that a
composite pattern of two rectangles is the same as adding the
two individual matrices.
"""
bbox=BoundingBox(radius=0.5)
g = Gaussian(size=0.2,aspect_ratio=0.5,orientation=0,x=0.2,y=-0.03)
c = Composite(generators=[g],bounds=bbox,xdensity=7,ydensity=7)
assert_array_equal(g(bounds=bbox,xdensity=7,ydensity=7),c())
r1=Rectangle(size=0.2,aspect_ratio=1,x=0.3,y=0.3,orientation=0,smoothing=0.0)
r2=Rectangle(size=0.2,aspect_ratio=1,x=-0.3,y=-0.3,orientation=0,bounds=BoundingBox(radius=0.8),xdensity=2,smoothing=0.0)
c_true = r1(bounds=bbox,xdensity=7,ydensity=7)+r2(bounds=bbox,xdensity=7,ydensity=7)
c = Composite(generators=[r1,r2],bounds=bbox,xdensity=7,ydensity=7)
assert_array_equal(c(),c_true)
def test_composite_pattern_moves(self):
"""
Test that moving a composite pattern yields the correct pattern.
"""
bbox=BoundingBox(radius=0.5)
g = Gaussian(size=0.2,aspect_ratio=0.5,orientation=np.pi/3,x=0,y=0)
c = Composite(generators=[g],x=-0.3,y=0.4,xdensity=4,ydensity=4,bounds=bbox)
g_moved = g(x=-0.3,y=0.4,xdensity=4,ydensity=4,bounds=bbox)
assert_array_equal(c(),g_moved)
# Should also test rotating, resizing...
def test_bug__dynamic_param_advanced_by_repr(self):
"""Check for bug where repr of a PatternGenerator causes a DynamicNumber to change."""
# CEB: can probably remove this test now we have time-controlled dynamic parameters
p=PatternGenerator(x=numbergen.UniformRandom(lbound=-1,ubound=1,seed=1))
with param.Dynamic.time_fn as t:
t(0)
x0 = p.x
t(1)
x1 = p.x
self.assertNotEqual(x0,x1) # check we have setup something that actually changes
x2 = p.inspect_value('x')
repr(p)
x3 = p.inspect_value('x')
self.assertEqual(x2,x3) # value of x should not have been changed by repr(p)
# CB: does not test most features of Selector!
class TestSelector(unittest.TestCase):
def setUp(self):
self.g1 = Gaussian(x=numbergen.UniformRandom())
self.g2 = Gaussian(x=numbergen.UniformRandom())
self.s = Selector(generators=[self.g1,self.g2])
self.s.set_dynamic_time_fn(None,'generators')
def test_dynamic_index(self):
"""index should always vary"""
self.assertNotEqual(self.s.index,self.s.index)
def test_dynamic_inheritance(self):
"""time_fn should have been applied to subpatterns"""
self.assertNotEqual(self.g1.x,self.g1.x)
if __name__ == "__main__":
import nose
nose.runmodule()
| ioam/imagen | tests/testpatterngenerator.py | Python | bsd-3-clause | 8,450 | ["Gaussian"] | 634d0d9bf720e0f5efb29d4b74b539af6d58f53568e9ef9bcf2950efc97c2341 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RDeseq2(RPackage):
"""Estimate variance-mean dependence in count data from
high-throughput sequencing assays and test for differential
expression based on a model using the negative binomial
distribution."""
homepage = "https://www.bioconductor.org/packages/DESeq2/"
url = "https://git.bioconductor.org/packages/DESeq2"
version('1.16.1', git='https://git.bioconductor.org/packages/DESeq2', commit='0a815574382704a08ef8b906eceb0296f81cded5')
depends_on('r@3.4.0:3.4.9', when='@1.16.1')
depends_on("r-rcpparmadillo", type=('build', 'run'))
depends_on('r-s4vectors', type=('build', 'run'))
depends_on('r-iranges', type=('build', 'run'))
depends_on('r-genomicranges', type=('build', 'run'))
depends_on('r-summarizedexperiment', type=('build', 'run'))
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-biocparallel', type=('build', 'run'))
depends_on('r-genefilter', type=('build', 'run'))
depends_on('r-locfit', type=('build', 'run'))
depends_on('r-geneplotter', type=('build', 'run'))
depends_on('r-ggplot2', type=('build', 'run'))
depends_on('r-hmisc', type=('build', 'run'))
depends_on('r-rcpp', type=('build', 'run'))
| skosukhin/spack | var/spack/repos/builtin/packages/r-deseq2/package.py | Python | lgpl-2.1 | 2,540 | ["Bioconductor"] | 13f648bf5911986ea72ac04b1f8fe0e785c6722a495e35ec50ebc44fba8ebf54 |
#!/usr/bin/env python
helptext ='''This script will take a list of FASTA files and concatenate them for use in
phylogenetic inference. The sequence headers (up until the first space) must be identical
in each individual FASTA file.
Individual gene sequences should be aligned prior to running this script!
This script requires BioPython to read/write FASTA sequences.'''
import os,sys,argparse
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
def read_sequences(fastafiles):
'''Given a list of FASTA file names, read in each sequence to a dictionary of dictionaries, one per file'''
return {filename:SeqIO.to_dict(SeqIO.parse(filename,'fasta')) for filename in fastafiles}
def get_unique_names(gene_dict):
'''Given the dictionary of SeqRecord dictionaries, return a list of the unique sequence headers'''
all_names = []
for gene in gene_dict:
all_names += list(gene_dict[gene].keys())
return set(all_names)
def insert_sequences(gene_dict,unique_names):
'''Given the dictionary of dictionaries, insert blank sequences if any are missing for a gene'''
inserted_sequences = 0
for gene in gene_dict:
for name in unique_names:
if name not in gene_dict[gene]:
gene_length = len(next(iter(gene_dict[gene].values())))
gene_dict[gene][name] = SeqRecord(Seq("-"*gene_length),id=name)
inserted_sequences += 1
sys.stderr.write("{} Empty sequences inserted across all genes.\n".format(inserted_sequences))
return gene_dict
def concatenate_sequences(gene_dict,fastafiles,unique_names):
'''Given a dictionary of dictionaries with complete sampling in each gene, write out concatenated sequences to stdout. Returns a list of partition lengths.'''
new_seq_dict = {}
partition_lengths = []
for gene in fastafiles:
for name in unique_names:
try:
new_seq_dict[name] += gene_dict[gene][name]
except KeyError:
new_seq_dict[name] = gene_dict[gene][name]
partition_lengths.append(len(next(iter(gene_dict[gene].values()))))
for final_seq in new_seq_dict:
SeqIO.write(new_seq_dict[final_seq],sys.stdout,'fasta')
final_seq_length = len(new_seq_dict[final_seq])
sys.stderr.write("Final conatenated sequence length: {}\n".format(final_seq_length))
return partition_lengths
def raxml_partition(fastafiles,partition_lengths,partition_type):
'''Generate a raxml partition file for the given fastafiles. User specifies the partition type'''
gene_start = 1
partition_file = open("partition.raxml",'w')
if partition_type == 'CODON':
for g in range(len(fastafiles)):
codon3_start = gene_start + 2
codon3_end = gene_start + partition_lengths[g] - 1
codon1_end = codon3_end - 2
codon2_start = gene_start + 1
codon2_end = codon3_end - 1
partition_file.write("{},{}{}={}-{}\\3,{}-{}\\3\n".format("DNA",fastafiles[g],"12",gene_start,codon1_end,codon2_start,codon2_end))
partition_file.write("{},{}{}={}-{}\\3\n".format("DNA",fastafiles[g],"3",codon3_start,codon3_end))
gene_start = codon3_end + 1
else:
for g in range(len(fastafiles)):
gene_end = gene_start + partition_lengths[g] - 1
partition_file.write("{},{}={}-{}\n".format(partition_type,fastafiles[g],gene_start,gene_end))
gene_start = gene_end + 1
partition_file.close()
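# Example output, derived from the format strings above, for two genes of
# lengths 300 and 450 (file names are hypothetical):
#
#     partition_type='DNA':
#         DNA,gene1.fasta=1-300
#         DNA,gene2.fasta=301-750
#     partition_type='CODON' (first gene only):
#         DNA,gene1.fasta12=1-298\3,2-299\3
#         DNA,gene1.fasta3=3-300\3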
def main():
parser = argparse.ArgumentParser(description=helptext,formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--fastafiles",nargs='+',help="List of Fasta Files. Can use wildcard on Linux/Mac systems")
parser.add_argument("--filelist",help="File containing list of Fasta files. Alternative to --fastalist")
parser.add_argument("--raxml",help="Create a partition file 'partitions.raxml' intended for raxml in the current directory. For amino acid sequences, select the substitution model. To specify a separate model for 1st/2nd vs. 3rd codon positions, select CODON.",
choices = ['DNA','WAG','JTT','CODON'
],default=None)
if len(sys.argv) < 2:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
if args.fastafiles:
#print args.fastafiles
if args.filelist:
sys.stderr.write("Specify either a list of FASTA files or a file containing names, not both!\n")
sys.exit(1)
else:
fastafiles = args.fastafiles
elif args.filelist:
#print args.filelist
if os.path.isfile(args.filelist):
fastafiles = [x.rstrip() for x in open(args.filelist)]
else:
sys.stderr.write("File containing list of FASTA files not found!")
sys.exit(1)
else:
sys.stderr.write("You must specify the FASTA files as a list or in a file.\n")
sys.exit(1)
sys.stderr.write("{} FASTA files found.\n".format(len(fastafiles)))
gene_dict = read_sequences(fastafiles)
sys.stderr.write("All sequences read successfully.\n")
unique_names = get_unique_names(gene_dict)
sys.stderr.write("{} Unique names found. If you were expecting fewer sequences, check your IDs!\n".format(len(unique_names)))
gaps_inserted = insert_sequences(gene_dict,unique_names)
partition_lengths = concatenate_sequences(gaps_inserted,fastafiles,unique_names)
if args.raxml:
raxml_partition(fastafiles,partition_lengths,args.raxml)
if __name__ == "__main__":main()
| mossmatters/HybPiper | fasta_merge.py | Python | gpl-3.0 | 5,712 | ["Biopython"] | 4b5394e7cf77be750529234e3fdaad7172a1e81f38a6cbb43d970c1f321e4c8c |
"""
Read/write functions for Gaussian.
Written by:
Glen R. Jenness
University of Wisconsin - Madison
See accompanying license files for details.
"""
import numpy as np
import ase.units
from ase.atoms import Atoms
from ase.atom import Atom
from ase.calculators.singlepoint import SinglePointCalculator
from ase.io.gaussian_reader import GaussianReader as GR
from ase.calculators.gaussian import Gaussian
# http://www.gaussian.com/g_tech/g_ur/k_dft.htm
allowed_dft_functionals = ['lsda', # = 'svwn'
'svwn',
'svwn5', # != 'svwn'
'blyp',
'b3lyp',
'bp86',
'pbepbe',
'pbe1pbe', # pbe0
'm06',
'm06hf',
'm062x',
'tpssh',
'tpsstpss',
'wb97xd',
]
def read_gaussian_out(filename, index=-1, quantity='atoms'):
""""Interface to GaussianReader and returns various quantities"""
energy = 0.0
data = GR(filename)[index]
formula = data['Chemical_formula']
positions = np.array(data['Positions'])
method = data['Method']
version = data['Version']
if method.lower()[1:] in allowed_dft_functionals:
method = 'HF'
atoms = Atoms(formula, positions=positions)
for key, value in data.items():
if (key in method):
energy = value
try:
# Re-read in the log file
f = open(filename, 'r')
lines = f.readlines()
f.close()
forces = list()
for n, line in enumerate(lines):
if ('Forces (Hartrees/Bohr)' in line):
for j in range(len(atoms)):
forces += [[float(lines[n + j + 3].split()[2]),
float(lines[n + j + 3].split()[3]),
float(lines[n + j + 3].split()[4])]]
convert = ase.units.Hartree / ase.units.Bohr
forces = np.array(forces) * convert
except:
forces = None
energy *= ase.units.Hartree # Convert the energy from a.u. to eV
calc = SinglePointCalculator(atoms, energy=energy, forces=forces)
atoms.set_calculator(calc)
if (quantity == 'energy'):
return energy
elif (quantity == 'forces'):
return forces
elif (quantity == 'dipole'):
return data['Dipole']
elif (quantity == 'atoms'):
return atoms
elif (quantity == 'version'):
return version
def read_gaussian(filename):
"""Reads a Gaussian input file"""
f = open(filename, 'r')
lines = f.readlines()
f.close()
atoms = Atoms()
for n, line in enumerate(lines):
if ('#' in line):
i = 0
while (lines[n + i + 5] != '\n'):
info = lines[n + i + 5].split()
symbol = info[0]
position = [float(info[1]), float(info[2]), float(info[3])]
atoms += Atom(symbol, position=position)
i += 1
return atoms
def write_gaussian(filename, atoms):
"""Writes a basic Gaussian input file"""
# Since Gaussian prints the geometry directly into the input file, we'll
# just use the write_input method from the Gaussian calculator with the
# default settings
calc = Gaussian()
calc.initialize(atoms)
calc.write_input(filename, atoms)
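# A minimal usage sketch (the log file name is hypothetical; it assumes a
# completed Gaussian calculation on disk):
#
#     atoms = read_gaussian_out('water.log')                  # Atoms + calculator
#     e = read_gaussian_out('water.log', quantity='energy')   # energy in eV
#     f = read_gaussian_out('water.log', quantity='forces')   # forces in eV/Angstrom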
| grhawk/ASE | tools/ase/io/gaussian.py | Python | gpl-2.0 | 3,516 | ["ASE", "Gaussian"] | 3f71f84f10774c40d84097338dad9dadbbab0e7524c3224250ebfd577a446e1c |
#########################################################################
#
# perceptron.py - This file is part of the Spectral Python (SPy) package.
#
# Copyright (C) 2001-2014 Thomas Boggs
#
# Spectral Python is free software; you can redistribute it and/
# or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# Spectral Python is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; if not, write to
#
# Free Software Foundation, Inc.
# 59 Temple Place, Suite 330
# Boston, MA 02111-1307
# USA
#
#########################################################################
#
# Send comments to:
# Thomas Boggs, tboggs@users.sourceforge.net
#
'''
Classes and functions for classification with neural networks.
'''
from __future__ import division, print_function, unicode_literals
import numpy as np
import sys
class PerceptronLayer:
'''A multilayer perceptron layer with sigmoid activation function.'''
def __init__(self, shape, k=1.0, weights=None):
'''
Arguments:
`shape` (2-tuple of int):
Should have the form (`num_inputs`, `num_neurons`), where
`num_inputs` does not include an input for the bias weights.
`k` (float):
Sigmoid shape parameter.
`weights` (ndarray):
Initial weights for the layer. Note that if provided, this
argument must have shape (`num_neurons`, `num_inputs` + 1). If
not provided, initial weights will be randomized.
'''
self.k = k
self.shape = (shape[1], shape[0] + 1)
if weights:
if weights.shape != self.shape:
raise Exception('Shape of weight matrix does not ' \
'match Perceptron layer shape.')
self.weights = np.array(weights, dtype=np.float64)
else:
self.randomize_weights()
self.dW = np.zeros_like(self.weights)
self.dW_buf = np.zeros_like(self.dW)
self.x = np.ones(self.shape[1], float)
def randomize_weights(self):
'''Randomizes the layer weight matrix.
The bias weight will be in the range [0, 1). The remaining weights will
correspond to a vector with unit length and uniform random orientation.
'''
import math
self.weights = 1. - 2. * np.random.rand(*self.shape)
for row in self.weights:
row[1:] /= math.sqrt(np.sum(row[1:]**2))
row[0] = -0.5 * np.random.rand() - 0.5 * np.sum(row[1:])
def input(self, x, clip=0.0):
'''Sets layer input and computes output.
Arguments:
`x` (sequence):
Layer input, not including bias input.
`clip` (float >= 0):
Optional clipping value to limit sigmoid output. The sigmoid
function has output in the range (0, 1). If the `clip` argument
is set to `a` then all neuron outputs for the layer will be
constrained to the range [a, 1 - a]. This can improve perceptron
learning rate in some situations.
Return value:
The ndarray of output values is returned and is also set in the `y`
attribute of the layer.
For classifying samples, call `classify` instead.
'''
self.x[1:] = x
self.z = np.dot(self.weights, self.x)
if clip > 0.:
self.y = np.clip(self.g(self.z), clip, 1. - clip)
else:
self.y = self.g(self.z)
return self.y
def g(self, a):
'''Neuron activation function (logistic sigmoid)'''
return 1. / (1. + np.exp(- self.k * a))
def dy_da(self):
'''Derivative of activation function at current activation level.'''
return self.k * (self.y * (1.0 - self.y))
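# Note the identity used by dy_da above: for the logistic sigmoid
# y = g(a) = 1 / (1 + exp(-k*a)), the derivative is
# dy/da = k * exp(-k*a) / (1 + exp(-k*a))**2 = k * y * (1 - y),
# so the layer can differentiate using only its cached output y.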
class Perceptron:
''' A Multi-Layer Perceptron network with backpropagation learning.'''
def __init__(self, layers, k=1.0):
'''
Creates the Perceptron network.
Arguments:
layers (sequence of integers):
A list specifying the network structure. `layers`[0] is the number
of inputs. `layers`[-1] is the number of perceptron outputs.
`layers`[1: -1] are the numbers of units in the hidden layers.
`k` (float):
Sigmoid shape parameter.
'''
if type(layers) != list or len(layers) < 2:
raise Exception('ERROR: Perceptron argument must be list of 2 or '
'more integers.')
self.shape = layers[:]
self.layers = [PerceptronLayer((layers[i - 1], layers[i]), k)
for i in range(1, len(layers))]
self.accuracy = 0
self.error = 0
# To prevent overflow when scaling inputs
self.min_input_diff = 1.e-8
# If True, previous iteration weights are preserved after interrupting
# training (with CTRL-C)
self.cache_weights = True
def input(self, x, clip=0.0):
'''Sets Perceptron input, activates neurons and sets & returns output.
Arguments:
`x` (sequence):
Inputs to input layer. Should not include a bias input.
`clip` (float >= 0):
Optional clipping value to limit sigmoid output. The sigmoid
function has output in the range (0, 1). If the `clip` argument
is set to `a` then all neuron outputs for the layer will be
constrained to the range [a, 1 - a]. This can improve perceptron
learning rate in some situations.
For classifying samples, call `classify` instead of `input`.
'''
self.x = x[:]
x = self._scale * (x - self._offset)
for layer in self.layers:
x = layer.input(x, clip)
self.y = np.array(x)
return x
def classify(self, x):
'''Classifies the given sample.
This has the same result as calling input and rounding the result.
'''
return [int(round(xx)) for xx in self.input(x)]
def train(self, X, Y, max_iterations=10000, accuracy=100.0, rate=0.3,
momentum=0., batch=1, clip=0.0, on_iteration=None,
stdout=sys.stdout):
'''
Trains the Perceptron to classify the given samples.
Arguments:
`X`:
The sequence of observations to be learned. Each element of `X`
must have a length corresponding to the input layer of the
network. Values in `X` are not required to be scaled.
`Y`:
Truth values corresponding to elements of `X`. `Y` must contain
as many elements as `X` and each element of `Y` must contain a
number of elements corresponding to the output layer of the
network. All values in `Y` should be in the range [0, 1] and for
training a classifier, values in `Y` are typically *only* 0 or 1
(i.e., no intermediate values).
`max_iterations` (int):
Maximum number of iterations through the data to perform.
Training will end sooner if the specified accuracy is reached in
fewer iterations.
`accuracy` (float):
The percent training accuracy at which to terminate training, if
the maximum number of iterations are not reached first. This
value can be set greater than 100 to force a specified number of
training iterations to be performed (e.g., to continue reducing
the error term after 100% classification accuracy has been
achieved).
`rate` (float):
The perceptron learning rate (typically in the range (0, 1]).
`momentum` (float):
The perceptron learning momentum term, which specifies the
fraction of the previous update value that should be added to
the current update term. The value should be in the range [0, 1).
`batch` (positive integer):
Specifies how many samples should be evaluated before an update
is made to the perceptron weights. A value of 0 indicates batch
updates should be performed (evaluate all training inputs prior
to updating). Otherwise, updates will be aggregated for every
`batch` inputs (i.e., `batch` == 1 is stochastic learning).
`clip` (float >= 0):
Optional clipping value to limit sigmoid output during training.
The sigmoid function has output in the range (0, 1). If the
`clip` argument is set to `a` then all neuron outputs for the
layer will be constrained to the range [a, 1 - a]. This can
improve perceptron learning rate in some situations.
After training the perceptron with a clipping value, `train` can
be called again with clipping set to 0 to continue reducing the
training error.
`on_iteration` (callable):
A callable object that accepts the perceptron as input and
returns bool. If this argument is set, the object will be called
at the end of each training iteration with the perceptron as its
argument. If the callable returns True, training will terminate.
`stdout`:
An object with a `write` method that can be set to redirect
training status messages somewhere other than stdout. To
suppress output, set `stats` to None.
'''
import itertools
import os
if stdout is None:
stdout = open(os.devnull, 'w')
try:
self._set_scaling(X)
for layer in self.layers:
layer.dW_old = np.zeros_like(layer.dW)
for iteration in range(max_iterations):
self._reset_corrections()
self.error = 0
num_samples = 0
num_correct = 0
num_summed = 0
for (x, t) in zip(X, Y):
num_samples += 1
num_summed += 1
num_correct += np.all(np.round(self.input(x, clip)) == t)
delta = np.array(t) - self.y
self.error += 0.5 * sum(delta**2)
# Determine incremental weight adjustments
self._update_dWs(t)
if batch > 0 and num_summed == batch:
self._adjust_weights(rate, momentum, num_summed,
stdout)
num_summed = 0
# In case a partial batch is remaining
if batch > 0 and num_summed > 0:
self._adjust_weights(rate, momentum, num_summed, stdout)
num_summed = 0
self.accuracy = 100. * num_correct / num_samples
if on_iteration and on_iteration(self):
return True
stdout.write('Iter % 5d: Accuracy = %.2f%% E = %f\n' %
(iteration, self.accuracy, self.error))
if self.accuracy >= accuracy:
stdout.write('Network trained to %.1f%% sample accuracy '
'in %d iterations.\n'
% (self.accuracy, iteration + 1))
return True
# If doing full batch learning (batch == 0)
if num_summed > 0:
self._adjust_weights(rate, momentum, num_summed, stdout)
num_summed = 0
except KeyboardInterrupt:
stdout.write("KeyboardInterrupt: Terminating training.\n")
self._reset_corrections()
return False
stdout.write('Terminating network training after %d iterations.\n' %
(iteration + 1))
return False
def _update_dWs(self, t):
'''Update weight adjustment values for the current sample.'''
# Output layer:
# dE/dy = t - y
# dz/dW = x
layerK = self.layers[-1]
layerK.delta = layerK.dy_da() * (t - self.y)
layerK.dW += np.outer(layerK.delta, layerK.x)
# Hidden layers
for i in range(len(self.layers) - 2, -1, -1):
(layerJ, layerK) = self.layers[i: i + 2]
b = np.dot(layerK.delta, layerK.weights[:, 1:])
layerJ.delta = layerJ.dy_da() * b
layerJ.dW += np.outer(layerJ.delta, layerJ.x)
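# Backpropagation recap for the updates above (a restatement of the code,
# not an extension of it): for the output layer, delta = g'(z) * (t - y);
# for each hidden layer J feeding layer K, delta_J = g'(z_J) * (W_K^T delta_K)
# with the bias column of W_K excluded; every layer then accumulates
# dW += outer(delta, x), where x includes the bias input.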
def _adjust_weights(self, rate, momentum, num_summed, stdout):
'''Applies aggregated weight adjustments to the perceptron weights.'''
if self.cache_weights:
weights = [np.array(layer.weights) for layer in self.layers]
try:
if momentum > 0:
for layer in self.layers:
layer.dW *= (float(rate) / num_summed)
layer.dW += momentum * layer.dW_old
layer.weights += layer.dW
(layer.dW_old, layer.dW) = (layer.dW, layer.dW_old)
else:
for layer in self.layers:
layer.dW *= (float(rate) / num_summed)
layer.weights += layer.dW
except KeyboardInterrupt:
if self.cache_weights:
stdout.write('Interrupt during weight adjustment. Restoring ' \
'previous weights.\n')
for i in range(len(weights)):
self.layers[i].weights = weights[i]
else:
stdout.write('Interrupt during weight adjustment. Weight ' \
'caching was disabled so current weights may ' \
'be corrupt.\n')
raise
finally:
self._reset_corrections()
def _reset_corrections(self):
for layer in self.layers:
layer.dW.fill(0)
def _set_scaling(self, X):
'''Sets translation/scaling of inputs to map X to the range [0, 1].'''
mins = maxes = None
for x in X:
if mins is None:
mins = x
maxes = x
else:
mins = np.min([mins, x], axis=0)
maxes = np.max([maxes, x], axis = 0)
self._offset = mins
r = maxes - mins
self._scale = 1. / np.where(r < self.min_input_diff, 1, r)
# Sample data
xor_data = [
[[0, 0], [0]],
[[0, 1], [1]],
[[1, 0], [1]],
[[1, 1], [0]],
]
xor_data2 = [
[[0, 0], [0, 1]],
[[0, 1], [1, 0]],
[[1, 0], [1, 0]],
[[1, 1], [0, 1]],
]
and_data = [
[[0, 0], [0]],
[[0, 1], [0]],
[[1, 0], [0]],
[[1, 1], [1]],
]
def test_case(XY, shape, *args, **kwargs):
(X, Y) = list(zip(*XY))
p = Perceptron(shape)
trained = p.train(X, Y, *args, **kwargs)
return (trained, p)
def test_xor(*args, **kwargs):
XY = xor_data
shape = [2, 2, 1]
return test_case(XY, shape, *args, **kwargs)
def test_xor222(*args, **kwargs):
XY = xor_data2
shape = [2, 2, 2]
return test_case(XY, shape, *args, **kwargs)
def test_xor231(*args, **kwargs):
XY = xor_data
shape = [2, 3, 1]
return test_case(XY, shape, *args, **kwargs)
def test_and(*args, **kwargs):
XY = and_data
shape = [2, 1]
return test_case(XY, shape, *args, **kwargs)
if __name__ == '__main__':
tests = [('AND (2x1)', test_and),
('XOR (2x2x1)', test_xor),
('XOR (2x2x2)', test_xor222),
('XOR (2x3x1)', test_xor231)]
results = [test[1](5000)[0] for test in tests]
nr = [(p[0][0], p[1]) for p in zip(tests, results)]
print()
print('Training results for 5000 iterations')
print('------------------------------------')
for (name, result) in nr:
s = [ 'FAILED', 'PASSED'][result]
print('{0:<20}: {1}'.format(name, s))
if False in results:
print('\nNote: XOR convergence for these small network sizes is')
print('dependent on initial weights, which are randomized. Try')
print('running the test again.')
| ohspite/spectral | spectral/algorithms/perceptron.py | Python | gpl-2.0 | 16,914 | ["NEURON"] | a8559bb196e216907a0abd9fd77bd0834851cf828a46d58a5084a8d010506b0a |
# sql/expression.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Defines the base components of SQL expression trees.
All components are derived from a common base class
:class:`ClauseElement`. Common behaviors are organized
based on class hierarchies, in some cases via mixins.
All object construction from this package occurs via functions which
in some cases will construct composite :class:`ClauseElement` structures
together, and in other cases simply return a single :class:`ClauseElement`
constructed directly. The function interface affords a more "DSL-ish"
feel to constructing SQL expressions and also allows future class
reorganizations.
Even though classes are not constructed directly from the outside,
most classes which have additional public methods are considered to be
public (i.e. have no leading underscore). Other classes which are
"semi-public" are marked with a single leading underscore; these
classes usually have few or no public methods and are less guaranteed
to stay the same in future releases.
"""
import itertools, re
from operator import attrgetter
from sqlalchemy import util, exc
from sqlalchemy.sql import operators
from sqlalchemy.sql.visitors import Visitable, cloned_traverse
import operator
functions = util.importlater("sqlalchemy.sql", "functions")
sqlutil = util.importlater("sqlalchemy.sql", "util")
sqltypes = util.importlater("sqlalchemy", "types")
default = util.importlater("sqlalchemy.engine", "default")
__all__ = [
'Alias', 'ClauseElement', 'ColumnCollection', 'ColumnElement',
'CompoundSelect', 'Delete', 'FromClause', 'Insert', 'Join', 'Select',
'Selectable', 'TableClause', 'Update', 'alias', 'and_', 'asc', 'between',
'bindparam', 'case', 'cast', 'column', 'delete', 'desc', 'distinct',
'except_', 'except_all', 'exists', 'extract', 'func', 'modifier',
'collate', 'insert', 'intersect', 'intersect_all', 'join', 'label',
'literal', 'literal_column', 'not_', 'null', 'or_', 'outparam',
'outerjoin', 'select', 'subquery', 'table', 'text', 'tuple_', 'type_coerce',
'union', 'union_all', 'update', ]
PARSE_AUTOCOMMIT = util._symbol('PARSE_AUTOCOMMIT')
def desc(column):
"""Return a descending ``ORDER BY`` clause element.
e.g.::
order_by = [desc(table1.mycol)]
"""
return _UnaryExpression(column, modifier=operators.desc_op)
def asc(column):
"""Return an ascending ``ORDER BY`` clause element.
e.g.::
order_by = [asc(table1.mycol)]
"""
return _UnaryExpression(column, modifier=operators.asc_op)
def outerjoin(left, right, onclause=None):
"""Return an ``OUTER JOIN`` clause element.
The returned object is an instance of :class:`Join`.
Similar functionality is also available via the :func:`outerjoin()`
method on any :class:`FromClause`.
left
The left side of the join.
right
The right side of the join.
onclause
Optional criterion for the ``ON`` clause, is derived from
foreign key relationships established between left and right
otherwise.
To chain joins together, use the :func:`join()` or :func:`outerjoin()`
methods on the resulting :class:`Join` object.
"""
return Join(left, right, onclause, isouter=True)
def join(left, right, onclause=None, isouter=False):
"""Return a ``JOIN`` clause element (regular inner join).
The returned object is an instance of :class:`Join`.
Similar functionality is also available via the :func:`join()` method
on any :class:`FromClause`.
left
The left side of the join.
right
The right side of the join.
onclause
Optional criterion for the ``ON`` clause, is derived from
foreign key relationships established between left and right
otherwise.
To chain joins together, use the :func:`join()` or :func:`outerjoin()`
methods on the resulting :class:`Join` object.
"""
return Join(left, right, onclause, isouter)
def select(columns=None, whereclause=None, from_obj=[], **kwargs):
"""Returns a ``SELECT`` clause element.
Similar functionality is also available via the :func:`select()`
method on any :class:`FromClause`.
The returned object is an instance of :class:`Select`.
All arguments which accept :class:`ClauseElement` arguments also accept
string arguments, which will be converted as appropriate into
either :func:`text()` or :func:`literal_column()` constructs.
:param columns:
A list of :class:`ClauseElement` objects, typically
:class:`ColumnElement` objects or subclasses, which will form the
columns clause of the resulting statement. For all members which are
instances of :class:`Selectable`, the individual :class:`ColumnElement`
members of the :class:`Selectable` will be added individually to the
columns clause. For example, specifying a
:class:`~sqlalchemy.schema.Table` instance will result in all the
contained :class:`~sqlalchemy.schema.Column` objects within to be added
to the columns clause.
This argument is not present on the form of :func:`select()`
available on :class:`~sqlalchemy.schema.Table`.
:param whereclause:
A :class:`ClauseElement` expression which will be used to form the
``WHERE`` clause.
:param from_obj:
A list of :class:`ClauseElement` objects which will be added to the
``FROM`` clause of the resulting statement. Note that "from" objects are
automatically located within the columns and whereclause ClauseElements.
Use this parameter to explicitly specify "from" objects which are not
automatically locatable. This could include
:class:`~sqlalchemy.schema.Table` objects that aren't otherwise present,
or :class:`Join` objects whose presence will supersede that of the
:class:`~sqlalchemy.schema.Table` objects already located in the other
clauses.
:param autocommit:
Deprecated. Use .execution_options(autocommit=<True|False>)
to set the autocommit option.
:param prefixes:
a list of strings or :class:`ClauseElement` objects to include
directly after the SELECT keyword in the generated statement,
for dialect-specific query features.
:param distinct=False:
when ``True``, applies a ``DISTINCT`` qualifier to the columns
clause of the resulting statement.
:param use_labels=False:
when ``True``, the statement will be generated using labels
for each column in the columns clause, which qualify each
column with its parent table's (or aliases) name so that name
conflicts between columns in different tables don't occur.
The format of the label is <tablename>_<column>. The "c"
collection of the resulting :class:`Select` object will use these
names as well for targeting column members.
:param for_update=False:
when ``True``, applies ``FOR UPDATE`` to the end of the
resulting statement. Certain database dialects also support
alternate values for this parameter, for example mysql
supports "read" which translates to ``LOCK IN SHARE MODE``,
and oracle supports "nowait" which translates to ``FOR UPDATE
NOWAIT``.
:param correlate=True:
indicates that this :class:`Select` object should have its
contained :class:`FromClause` elements "correlated" to an enclosing
:class:`Select` object. This means that any :class:`ClauseElement`
instance within the "froms" collection of this :class:`Select`
which is also present in the "froms" collection of an
enclosing select will not be rendered in the ``FROM`` clause
of this select statement.
:param group_by:
a list of :class:`ClauseElement` objects which will comprise the
``GROUP BY`` clause of the resulting select.
:param having:
a :class:`ClauseElement` that will comprise the ``HAVING`` clause
of the resulting select when ``GROUP BY`` is used.
:param order_by:
a scalar or list of :class:`ClauseElement` objects which will
comprise the ``ORDER BY`` clause of the resulting select.
:param limit=None:
a numerical value which usually compiles to a ``LIMIT``
expression in the resulting select. Databases that don't
support ``LIMIT`` will attempt to provide similar
functionality.
:param offset=None:
a numeric value which usually compiles to an ``OFFSET``
expression in the resulting select. Databases that don't
support ``OFFSET`` will attempt to provide similar
functionality.
:param bind=None:
an ``Engine`` or ``Connection`` instance to which the
resulting ``Select`` object will be bound. The ``Select``
object will otherwise automatically bind to whatever
``Connectable`` instances can be located within its contained
:class:`ClauseElement` members.
"""
return Select(columns, whereclause=whereclause, from_obj=from_obj,
**kwargs)
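# For example (the 'users' table is hypothetical, assumed to have columns
# 'id' and 'name'):
#
#     s = select([users.c.name], users.c.id == 7)
#     # roughly renders: SELECT users.name FROM users WHERE users.id = :id_1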
def subquery(alias, *args, **kwargs):
"""Return an :class:`Alias` object derived
from a :class:`Select`.
name
alias name
\*args, \**kwargs
all other arguments are delivered to the
:func:`select` function.
"""
return Select(*args, **kwargs).alias(alias)
def insert(table, values=None, inline=False, **kwargs):
"""Return an :class:`Insert` clause element.
Similar functionality is available via the :func:`insert()` method on
:class:`~sqlalchemy.schema.Table`.
:param table: The table to be inserted into.
:param values: A dictionary which specifies the column specifications of
the ``INSERT``, and is optional. If left as None, the column
specifications are determined from the bind parameters used during the
compile phase of the ``INSERT`` statement. If the bind parameters also
are None during the compile phase, then the column specifications will be
generated from the full list of table columns. Note that the
:meth:`~Insert.values()` generative method may also be used for this.
:param prefixes: A list of modifier keywords to be inserted between INSERT
and INTO. Alternatively, the :meth:`~Insert.prefix_with` generative
method may be used.
:param inline: if True, SQL defaults will be compiled 'inline' into the
statement and not pre-executed.
If both `values` and compile-time bind parameters are present, the
compile-time bind parameters override the information specified
within `values` on a per-key basis.
The keys within `values` can be either :class:`~sqlalchemy.schema.Column`
objects or their string identifiers. Each key may reference one of:
* a literal data value (i.e. string, number, etc.);
* a Column object;
* a SELECT statement.
If a ``SELECT`` statement is specified which references this
``INSERT`` statement's table, the statement will be correlated
against the ``INSERT`` statement.
"""
return Insert(table, values, inline=inline, **kwargs)
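# Illustrative sketch for insert(); the ``users`` table is hypothetical.
def _example_insert():
    from sqlalchemy import MetaData, Table, Column, Integer, String
    meta = MetaData()
    users = Table('users', meta,
                  Column('id', Integer, primary_key=True),
                  Column('name', String(50)))
    # renders roughly: INSERT INTO users (id, name) VALUES (:id, :name)
    return insert(users, values={'id': 7, 'name': 'jack'})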
def update(table, whereclause=None, values=None, inline=False, **kwargs):
"""Return an :class:`Update` clause element.
Similar functionality is available via the :func:`update()` method on
:class:`~sqlalchemy.schema.Table`.
:param table: The table to be updated.
:param whereclause: A :class:`ClauseElement` describing the ``WHERE``
condition of the ``UPDATE`` statement. Note that the
:meth:`~Update.where()` generative method may also be used for this.
:param values:
A dictionary which specifies the ``SET`` conditions of the
``UPDATE``, and is optional. If left as None, the ``SET``
conditions are determined from the bind parameters used during
the compile phase of the ``UPDATE`` statement. If the bind
parameters also are None during the compile phase, then the
``SET`` conditions will be generated from the full list of table
columns. Note that the :meth:`~Update.values()` generative method may
also be used for this.
:param inline:
if True, SQL defaults will be compiled 'inline' into the statement
and not pre-executed.
If both `values` and compile-time bind parameters are present, the
compile-time bind parameters override the information specified
within `values` on a per-key basis.
The keys within `values` can be either :class:`~sqlalchemy.schema.Column`
objects or their
string identifiers. Each key may reference one of:
* a literal data value (i.e. string, number, etc.);
* a Column object;
* a SELECT statement.
If a ``SELECT`` statement is specified which references this
``UPDATE`` statement's table, the statement will be correlated
against the ``UPDATE`` statement.
"""
return Update(
table,
whereclause=whereclause,
values=values,
inline=inline,
**kwargs)
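# Illustrative sketch for update(); the ``users`` table is hypothetical.
def _example_update():
    from sqlalchemy import MetaData, Table, Column, Integer, String
    meta = MetaData()
    users = Table('users', meta,
                  Column('id', Integer, primary_key=True),
                  Column('name', String(50)))
    # renders roughly: UPDATE users SET name=:name WHERE users.id = :id_1
    return update(users, users.c.id == 7, values={'name': 'ed'})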
def delete(table, whereclause=None, **kwargs):
"""Return a :class:`Delete` clause element.
Similar functionality is available via the :func:`delete()` method on
:class:`~sqlalchemy.schema.Table`.
:param table: The table to be deleted from.
:param whereclause: A :class:`ClauseElement` describing the ``WHERE``
condition of the ``DELETE`` statement. Note that the
:meth:`~Delete.where()` generative method may be used instead.
"""
return Delete(table, whereclause, **kwargs)
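# Illustrative sketch for delete(); the ``users`` table is hypothetical.
def _example_delete():
    from sqlalchemy import MetaData, Table, Column, Integer
    meta = MetaData()
    users = Table('users', meta, Column('id', Integer, primary_key=True))
    # renders roughly: DELETE FROM users WHERE users.id = :id_1
    return delete(users, users.c.id == 7)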
def and_(*clauses):
"""Join a list of clauses together using the ``AND`` operator.
The ``&`` operator is also overloaded on all
:class:`_CompareMixin` subclasses to produce the
same result.
"""
if len(clauses) == 1:
return clauses[0]
return BooleanClauseList(operator=operators.and_, *clauses)
def or_(*clauses):
"""Join a list of clauses together using the ``OR`` operator.
The ``|`` operator is also overloaded on all
:class:`_CompareMixin` subclasses to produce the
same result.
"""
if len(clauses) == 1:
return clauses[0]
return BooleanClauseList(operator=operators.or_, *clauses)
def not_(clause):
"""Return a negation of the given clause, i.e. ``NOT(clause)``.
The ``~`` operator is also overloaded on all
:class:`_CompareMixin` subclasses to produce the
same result.
"""
return operators.inv(_literal_as_binds(clause))
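# Illustrative sketch combining and_(), or_() and not_(); the ``users``
# table is hypothetical.  The &, | and ~ operator overloads on
# _CompareMixin subclasses produce the same constructs.
def _example_conjunctions():
    from sqlalchemy import MetaData, Table, Column, Integer, String
    meta = MetaData()
    users = Table('users', meta, Column('id', Integer),
                  Column('name', String(50)))
    # renders roughly: (users.id > :id_1 OR users.name = :name_1)
    #                  AND users.id != :id_2
    return and_(or_(users.c.id > 5, users.c.name == 'ed'),
                not_(users.c.id == 9))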
def distinct(expr):
"""Return a ``DISTINCT`` clause."""
expr = _literal_as_binds(expr)
return _UnaryExpression(expr, operator=operators.distinct_op, type_=expr.type)
def between(ctest, cleft, cright):
"""Return a ``BETWEEN`` predicate clause.
Equivalent of SQL ``clausetest BETWEEN clauseleft AND clauseright``.
The :func:`between()` method on all
:class:`_CompareMixin` subclasses provides
similar functionality.
"""
ctest = _literal_as_binds(ctest)
return ctest.between(cleft, cright)
def case(whens, value=None, else_=None):
"""Produce a ``CASE`` statement.
whens
A sequence of pairs, or alternatively a dict,
to be translated into "WHEN / THEN" clauses.
value
Optional for simple case statements, produces
a column expression as in "CASE <expr> WHEN ..."
else\_
Optional as well, for case defaults produces
the "ELSE" portion of the "CASE" statement.
The expressions used for THEN and ELSE,
when specified as strings, will be interpreted
as bound values. To specify textual SQL expressions
for these, use the :func:`literal_column`
construct.
The expressions used for the WHEN criterion
may only be literal strings when "value" is
present, i.e. CASE table.somecol WHEN "x" THEN "y".
Otherwise, literal strings are not accepted
in this position, and either the text(<string>)
or literal(<string>) constructs must be used to
interpret raw string values.
Usage examples::
case([(orderline.c.qty > 100, item.c.specialprice),
(orderline.c.qty > 10, item.c.bulkprice)
], else_=item.c.regularprice)
case(value=emp.c.type, whens={
'engineer': emp.c.salary * 1.1,
'manager': emp.c.salary * 3,
})
Using :func:`literal_column()`, to allow for databases that
do not support bind parameters in the ``then`` clause. The type
can be specified which determines the type of the :func:`case()` construct
overall::
case([(orderline.c.qty > 100,
literal_column("'greaterthan100'", String)),
(orderline.c.qty > 10, literal_column("'greaterthan10'",
String))
], else_=literal_column("'lethan10'", String))
"""
return _Case(whens, value=value, else_=else_)
def cast(clause, totype, **kwargs):
"""Return a ``CAST`` function.
Equivalent of SQL ``CAST(clause AS totype)``.
Use with a :class:`~sqlalchemy.types.TypeEngine` subclass, i.e::
cast(table.c.unit_price * table.c.qty, Numeric(10,4))
or::
cast(table.c.timestamp, DATE)
"""
return _Cast(clause, totype, **kwargs)
def extract(field, expr):
"""Return the clause ``extract(field FROM expr)``."""
return _Extract(field, expr)
def collate(expression, collation):
"""Return the clause ``expression COLLATE collation``."""
expr = _literal_as_binds(expression)
return _BinaryExpression(
expr,
_literal_as_text(collation),
operators.collate, type_=expr.type)
def exists(*args, **kwargs):
"""Return an ``EXISTS`` clause as applied to a :class:`Select` object.
Calling styles are of the following forms::
# use on an existing select()
s = select([table.c.col1]).where(table.c.col2==5)
s = exists(s)
# construct a select() at once
exists(['*'], **select_arguments).where(criterion)
# columns argument is optional, generates "EXISTS (SELECT *)"
# by default.
exists().where(table.c.col2==5)
"""
return _Exists(*args, **kwargs)
def union(*selects, **kwargs):
"""Return a ``UNION`` of multiple selectables.
The returned object is an instance of
:class:`CompoundSelect`.
A similar :func:`union()` method is available on all
:class:`FromClause` subclasses.
\*selects
a list of :class:`Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return CompoundSelect(CompoundSelect.UNION, *selects, **kwargs)
def union_all(*selects, **kwargs):
"""Return a ``UNION ALL`` of multiple selectables.
The returned object is an instance of
:class:`CompoundSelect`.
A similar :func:`union_all()` method is available on all
:class:`FromClause` subclasses.
\*selects
a list of :class:`Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return CompoundSelect(CompoundSelect.UNION_ALL, *selects, **kwargs)
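# Illustrative sketch for union()/union_all(); the tables are hypothetical.
def _example_union():
    from sqlalchemy import MetaData, Table, Column, Integer
    meta = MetaData()
    a = Table('a', meta, Column('id', Integer))
    b = Table('b', meta, Column('id', Integer))
    # renders roughly: SELECT a.id FROM a UNION SELECT b.id FROM b
    #                  ORDER BY id
    return union(select([a]), select([b]), order_by=['id'])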
def except_(*selects, **kwargs):
"""Return an ``EXCEPT`` of multiple selectables.
The returned object is an instance of
:class:`CompoundSelect`.
\*selects
a list of :class:`Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return CompoundSelect(CompoundSelect.EXCEPT, *selects, **kwargs)
def except_all(*selects, **kwargs):
"""Return an ``EXCEPT ALL`` of multiple selectables.
The returned object is an instance of
:class:`CompoundSelect`.
\*selects
a list of :class:`Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return CompoundSelect(CompoundSelect.EXCEPT_ALL, *selects, **kwargs)
def intersect(*selects, **kwargs):
"""Return an ``INTERSECT`` of multiple selectables.
The returned object is an instance of
:class:`CompoundSelect`.
\*selects
a list of :class:`Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return CompoundSelect(CompoundSelect.INTERSECT, *selects, **kwargs)
def intersect_all(*selects, **kwargs):
"""Return an ``INTERSECT ALL`` of multiple selectables.
The returned object is an instance of
:class:`CompoundSelect`.
\*selects
a list of :class:`Select` instances.
\**kwargs
available keyword arguments are the same as those of
:func:`select`.
"""
return CompoundSelect(CompoundSelect.INTERSECT_ALL, *selects, **kwargs)
def alias(selectable, alias=None):
"""Return an :class:`Alias` object.
An :class:`Alias` represents any :class:`FromClause`
with an alternate name assigned within SQL, typically using the ``AS``
clause when generated, e.g. ``SELECT * FROM table AS aliasname``.
Similar functionality is available via the :func:`alias()` method
available on all :class:`FromClause` subclasses.
selectable
any :class:`FromClause` subclass, such as a table, select
statement, etc..
alias
string name to be assigned as the alias. If ``None``, a
random name will be generated.
"""
return Alias(selectable, alias=alias)
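# Illustrative sketch for alias(), here used for a self-join; the
# ``nodes`` table is hypothetical.
def _example_alias():
    from sqlalchemy import MetaData, Table, Column, Integer
    meta = MetaData()
    nodes = Table('nodes', meta,
                  Column('id', Integer, primary_key=True),
                  Column('parent_id', Integer))
    parents = alias(nodes, 'parents')
    # renders roughly: SELECT ... FROM nodes, nodes AS parents
    #                  WHERE parents.id = nodes.parent_id
    return select([nodes, parents],
                  parents.c.id == nodes.c.parent_id)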
def literal(value, type_=None):
"""Return a literal clause, bound to a bind parameter.
Literal clauses are created automatically when non- :class:`ClauseElement`
objects (such as strings, ints, dates, etc.) are used in a comparison
operation with a :class:`_CompareMixin`
subclass, such as a :class:`~sqlalchemy.schema.Column` object. Use this function to force the
generation of a literal clause, which will be created as a
:class:`_BindParamClause` with a bound value.
:param value: the value to be bound. Can be any Python object supported by
the underlying DB-API, or is translatable via the given type argument.
:param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` which
will provide bind-parameter translation for this literal.
"""
return _BindParamClause(None, value, type_=type_, unique=True)
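# Illustrative sketch for literal(): force a plain Python value into a
# typed bind parameter.
def _example_literal():
    four = literal(4)          # a bound value, typed by inspecting 4
    # renders roughly: :param_1 + :param_2
    return four + 5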
def tuple_(*expr):
"""Return a SQL tuple.
Main usage is to produce a composite IN construct::
tuple_(table.c.col1, table.c.col2).in_(
[(1, 2), (5, 12), (10, 19)]
)
"""
return _Tuple(*expr)
def type_coerce(expr, type_):
"""Coerce the given expression into the given type, on the Python side only.
:func:`.type_coerce` is roughly similar to :func:`.cast`, except no
"CAST" expression is rendered - the given type is only applied towards
expression typing and against received result values.
e.g.::
from sqlalchemy.types import TypeDecorator
import uuid
class AsGuid(TypeDecorator):
impl = String
def process_bind_param(self, value, dialect):
if value is not None:
return str(value)
else:
return None
def process_result_value(self, value, dialect):
if value is not None:
return uuid.UUID(value)
else:
return None
conn.execute(
select([type_coerce(mytable.c.ident, AsGuid)]).\\
where(
type_coerce(mytable.c.ident, AsGuid) ==
uuid.uuid3(uuid.NAMESPACE_URL, 'bar')
)
)
"""
if hasattr(expr, '__clause_element__'):
    return type_coerce(expr.__clause_element__(), type_)
elif not isinstance(expr, Visitable):
if expr is None:
return null()
else:
return literal(expr, type_=type_)
else:
return _Label(None, expr, type_=type_)
def label(name, obj):
"""Return a :class:`_Label` object for the
given :class:`ColumnElement`.
A label changes the name of an element in the columns clause of a
``SELECT`` statement, typically via the ``AS`` SQL keyword.
This functionality is more conveniently available via the
:func:`label()` method on :class:`ColumnElement`.
name
label name
obj
a :class:`ColumnElement`.
"""
return _Label(name, obj)
def column(text, type_=None):
"""Return a textual column clause, as would be in the columns clause of a
``SELECT`` statement.
The object returned is an instance of
:class:`ColumnClause`, which represents the
"syntactical" portion of the schema-level
:class:`~sqlalchemy.schema.Column` object.
text
the name of the column. Quoting rules will be applied to the
clause like any other column name. For textual column
constructs that are not to be quoted, use the
:func:`literal_column` function.
type\_
an optional :class:`~sqlalchemy.types.TypeEngine` object which will
provide result-set translation for this column.
"""
return ColumnClause(text, type_=type_)
def literal_column(text, type_=None):
"""Return a textual column expression, as would be in the columns
clause of a ``SELECT`` statement.
The object returned supports further expressions in the same way as any
other column object, including comparison, math and string operations.
The type\_ parameter is important to determine proper expression behavior
(such as, '+' means string concatenation or numerical addition based on
the type).
text
the text of the expression; can be any SQL expression. Quoting rules
will not be applied. To specify a column-name expression which should
be subject to quoting rules, use the
:func:`column` function.
type\_
an optional :class:`~sqlalchemy.types.TypeEngine` object which will
provide result-set translation and additional expression semantics for
this column. If left as None the type will be NullType.
"""
return ColumnClause(text, type_=type_, is_literal=True)
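# Illustrative sketch contrasting column() (a quotable identifier) with
# literal_column() (raw SQL text, emitted as-is).
def _example_column_vs_literal_column():
    from sqlalchemy import Integer
    x = column('x')                                    # quoted if needed
    count_star = literal_column('count(*)', Integer)   # emitted verbatim
    # renders roughly: SELECT x, count(*)
    return select([x, count_star])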
def table(name, *columns):
"""Return a :class:`TableClause` object.
This is a primitive version of the :class:`~sqlalchemy.schema.Table` object,
which is a subclass of this object.
"""
return TableClause(name, *columns)
def bindparam(key, value=None, type_=None, unique=False, required=False):
"""Create a bind parameter clause with the given key.
value
a default value for this bind parameter. A bindparam with a
value is called a ``value-based bindparam``.
type\_
a sqlalchemy.types.TypeEngine object indicating the type of this
bind param, will invoke type-specific bind parameter processing
unique
if True, bind params sharing the same name will have their
underlying ``key`` modified to a uniquely generated name.
Mostly useful with value-based bind params.
required
A value is required at execution time.
"""
if isinstance(key, ColumnClause):
return _BindParamClause(key.name, value, type_=key.type,
unique=unique, required=required)
else:
return _BindParamClause(key, value, type_=type_,
unique=unique, required=required)
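# Illustrative sketch for bindparam(); the ``users`` table is hypothetical.
def _example_bindparam():
    from sqlalchemy import MetaData, Table, Column, Integer, String
    meta = MetaData()
    users = Table('users', meta, Column('id', Integer),
                  Column('name', String(50)))
    # renders roughly: SELECT ... FROM users WHERE users.name = :uname ;
    # the value is supplied at execution time, e.g.
    # connection.execute(stmt, uname='jack')
    stmt = select([users], users.c.name == bindparam('uname', type_=String))
    return stmt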
def outparam(key, type_=None):
"""Create an 'OUT' parameter for usage in functions (stored procedures),
for databases which support them.
The ``outparam`` can be used like a regular function parameter.
The "output" value will be available from the
:class:`~sqlalchemy.engine.ResultProxy` object via its ``out_parameters``
attribute, which returns a dictionary containing the values.
"""
return _BindParamClause(
key, None, type_=type_, unique=False, isoutparam=True)
def text(text, bind=None, *args, **kwargs):
"""Create a SQL construct that is represented by a literal string.
E.g.::
t = text("SELECT * FROM users")
result = connection.execute(t)
The advantages :func:`text` provides over a plain string are
backend-neutral support for bind parameters, per-statement
execution options, as well as
bind parameter and result-column typing behavior, allowing
SQLAlchemy type constructs to play a role when executing
a statement that is specified literally.
Bind parameters are specified by name, using the format ``:name``.
E.g.::
t = text("SELECT * FROM users WHERE id=:user_id")
result = connection.execute(t, user_id=12)
To invoke SQLAlchemy typing logic for bind parameters, the
``bindparams`` list allows specification of :func:`bindparam`
constructs which specify the type for a given name::
t = text("SELECT id FROM users WHERE updated_at>:updated",
bindparams=[bindparam('updated', DateTime())]
)
Typing during result row processing is also an important concern.
Result column types
are specified using the ``typemap`` dictionary, where the keys
match the names of columns. These names are taken from what
the DBAPI returns as ``cursor.description``::
t = text("SELECT id, name FROM users",
typemap={
'id':Integer,
'name':Unicode
}
)
The :func:`text` construct is used internally for most cases when
a literal string is specified for part of a larger query, such as
within :func:`select()`, :func:`update()`,
:func:`insert()` or :func:`delete()`. In those cases, the same
bind parameter syntax is applied::
s = select([users.c.id, users.c.name]).where("id=:user_id")
result = connection.execute(s, user_id=12)
Using :func:`text` explicitly usually implies the construction
of a full, standalone statement. As such, SQLAlchemy refers
to it as an :class:`Executable` object, and it supports
the :meth:`Executable.execution_options` method. For example,
a :func:`text` construct that should be subject to "autocommit"
can be set explicitly so using the ``autocommit`` option::
t = text("EXEC my_procedural_thing()").\\
execution_options(autocommit=True)
Note that SQLAlchemy's usual "autocommit" behavior applies to
:func:`text` constructs - that is, statements which begin
with a phrase such as ``INSERT``, ``UPDATE``, ``DELETE``,
or a variety of other phrases specific to certain backends, will
be eligible for autocommit if no transaction is in progress.
:param text:
the text of the SQL statement to be created. use ``:<param>``
to specify bind parameters; they will be compiled to their
engine-specific format.
:param autocommit:
Deprecated. Use .execution_options(autocommit=<True|False>)
to set the autocommit option.
:param bind:
an optional connection or engine to be used for this text query.
:param bindparams:
a list of :func:`bindparam()` instances which can be used to define
the types and/or initial values for the bind parameters within
the textual statement; the keynames of the bindparams must match
those within the text of the statement. The types will be used
for pre-processing on bind values.
:param typemap:
a dictionary mapping the names of columns represented in the
columns clause of a ``SELECT`` statement to type objects,
which will be used to perform post-processing on columns within
the result set. This argument applies to any expression
that returns result sets.
"""
return _TextClause(text, bind=bind, *args, **kwargs)
def null():
"""Return a :class:`_Null` object, which compiles to ``NULL`` in a sql
statement.
"""
return _Null()
class _FunctionGenerator(object):
"""Generate :class:`Function` objects based on getattr calls."""
def __init__(self, **opts):
self.__names = []
self.opts = opts
def __getattr__(self, name):
# pass through __ attributes; fixes pydoc
if name.startswith('__'):
try:
return self.__dict__[name]
except KeyError:
raise AttributeError(name)
elif name.endswith('_'):
name = name[0:-1]
f = _FunctionGenerator(**self.opts)
f.__names = list(self.__names) + [name]
return f
def __call__(self, *c, **kwargs):
o = self.opts.copy()
o.update(kwargs)
if len(self.__names) == 1:
func = getattr(functions, self.__names[-1].lower(), None)
if func is not None and \
isinstance(func, type) and \
issubclass(func, Function):
return func(*c, **o)
return Function(self.__names[-1],
packagenames=self.__names[0:-1], *c, **o)
# "func" global - i.e. func.count()
func = _FunctionGenerator()
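# Illustrative sketch: any attribute of ``func`` generates a SQL function
# by that name.  The ``users`` table is hypothetical.
def _example_func():
    from sqlalchemy import MetaData, Table, Column, Integer
    meta = MetaData()
    users = Table('users', meta, Column('id', Integer))
    # renders roughly: SELECT count(users.id) AS count_1 FROM users
    return select([func.count(users.c.id)])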
# "modifier" global - i.e. modifier.distinct
# TODO: use UnaryExpression for this instead ?
modifier = _FunctionGenerator(group=False)
class _generated_label(unicode):
"""A unicode subclass used to identify dynamically generated names."""
def _escape_for_generated(x):
if isinstance(x, _generated_label):
return x
else:
return x.replace('%', '%%')
def _clone(element):
return element._clone()
def _expand_cloned(elements):
"""expand the given set of ClauseElements to be the set of all 'cloned'
predecessors.
"""
return itertools.chain(*[x._cloned_set for x in elements])
def _select_iterables(elements):
"""expand tables into individual columns in the
given list of column expressions.
"""
return itertools.chain(*[c._select_iterable for c in elements])
def _cloned_intersection(a, b):
"""return the intersection of sets a and b, counting
any overlap between 'cloned' predecessors.
The returned set is in terms of the entities present within 'a'.
"""
all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
return set(elem for elem in a
if all_overlap.intersection(elem._cloned_set))
def _is_literal(element):
return not isinstance(element, Visitable) and \
not hasattr(element, '__clause_element__')
def _from_objects(*elements):
return itertools.chain(*[element._from_objects for element in elements])
def _labeled(element):
if not hasattr(element, 'name'):
return element.label(None)
else:
return element
def _column_as_key(element):
if isinstance(element, basestring):
return element
if hasattr(element, '__clause_element__'):
element = element.__clause_element__()
return element.key
def _literal_as_text(element):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
elif not isinstance(element, Visitable):
return _TextClause(unicode(element))
else:
return element
def _clause_element_as_expr(element):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
else:
return element
def _literal_as_column(element):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
elif not isinstance(element, Visitable):
return literal_column(str(element))
else:
return element
def _literal_as_binds(element, name=None, type_=None):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
elif not isinstance(element, Visitable):
if element is None:
return null()
else:
return _BindParamClause(name, element, type_=type_, unique=True)
else:
return element
def _type_from_args(args):
for a in args:
if not isinstance(a.type, sqltypes.NullType):
return a.type
else:
return sqltypes.NullType
def _no_literals(element):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
elif not isinstance(element, Visitable):
raise exc.ArgumentError("Ambiguous literal: %r. Use the 'text()' "
"function to indicate a SQL expression "
"literal, or 'literal()' to indicate a "
"bound value." % element)
else:
return element
def _only_column_elements(element, name):
if hasattr(element, '__clause_element__'):
element = element.__clause_element__()
if not isinstance(element, ColumnElement):
raise exc.ArgumentError("Column-based expression object expected for argument '%s'; "
"got: '%s', type %s" % (name, element, type(element)))
return element
def _corresponding_column_or_error(fromclause, column,
require_embedded=False):
c = fromclause.corresponding_column(column,
require_embedded=require_embedded)
if c is None:
raise exc.InvalidRequestError(
"Given column '%s', attached to table '%s', "
"failed to locate a corresponding column from table '%s'"
%
(column,
getattr(column, 'table', None),fromclause.description)
)
return c
@util.decorator
def _generative(fn, *args, **kw):
"""Mark a method as generative."""
self = args[0]._generate()
fn(self, *args[1:], **kw)
return self
def is_column(col):
"""True if ``col`` is an instance of :class:`ColumnElement`."""
return isinstance(col, ColumnElement)
class ClauseElement(Visitable):
"""Base class for elements of a programmatically constructed SQL
expression.
"""
__visit_name__ = 'clause'
_annotations = {}
supports_execution = False
_from_objects = []
_bind = None
def _clone(self):
"""Create a shallow copy of this ClauseElement.
This method may be used by a generative API. It's also used as
part of the "deep" copy afforded by a traversal that combines
the _copy_internals() method.
"""
c = self.__class__.__new__(self.__class__)
c.__dict__ = self.__dict__.copy()
c.__dict__.pop('_cloned_set', None)
# this is a marker that helps to "equate" clauses to each other
# when a Select returns its list of FROM clauses. the cloning
# process leaves around a lot of remnants of the previous clause
# typically in the form of column expressions still attached to the
# old table.
c._is_clone_of = self
return c
@property
def _constructor(self):
"""return the 'constructor' for this ClauseElement.
This is for the purposes for creating a new object of
this type. Usually, it's just the element's __class__.
However, the "Annotated" version of the object overrides
to return the class of its proxied element.
"""
return self.__class__
@util.memoized_property
def _cloned_set(self):
Return the set consisting of all cloned ancestors of this
ClauseElement.
Includes this ClauseElement. This accessor tends to be used for
FromClause objects to identify 'equivalent' FROM clauses, regardless
of transformative operations.
"""
s = util.column_set()
f = self
while f is not None:
s.add(f)
f = getattr(f, '_is_clone_of', None)
return s
def __getstate__(self):
d = self.__dict__.copy()
d.pop('_is_clone_of', None)
return d
if util.jython:
def __hash__(self):
"""Return a distinct hash code.
ClauseElements may have special equality comparisons which
makes us rely on them having unique hash codes for use in
hash-based collections. Stock __hash__ doesn't guarantee
unique values on platforms with moving GCs.
"""
return id(self)
def _annotate(self, values):
"""return a copy of this ClauseElement with the given annotations
dictionary.
"""
return sqlutil.Annotated(self, values)
def _deannotate(self):
"""return a copy of this ClauseElement with an empty annotations
dictionary.
"""
return self._clone()
def unique_params(self, *optionaldict, **kwargs):
"""Return a copy with :func:`bindparam()` elments replaced.
Same functionality as ``params()``, except adds `unique=True`
to affected bind parameters so that multiple statements can be
used.
"""
return self._params(True, optionaldict, kwargs)
def params(self, *optionaldict, **kwargs):
"""Return a copy with :func:`bindparam()` elments replaced.
Returns a copy of this ClauseElement with :func:`bindparam()`
elements replaced with values taken from the given dictionary::
>>> clause = column('x') + bindparam('foo')
>>> print clause.compile().params
{'foo':None}
>>> print clause.params({'foo':7}).compile().params
{'foo':7}
"""
return self._params(False, optionaldict, kwargs)
def _params(self, unique, optionaldict, kwargs):
if len(optionaldict) == 1:
kwargs.update(optionaldict[0])
elif len(optionaldict) > 1:
raise exc.ArgumentError(
"params() takes zero or one positional dictionary argument")
def visit_bindparam(bind):
if bind.key in kwargs:
bind.value = kwargs[bind.key]
if unique:
bind._convert_to_unique()
return cloned_traverse(self, {}, {'bindparam':visit_bindparam})
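    # Illustrative sketch for params()/unique_params(), kept in comments
    # so the class body stays intact; names are hypothetical:
    #
    #     clause = column('x') == bindparam('foo')
    #     clause2 = clause.params(foo=7)   # copy with 'foo' bound to 7
    #
    # ``clause`` itself is unchanged; unique_params() additionally renames
    # the parameter so several copies can coexist in one statement.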
def compare(self, other, **kw):
"""Compare this ClauseElement to the given ClauseElement.
Subclasses should override the default behavior, which is a
straight identity comparison.
\**kw are arguments consumed by subclass compare() methods and
may be used to modify the criteria for comparison.
(see :class:`ColumnElement`)
"""
return self is other
def _copy_internals(self, clone=_clone):
"""Reassign internal elements to be clones of themselves.
Called during a copy-and-traverse operation on newly
shallow-copied elements to create a deep copy.
"""
pass
def get_children(self, **kwargs):
"""Return immediate child elements of this :class:`ClauseElement`.
This is used for visit traversal.
\**kwargs may contain flags that change the collection that is
returned, for example to return a subset of items in order to
cut down on larger traversals, or to return child items from a
different context (such as schema-level collections instead of
clause-level).
"""
return []
def self_group(self, against=None):
"""Apply a 'grouping' to this :class:`.ClauseElement`.
This method is overridden by subclasses to return a
"grouping" construct, i.e. parenthesis. In particular
it's used by "binary" expressions to provide a grouping
around themselves when placed into a larger expression,
as well as by :func:`.select` constructs when placed into
the FROM clause of another :func:`.select`. (Note that
subqueries should be normally created using the
:func:`.Select.alias` method, as many platforms require
nested SELECT statements to be named).
As expressions are composed together, the application of
:meth:`self_group` is automatic - end-user code should never
need to use this method directly. Note that SQLAlchemy's
clause constructs take operator precedence into account -
so parenthesis might not be needed, for example, in
an expression like ``x OR (y AND z)`` - AND takes precedence
over OR.
The base :meth:`self_group` method of :class:`.ClauseElement`
just returns self.
"""
return self
# TODO: remove .bind as a method from the root ClauseElement.
# we should only be deriving binds from FromClause elements
# and certain SchemaItem subclasses.
# the "search_for_bind" functionality can still be used by
# execute(), however.
@property
def bind(self):
"""Returns the Engine or Connection to which this ClauseElement is
bound, or None if none found.
"""
if self._bind is not None:
return self._bind
for f in _from_objects(self):
if f is self:
continue
engine = f.bind
if engine is not None:
return engine
else:
return None
@util.pending_deprecation('0.7',
'Only SQL expressions which subclass '
':class:`.Executable` may provide the '
':func:`.execute` method.')
def execute(self, *multiparams, **params):
"""Compile and execute this :class:`ClauseElement`.
"""
e = self.bind
if e is None:
label = getattr(self, 'description', self.__class__.__name__)
msg = ('This %s is not bound and does not support direct '
'execution. Supply this statement to a Connection or '
'Engine for execution. Or, assign a bind to the statement '
'or the Metadata of its underlying tables to enable '
'implicit execution via this method.' % label)
raise exc.UnboundExecutionError(msg)
return e._execute_clauseelement(self, multiparams, params)
@util.pending_deprecation('0.7',
'Only SQL expressions which subclass '
':class:`.Executable` may provide the '
':func:`.scalar` method.')
def scalar(self, *multiparams, **params):
"""Compile and execute this :class:`ClauseElement`, returning
the result's scalar representation.
"""
return self.execute(*multiparams, **params).scalar()
def compile(self, bind=None, dialect=None, **kw):
"""Compile this SQL expression.
The return value is a :class:`~sqlalchemy.engine.Compiled` object.
Calling ``str()`` or ``unicode()`` on the returned value will yield a
string representation of the result. The
:class:`~sqlalchemy.engine.Compiled` object also can return a
dictionary of bind parameter names and values
using the ``params`` accessor.
:param bind: An ``Engine`` or ``Connection`` from which a
``Compiled`` will be acquired. This argument takes precedence over
this :class:`ClauseElement`'s bound engine, if any.
:param column_keys: Used for INSERT and UPDATE statements, a list of
column names which should be present in the VALUES clause of the
compiled statement. If ``None``, all columns from the target table
object are rendered.
:param dialect: A ``Dialect`` instance from which a ``Compiled``
will be acquired. This argument takes precedence over the `bind`
argument as well as this :class:`ClauseElement`'s bound engine, if
any.
:param inline: Used for INSERT statements, for a dialect which does
not support inline retrieval of newly generated primary key
columns, will force the expression used to create the new primary
key value to be rendered inline within the INSERT statement's
VALUES clause. This typically refers to Sequence execution but may
also refer to any server-side default generation function
associated with a primary key `Column`.
"""
if not dialect:
if bind:
dialect = bind.dialect
elif self.bind:
dialect = self.bind.dialect
bind = self.bind
else:
dialect = default.DefaultDialect()
compiler = self._compiler(dialect, bind=bind, **kw)
compiler.compile()
return compiler
def _compiler(self, dialect, **kw):
"""Return a compiler appropriate for this ClauseElement, given a
Dialect."""
return dialect.statement_compiler(dialect, self, **kw)
def __str__(self):
# Py3K
#return unicode(self.compile())
# Py2K
return unicode(self.compile()).encode('ascii', 'backslashreplace')
# end Py2K
def __and__(self, other):
return and_(self, other)
def __or__(self, other):
return or_(self, other)
def __invert__(self):
return self._negate()
def __nonzero__(self):
raise TypeError("Boolean value of this clause is not defined")
def _negate(self):
if hasattr(self, 'negation_clause'):
return self.negation_clause
else:
return _UnaryExpression(
self.self_group(against=operators.inv),
operator=operators.inv,
negate=None)
def __repr__(self):
friendly = getattr(self, 'description', None)
if friendly is None:
return object.__repr__(self)
else:
return '<%s.%s at 0x%x; %s>' % (
self.__module__, self.__class__.__name__, id(self), friendly)
class _Immutable(object):
"""mark a ClauseElement as 'immutable' when expressions are cloned."""
def unique_params(self, *optionaldict, **kwargs):
raise NotImplementedError("Immutable objects do not support copying")
def params(self, *optionaldict, **kwargs):
raise NotImplementedError("Immutable objects do not support copying")
def _clone(self):
return self
class Operators(object):
def __and__(self, other):
return self.operate(operators.and_, other)
def __or__(self, other):
return self.operate(operators.or_, other)
def __invert__(self):
return self.operate(operators.inv)
def op(self, opstring):
def op(b):
return self.operate(operators.op, opstring, b)
return op
def operate(self, op, *other, **kwargs):
raise NotImplementedError(str(op))
def reverse_operate(self, op, other, **kwargs):
raise NotImplementedError(str(op))
class ColumnOperators(Operators):
"""Defines comparison and math operations."""
timetuple = None
"""Hack, allows datetime objects to be compared on the LHS."""
def __lt__(self, other):
return self.operate(operators.lt, other)
def __le__(self, other):
return self.operate(operators.le, other)
__hash__ = Operators.__hash__
def __eq__(self, other):
return self.operate(operators.eq, other)
def __ne__(self, other):
return self.operate(operators.ne, other)
def __gt__(self, other):
return self.operate(operators.gt, other)
def __ge__(self, other):
return self.operate(operators.ge, other)
def __neg__(self):
return self.operate(operators.neg)
def concat(self, other):
return self.operate(operators.concat_op, other)
def like(self, other, escape=None):
return self.operate(operators.like_op, other, escape=escape)
def ilike(self, other, escape=None):
return self.operate(operators.ilike_op, other, escape=escape)
def in_(self, other):
return self.operate(operators.in_op, other)
def startswith(self, other, **kwargs):
return self.operate(operators.startswith_op, other, **kwargs)
def endswith(self, other, **kwargs):
return self.operate(operators.endswith_op, other, **kwargs)
def contains(self, other, **kwargs):
return self.operate(operators.contains_op, other, **kwargs)
def match(self, other, **kwargs):
return self.operate(operators.match_op, other, **kwargs)
def desc(self):
return self.operate(operators.desc_op)
def asc(self):
return self.operate(operators.asc_op)
def collate(self, collation):
return self.operate(operators.collate, collation)
def __radd__(self, other):
return self.reverse_operate(operators.add, other)
def __rsub__(self, other):
return self.reverse_operate(operators.sub, other)
def __rmul__(self, other):
return self.reverse_operate(operators.mul, other)
def __rdiv__(self, other):
return self.reverse_operate(operators.div, other)
def between(self, cleft, cright):
return self.operate(operators.between_op, cleft, cright)
def distinct(self):
return self.operate(operators.distinct_op)
def __add__(self, other):
return self.operate(operators.add, other)
def __sub__(self, other):
return self.operate(operators.sub, other)
def __mul__(self, other):
return self.operate(operators.mul, other)
def __div__(self, other):
return self.operate(operators.div, other)
def __mod__(self, other):
return self.operate(operators.mod, other)
def __truediv__(self, other):
return self.operate(operators.truediv, other)
def __rtruediv__(self, other):
return self.reverse_operate(operators.truediv, other)
class _CompareMixin(ColumnOperators):
"""Defines comparison and math operations for :class:`ClauseElement`
instances."""
def __compare(self, op, obj, negate=None, reverse=False,
**kwargs
):
if obj is None or isinstance(obj, _Null):
if op == operators.eq:
return _BinaryExpression(self, null(), operators.is_,
negate=operators.isnot)
elif op == operators.ne:
return _BinaryExpression(self, null(), operators.isnot,
negate=operators.is_)
else:
raise exc.ArgumentError("Only '='/'!=' operators can "
"be used with NULL")
else:
obj = self._check_literal(op, obj)
if reverse:
return _BinaryExpression(obj,
self,
op,
type_=sqltypes.BOOLEANTYPE,
negate=negate, modifiers=kwargs)
else:
return _BinaryExpression(self,
obj,
op,
type_=sqltypes.BOOLEANTYPE,
negate=negate, modifiers=kwargs)
def __operate(self, op, obj, reverse=False):
obj = self._check_literal(op, obj)
if reverse:
left, right = obj, self
else:
left, right = self, obj
if left.type is None:
op, result_type = sqltypes.NULLTYPE._adapt_expression(op,
right.type)
elif right.type is None:
op, result_type = left.type._adapt_expression(op,
sqltypes.NULLTYPE)
else:
op, result_type = left.type._adapt_expression(op,
right.type)
return _BinaryExpression(left, right, op, type_=result_type)
# a mapping of operators with the method they use, along with their negated
# operator for comparison operators
operators = {
operators.add : (__operate,),
operators.mul : (__operate,),
operators.sub : (__operate,),
# Py2K
operators.div : (__operate,),
# end Py2K
operators.mod : (__operate,),
operators.truediv : (__operate,),
operators.lt : (__compare, operators.ge),
operators.le : (__compare, operators.gt),
operators.ne : (__compare, operators.eq),
operators.gt : (__compare, operators.le),
operators.ge : (__compare, operators.lt),
operators.eq : (__compare, operators.ne),
operators.like_op : (__compare, operators.notlike_op),
operators.ilike_op : (__compare, operators.notilike_op),
}
def operate(self, op, *other, **kwargs):
o = _CompareMixin.operators[op]
return o[0](self, op, other[0], *o[1:], **kwargs)
def reverse_operate(self, op, other, **kwargs):
o = _CompareMixin.operators[op]
return o[0](self, op, other, reverse=True, *o[1:], **kwargs)
def in_(self, other):
"""Compare this element to the given element or collection using IN."""
return self._in_impl(operators.in_op, operators.notin_op, other)
def _in_impl(self, op, negate_op, seq_or_selectable):
seq_or_selectable = _clause_element_as_expr(seq_or_selectable)
if isinstance(seq_or_selectable, _ScalarSelect):
return self.__compare(op, seq_or_selectable,
negate=negate_op)
elif isinstance(seq_or_selectable, _SelectBaseMixin):
# TODO: if we ever want to support (x, y, z) IN (select x,
# y, z from table), we would need a multi-column version of
# as_scalar() to produce a multi- column selectable that
# does not export itself as a FROM clause
return self.__compare(op, seq_or_selectable.as_scalar(),
negate=negate_op)
elif isinstance(seq_or_selectable, (Selectable, _TextClause)):
return self.__compare(op, seq_or_selectable,
negate=negate_op)
# Handle non selectable arguments as sequences
args = []
for o in seq_or_selectable:
if not _is_literal(o):
if not isinstance(o, _CompareMixin):
raise exc.InvalidRequestError('in() function accept'
's either a list of non-selectable values, '
'or a selectable: %r' % o)
else:
o = self._bind_param(op, o)
args.append(o)
if len(args) == 0:
# Special case handling for empty IN's, behave like
# comparison against zero row selectable. We use != to
# build the contradiction as it handles NULL values
# appropriately, i.e. "not (x IN ())" should not return NULL
# values for x.
util.warn('The IN-predicate on "%s" was invoked with an '
'empty sequence. This results in a '
'contradiction, which nonetheless can be '
'expensive to evaluate. Consider alternative '
'strategies for improved performance.' % self)
return self != self
return self.__compare(op,
ClauseList(*args).self_group(against=op),
negate=negate_op)
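    # Illustrative sketch for in_(), kept in comments; the ``users`` table
    # is hypothetical:
    #
    #     users.c.id.in_([1, 2, 3])
    #     # renders roughly: users.id IN (:id_1, :id_2, :id_3)
    #     users.c.id.in_(select([users.c.id], users.c.id > 5))
    #     # renders roughly: users.id IN (SELECT users.id FROM users
    #     #                               WHERE users.id > :id_1)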
def __neg__(self):
return _UnaryExpression(self, operator=operators.neg)
def startswith(self, other, escape=None):
"""Produce the clause ``LIKE '<other>%'``"""
# use __radd__ to force string concat behavior
return self.__compare(
operators.like_op,
literal_column("'%'", type_=sqltypes.String).__radd__(
self._check_literal(operators.like_op, other)
),
escape=escape)
def endswith(self, other, escape=None):
"""Produce the clause ``LIKE '%<other>'``"""
return self.__compare(
operators.like_op,
literal_column("'%'", type_=sqltypes.String) +
self._check_literal(operators.like_op, other),
escape=escape)
def contains(self, other, escape=None):
"""Produce the clause ``LIKE '%<other>%'``"""
return self.__compare(
operators.like_op,
literal_column("'%'", type_=sqltypes.String) +
self._check_literal(operators.like_op, other) +
literal_column("'%'", type_=sqltypes.String),
escape=escape)
def match(self, other):
"""Produce a MATCH clause, i.e. ``MATCH '<other>'``
The allowed contents of ``other`` are database backend specific.
"""
return self.__compare(operators.match_op,
self._check_literal(operators.match_op,
other))
def label(self, name):
"""Produce a column label, i.e. ``<columnname> AS <name>``.
This is a shortcut to the :func:`~.expression.label` function.
if 'name' is None, an anonymous label name will be generated.
"""
return _Label(name, self, self.type)
def desc(self):
"""Produce a DESC clause, i.e. ``<columnname> DESC``"""
return desc(self)
def asc(self):
"""Produce a ASC clause, i.e. ``<columnname> ASC``"""
return asc(self)
def distinct(self):
"""Produce a DISTINCT clause, i.e. ``DISTINCT <columnname>``"""
return _UnaryExpression(self, operator=operators.distinct_op,
type_=self.type)
def between(self, cleft, cright):
"""Produce a BETWEEN clause, i.e. ``<column> BETWEEN <cleft> AND
<cright>``"""
return _BinaryExpression(
self,
ClauseList(
self._check_literal(operators.and_, cleft),
self._check_literal(operators.and_, cright),
operator=operators.and_,
group=False),
operators.between_op)
def collate(self, collation):
"""Produce a COLLATE clause, i.e. ``<column> COLLATE utf8_bin``"""
return collate(self, collation)
def op(self, operator):
"""produce a generic operator function.
e.g.::
somecolumn.op("*")(5)
produces::
somecolumn * 5
:param operator: a string which will be output as the infix operator
between this :class:`ClauseElement` and the expression passed to the
generated function.
This function can also be used to make bitwise operators explicit. For
example::
somecolumn.op('&')(0xff)
is a bitwise AND of the value in somecolumn.
"""
return lambda other: self.__operate(operator, other)
def _bind_param(self, operator, obj):
return _BindParamClause(None, obj,
_compared_to_operator=operator,
_compared_to_type=self.type, unique=True)
def _check_literal(self, operator, other):
if isinstance(other, _BindParamClause) and \
isinstance(other.type, sqltypes.NullType):
# TODO: perhaps we should not mutate the incoming bindparam()
# here and instead make a copy of it. this might
# be the only place that we're mutating an incoming construct.
other.type = self.type
return other
elif hasattr(other, '__clause_element__'):
return other.__clause_element__()
elif not isinstance(other, ClauseElement):
return self._bind_param(operator, other)
elif isinstance(other, (_SelectBaseMixin, Alias)):
return other.as_scalar()
else:
return other
class ColumnElement(ClauseElement, _CompareMixin):
"""Represent an element that is usable within the "column clause" portion
of a ``SELECT`` statement.
This includes columns associated with tables, aliases, and
subqueries, expressions, function calls, SQL keywords such as
``NULL``, literals, etc. :class:`ColumnElement` is the ultimate base
class for all such elements.
:class:`ColumnElement` supports the ability to be a *proxy* element,
which indicates that the :class:`ColumnElement` may be associated with
a :class:`Selectable` which was derived from another :class:`Selectable`.
An example of a "derived" :class:`Selectable` is an :class:`Alias` of a
:class:`~sqlalchemy.schema.Table`.
A :class:`ColumnElement`, by subclassing the :class:`_CompareMixin` mixin
class, provides the ability to generate new :class:`ClauseElement`
objects using Python expressions. See the :class:`_CompareMixin`
docstring for more details.
"""
__visit_name__ = 'column'
primary_key = False
foreign_keys = []
quote = None
_label = None
@property
def _select_iterable(self):
return (self, )
@util.memoized_property
def base_columns(self):
return util.column_set(c for c in self.proxy_set
if not hasattr(c, 'proxies'))
@util.memoized_property
def proxy_set(self):
s = util.column_set([self])
if hasattr(self, 'proxies'):
for c in self.proxies:
s.update(c.proxy_set)
return s
def shares_lineage(self, othercolumn):
"""Return True if the given :class:`ColumnElement`
has a common ancestor to this :class:`ColumnElement`."""
return bool(self.proxy_set.intersection(othercolumn.proxy_set))
def _make_proxy(self, selectable, name=None):
"""Create a new :class:`ColumnElement` representing this
:class:`ColumnElement` as it appears in the select list of a
descending selectable.
"""
if name is None:
name = self.anon_label
# TODO: may want to change this to anon_label,
# or some value that is more useful than the
# compiled form of the expression
key = str(self)
else:
key = name
co = ColumnClause(name, selectable, type_=getattr(self,
'type', None))
co.proxies = [self]
selectable.columns[key] = co
return co
def compare(self, other, use_proxies=False, equivalents=None, **kw):
"""Compare this ColumnElement to another.
Special arguments understood:
:param use_proxies: when True, consider two columns that
share a common base column as equivalent (i.e. shares_lineage())
:param equivalents: a dictionary of columns as keys mapped to sets
of columns. If the given "other" column is present in this
dictionary, if any of the columns in the correponding set() pass the
comparison test, the result is True. This is used to expand the
comparison to other columns that may be known to be equivalent to
this one via foreign key or other criterion.
"""
to_compare = (other, )
if equivalents and other in equivalents:
to_compare = equivalents[other].union(to_compare)
for oth in to_compare:
if use_proxies and self.shares_lineage(oth):
return True
elif oth is self:
return True
else:
return False
@util.memoized_property
def anon_label(self):
"""provides a constant 'anonymous label' for this ColumnElement.
This is a label() expression which will be named at compile time.
The same label() is returned each time anon_label is called so
that expressions can reference anon_label multiple times, producing
the same label name at compile time.
the compiler uses this function automatically at compile time
for expressions that are known to be 'unnamed' like binary
expressions and function calls.
"""
return _generated_label('%%(%d %s)s' % (id(self), getattr(self,
'name', 'anon')))
class ColumnCollection(util.OrderedProperties):
"""An ordered dictionary that stores a list of ColumnElement
instances.
Overrides the ``__eq__()`` method to produce SQL clauses between
sets of correlated columns.
"""
def __init__(self, *cols):
super(ColumnCollection, self).__init__()
self.update((c.key, c) for c in cols)
def __str__(self):
return repr([str(c) for c in self])
def replace(self, column):
"""add the given column to this collection, removing unaliased
versions of this column as well as existing columns with the
same key.
e.g.::
t = Table('sometable', metadata, Column('col1', Integer))
t.columns.replace(Column('col1', Integer, key='columnone'))
will remove the original 'col1' from the collection, and add
the new column under the key 'columnone'.
Used by schema.Column to override columns during table reflection.
"""
if column.name in self and column.key != column.name:
other = self[column.name]
if other.name == other.key:
del self[other.name]
util.OrderedProperties.__setitem__(self, column.key, column)
def add(self, column):
"""Add a column to this collection.
The key attribute of the column will be used as the hash key
for this dictionary.
"""
self[column.key] = column
def __setitem__(self, key, value):
if key in self:
# this warning is primarily to catch select() statements
# which have conflicting column names in their exported
# columns collection
existing = self[key]
if not existing.shares_lineage(value):
util.warn('Column %r on table %r being replaced by '
'another column with the same key. Consider '
'use_labels for select() statements.' % (key,
getattr(existing, 'table', None)))
util.OrderedProperties.__setitem__(self, key, value)
def remove(self, column):
del self[column.key]
def extend(self, iter):
for c in iter:
self.add(c)
__hash__ = None
def __eq__(self, other):
l = []
for c in other:
for local in self:
if c.shares_lineage(local):
l.append(c==local)
return and_(*l)
def __contains__(self, other):
if not isinstance(other, basestring):
raise exc.ArgumentError("__contains__ requires a string argument")
return util.OrderedProperties.__contains__(self, other)
def contains_column(self, col):
# have to use a Set here, because it will compare the identity
# of the column, not just using "==" for comparison which will
# always return a "True" value (i.e. a BinaryClause...)
return col in util.column_set(self)
class ColumnSet(util.ordered_column_set):
def contains_column(self, col):
return col in self
def extend(self, cols):
for col in cols:
self.add(col)
def __add__(self, other):
return list(self) + list(other)
def __eq__(self, other):
l = []
for c in other:
for local in self:
if c.shares_lineage(local):
l.append(c==local)
return and_(*l)
def __hash__(self):
return hash(tuple(x for x in self))
class Selectable(ClauseElement):
"""mark a class as being selectable"""
__visit_name__ = 'selectable'
class FromClause(Selectable):
"""Represent an element that can be used within the ``FROM``
clause of a ``SELECT`` statement.
"""
__visit_name__ = 'fromclause'
named_with_column = False
_hide_froms = []
quote = None
schema = None
def count(self, whereclause=None, **params):
"""return a SELECT COUNT generated against this
:class:`FromClause`."""
if self.primary_key:
col = list(self.primary_key)[0]
else:
col = list(self.columns)[0]
return select(
[func.count(col).label('tbl_row_count')],
whereclause,
from_obj=[self],
**params)
def select(self, whereclause=None, **params):
"""return a SELECT of this :class:`FromClause`."""
return select([self], whereclause, **params)
def join(self, right, onclause=None, isouter=False):
"""return a join of this :class:`FromClause` against another
:class:`FromClause`."""
return Join(self, right, onclause, isouter)
def outerjoin(self, right, onclause=None):
"""return an outer join of this :class:`FromClause` against another
:class:`FromClause`."""
return Join(self, right, onclause, True)
def alias(self, name=None):
"""return an alias of this :class:`FromClause`.
For table objects, this has the effect of the table being rendered
as ``tablename AS aliasname`` in a SELECT statement.
For select objects, the effect is that of creating a named
subquery, i.e. ``(select ...) AS aliasname``.
The :func:`alias()` method is the general way to create
a "subquery" out of an existing SELECT.
The ``name`` parameter is optional, and if left blank an
"anonymous" name will be generated at compile time, guaranteed
to be unique against other anonymous constructs used in the
same statement.
"""
return Alias(self, name)
def is_derived_from(self, fromclause):
"""Return True if this FromClause is 'derived' from the given
FromClause.
An example would be an Alias of a Table is derived from that Table.
"""
return fromclause in self._cloned_set
def replace_selectable(self, old, alias):
"""replace all occurences of FromClause 'old' with the given Alias
object, returning a copy of this :class:`FromClause`.
"""
return sqlutil.ClauseAdapter(alias).traverse(self)
def correspond_on_equivalents(self, column, equivalents):
"""Return corresponding_column for the given column, or if None
search for a match in the given dictionary.
"""
col = self.corresponding_column(column, require_embedded=True)
if col is None and col in equivalents:
for equiv in equivalents[col]:
nc = self.corresponding_column(equiv, require_embedded=True)
if nc:
return nc
return col
def corresponding_column(self, column, require_embedded=False):
"""Given a :class:`ColumnElement`, return the exported
:class:`ColumnElement` object from this :class:`Selectable`
which corresponds to that original
:class:`~sqlalchemy.schema.Column` via a common ancestor
column.
:param column: the target :class:`ColumnElement` to be matched
:param require_embedded: only return corresponding columns for
the given :class:`ColumnElement`, if the given
:class:`ColumnElement` is actually present within a sub-element
of this :class:`FromClause`. Normally the column will match if
it merely shares a common ancestor with one of the exported
columns of this :class:`FromClause`.
"""
# don't dig around if the column is locally present
if self.c.contains_column(column):
return column
col, intersect = None, None
target_set = column.proxy_set
cols = self.c
for c in cols:
i = target_set.intersection(itertools.chain(*[p._cloned_set
for p in c.proxy_set]))
if i and (not require_embedded
or c.proxy_set.issuperset(target_set)):
if col is None:
# no corresponding column yet, pick this one.
col, intersect = c, i
elif len(i) > len(intersect):
# 'c' has a larger field of correspondence than
# 'col'. i.e. selectable.c.a1_x->a1.c.x->table.c.x
# matches a1.c.x->table.c.x better than
# selectable.c.x->table.c.x does.
col, intersect = c, i
elif i == intersect:
# they have the same field of correspondence. see
# which proxy_set has fewer columns in it, which
# indicates a closer relationship with the root
# column. Also take into account the "weight"
# attribute which CompoundSelect() uses to give
# higher precedence to columns based on vertical
# position in the compound statement, and discard
# columns that have no reference to the target
# column (also occurs with CompoundSelect)
col_distance = util.reduce(operator.add,
[sc._annotations.get('weight', 1) for sc in
col.proxy_set if sc.shares_lineage(column)])
c_distance = util.reduce(operator.add,
[sc._annotations.get('weight', 1) for sc in
c.proxy_set if sc.shares_lineage(column)])
if c_distance < col_distance:
col, intersect = c, i
return col
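    # Illustrative sketch for corresponding_column(), kept in comments;
    # ``t`` is a hypothetical table:
    #
    #     t = table('t', column('x'))
    #     a = t.alias('t_a')
    #     a.corresponding_column(t.c.x)   # returns a.c.x, proxy of t.c.x
    #     t.corresponding_column(a.c.x)   # returns t.c.x via the common
    #                                     # ancestor column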
@property
def description(self):
"""a brief description of this FromClause.
Used primarily for error message formatting.
"""
return getattr(self, 'name', self.__class__.__name__ + " object")
def _reset_exported(self):
"""delete memoized collections when a FromClause is cloned."""
for attr in '_columns', '_primary_key', '_foreign_keys', \
'locate_all_froms':
self.__dict__.pop(attr, None)
@util.memoized_property
def _columns(self):
"""Return the collection of Column objects contained by this
FromClause."""
self._export_columns()
return self._columns
@util.memoized_property
def _primary_key(self):
"""Return the collection of Column objects which comprise the
primary key of this FromClause."""
self._export_columns()
return self._primary_key
@util.memoized_property
def _foreign_keys(self):
"""Return the collection of ForeignKey objects which this
FromClause references."""
self._export_columns()
return self._foreign_keys
columns = property(attrgetter('_columns'), doc=_columns.__doc__)
primary_key = property(attrgetter('_primary_key'),
doc=_primary_key.__doc__)
foreign_keys = property(attrgetter('_foreign_keys'),
doc=_foreign_keys.__doc__)
# synonyms for 'columns'
c = _select_iterable = property(attrgetter('columns'),
doc=_columns.__doc__)
def _export_columns(self):
"""Initialize column collections."""
self._columns = ColumnCollection()
self._primary_key = ColumnSet()
self._foreign_keys = set()
self._populate_column_collection()
def _populate_column_collection(self):
pass
class _BindParamClause(ColumnElement):
"""Represent a bind parameter.
Public constructor is the :func:`bindparam()` function.
"""
__visit_name__ = 'bindparam'
quote = None
def __init__(self, key, value, type_=None, unique=False,
isoutparam=False, required=False,
_compared_to_operator=None,
_compared_to_type=None):
"""Construct a _BindParamClause.
:param key:
the key for this bind param. Will be used in the generated
SQL statement for dialects that use named parameters. This
value may be modified when part of a compilation operation,
if other :class:`_BindParamClause` objects exist with the same
key, or if its length is too long and truncation is
required.
:param value:
Initial value for this bind param. This value may be
overridden by the dictionary of parameters sent to statement
compilation/execution.
:param type\_:
A ``TypeEngine`` object that will be used to pre-process the
value corresponding to this :class:`_BindParamClause` at
execution time.
:param unique:
if True, the key name of this BindParamClause will be
modified if another :class:`_BindParamClause` of the same name
already has been located within the containing
:class:`ClauseElement`.
:param required:
a value is required at execution time.
:param isoutparam:
if True, the parameter should be treated like a stored procedure
"OUT" parameter.
"""
if unique:
self.key = _generated_label('%%(%d %s)s' % (id(self), key
or 'param'))
else:
self.key = key or _generated_label('%%(%d param)s'
% id(self))
self._orig_key = key or 'param'
self.unique = unique
self.value = value
self.isoutparam = isoutparam
self.required = required
if type_ is None:
if _compared_to_type is not None:
self.type = \
_compared_to_type._coerce_compared_value(
_compared_to_operator, value)
else:
self.type = sqltypes.type_map.get(type(value),
sqltypes.NULLTYPE)
elif isinstance(type_, type):
self.type = type_()
else:
self.type = type_
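    # Illustrative usage (a sketch, not part of the original source): bind
    # parameters are normally created via the public ``bindparam()``
    # function defined elsewhere in this module; ``users`` and ``conn``
    # are assumed constructs.
    #
    #     stmt = select([users]).where(users.c.name == bindparam('uname'))
    #     # the actual value is supplied at execution time:
    #     # conn.execute(stmt, uname='ed')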
def _clone(self):
c = ClauseElement._clone(self)
if self.unique:
c.key = _generated_label('%%(%d %s)s' % (id(c), c._orig_key
or 'param'))
return c
def _convert_to_unique(self):
if not self.unique:
self.unique = True
self.key = _generated_label('%%(%d %s)s' % (id(self),
self._orig_key or 'param'))
def bind_processor(self, dialect):
return self.type.dialect_impl(dialect).bind_processor(dialect)
def compare(self, other, **kw):
"""Compare this :class:`_BindParamClause` to the given
clause."""
return isinstance(other, _BindParamClause) \
and self.type._compare_type_affinity(other.type) \
and self.value == other.value
def __getstate__(self):
"""execute a deferred value for serialization purposes."""
d = self.__dict__.copy()
v = self.value
if util.callable(v):
v = v()
d['value'] = v
return d
def __repr__(self):
return '_BindParamClause(%r, %r, type_=%r)' % (self.key,
self.value, self.type)
class _TypeClause(ClauseElement):
"""Handle a type keyword in a SQL statement.
Used by the ``Case`` statement.
"""
__visit_name__ = 'typeclause'
def __init__(self, type):
self.type = type
class _Generative(object):
"""Allow a ClauseElement to generate itself via the
@_generative decorator.
"""
def _generate(self):
s = self.__class__.__new__(self.__class__)
s.__dict__ = self.__dict__.copy()
return s
class Executable(_Generative):
"""Mark a ClauseElement as supporting execution.
:class:`Executable` is a superclass for all "statement" types
of objects, including :func:`select`, :func:`delete`, :func:`update`,
:func:`insert`, :func:`text`.
"""
supports_execution = True
_execution_options = util.frozendict()
@_generative
def execution_options(self, **kw):
""" Set non-SQL options for the statement which take effect during
execution.
Current options include:
* autocommit - when True, a COMMIT will be invoked after execution
when executed in 'autocommit' mode, i.e. when an explicit
transaction is not begun on the connection. Note that DBAPI
connections by default are always in a transaction - SQLAlchemy uses
rules applied to different kinds of statements to determine if
COMMIT will be invoked in order to provide its "autocommit" feature.
Typically, all INSERT/UPDATE/DELETE statements as well as
CREATE/DROP statements have autocommit behavior enabled; SELECT
constructs do not. Use this option when invoking a SELECT or other
specific SQL construct where COMMIT is desired (typically when
calling stored procedures and such).
* stream_results - indicate to the dialect that results should be
"streamed" and not pre-buffered, if possible. This is a limitation
of many DBAPIs. The flag is currently understood only by the
psycopg2 dialect.
* compiled_cache - a dictionary where :class:`Compiled` objects
will be cached when the :class:`Connection` compiles a clause
expression into a dialect- and parameter-specific
:class:`Compiled` object. It is the user's responsibility to
manage the size of this dictionary, which will have keys
corresponding to the dialect, clause element, the column
names within the VALUES or SET clause of an INSERT or UPDATE,
as well as the "batch" mode for an INSERT or UPDATE statement.
The format of this dictionary is not guaranteed to stay the
same in future releases.
This option is usually more appropriate
to use via the
:meth:`sqlalchemy.engine.base.Connection.execution_options()`
method of :class:`Connection`, rather than upon individual
statement objects, though the effect is the same.
See also:
:meth:`sqlalchemy.engine.base.Connection.execution_options()`
:meth:`sqlalchemy.orm.query.Query.execution_options()`
"""
self._execution_options = self._execution_options.union(kw)
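    # Example (sketch; ``users`` and ``conn`` are assumed constructs):
    #
    #     stmt = select([users]).execution_options(stream_results=True)
    #     result = conn.execute(stmt)
    #
    # or, to enable COMMIT for a SELECT that invokes a (hypothetical)
    # stored procedure:
    #
    #     stmt = select([func.my_proc()]).execution_options(autocommit=True)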
def execute(self, *multiparams, **params):
"""Compile and execute this :class:`.Executable`."""
e = self.bind
if e is None:
label = getattr(self, 'description', self.__class__.__name__)
msg = ('This %s is not bound and does not support direct '
'execution. Supply this statement to a Connection or '
'Engine for execution. Or, assign a bind to the statement '
'or the Metadata of its underlying tables to enable '
'implicit execution via this method.' % label)
raise exc.UnboundExecutionError(msg)
return e._execute_clauseelement(self, multiparams, params)
def scalar(self, *multiparams, **params):
"""Compile and execute this :class:`.Executable`, returning the
result's scalar representation.
"""
return self.execute(*multiparams, **params).scalar()
# legacy, some outside users may be calling this
_Executable = Executable
class _TextClause(Executable, ClauseElement):
"""Represent a literal SQL text fragment.
Public constructor is the :func:`text()` function.
"""
__visit_name__ = 'textclause'
_bind_params_regex = re.compile(r'(?<![:\w\x5c]):(\w+)(?!:)', re.UNICODE)
_execution_options = \
Executable._execution_options.union({'autocommit'
: PARSE_AUTOCOMMIT})
@property
def _select_iterable(self):
return (self,)
_hide_froms = []
def __init__(
self,
text='',
bind=None,
bindparams=None,
typemap=None,
autocommit=None,
):
self._bind = bind
self.bindparams = {}
self.typemap = typemap
if autocommit is not None:
            util.warn_deprecated('autocommit on text() is deprecated. '
                                 'Use .execution_options(autocommit=True)')
self._execution_options = \
self._execution_options.union({'autocommit'
: autocommit})
if typemap is not None:
for key in typemap.keys():
typemap[key] = sqltypes.to_instance(typemap[key])
def repl(m):
self.bindparams[m.group(1)] = bindparam(m.group(1))
return ':%s' % m.group(1)
# scan the string and search for bind parameter names, add them
# to the list of bindparams
self.text = self._bind_params_regex.sub(repl, text)
if bindparams is not None:
for b in bindparams:
self.bindparams[b.key] = b
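    # Illustrative usage (sketch; the public ``text()`` constructor defined
    # elsewhere in this module produces this class):
    #
    #     t = text("SELECT * FROM users WHERE id = :user_id")
    #     # ':user_id' is picked up by _bind_params_regex above and becomes
    #     # a bind parameter, supplied at execution time:
    #     # conn.execute(t, user_id=7)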
@property
def type(self):
if self.typemap is not None and len(self.typemap) == 1:
return list(self.typemap)[0]
else:
return sqltypes.NULLTYPE
def self_group(self, against=None):
if against is operators.in_op:
return _Grouping(self)
else:
return self
def _copy_internals(self, clone=_clone):
self.bindparams = dict((b.key, clone(b))
for b in self.bindparams.values())
def get_children(self, **kwargs):
return self.bindparams.values()
class _Null(ColumnElement):
"""Represent the NULL keyword in a SQL statement.
Public constructor is the :func:`null()` function.
"""
__visit_name__ = 'null'
def __init__(self):
self.type = sqltypes.NULLTYPE
class ClauseList(ClauseElement):
"""Describe a list of clauses, separated by an operator.
    By default, it is comma-separated, such as a column listing.
"""
__visit_name__ = 'clauselist'
def __init__(self, *clauses, **kwargs):
self.operator = kwargs.pop('operator', operators.comma_op)
self.group = kwargs.pop('group', True)
self.group_contents = kwargs.pop('group_contents', True)
if self.group_contents:
self.clauses = [
_literal_as_text(clause).self_group(against=self.operator)
for clause in clauses if clause is not None]
else:
self.clauses = [
_literal_as_text(clause)
for clause in clauses if clause is not None]
@util.memoized_property
def type(self):
if self.clauses:
return self.clauses[0].type
else:
return sqltypes.NULLTYPE
def __iter__(self):
return iter(self.clauses)
def __len__(self):
return len(self.clauses)
@property
def _select_iterable(self):
return iter(self)
def append(self, clause):
        # TODO: not sure if I like the 'group_contents' flag. Need to
        # define the difference between a ClauseList of ClauseLists
        # and a "flattened" ClauseList of ClauseLists. A flatten()
        # method?
if self.group_contents:
self.clauses.append(_literal_as_text(clause).\
self_group(against=self.operator))
else:
self.clauses.append(_literal_as_text(clause))
def _copy_internals(self, clone=_clone):
self.clauses = [clone(clause) for clause in self.clauses]
def get_children(self, **kwargs):
return self.clauses
@property
def _from_objects(self):
return list(itertools.chain(*[c._from_objects for c in self.clauses]))
def self_group(self, against=None):
if self.group and operators.is_precedent(self.operator, against):
return _Grouping(self)
else:
return self
def compare(self, other, **kw):
"""Compare this :class:`ClauseList` to the given :class:`ClauseList`,
including a comparison of all the clause items.
"""
if not isinstance(other, ClauseList) and len(self.clauses) == 1:
return self.clauses[0].compare(other, **kw)
elif isinstance(other, ClauseList) and \
len(self.clauses) == len(other.clauses):
for i in range(0, len(self.clauses)):
if not self.clauses[i].compare(other.clauses[i], **kw):
return False
else:
return self.operator == other.operator
else:
return False
class BooleanClauseList(ClauseList, ColumnElement):
__visit_name__ = 'clauselist'
def __init__(self, *clauses, **kwargs):
super(BooleanClauseList, self).__init__(*clauses, **kwargs)
self.type = sqltypes.to_instance(kwargs.get('type_',
sqltypes.Boolean))
@property
def _select_iterable(self):
return (self, )
class _Tuple(ClauseList, ColumnElement):
def __init__(self, *clauses, **kw):
clauses = [_literal_as_binds(c) for c in clauses]
super(_Tuple, self).__init__(*clauses, **kw)
self.type = _type_from_args(clauses)
@property
def _select_iterable(self):
return (self, )
def _bind_param(self, operator, obj):
return _Tuple(*[
_BindParamClause(None, o, _compared_to_operator=operator,
_compared_to_type=self.type, unique=True)
for o in obj
]).self_group()
class _Case(ColumnElement):
__visit_name__ = 'case'
def __init__(self, whens, value=None, else_=None):
try:
whens = util.dictlike_iteritems(whens)
except TypeError:
pass
if value is not None:
whenlist = [
(_literal_as_binds(c).self_group(),
_literal_as_binds(r)) for (c, r) in whens
]
else:
whenlist = [
(_no_literals(c).self_group(),
_literal_as_binds(r)) for (c, r) in whens
]
if whenlist:
type_ = list(whenlist[-1])[-1].type
else:
type_ = None
if value is None:
self.value = None
else:
self.value = _literal_as_binds(value)
self.type = type_
self.whens = whenlist
if else_ is not None:
self.else_ = _literal_as_binds(else_)
else:
self.else_ = None
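    # Illustrative usage (sketch; assumes the public ``case()`` constructor
    # defined elsewhere in this module, plus a ``users`` table construct):
    #
    #     expr = case([(users.c.name == 'wendy', 'W'),
    #                  (users.c.name == 'jack', 'J')],
    #                 else_='E')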
def _copy_internals(self, clone=_clone):
if self.value is not None:
self.value = clone(self.value)
self.whens = [(clone(x), clone(y)) for x, y in self.whens]
if self.else_ is not None:
self.else_ = clone(self.else_)
def get_children(self, **kwargs):
if self.value is not None:
yield self.value
for x, y in self.whens:
yield x
yield y
if self.else_ is not None:
yield self.else_
@property
def _from_objects(self):
return list(itertools.chain(*[x._from_objects for x in
self.get_children()]))
class FunctionElement(Executable, ColumnElement, FromClause):
"""Base for SQL function-oriented constructs."""
def __init__(self, *clauses, **kwargs):
args = [_literal_as_binds(c, self.name) for c in clauses]
self.clause_expr = ClauseList(
operator=operators.comma_op,
group_contents=True, *args).\
self_group()
@property
def columns(self):
return [self]
@util.memoized_property
def clauses(self):
return self.clause_expr.element
@property
def _from_objects(self):
return self.clauses._from_objects
def get_children(self, **kwargs):
return self.clause_expr,
def _copy_internals(self, clone=_clone):
self.clause_expr = clone(self.clause_expr)
self._reset_exported()
util.reset_memoized(self, 'clauses')
def select(self):
s = select([self])
if self._execution_options:
s = s.execution_options(**self._execution_options)
return s
def scalar(self):
return self.select().execute().scalar()
def execute(self):
return self.select().execute()
def _bind_param(self, operator, obj):
return _BindParamClause(None, obj, _compared_to_operator=operator,
_compared_to_type=self.type, unique=True)
class Function(FunctionElement):
"""Describe a named SQL function."""
__visit_name__ = 'function'
def __init__(self, name, *clauses, **kw):
self.packagenames = kw.pop('packagenames', None) or []
self.name = name
self._bind = kw.get('bind', None)
self.type = sqltypes.to_instance(kw.get('type_', None))
FunctionElement.__init__(self, *clauses, **kw)
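    # Illustrative usage (sketch): named functions are typically produced
    # via the ``func`` generator defined elsewhere, e.g.
    #
    #     select([func.count(users.c.id)])
    #
    # which is roughly equivalent to Function('count', users.c.id).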
def _bind_param(self, operator, obj):
return _BindParamClause(self.name, obj,
_compared_to_operator=operator,
_compared_to_type=self.type,
unique=True)
class _Cast(ColumnElement):
__visit_name__ = 'cast'
def __init__(self, clause, totype, **kwargs):
self.type = sqltypes.to_instance(totype)
self.clause = _literal_as_binds(clause, None)
self.typeclause = _TypeClause(self.type)
def _copy_internals(self, clone=_clone):
self.clause = clone(self.clause)
self.typeclause = clone(self.typeclause)
def get_children(self, **kwargs):
return self.clause, self.typeclause
@property
def _from_objects(self):
return self.clause._from_objects
class _Extract(ColumnElement):
__visit_name__ = 'extract'
def __init__(self, field, expr, **kwargs):
self.type = sqltypes.Integer()
self.field = field
self.expr = _literal_as_binds(expr, None)
def _copy_internals(self, clone=_clone):
self.expr = clone(self.expr)
def get_children(self, **kwargs):
return self.expr,
@property
def _from_objects(self):
return self.expr._from_objects
class _UnaryExpression(ColumnElement):
__visit_name__ = 'unary'
def __init__(self, element, operator=None, modifier=None,
type_=None, negate=None):
self.operator = operator
self.modifier = modifier
self.element = _literal_as_text(element).\
self_group(against=self.operator or self.modifier)
self.type = sqltypes.to_instance(type_)
self.negate = negate
@property
def _from_objects(self):
return self.element._from_objects
def _copy_internals(self, clone=_clone):
self.element = clone(self.element)
def get_children(self, **kwargs):
return self.element,
def compare(self, other, **kw):
"""Compare this :class:`_UnaryExpression` against the given
:class:`ClauseElement`."""
return (
isinstance(other, _UnaryExpression) and
self.operator == other.operator and
self.modifier == other.modifier and
self.element.compare(other.element, **kw)
)
def _negate(self):
if self.negate is not None:
return _UnaryExpression(
self.element,
operator=self.negate,
negate=self.operator,
modifier=self.modifier,
type_=self.type)
else:
return super(_UnaryExpression, self)._negate()
def self_group(self, against=None):
if self.operator and operators.is_precedent(self.operator,
against):
return _Grouping(self)
else:
return self
class _BinaryExpression(ColumnElement):
"""Represent an expression that is ``LEFT <operator> RIGHT``."""
__visit_name__ = 'binary'
def __init__(self, left, right, operator, type_=None,
negate=None, modifiers=None):
self.left = _literal_as_text(left).self_group(against=operator)
self.right = _literal_as_text(right).self_group(against=operator)
self.operator = operator
self.type = sqltypes.to_instance(type_)
self.negate = negate
if modifiers is None:
self.modifiers = {}
else:
self.modifiers = modifiers
def __nonzero__(self):
try:
return self.operator(hash(self.left), hash(self.right))
        except Exception:
raise TypeError("Boolean value of this clause is not defined")
@property
def _from_objects(self):
return self.left._from_objects + self.right._from_objects
def _copy_internals(self, clone=_clone):
self.left = clone(self.left)
self.right = clone(self.right)
def get_children(self, **kwargs):
return self.left, self.right
def compare(self, other, **kw):
"""Compare this :class:`_BinaryExpression` against the
given :class:`_BinaryExpression`."""
return (
isinstance(other, _BinaryExpression) and
self.operator == other.operator and
(
self.left.compare(other.left, **kw) and
self.right.compare(other.right, **kw) or
(
operators.is_commutative(self.operator) and
self.left.compare(other.right, **kw) and
self.right.compare(other.left, **kw)
)
)
)
def self_group(self, against=None):
if operators.is_precedent(self.operator, against):
return _Grouping(self)
else:
return self
def _negate(self):
if self.negate is not None:
return _BinaryExpression(
self.left,
self.right,
self.negate,
negate=self.operator,
type_=sqltypes.BOOLEANTYPE,
modifiers=self.modifiers)
else:
return super(_BinaryExpression, self)._negate()
class _Exists(_UnaryExpression):
__visit_name__ = _UnaryExpression.__visit_name__
_from_objects = []
def __init__(self, *args, **kwargs):
if args and isinstance(args[0], (_SelectBaseMixin, _ScalarSelect)):
s = args[0]
else:
if not args:
args = ([literal_column('*')],)
s = select(*args, **kwargs).as_scalar().self_group()
_UnaryExpression.__init__(self, s, operator=operators.exists,
type_=sqltypes.Boolean)
def select(self, whereclause=None, **params):
return select([self], whereclause, **params)
def correlate(self, fromclause):
e = self._clone()
e.element = self.element.correlate(fromclause).self_group()
return e
def select_from(self, clause):
"""return a new exists() construct with the given expression set as
its FROM clause.
"""
e = self._clone()
e.element = self.element.select_from(clause).self_group()
return e
def where(self, clause):
"""return a new exists() construct with the given expression added to
its WHERE clause, joined to the existing clause via AND, if any.
"""
e = self._clone()
e.element = self.element.where(clause).self_group()
return e
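    # Illustrative usage (sketch; assumes the public ``exists()``
    # constructor defined elsewhere in this module, plus ``users`` and
    # ``addresses`` table constructs):
    #
    #     stmt = select([users.c.name]).where(
    #         exists().where(addresses.c.user_id == users.c.id))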
class Join(FromClause):
"""represent a ``JOIN`` construct between two :class:`FromClause`
elements.
The public constructor function for :class:`Join` is the module-level
:func:`join()` function, as well as the :func:`join()` method available
off all :class:`FromClause` subclasses.
"""
__visit_name__ = 'join'
def __init__(self, left, right, onclause=None, isouter=False):
self.left = _literal_as_text(left)
self.right = _literal_as_text(right).self_group()
if onclause is None:
self.onclause = self._match_primaries(self.left, self.right)
else:
self.onclause = onclause
self.isouter = isouter
self.__folded_equivalents = None
@property
def description(self):
return "Join object on %s(%d) and %s(%d)" % (
self.left.description,
id(self.left),
self.right.description,
id(self.right))
def is_derived_from(self, fromclause):
return fromclause is self or \
self.left.is_derived_from(fromclause) or\
self.right.is_derived_from(fromclause)
def self_group(self, against=None):
return _FromGrouping(self)
def _populate_column_collection(self):
columns = [c for c in self.left.columns] + \
[c for c in self.right.columns]
self._primary_key.extend(sqlutil.reduce_columns(
(c for c in columns if c.primary_key), self.onclause))
self._columns.update((col._label, col) for col in columns)
self._foreign_keys.update(itertools.chain(
*[col.foreign_keys for col in columns]))
def _copy_internals(self, clone=_clone):
self._reset_exported()
self.left = clone(self.left)
self.right = clone(self.right)
self.onclause = clone(self.onclause)
self.__folded_equivalents = None
def get_children(self, **kwargs):
return self.left, self.right, self.onclause
def _match_primaries(self, left, right):
if isinstance(left, Join):
left_right = left.right
else:
left_right = None
return sqlutil.join_condition(left, right, a_subset=left_right)
def select(self, whereclause=None, fold_equivalents=False, **kwargs):
"""Create a :class:`Select` from this :class:`Join`.
:param whereclause: the WHERE criterion that will be sent to
the :func:`select()` function
:param fold_equivalents: based on the join criterion of this
:class:`Join`, do not include
repeat column names in the column list of the resulting
select, for columns that are calculated to be "equivalent"
based on the join criterion of this :class:`Join`. This will
recursively apply to any joins directly nested by this one
as well.
:param \**kwargs: all other kwargs are sent to the
underlying :func:`select()` function.
"""
if fold_equivalents:
collist = sqlutil.folded_equivalents(self)
else:
collist = [self.left, self.right]
return select(collist, whereclause, from_obj=[self], **kwargs)
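    # Example (sketch; ``users`` and ``addresses`` are assumed constructs):
    #
    #     j = users.join(addresses, users.c.id == addresses.c.user_id)
    #     stmt = j.select(fold_equivalents=True)
    #
    # fold_equivalents here drops the redundant addresses.c.user_id from
    # the columns clause, since the join criterion equates it to users.c.id.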
@property
def bind(self):
return self.left.bind or self.right.bind
def alias(self, name=None):
"""Create a :class:`Select` out of this :class:`Join` clause and
return an :class:`Alias` of it.
The :class:`Select` is not correlating.
"""
return self.select(use_labels=True, correlate=False).alias(name)
@property
def _hide_froms(self):
return itertools.chain(*[_from_objects(x.left, x.right)
for x in self._cloned_set])
@property
def _from_objects(self):
return [self] + \
self.onclause._from_objects + \
self.left._from_objects + \
self.right._from_objects
class Alias(FromClause):
"""Represents an table or selectable alias (AS).
Represents an alias, as typically applied to any table or
sub-select within a SQL statement using the ``AS`` keyword (or
without the keyword on certain databases such as Oracle).
This object is constructed from the :func:`alias()` module level
function as well as the :func:`alias()` method available on all
:class:`FromClause` subclasses.
"""
__visit_name__ = 'alias'
named_with_column = True
def __init__(self, selectable, alias=None):
baseselectable = selectable
while isinstance(baseselectable, Alias):
baseselectable = baseselectable.element
self.original = baseselectable
self.supports_execution = baseselectable.supports_execution
if self.supports_execution:
self._execution_options = baseselectable._execution_options
self.element = selectable
if alias is None:
if self.original.named_with_column:
alias = getattr(self.original, 'name', None)
alias = _generated_label('%%(%d %s)s' % (id(self), alias
or 'anon'))
self.name = alias
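    # Illustrative usage (sketch; ``users`` is an assumed table construct):
    #
    #     a = users.alias('u')
    #     stmt = select([a.c.name]).where(a.c.id == 5)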
@property
def description(self):
# Py3K
#return self.name
# Py2K
return self.name.encode('ascii', 'backslashreplace')
# end Py2K
def as_scalar(self):
try:
return self.element.as_scalar()
except AttributeError:
raise AttributeError("Element %s does not support "
"'as_scalar()'" % self.element)
def is_derived_from(self, fromclause):
if fromclause in self._cloned_set:
return True
return self.element.is_derived_from(fromclause)
def _populate_column_collection(self):
for col in self.element.columns:
col._make_proxy(self)
def _copy_internals(self, clone=_clone):
self._reset_exported()
self.element = _clone(self.element)
baseselectable = self.element
while isinstance(baseselectable, Alias):
baseselectable = baseselectable.element
self.original = baseselectable
def get_children(self, column_collections=True,
aliased_selectables=True, **kwargs):
if column_collections:
for c in self.c:
yield c
if aliased_selectables:
yield self.element
@property
def _from_objects(self):
return [self]
@property
def bind(self):
return self.element.bind
class _Grouping(ColumnElement):
"""Represent a grouping within a column expression"""
__visit_name__ = 'grouping'
def __init__(self, element):
self.element = element
self.type = getattr(element, 'type', None)
@property
def _label(self):
return getattr(self.element, '_label', None) or self.anon_label
def _copy_internals(self, clone=_clone):
self.element = clone(self.element)
def get_children(self, **kwargs):
return self.element,
@property
def _from_objects(self):
return self.element._from_objects
def __getattr__(self, attr):
return getattr(self.element, attr)
def __getstate__(self):
        return {'element': self.element, 'type': self.type}
def __setstate__(self, state):
self.element = state['element']
self.type = state['type']
class _FromGrouping(FromClause):
"""Represent a grouping of a FROM clause"""
__visit_name__ = 'grouping'
def __init__(self, element):
self.element = element
@property
def columns(self):
return self.element.columns
@property
def _hide_froms(self):
return self.element._hide_froms
def get_children(self, **kwargs):
return self.element,
def _copy_internals(self, clone=_clone):
self.element = clone(self.element)
@property
def _from_objects(self):
return self.element._from_objects
def __getattr__(self, attr):
return getattr(self.element, attr)
def __getstate__(self):
        return {'element': self.element}
def __setstate__(self, state):
self.element = state['element']
class _Label(ColumnElement):
"""Represents a column label (AS).
Represent a label, as typically applied to any column-level
element using the ``AS`` sql keyword.
This object is constructed from the :func:`label()` module level
function as well as the :func:`label()` method available on all
:class:`ColumnElement` subclasses.
"""
__visit_name__ = 'label'
def __init__(self, name, element, type_=None):
while isinstance(element, _Label):
element = element.element
self.name = self.key = self._label = name \
or _generated_label('%%(%d %s)s' % (id(self),
getattr(element, 'name', 'anon')))
self._element = element
self._type = type_
self.quote = element.quote
self.proxies = [element]
@util.memoized_property
def type(self):
return sqltypes.to_instance(
self._type or getattr(self._element, 'type', None)
)
@util.memoized_property
def element(self):
return self._element.self_group(against=operators.as_)
def self_group(self, against=None):
sub_element = self._element.self_group(against=against)
if sub_element is not self._element:
return _Label(self.name,
sub_element,
type_=self._type)
else:
return self._element
@property
def primary_key(self):
return self.element.primary_key
@property
def foreign_keys(self):
return self.element.foreign_keys
def get_children(self, **kwargs):
return self.element,
def _copy_internals(self, clone=_clone):
self.element = clone(self.element)
@property
def _from_objects(self):
return self.element._from_objects
    def _make_proxy(self, selectable, name=None):
if isinstance(self.element, (Selectable, ColumnElement)):
e = self.element._make_proxy(selectable, name=self.name)
else:
e = column(self.name)._make_proxy(selectable=selectable)
e.proxies.append(self)
return e
class ColumnClause(_Immutable, ColumnElement):
"""Represents a generic column expression from any textual string.
This includes columns associated with tables, aliases and select
statements, but also any arbitrary text. May or may not be bound
to an underlying :class:`Selectable`. :class:`ColumnClause` is usually
    created publicly via the :func:`column()` function or the
:func:`literal_column()` function.
text
the text of the element.
selectable
parent selectable.
type
``TypeEngine`` object which can associate this :class:`ColumnClause`
with a type.
is_literal
if True, the :class:`ColumnClause` is assumed to be an exact
expression that will be delivered to the output with no quoting
      rules applied regardless of case-sensitivity settings. The
:func:`literal_column()` function is usually used to create such a
:class:`ColumnClause`.
"""
__visit_name__ = 'column'
onupdate = default = server_default = server_onupdate = None
def __init__(self, text, selectable=None, type_=None, is_literal=False):
self.key = self.name = text
self.table = selectable
self.type = sqltypes.to_instance(type_)
self.is_literal = is_literal
@util.memoized_property
def description(self):
# Py3K
#return self.name
# Py2K
return self.name.encode('ascii', 'backslashreplace')
# end Py2K
@util.memoized_property
def _label(self):
if self.is_literal:
return None
elif self.table is not None and self.table.named_with_column:
if getattr(self.table, 'schema', None):
label = self.table.schema.replace('.', '_') + "_" + \
_escape_for_generated(self.table.name) + "_" + \
_escape_for_generated(self.name)
else:
label = _escape_for_generated(self.table.name) + "_" + \
_escape_for_generated(self.name)
# ensure the label name doesn't conflict with that
# of an existing column
if label in self.table.c:
_label = label
counter = 1
while _label in self.table.c:
_label = label + "_" + str(counter)
counter += 1
label = _label
return _generated_label(label)
else:
return self.name
def label(self, name):
if name is None:
return self
else:
return super(ColumnClause, self).label(name)
@property
def _from_objects(self):
if self.table is not None:
return [self.table]
else:
return []
def _bind_param(self, operator, obj):
return _BindParamClause(self.name, obj,
_compared_to_operator=operator,
_compared_to_type=self.type,
unique=True)
def _make_proxy(self, selectable, name=None, attach=True):
# propagate the "is_literal" flag only if we are keeping our name,
        # otherwise it's considered to be a label
is_literal = self.is_literal and (name is None or name == self.name)
c = self._constructor(
name or self.name,
selectable=selectable,
type_=self.type,
is_literal=is_literal
)
c.proxies = [self]
if attach:
selectable.columns[c.name] = c
return c
class TableClause(_Immutable, FromClause):
"""Represents a "table" construct.
Note that this represents tables only as another syntactical
construct within SQL expressions; it does not provide schema-level
functionality.
"""
__visit_name__ = 'table'
named_with_column = True
def __init__(self, name, *columns):
super(TableClause, self).__init__()
self.name = self.fullname = name
self._columns = ColumnCollection()
self._primary_key = ColumnSet()
self._foreign_keys = set()
for c in columns:
self.append_column(c)
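    # Illustrative usage (sketch; the lower-case ``table()`` and
    # ``column()`` constructors defined elsewhere in this module produce
    # these objects):
    #
    #     users = table('users', column('id'), column('name'))
    #     stmt = select([users]).where(users.c.id == 7)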
def _export_columns(self):
raise NotImplementedError()
@util.memoized_property
def description(self):
# Py3K
#return self.name
# Py2K
return self.name.encode('ascii', 'backslashreplace')
# end Py2K
def append_column(self, c):
self._columns[c.name] = c
c.table = self
def get_children(self, column_collections=True, **kwargs):
if column_collections:
return [c for c in self.c]
else:
return []
def count(self, whereclause=None, **params):
"""return a SELECT COUNT generated against this
:class:`TableClause`."""
if self.primary_key:
col = list(self.primary_key)[0]
else:
col = list(self.columns)[0]
return select(
[func.count(col).label('tbl_row_count')],
whereclause,
from_obj=[self],
**params)
def insert(self, values=None, inline=False, **kwargs):
"""Generate an :func:`insert()` construct."""
return insert(self, values=values, inline=inline, **kwargs)
def update(self, whereclause=None, values=None, inline=False, **kwargs):
"""Generate an :func:`update()` construct."""
return update(self, whereclause=whereclause,
values=values, inline=inline, **kwargs)
def delete(self, whereclause=None, **kwargs):
"""Generate a :func:`delete()` construct."""
return delete(self, whereclause, **kwargs)
@property
def _from_objects(self):
return [self]
class _SelectBaseMixin(Executable):
"""Base class for :class:`Select` and ``CompoundSelects``."""
def __init__(self,
use_labels=False,
for_update=False,
limit=None,
offset=None,
order_by=None,
group_by=None,
bind=None,
autocommit=None):
self.use_labels = use_labels
self.for_update = for_update
if autocommit is not None:
            util.warn_deprecated('autocommit on select() is deprecated. '
                                 'Use .execution_options(autocommit=True)')
self._execution_options = \
self._execution_options.union({'autocommit'
: autocommit})
self._limit = limit
self._offset = offset
self._bind = bind
self._order_by_clause = ClauseList(*util.to_list(order_by) or [])
self._group_by_clause = ClauseList(*util.to_list(group_by) or [])
def as_scalar(self):
"""return a 'scalar' representation of this selectable, which can be
used as a column expression.
Typically, a select statement which has only one column in its columns
clause is eligible to be used as a scalar expression.
The returned object is an instance of
:class:`_ScalarSelect`.
"""
return _ScalarSelect(self)
@_generative
def apply_labels(self):
"""return a new selectable with the 'use_labels' flag set to True.
This will result in column expressions being generated using labels
against their table name, such as "SELECT somecolumn AS
tablename_somecolumn". This allows selectables which contain multiple
FROM clauses to produce a unique set of column names regardless of
name conflicts among the individual FROM clauses.
"""
self.use_labels = True
def label(self, name):
"""return a 'scalar' representation of this selectable, embedded as a
subquery with a label.
See also ``as_scalar()``.
"""
return self.as_scalar().label(name)
@_generative
@util.deprecated('0.6',
message=":func:`.autocommit` is deprecated. Use "
":func:`.Executable.execution_options` with the "
"'autocommit' flag.")
def autocommit(self):
"""return a new selectable with the 'autocommit' flag set to
True."""
self._execution_options = \
self._execution_options.union({'autocommit': True})
def _generate(self):
"""Override the default _generate() method to also clear out
exported collections."""
s = self.__class__.__new__(self.__class__)
s.__dict__ = self.__dict__.copy()
s._reset_exported()
return s
@_generative
def limit(self, limit):
"""return a new selectable with the given LIMIT criterion
applied."""
self._limit = limit
@_generative
def offset(self, offset):
"""return a new selectable with the given OFFSET criterion
applied."""
self._offset = offset
@_generative
def order_by(self, *clauses):
"""return a new selectable with the given list of ORDER BY
criterion applied.
The criterion will be appended to any pre-existing ORDER BY
criterion.
"""
self.append_order_by(*clauses)
@_generative
def group_by(self, *clauses):
"""return a new selectable with the given list of GROUP BY
criterion applied.
The criterion will be appended to any pre-existing GROUP BY
criterion.
"""
self.append_group_by(*clauses)
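    # Example of generative chaining (sketch; each call returns a new
    # selectable rather than mutating in place):
    #
    #     stmt = select([users]).order_by(users.c.name).limit(10).offset(20)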
def append_order_by(self, *clauses):
"""Append the given ORDER BY criterion applied to this selectable.
The criterion will be appended to any pre-existing ORDER BY criterion.
"""
if len(clauses) == 1 and clauses[0] is None:
self._order_by_clause = ClauseList()
else:
if getattr(self, '_order_by_clause', None) is not None:
clauses = list(self._order_by_clause) + list(clauses)
self._order_by_clause = ClauseList(*clauses)
def append_group_by(self, *clauses):
"""Append the given GROUP BY criterion applied to this selectable.
The criterion will be appended to any pre-existing GROUP BY criterion.
"""
if len(clauses) == 1 and clauses[0] is None:
self._group_by_clause = ClauseList()
else:
if getattr(self, '_group_by_clause', None) is not None:
clauses = list(self._group_by_clause) + list(clauses)
self._group_by_clause = ClauseList(*clauses)
@property
def _from_objects(self):
return [self]
class _ScalarSelect(_Grouping):
_from_objects = []
def __init__(self, element):
self.element = element
self.type = element._scalar_type()
@property
def columns(self):
raise exc.InvalidRequestError('Scalar Select expression has no '
'columns; use this object directly within a '
'column-level expression.')
c = columns
def self_group(self, **kwargs):
return self
def _make_proxy(self, selectable, name):
return list(self.inner_columns)[0]._make_proxy(selectable, name)
class CompoundSelect(_SelectBaseMixin, FromClause):
"""Forms the basis of ``UNION``, ``UNION ALL``, and other
SELECT-based set operations."""
__visit_name__ = 'compound_select'
UNION = util.symbol('UNION')
UNION_ALL = util.symbol('UNION ALL')
EXCEPT = util.symbol('EXCEPT')
EXCEPT_ALL = util.symbol('EXCEPT ALL')
INTERSECT = util.symbol('INTERSECT')
INTERSECT_ALL = util.symbol('INTERSECT ALL')
def __init__(self, keyword, *selects, **kwargs):
self._should_correlate = kwargs.pop('correlate', False)
self.keyword = keyword
self.selects = []
numcols = None
# some DBs do not like ORDER BY in the inner queries of a UNION, etc.
for n, s in enumerate(selects):
s = _clause_element_as_expr(s)
if not numcols:
numcols = len(s.c)
elif len(s.c) != numcols:
raise exc.ArgumentError('All selectables passed to '
'CompoundSelect must have identical numbers of '
'columns; select #%d has %d columns, select '
'#%d has %d' % (1, len(self.selects[0].c), n
+ 1, len(s.c)))
self.selects.append(s.self_group(self))
_SelectBaseMixin.__init__(self, **kwargs)
def _scalar_type(self):
return self.selects[0]._scalar_type()
def self_group(self, against=None):
return _FromGrouping(self)
def is_derived_from(self, fromclause):
for s in self.selects:
if s.is_derived_from(fromclause):
return True
return False
def _populate_column_collection(self):
for cols in zip(*[s.c for s in self.selects]):
# this is a slightly hacky thing - the union exports a
# column that resembles just that of the *first* selectable.
# to get at a "composite" column, particularly foreign keys,
# you have to dig through the proxies collection which we
# generate below. We may want to improve upon this, such as
# perhaps _make_proxy can accept a list of other columns
# that are "shared" - schema.column can then copy all the
            # ForeignKeys in. This would allow the union() to have all
            # those fks too.
proxy = cols[0]._make_proxy(self, name=self.use_labels
and cols[0]._label or None)
            # hand-construct the "proxies" collection to include all
            # derived columns; place a 'weight' annotation corresponding
# to how low in the list of select()s the column occurs, so
# that the corresponding_column() operation can resolve
# conflicts
proxy.proxies = [c._annotate({'weight': i + 1}) for (i,
c) in enumerate(cols)]
def _copy_internals(self, clone=_clone):
self._reset_exported()
self.selects = [clone(s) for s in self.selects]
if hasattr(self, '_col_map'):
del self._col_map
for attr in ('_order_by_clause', '_group_by_clause'):
if getattr(self, attr) is not None:
setattr(self, attr, clone(getattr(self, attr)))
def get_children(self, column_collections=True, **kwargs):
return (column_collections and list(self.c) or []) \
+ [self._order_by_clause, self._group_by_clause] \
+ list(self.selects)
def bind(self):
if self._bind:
return self._bind
for s in self.selects:
e = s.bind
if e:
return e
else:
return None
def _set_bind(self, bind):
self._bind = bind
bind = property(bind, _set_bind)
class Select(_SelectBaseMixin, FromClause):
"""Represents a ``SELECT`` statement.
Select statements support appendable clauses, as well as the
ability to execute themselves and return a result set.
"""
__visit_name__ = 'select'
_prefixes = ()
_hints = util.frozendict()
def __init__(self,
columns,
whereclause=None,
from_obj=None,
distinct=False,
having=None,
correlate=True,
prefixes=None,
**kwargs):
"""Construct a Select object.
The public constructor for Select is the
:func:`select` function; see that function for
argument descriptions.
Additional generative and mutator methods are available on the
:class:`_SelectBaseMixin` superclass.
"""
self._should_correlate = correlate
self._distinct = distinct
self._correlate = set()
self._froms = util.OrderedSet()
try:
cols_present = bool(columns)
except TypeError:
raise exc.ArgumentError("columns argument to select() must "
"be a Python list or other iterable")
if cols_present:
self._raw_columns = []
for c in columns:
c = _literal_as_column(c)
if isinstance(c, _ScalarSelect):
c = c.self_group(against=operators.comma_op)
self._raw_columns.append(c)
self._froms.update(_from_objects(*self._raw_columns))
else:
self._raw_columns = []
if whereclause is not None:
self._whereclause = _literal_as_text(whereclause)
self._froms.update(_from_objects(self._whereclause))
else:
self._whereclause = None
if from_obj is not None:
for f in util.to_list(from_obj):
if _is_literal(f):
self._froms.add(_TextClause(f))
else:
self._froms.add(f)
if having is not None:
self._having = _literal_as_text(having)
else:
self._having = None
if prefixes:
self._prefixes = tuple([_literal_as_text(p) for p in prefixes])
_SelectBaseMixin.__init__(self, **kwargs)
def _get_display_froms(self, existing_froms=None):
"""Return the full list of 'from' clauses to be displayed.
Takes into account a set of existing froms which may be
rendered in the FROM clause of enclosing selects; this Select
may want to leave those absent if it is automatically
correlating.
"""
froms = self._froms
toremove = itertools.chain(*[f._hide_froms for f in froms])
if toremove:
froms = froms.difference(toremove)
if len(froms) > 1 or self._correlate:
if self._correlate:
froms = froms.difference(_cloned_intersection(froms,
self._correlate))
if self._should_correlate and existing_froms:
froms = froms.difference(_cloned_intersection(froms,
existing_froms))
if not len(froms):
raise exc.InvalidRequestError("Select statement '%s"
"' returned no FROM clauses due to "
"auto-correlation; specify "
"correlate(<tables>) to control "
"correlation manually." % self)
return froms
def _scalar_type(self):
elem = self._raw_columns[0]
cols = list(elem._select_iterable)
return cols[0].type
@property
def froms(self):
"""Return the displayed list of FromClause elements."""
return self._get_display_froms()
@_generative
def with_hint(self, selectable, text, dialect_name='*'):
"""Add an indexing hint for the given selectable to this
:class:`Select`.
The text of the hint is rendered in the appropriate
location for the database backend in use, relative
to the given :class:`.Table` or :class:`.Alias` passed as the
*selectable* argument. The dialect implementation
typically uses Python string substitution syntax
with the token ``%(name)s`` to render the name of
the table or alias. E.g. when using Oracle, the
following::
select([mytable]).\\
with_hint(mytable, "+ index(%(name)s ix_mytable)")
Would render SQL as::
select /*+ index(mytable ix_mytable) */ ... from mytable
The ``dialect_name`` option will limit the rendering of a particular
hint to a particular backend. Such as, to add hints for both Oracle
and Sybase simultaneously::
select([mytable]).\\
with_hint(mytable, "+ index(%(name)s ix_mytable)", 'oracle').\\
with_hint(mytable, "WITH INDEX ix_mytable", 'sybase')
"""
        self._hints = self._hints.union({(selectable, dialect_name): text})
@property
def type(self):
raise exc.InvalidRequestError("Select objects don't have a type. "
"Call as_scalar() on this Select object "
"to return a 'scalar' version of this Select.")
@util.memoized_instancemethod
def locate_all_froms(self):
"""return a Set of all FromClause elements referenced by this Select.
This set is a superset of that returned by the ``froms`` property,
which is specifically for those FromClause elements that would
actually be rendered.
"""
return self._froms.union(_from_objects(*list(self._froms)))
@property
def inner_columns(self):
"""an iterator of all ColumnElement expressions which would
be rendered into the columns clause of the resulting SELECT statement.
"""
return _select_iterables(self._raw_columns)
def is_derived_from(self, fromclause):
if self in fromclause._cloned_set:
return True
for f in self.locate_all_froms():
if f.is_derived_from(fromclause):
return True
return False
def _copy_internals(self, clone=_clone):
self._reset_exported()
from_cloned = dict((f, clone(f))
for f in self._froms.union(self._correlate))
self._froms = util.OrderedSet(from_cloned[f] for f in self._froms)
self._correlate = set(from_cloned[f] for f in self._correlate)
self._raw_columns = [clone(c) for c in self._raw_columns]
for attr in '_whereclause', '_having', '_order_by_clause', \
'_group_by_clause':
if getattr(self, attr) is not None:
setattr(self, attr, clone(getattr(self, attr)))
def get_children(self, column_collections=True, **kwargs):
"""return child elements as per the ClauseElement specification."""
return (column_collections and list(self.columns) or []) + \
self._raw_columns + list(self._froms) + \
[x for x in
(self._whereclause, self._having,
self._order_by_clause, self._group_by_clause)
if x is not None]
@_generative
def column(self, column):
"""return a new select() construct with the given column expression
added to its columns clause.
"""
column = _literal_as_column(column)
if isinstance(column, _ScalarSelect):
column = column.self_group(against=operators.comma_op)
self._raw_columns = self._raw_columns + [column]
self._froms = self._froms.union(_from_objects(column))
@_generative
def with_only_columns(self, columns):
"""return a new select() construct with its columns clause replaced
with the given columns.
"""
self._raw_columns = [
isinstance(c, _ScalarSelect) and
c.self_group(against=operators.comma_op) or c
for c in [_literal_as_column(c) for c in columns]
]
@_generative
def where(self, whereclause):
"""return a new select() construct with the given expression added to
its WHERE clause, joined to the existing clause via AND, if any.
"""
self.append_whereclause(whereclause)
@_generative
def having(self, having):
"""return a new select() construct with the given expression added to
its HAVING clause, joined to the existing clause via AND, if any.
"""
self.append_having(having)
@_generative
def distinct(self):
"""return a new select() construct which will apply DISTINCT to its
columns clause.
"""
self._distinct = True
@_generative
def prefix_with(self, clause):
"""return a new select() construct which will apply the given
expression to the start of its columns clause, not using any commas.
"""
clause = _literal_as_text(clause)
self._prefixes = self._prefixes + (clause,)
@_generative
def select_from(self, fromclause):
"""return a new select() construct with the given FROM expression
applied to its list of FROM objects.
"""
fromclause = _literal_as_text(fromclause)
self._froms = self._froms.union([fromclause])
@_generative
def correlate(self, *fromclauses):
"""return a new select() construct which will correlate the given FROM
clauses to that of an enclosing select(), if a match is found.
By "match", the given fromclause must be present in this select's
list of FROM objects and also present in an enclosing select's list of
FROM objects.
Calling this method turns off the select's default behavior of
"auto-correlation". Normally, select() auto-correlates all of its FROM
clauses to those of an embedded select when compiled.
If the fromclause is None, correlation is disabled for the returned
select().
"""
self._should_correlate = False
if fromclauses == (None,):
self._correlate = set()
else:
self._correlate = self._correlate.union(fromclauses)
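    # Example (sketch): force an inner SELECT to correlate only against
    # ``users`` (``users`` and ``addresses`` are assumed constructs):
    #
    #     inner = select([addresses.c.email]).\
    #         where(addresses.c.user_id == users.c.id).\
    #         correlate(users)
    #     stmt = select([users.c.name, inner.as_scalar()])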
def append_correlation(self, fromclause):
"""append the given correlation expression to this select()
construct."""
self._should_correlate = False
self._correlate = self._correlate.union([fromclause])
def append_column(self, column):
"""append the given column expression to the columns clause of this
select() construct.
"""
column = _literal_as_column(column)
if isinstance(column, _ScalarSelect):
column = column.self_group(against=operators.comma_op)
self._raw_columns = self._raw_columns + [column]
self._froms = self._froms.union(_from_objects(column))
self._reset_exported()
def append_prefix(self, clause):
"""append the given columns clause prefix expression to this select()
construct.
"""
clause = _literal_as_text(clause)
self._prefixes = self._prefixes + (clause,)
def append_whereclause(self, whereclause):
"""append the given expression to this select() construct's WHERE
criterion.
The expression will be joined to existing WHERE criterion via AND.
"""
whereclause = _literal_as_text(whereclause)
self._froms = self._froms.union(_from_objects(whereclause))
if self._whereclause is not None:
self._whereclause = and_(self._whereclause, whereclause)
else:
self._whereclause = whereclause
def append_having(self, having):
"""append the given expression to this select() construct's HAVING
criterion.
The expression will be joined to existing HAVING criterion via AND.
"""
if self._having is not None:
self._having = and_(self._having, _literal_as_text(having))
else:
self._having = _literal_as_text(having)
def append_from(self, fromclause):
"""append the given FromClause expression to this select() construct's
FROM clause.
"""
if _is_literal(fromclause):
fromclause = _TextClause(fromclause)
self._froms = self._froms.union([fromclause])
def __exportable_columns(self):
for column in self._raw_columns:
if isinstance(column, Selectable):
for co in column.columns:
yield co
elif isinstance(column, ColumnElement):
yield column
else:
continue
def _populate_column_collection(self):
for c in self.__exportable_columns():
c._make_proxy(self, name=self.use_labels and c._label or None)
def self_group(self, against=None):
"""return a 'grouping' construct as per the ClauseElement
specification.
This produces an element that can be embedded in an expression. Note
that this method is called automatically as needed when constructing
expressions.
"""
if isinstance(against, CompoundSelect):
return self
return _FromGrouping(self)
def union(self, other, **kwargs):
"""return a SQL UNION of this select() construct against the given
selectable."""
return union(self, other, **kwargs)
def union_all(self, other, **kwargs):
"""return a SQL UNION ALL of this select() construct against the given
selectable.
"""
return union_all(self, other, **kwargs)
def except_(self, other, **kwargs):
"""return a SQL EXCEPT of this select() construct against the given
selectable."""
return except_(self, other, **kwargs)
def except_all(self, other, **kwargs):
"""return a SQL EXCEPT ALL of this select() construct against the
given selectable.
"""
return except_all(self, other, **kwargs)
def intersect(self, other, **kwargs):
"""return a SQL INTERSECT of this select() construct against the given
selectable.
"""
return intersect(self, other, **kwargs)
def intersect_all(self, other, **kwargs):
"""return a SQL INTERSECT ALL of this select() construct against the
given selectable.
"""
return intersect_all(self, other, **kwargs)
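    # Example of composing set operations (sketch; ``users`` and
    # ``managers`` are assumed table constructs):
    #
    #     s1 = select([users.c.name])
    #     s2 = select([managers.c.name])
    #     stmt = s1.union_all(s2).order_by('name')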
def bind(self):
if self._bind:
return self._bind
if not self._froms:
for c in self._raw_columns:
e = c.bind
if e:
self._bind = e
return e
else:
e = list(self._froms)[0].bind
if e:
self._bind = e
return e
return None
def _set_bind(self, bind):
self._bind = bind
bind = property(bind, _set_bind)
class _UpdateBase(Executable, ClauseElement):
"""Form the base for ``INSERT``, ``UPDATE``, and ``DELETE`` statements."""
__visit_name__ = 'update_base'
_execution_options = \
Executable._execution_options.union({'autocommit': True})
kwargs = util.frozendict()
def _process_colparams(self, parameters):
if isinstance(parameters, (list, tuple)):
pp = {}
for i, c in enumerate(self.table.c):
pp[c.key] = parameters[i]
return pp
else:
return parameters
def params(self, *arg, **kw):
raise NotImplementedError(
"params() is not supported for INSERT/UPDATE/DELETE statements."
" To set the values for an INSERT or UPDATE statement, use"
" stmt.values(**parameters).")
def bind(self):
return self._bind or self.table.bind
def _set_bind(self, bind):
self._bind = bind
bind = property(bind, _set_bind)
_returning_re = re.compile(r'(?:firebird|postgres(?:ql)?)_returning')
def _process_deprecated_kw(self, kwargs):
for k in list(kwargs):
m = self._returning_re.match(k)
if m:
self._returning = kwargs.pop(k)
util.warn_deprecated(
"The %r argument is deprecated. Please "
"use statement.returning(col1, col2, ...)" % k
)
return kwargs
@_generative
def returning(self, *cols):
"""Add a RETURNING or equivalent clause to this statement.
The given list of columns represent columns within the table that is
the target of the INSERT, UPDATE, or DELETE. Each element can be any
column expression. :class:`~sqlalchemy.schema.Table` objects will be
expanded into their individual columns.
Upon compilation, a RETURNING clause, or database equivalent,
will be rendered within the statement. For INSERT and UPDATE,
the values are the newly inserted/updated values. For DELETE,
the values are those of the rows which were deleted.
Upon execution, the values of the columns to be returned
are made available via the result set and can be iterated
using ``fetchone()`` and similar. For DBAPIs which do not
natively support returning values (i.e. cx_oracle),
SQLAlchemy will approximate this behavior at the result level
so that a reasonable amount of behavioral neutrality is
provided.
Note that not all databases/DBAPIs
support RETURNING. For those backends with no support,
an exception is raised upon compilation and/or execution.
For those who do support it, the functionality across backends
varies greatly, including restrictions on executemany()
and other statements which return multiple rows. Please
read the documentation notes for the database in use in
order to determine the availability of RETURNING.
"""
self._returning = cols
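    # Illustrative usage (sketch; RETURNING support varies by backend, and
    # ``users``/``conn`` are assumed constructs):
    #
    #     stmt = users.insert().values(name='ed').returning(users.c.id)
    #     new_id = conn.execute(stmt).scalar()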
class _ValuesBase(_UpdateBase):
__visit_name__ = 'values_base'
def __init__(self, table, values):
self.table = table
self.parameters = self._process_colparams(values)
@_generative
def values(self, *args, **kwargs):
"""specify the VALUES clause for an INSERT statement, or the SET
clause for an UPDATE.
\**kwargs
key=<somevalue> arguments
\*args
A single dictionary can be sent as the first positional
argument. This allows non-string based keys, such as Column
objects, to be used.
"""
if args:
v = args[0]
else:
v = {}
if self.parameters is None:
self.parameters = self._process_colparams(v)
self.parameters.update(kwargs)
else:
self.parameters = self.parameters.copy()
self.parameters.update(self._process_colparams(v))
self.parameters.update(kwargs)
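    # Illustrative usage (sketch; ``users`` is an assumed table construct):
    #
    #     stmt = users.update().\
    #         where(users.c.id == 5).\
    #         values(name='ed', fullname='Ed Jones')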
class Insert(_ValuesBase):
"""Represent an INSERT construct.
The :class:`Insert` object is created using the :func:`insert()` function.
"""
__visit_name__ = 'insert'
_prefixes = ()
def __init__(self,
table,
values=None,
inline=False,
bind=None,
prefixes=None,
returning=None,
**kwargs):
_ValuesBase.__init__(self, table, values)
self._bind = bind
self.select = None
self.inline = inline
self._returning = returning
if prefixes:
self._prefixes = tuple([_literal_as_text(p) for p in prefixes])
if kwargs:
self.kwargs = self._process_deprecated_kw(kwargs)
def get_children(self, **kwargs):
if self.select is not None:
return self.select,
else:
return ()
def _copy_internals(self, clone=_clone):
# TODO: coverage
self.parameters = self.parameters.copy()
@_generative
def prefix_with(self, clause):
"""Add a word or expression between INSERT and INTO. Generative.
If multiple prefixes are supplied, they will be separated with
spaces.
"""
clause = _literal_as_text(clause)
self._prefixes = self._prefixes + (clause,)
class Update(_ValuesBase):
"""Represent an Update construct.
The :class:`Update` object is created using the :func:`update()` function.
"""
__visit_name__ = 'update'
def __init__(self,
table,
whereclause,
values=None,
inline=False,
bind=None,
returning=None,
**kwargs):
_ValuesBase.__init__(self, table, values)
self._bind = bind
self._returning = returning
if whereclause is not None:
self._whereclause = _literal_as_text(whereclause)
else:
self._whereclause = None
self.inline = inline
if kwargs:
self.kwargs = self._process_deprecated_kw(kwargs)
def get_children(self, **kwargs):
if self._whereclause is not None:
return self._whereclause,
else:
return ()
def _copy_internals(self, clone=_clone):
# TODO: coverage
self._whereclause = clone(self._whereclause)
self.parameters = self.parameters.copy()
@_generative
def where(self, whereclause):
"""return a new update() construct with the given expression added to
its WHERE clause, joined to the existing clause via AND, if any.
"""
if self._whereclause is not None:
self._whereclause = and_(self._whereclause,
_literal_as_text(whereclause))
else:
self._whereclause = _literal_as_text(whereclause)
class Delete(_UpdateBase):
"""Represent a DELETE construct.
The :class:`Delete` object is created using the :func:`delete()` function.
"""
__visit_name__ = 'delete'
def __init__(self,
table,
whereclause,
bind=None,
                 returning=None,
**kwargs):
self._bind = bind
self.table = table
self._returning = returning
if whereclause is not None:
self._whereclause = _literal_as_text(whereclause)
else:
self._whereclause = None
if kwargs:
self.kwargs = self._process_deprecated_kw(kwargs)
def get_children(self, **kwargs):
if self._whereclause is not None:
return self._whereclause,
else:
return ()
@_generative
def where(self, whereclause):
"""Add the given WHERE clause to a newly returned delete construct."""
if self._whereclause is not None:
self._whereclause = and_(self._whereclause,
_literal_as_text(whereclause))
else:
self._whereclause = _literal_as_text(whereclause)
def _copy_internals(self, clone=_clone):
# TODO: coverage
self._whereclause = clone(self._whereclause)
class _IdentifiedClause(Executable, ClauseElement):
__visit_name__ = 'identified'
_execution_options = \
Executable._execution_options.union({'autocommit': False})
quote = None
def __init__(self, ident):
self.ident = ident
class SavepointClause(_IdentifiedClause):
__visit_name__ = 'savepoint'
class RollbackToSavepointClause(_IdentifiedClause):
__visit_name__ = 'rollback_to_savepoint'
class ReleaseSavepointClause(_IdentifiedClause):
__visit_name__ = 'release_savepoint'
|
igemsoftware/SYSU-Software2013
|
project/Python27_32/Lib/site-packages/pypm/external/2/sqlalchemy/sql/expression.py
|
Python
|
mit
| 153,547
|
[
"VisIt"
] |
a0b9de762105560e598c17082025921599fc40a8c82eb1232b8cf1a37b0397d6
|
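The where() and prefix_with() methods above rely on SQLAlchemy's @_generative decorator: each call clones the statement and applies the mutation to the clone, so the original construct is never modified. A minimal standalone sketch of that pattern, assuming nothing about SQLAlchemy's real internals (the names _generative and Stmt here are illustrative):

from functools import wraps

def _generative(fn):
    # Clone self, mutate the clone in place, return the clone.
    @wraps(fn)
    def decorated(self, *args, **kwargs):
        clone = self.__class__.__new__(self.__class__)
        clone.__dict__ = self.__dict__.copy()
        fn(clone, *args, **kwargs)
        return clone
    return decorated

class Stmt(object):
    def __init__(self):
        self.clauses = ()

    @_generative
    def where(self, clause):
        self.clauses = self.clauses + (clause,)

s1 = Stmt()
s2 = s1.where("x = 1").where("y = 2")
assert s1.clauses == () and s2.clauses == ("x = 1", "y = 2")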
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RGraph(RPackage):
"""A package that implements some simple graph handling capabilities."""
homepage = "https://www.bioconductor.org/packages/graph/"
url = "https://git.bioconductor.org/packages/graph"
version('1.54.0', git='https://git.bioconductor.org/packages/graph', commit='2a8b08520096241620421078fc1098f4569c7301')
depends_on('r@3.4.0:3.4.9', when='@1.54.0')
depends_on('r-biocgenerics', type=('build', 'run'))
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/r-graph/package.py
|
Python
|
lgpl-2.1
| 1,712
|
[
"Bioconductor"
] |
421ee35875d9a271086f79b775908f7cf2d101ffaeb44b34e14991e03b00c18e
|
import logging
from autotest.client.shared import error
from virttest import virsh
try:
from virttest.staging import utils_memory
from virttest.staging import utils_cgroup
except ImportError:
from autotest.client.shared import utils_memory
from autotest.client.shared import utils_cgroup
def run(test, params, env):
"""
Test the command virsh memtune
(1) To get the current memtune parameters
(2) Change the parameter values
    (3) Check that the memtune output reflects the updated values
    (4) Check whether the mounted cgroup path gets the updated value
    (5) Log in to the guest, use more memory than the assigned value
        and check whether it kills the vm.
(6) TODO:Check more values and robust scenarios.
"""
def check_limit(path, expected_value, limit_name):
"""
Matches the expected and actual output
(1) Match the output of the virsh memtune
(2) Match the output of the respective cgroup fs value
        :param path: memory controller path for a domain
        :param expected_value: the expected limit value
        :param limit_name: the limit to be checked
                           (hard_limit/soft_limit/swap_hard_limit)
:return: True or False based on the checks
"""
status_value = True
# Check 1
actual_value = virsh.memtune_get(domname, limit_name)
if actual_value == -1:
raise error.TestFail("the key %s not found in the "
"virsh memtune output" % limit_name)
if actual_value != int(expected_value):
status_value = False
logging.error("%s virsh output:\n\tExpected value:%d"
"\n\tActual value: "
"%d", limit_name,
int(expected_value), int(actual_value))
# Check 2
if limit_name == 'hard_limit':
cg_file_name = '%s/memory.limit_in_bytes' % path
elif limit_name == 'soft_limit':
cg_file_name = '%s/memory.soft_limit_in_bytes' % path
elif limit_name == 'swap_hard_limit':
cg_file_name = '%s/memory.memsw.limit_in_bytes' % path
cg_file = None
try:
try:
cg_file = open(cg_file_name)
output = cg_file.read()
value = int(output) / 1024
if int(expected_value) != int(value):
status_value = False
logging.error("%s cgroup fs:\n\tExpected Value: %d"
"\n\tActual Value: "
"%d", limit_name,
int(expected_value), int(value))
except IOError:
status_value = False
logging.error("Error while reading:\n%s", cg_file_name)
finally:
if cg_file is not None:
cg_file.close()
return status_value
# Get the vm name, pid of vm and check for alive
domname = params.get("main_vm")
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
pid = vm.get_pid()
logging.info("Verify valid cgroup path for VM pid: %s", pid)
# Resolve the memory cgroup path for a domain
path = utils_cgroup.resolve_task_cgroup_path(int(pid), "memory")
# Set the initial memory starting value for test case
# By default set 1GB less than the total memory
# In case of total memory is less than 1GB set to 256MB
# visit subtests.cfg to change these default values
Memtotal = utils_memory.read_from_meminfo('MemTotal')
base_mem = params.get("memtune_base_mem")
if int(Memtotal) < int(base_mem):
Mem = int(params.get("memtune_min_mem"))
else:
Mem = int(Memtotal) - int(base_mem)
# Initialize error counter
error_counter = 0
    # Check that the memtune command is available in the libvirt version under test
if not virsh.has_help_command("memtune"):
raise error.TestNAError(
"Memtune not available in this libvirt version")
# Run test case with 100kB increase in memory value for each iteration
while (Mem < Memtotal):
if virsh.has_command_help_match("memtune", "hard-limit"):
hard_mem = Mem - int(params.get("memtune_hard_base_mem"))
options = " --hard-limit %d --live" % hard_mem
virsh.memtune_set(domname, options)
if not check_limit(path, hard_mem, "hard_limit"):
error_counter += 1
else:
raise error.TestNAError("harlimit option not available in memtune "
"cmd in this libvirt version")
if virsh.has_command_help_match("memtune", "soft-limit"):
soft_mem = Mem - int(params.get("memtune_soft_base_mem"))
options = " --soft-limit %d --live" % soft_mem
virsh.memtune_set(domname, options)
if not check_limit(path, soft_mem, "soft_limit"):
error_counter += 1
else:
raise error.TestNAError("softlimit option not available in memtune "
"cmd in this libvirt version")
if virsh.has_command_help_match("memtune", "swap-hard-limit"):
swaphard = Mem
options = " --swap-hard-limit %d --live" % swaphard
virsh.memtune_set(domname, options)
if not check_limit(path, swaphard, "swap_hard_limit"):
error_counter += 1
else:
raise error.TestNAError("swaplimit option not available in memtune "
"cmd in this libvirt version")
Mem += int(params.get("memtune_hard_base_mem"))
# Raise error based on error_counter
if error_counter > 0:
raise error.TestFail(
"Test failed, consult the previous error messages")
|
PandaWei/tp-libvirt
|
libvirt/tests/src/virsh_cmd/domain/virsh_memtune.py
|
Python
|
gpl-2.0
| 5,890
|
[
"VisIt"
] |
dee0451860cdc8b4e1b2936286294690f572c09936aea610ef2c6ed91979d753
|
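The "Check 2" branch of check_limit() above boils down to reading a cgroup limit file (exposed in bytes) and comparing it in kB against what virsh memtune reported. A stripped-down sketch of that conversion; the path shown is illustrative:

def read_cgroup_limit_kb(cg_file_name):
    # cgroup memory limits are stored in bytes; virsh memtune works in kB
    with open(cg_file_name) as cg_file:
        return int(cg_file.read()) // 1024

# e.g. read_cgroup_limit_kb('%s/memory.limit_in_bytes' % path) == hard_mem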
import matplotlib
matplotlib.use('Agg')
import numpy as np
import supp_functions as fce
import sys
import os
import xarray as xr
import pandas as pd
import argparse
import time
import platform
import statsmodels.api as sm
import statsmodels.stats.stattools as sms
import matplotlib as mpl
import matplotlib.pyplot as plt
periods=['','_01_jan','_02_feb','_03_mar','_04_apr','_05_may','_06_jun','_07_jul','_08_aug','_09_sep','_10_oct','_11_nov','_12_dec','_win', '_spr', '_sum', '_aut']
def fit(y, X, reg_names):
nr = len(reg_names)
try:
mod = sm.GLSAR(y.values, X, 2, missing = 'drop') # MLR analysis with AR2 modeling
res = mod.iterative_fit()
output = xr.Dataset({'coef': (['reg_name'], res.params[1:]), \
'conf_int': (['reg_name', 'limit'], res.conf_int()[1:,:]), \
'p_value': (['reg_name'], res.pvalues[1:]), \
'DWT': (sms.durbin_watson(res.wresid)), \
'CoD': (res.rsquared)}, \
coords = {'reg_name': (['reg_name'], reg_names),\
'limit': (['limit'], ['lower', 'upper'])})
except:
nans = np.full([nr], np.nan)
output = xr.Dataset({'coef': (['reg_name'], nans), \
'conf_int': (['reg_name', 'limit'], np.array([nans, nans]).T), \
'p_value': (['reg_name'], nans), \
'DWT': (np.nan), \
'CoD': (np.nan)}, \
coords = {'reg_name': (['reg_name'], reg_names),\
'limit': (['limit'], ['lower', 'upper'])})
return output
def xr_regression(y, **kwargs):
X = sm.add_constant(np.array(kwargs['reg']), prepend=True) # regressor matrix
res_ls = []
nr = kwargs['nr']
n = y.shape[0]
    datum = range(1, 13)
    datum *= (n / 12)   # month index 1..12, repeated for each year (Python 2 list arithmetic)
if kwargs['monthly']:
n_iter = 13
else:
n_iter = 1
for mi in xrange(n_iter):
monthi = mi == np.array(datum)
if mi == 0:
res = fit(y[~monthi], X[~monthi], kwargs['reg_names'])
else:
res = fit(y[monthi], X[monthi], kwargs['reg_names'])
res_ls.append(res)
res_da = xr.concat(res_ls, dim = 'month')
res_da['month'] = np.arange(0, n_iter)
return res_da
def main(args):
#environmental constants
if platform.system() == 'Windows':
n_samples=5000
in_dir='../examples/'
out_dir=''
reg_dir='../regressors/'#${in_dir}'regresory_2013/'
pdf_gen=True
nc_gen=True
else:
n_samples = int(os.environ['n_samples'])
in_dir = os.environ['in_dir']
out_dir = os.environ['out_dir']
reg_dir = os.environ['reg_dir']
pdf_gen = os.environ['pdf_gen']
nc_gen = os.environ['nc_gen']
plus = ''
# additional constants
what_sp = '' # what solar proxy?
norm = 4 # normalization type
"""Run the program."""
what_re = args.what_re
vari = args.vari
i_year = args.i_year
s_year = args.s_year
e_year = args.e_year
in_file_name = args.in_file_name
conf_str = args.config
suffix_pdf = '_{}-{}_{}.pdf'.format(s_year, e_year, conf_str)
suffix_nc = '_{}-{}_{}.nc'.format(s_year, e_year, conf_str)
monthly = args.monthly
#zonal_b = args.zonal_config
out_dir += vari+'_'+what_re+'_'
if args.verbose:
print('dataset: ', what_re)
print('variable: ', vari)
print('initial year of dataset: ', i_year)
print('initial year of analysis: ', s_year)
print('end year of analysis: ', e_year)
print('input filename: ', in_file_name)
print('regression configuration: ', conf_str)
#print('Zonal, whole or map? ', zonal_b)
if conf_str[-2:] == 'el':
conf_str = conf_str[:-3] # reg. conf. is the same for filt. or unfilt. analysis
filt_years = [1982,1983,1984]
elif conf_str[-2:] == 'pi':
conf_str = conf_str[:-3]
filt_years = [1991,1992,1993]
elif conf_str[-2:] == 'bo':
conf_str = conf_str[:-3]
filt_years = [1982,1983,1984,1991,1992,1993]
else:
filt_years = None
print('data opening')
in_netcdf = in_dir + in_file_name
#ds = xr.open_mfdataset(in_netcdf, concat_dim = 'ens')
ds = xr.open_dataset(in_netcdf)
#ds = ds.chunk({'lat': 10000})
lat_name = fce.get_coords_name(ds, 'latitude')
lat = ds.coords[lat_name].values
nlat = lat.shape[0]
lev_name = fce.get_coords_name(ds, 'air_pressure')
if ds.coords[lev_name].attrs['units'] == 'Pa':
lev = ds.coords[lev_name].values/100.
ds[lev_name] = lev
else:
lev = ds.coords[lev_name].values
#print(lev)
nlev = lev.shape[0]
gen = np.arange(nlev)
n = ds.coords['time'].shape[0]
#it may happen that the field is 3D (longitude is missing)
try:
lon_name = fce.get_coords_name(ds, 'longitude')
lon = ds.coords[lon_name].values
nlon = lon.shape[0]
except:
nlon = 1
    # currently tested for zonally averaged files only
if nlon != 1:
ds = ds.mean(lon_name)
print("regressors' openning")
#global reg, reg_names, nr
reg, reg_names, history = fce.configuration_ccmi(what_re, what_sp, norm, conf_str, i_year, s_year, e_year, reg_dir, filt_years = filt_years)
nr = reg.shape[1]
#select date range and variable
#times = pd.date_range(str(s_year)+'-01-01', str(e_year)+'-12-31', name='time', freq = 'M')
ds_sel = fce.date_range_xr(ds, i_year, s_year, e_year, 1, 12, n, filt_years = filt_years)#.sel(lat = slice(-25,25))#ds.sel(time = times, method='ffill') #nearest #[vari]
print('anomalies calculation')
anomalies, _ = fce.deseasonalize(ds_sel)
anomalies = anomalies.squeeze().reset_coords(drop=True)
print('regression calculation')
filt_dims = list(filter(lambda x: x not in ['time'], anomalies.dims))[::-1]
stacked = anomalies[vari].stack(allpoints = filt_dims)#[lev_name, lat_name])
reg_kwargs = dict(reg = reg, reg_names = reg_names, nr = nr, monthly = monthly)
coefs = stacked.groupby('allpoints').apply(xr_regression, **reg_kwargs)#.squeeze()
if lev_name in anomalies.dims:
        coefs['allpoints'] = stacked.coords['allpoints'].sortby(lev_name) # sort the allpoints multiindex by lev, otherwise the coefs come back reversed
else:
coefs['allpoints'] = stacked.coords['allpoints']
ndims = len(filt_dims)
if ndims != 1:
cu_ds = coefs.unstack('allpoints')
else:
cu_ds = coefs.rename({'allpoints': filt_dims[0]})
if fce.str2bool(nc_gen):
print('netCDF Output')
cu_ds['coef'].attrs['long_name'] = 'Regression coefficients'
cu_ds['CoD'].attrs['long_name'] = 'Coefficient of determination'
cu_ds['p_value'].attrs['long_name'] = 'Statistical significance (p-value)'
cu_ds['DWT'].attrs['long_name'] = 'Durbin-Watson test'
        if lev_name in filt_dims:  # filt_dims is a list, so test membership, not equality
cu_ds[lev_name].attrs['units'] = 'hPa'
cu_ds.attrs['history'] = 'Regressors included: '+history
cu_ds.attrs['description'] = 'Created ' + time.ctime(time.time())
cu_ds.to_netcdf(out_dir+'stat_outputs'+suffix_nc)
if fce.str2bool(pdf_gen) and set(filt_dims) == set([lat_name, lev_name]):
print('RC visualization')
my_cmap = mpl.colors.ListedColormap(['yellow', 'red', 'white'])
fgp = xr.plot.FacetGrid(cu_ds['p_value'], row = 'reg_name', col = 'month', sharey = True, sharex = True)
plot_cf_kwargs = dict(yincrease=False, levels = [0,0.01,0.05,1], add_colorbar = False, cmap=my_cmap)
fgp.map_dataarray(xr.plot.contourf, lat_name, lev_name, **plot_cf_kwargs)
if vari in ['zmta']:
c_levels = [-30,-15,-10,-5,-2,-1,-0.5,-0.25]
c_levels += [0]+fce.rev_sign(c_levels)
c_levels = np.array(c_levels)
elif vari in ['zmua']:
c_levels = [-30,-15,-10,-5,-2,-1]
c_levels += [0]+fce.rev_sign(c_levels)
c_levels = np.array(c_levels)
else:
c_levels = np.arange(-10,11,1)
plot_kwargs_zero = dict(yincrease=False, cmap=('k'), linewidths = 6, add_colorbar=False, levels = [0])
plot_kwargs = dict(yincrease=False, cmap=('k'), add_colorbar=False, levels = c_levels[c_levels>0], linewidths = 3)
fgp.data = cu_ds['coef']
fgp.map_dataarray(xr.plot.contour, lat_name, lev_name, **plot_kwargs_zero)
fgp.data = cu_ds['coef']
fgp.map_dataarray(xr.plot.contour, lat_name, lev_name, **plot_kwargs)
plot_kwargs['levels'] = c_levels[c_levels<0]
plot_kwargs['linestyles'] = 'dashed'
fgp.data = cu_ds['coef']
fgp.map_dataarray(xr.plot.contour, lat_name, lev_name, **plot_kwargs)
ax = fgp.axes[0,0]
        #ax.set_ylabel('pressure [hPa]')
        #ax.set_xlabel('latitude [deg]')
ax.set_ylim(1000,0.1)
ax.set_yscale('log')
plt.savefig(out_dir+'visualization'+suffix_pdf, bbox_inches = 'tight')
if __name__ == "__main__":
start = time.time()
#inputs
description = 'MLR input arguments.'
parser = argparse.ArgumentParser(description = description)
parser.add_argument("-v", "--verbose", help="increase output verbosity", action="store_true")
parser.add_argument("what_re", help="dataset shor name")
parser.add_argument("vari", help="variable name")
parser.add_argument("i_year", help="initial year of dataset", type=int)
parser.add_argument("s_year", help="initial year of analysis", type=int)
parser.add_argument("e_year", help="end year of analysi", type=int)
parser.add_argument("in_file_name", help="input filename")
choices_def = ['all_trend','all_2trends','all_eesc', 'no_saod_trend', 'no_saod_2trends', 'no_saod_eesc', 'no_saod_enso_trend', 'no_saod_enso_2trends', 'no_saod_enso_eesc', 'massi_trend', 'massi_2trends', 'massi_notrend', 'massi_eesc', 'solarAp_trend']
choices = choices_def + [i+'_bo' for i in choices_def] + [i+'_pi' for i in choices_def] + [i+'_el' for i in choices_def]
parser.add_argument("config", help="regression configuration", choices=choices)
parser.add_argument("--monthly", dest = 'monthly', action = 'store_true')
#parser.set_defaults(monthly = False)
#parser.add_argument("zonal_config", help="Zonal, whole or map?")
args = parser.parse_args()
main(args)
print('{} seconds elapsed'.format(time.time()-start))
print('done')
|
kuchaale/X-regression
|
scripts/lin_reg_univ_ccmi_xarray.py
|
Python
|
gpl-3.0
| 10,655
|
[
"NetCDF"
] |
71aa6e92a049a09d886292372ed47be7ee2edb1d8e061c8013fe4533aad83120
|
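The fit() helper above wraps statsmodels' GLSAR, which models the regression residuals as an autoregressive process (order 2 here) and refits by feasible generalized least squares. A self-contained sketch on synthetic data, illustrative only:

import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
X = sm.add_constant(rng.normal(size=(240, 2)), prepend=True)
y = X @ np.array([1.0, 0.5, -0.3]) + rng.normal(size=240)

mod = sm.GLSAR(y, X, 2, missing='drop')  # AR(2) error model, as in fit()
res = mod.iterative_fit()                # alternates OLS with rho re-estimation
print(res.params[1:])                    # regressor coefficients, intercept dropped
print(res.conf_int()[1:, :], res.pvalues[1:])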
# coding: utf-8
"""Test mdn.compatibility."""
from __future__ import unicode_literals
from django.utils.six import text_type
from mdn.compatibility import (
CellVersion, CompatFeatureVisitor, CompatFootnoteVisitor,
CompatSectionExtractor, CompatSupportVisitor, Footnote,
compat_feature_grammar, compat_support_grammar, compat_footnote_grammar)
from mdn.kumascript import KumaVisitor, kumascript_grammar
from webplatformcompat.models import Feature, Support
from .base import TestCase
class TestCompatSectionExtractor(TestCase):
def setUp(self):
self.feature = self.get_instance('Feature', 'web-css-background-size')
self.visitor = KumaVisitor()
self.version = self.get_instance('Version', ('firefox_desktop', '1.0'))
def construct_html(
self, header=None, pre_table=None, feature=None,
browser=None, support=None, after_table=None):
"""Create a basic compatibility section."""
return """\
{header}
{pre_table}
<div id="compat-desktop">
<table class="compat-table">
<tbody>
<tr>
<th>Feature</th>
<th>{browser}</th>
</tr>
<tr>
<td>{feature}</td>
<td>{support}</td>
</tr>
</tbody>
</table>
</div>
{after_table}
""".format(
header=header or (
'<h2 id="Browser_compatibility">Browser compatibility</h2>'),
pre_table=pre_table or '<div>{{CompatibilityTable}}</div>',
browser=browser or 'Firefox',
feature=feature or '<code>contain</code> and <code>cover</code>',
support=support or '1.0',
after_table=after_table or '')
def get_default_compat_div(self):
browser_id = self.version.browser_id
version_id = self.version.id
return {
'name': u'desktop',
'browsers': [{
'id': browser_id, 'name': 'Firefox for Desktop',
'slug': 'firefox_desktop'}],
'versions': [{
'browser': browser_id, 'id': version_id, 'version': '1.0'}],
'features': [{
'id': '_contain and cover',
'name': '<code>contain</code> and <code>cover</code>',
'slug': 'web-css-background-size_contain_and_cover'}],
'supports': [{
'feature': '_contain and cover',
'id': '__contain and cover-%s' % version_id,
'support': 'yes', 'version': version_id}]}
def assert_extract(
self, html, compat_divs=None, footnotes=None, issues=None,
embedded=None):
parsed = kumascript_grammar['html'].parse(html)
out = self.visitor.visit(parsed)
extractor = CompatSectionExtractor(feature=self.feature, elements=out)
extracted = extractor.extract()
self.assertEqual(extracted['compat_divs'], compat_divs or [])
self.assertEqual(extracted['footnotes'], footnotes or {})
self.assertEqual(extracted['issues'], issues or [])
self.assertEqual(extracted['embedded'], embedded or [])
def test_standard(self):
html = self.construct_html()
expected = self.get_default_compat_div()
self.assert_extract(html, [expected])
def test_unknown_browser(self):
html = self.construct_html(browser='Fire')
expected = self.get_default_compat_div()
expected['browsers'][0] = {
'id': '_Fire', 'name': 'Fire', 'slug': '_Fire'}
expected['versions'][0] = {
'id': '_Fire-1.0', 'version': '1.0', 'browser': '_Fire'}
expected['supports'][0] = {
'id': u'__contain and cover-_Fire-1.0',
'support': 'yes',
'feature': '_contain and cover',
'version': '_Fire-1.0'}
issue = ('unknown_browser', 205, 218, {'name': 'Fire'})
self.assert_extract(html, [expected], issues=[issue])
def test_wrong_first_column_header(self):
# All known pages use "Feature" for first column, but be ready
html = self.construct_html()
html = html.replace('<th>Feature</th>', '<th>Features</th>')
expected = self.get_default_compat_div()
issue = ('feature_header', 180, 197, {'header': 'Features'})
self.assert_extract(html, [expected], issues=[issue])
def test_footnote(self):
html = self.construct_html(
support='1.0 [1]',
after_table='<p>[1] This is a footnote.</p>')
expected = self.get_default_compat_div()
expected['supports'][0]['footnote'] = 'This is a footnote.'
expected['supports'][0]['footnote_id'] = ('1', 322, 325)
self.assert_extract(html, [expected])
def test_footnote_mismatch(self):
html = self.construct_html(
support='1.0 [1]',
after_table='<p>[2] Oops, footnote ID is wrong.</p>')
expected = self.get_default_compat_div()
expected['supports'][0]['footnote_id'] = ('1', 322, 325)
footnotes = {'2': ('Oops, footnote ID is wrong.', 374, 412)}
issues = [
('footnote_missing', 322, 325, {'footnote_id': '1'}),
('footnote_unused', 374, 412, {'footnote_id': '2'})]
self.assert_extract(
html, [expected], footnotes=footnotes, issues=issues)
def test_extra_row_cell(self):
# https://developer.mozilla.org/en-US/docs/Web/JavaScript/
# Reference/Global_Objects/WeakSet, March 2015
html = self.construct_html()
html = html.replace(
'<td>1.0</td>', '<td>1.0</td><td>{{CompatUnknown()}}</td>')
self.assertTrue('CompatUnknown' in html)
expected = self.get_default_compat_div()
issue = ('extra_cell', 326, 354, {})
self.assert_extract(html, [expected], issues=[issue])
def test_compat_mobile_table(self):
mobile = """
<div id="compat-mobile">
<table class="compat-table">
<tbody>
<tr><th>Feature</th><th>Safari Mobile</th></tr>
<tr>
<td><code>contain</code> and <code>cover</code></td>
<td>1.0 [1]</td>
</tr>
</tbody>
</table>
</div>
<p></p>
<p>[1] It's really supported.</p>
"""
html = self.construct_html(after_table=mobile)
expected_desktop = self.get_default_compat_div()
expected_mobile = {
'name': 'mobile',
'browsers': [{
'id': '_Safari for iOS',
'name': 'Safari for iOS',
'slug': '_Safari for iOS',
}],
'features': [{
'id': '_contain and cover',
'name': '<code>contain</code> and <code>cover</code>',
'slug': 'web-css-background-size_contain_and_cover',
}],
'versions': [{
'id': '_Safari for iOS-1.0',
'version': '1.0',
'browser': '_Safari for iOS',
}],
'supports': [{
'id': '__contain and cover-_Safari for iOS-1.0',
'feature': '_contain and cover',
'support': 'yes',
'version': '_Safari for iOS-1.0',
'footnote': "It's really supported.",
'footnote_id': ('1', 581, 584),
}],
}
issue = ('unknown_browser', 465, 487, {'name': 'Safari Mobile'})
self.assert_extract(
html, [expected_desktop, expected_mobile], issues=[issue])
def test_pre_content(self):
header_plus = (
'<h2 id="Browser_compatibility">Browser compatibility</h2>'
'<p>Here\'s some extra content.</p>')
html = self.construct_html(header=header_plus)
expected = self.get_default_compat_div()
issue = ('skipped_content', 57, 90, {})
self.assert_extract(html, [expected], issues=[issue])
def test_feature_issue(self):
html = self.construct_html(
feature='<code>contain</code> and <code>cover</code> [1]')
expected = self.get_default_compat_div()
issue = ('footnote_feature', 300, 304, {})
self.assert_extract(html, [expected], issues=[issue])
def test_support_issue(self):
html = self.construct_html(support='1.0 (or earlier)')
expected = self.get_default_compat_div()
issue = ('inline_text', 322, 334, {'text': '(or earlier)'})
self.assert_extract(html, [expected], issues=[issue])
def test_footnote_issue(self):
html = self.construct_html(after_table="<p>Here's some text.</p>")
expected = self.get_default_compat_div()
issue = ('footnote_no_id', 370, 394, {})
self.assert_extract(html, [expected], issues=[issue])
def test_table_div_wraps_h3(self):
# https://developer.mozilla.org/en-US/docs/Web/API/AudioBufferSourceNode
html = self.construct_html()
html = html.replace(
'</div>', '<h3>Gecko Notes</h3><p>It rocks</p></div>')
expected = self.get_default_compat_div()
issues = [
('skipped_content', 58, 126, {}),
('footnote_gap', 434, 438, {}),
('footnote_no_id', 418, 433, {})]
self.assert_extract(html, [expected], issues=issues)
def test_support_colspan_exceeds_table_width(self):
# https://developer.mozilla.org/en-US/docs/Web/API/KeyboardEvent
html = self.construct_html()
html = html.replace('<td>1.0', '<td colspan="2">1.0')
expected = self.get_default_compat_div()
issue = ('cell_out_of_bounds', 314, 338, {})
self.assert_extract(html, [expected], issues=[issue])
def test_embedded(self):
html = self.construct_html(
after_table="<div>{{EmbedCompatTable('foo-bar')}}</div>")
expected = self.get_default_compat_div()
self.assert_extract(html, [expected], embedded=['foo-bar'])
class TestFootnote(TestCase):
def test_numeric(self):
footnote = Footnote(raw='[1]', footnote_id='1')
self.assertEqual('[1]', text_type(footnote))
self.assertEqual('1', footnote.footnote_id)
self.assertEqual('1', footnote.raw_footnote)
def test_stars(self):
# TODO: replace "convert to '3'" with raw '***'
footnote = Footnote(raw='[***]', footnote_id='***')
self.assertEqual('[3]', text_type(footnote))
self.assertEqual('3', footnote.footnote_id)
self.assertEqual('***', footnote.raw_footnote)
class TestFeatureGrammar(TestCase):
def test_standard(self):
text = '<td>contain and cover</td>'
parsed = compat_feature_grammar['html'].parse(text)
assert parsed
def test_rowspan(self):
text = '<td rowspan="2">Two-line feature</td>'
parsed = compat_feature_grammar['html'].parse(text)
assert parsed
def test_cell_with_footnote(self):
text = '<td>Bad Footnote [1]</td>'
parsed = compat_feature_grammar['html'].parse(text)
assert parsed
class TestFeatureVisitor(TestCase):
scope = 'compatibility feature'
def setUp(self):
self.parent_feature = self.get_instance(
'Feature', 'web-css-background-size')
self.visitor = CompatFeatureVisitor(parent_feature=self.parent_feature)
def assert_feature(
self, contents, feature_id, name, slug, canonical=False,
experimental=False, standardized=True, obsolete=False,
issues=None):
row_cell = '<td>%s</td>' % contents
parsed = compat_feature_grammar['html'].parse(row_cell)
self.visitor.visit(parsed)
feature_dict = self.visitor.to_feature_dict()
self.assertEqual(issues or [], self.visitor.issues)
self.assertEqual(feature_id, feature_dict['id'])
self.assertEqual(slug, feature_dict['slug'])
self.assertEqual(name, feature_dict['name'])
if canonical:
self.assertTrue(feature_dict['canonical'])
else:
self.assertFalse('canonical' in feature_dict)
if experimental:
self.assertTrue(feature_dict['experimental'])
else:
self.assertFalse('experimental' in feature_dict)
if obsolete:
self.assertTrue(feature_dict['obsolete'])
else:
self.assertFalse('obsolete' in feature_dict)
if standardized:
self.assertFalse('standardized' in feature_dict)
else:
self.assertFalse(feature_dict['standardized'])
def test_remove_whitespace(self):
cell = (
' Support for<br>\n <code>contain</code> and'
' <code>cover</code> ')
feature_id = '_support for contain and cover'
name = 'Support for <code>contain</code> and <code>cover</code>'
slug = 'web-css-background-size_support_for_contain_and_co'
self.assert_feature(cell, feature_id, name, slug)
def test_code_sequence(self):
# https://developer.mozilla.org/en-US/docs/Web/CSS/display
cell = (
'<code>none</code>, <code>inline</code> and'
' <code>block</code>')
feature_id = '_none, inline and block'
name = '<code>none</code>, <code>inline</code> and <code>block</code>'
slug = 'web-css-background-size_none_inline_and_block'
self.assert_feature(cell, feature_id, name, slug)
def test_canonical(self):
# https://developer.mozilla.org/en-US/docs/Web/CSS/display
cell = '<code>list-item</code>'
feature_id = '_list-item'
name = 'list-item'
slug = 'web-css-background-size_list-item'
self.assert_feature(cell, feature_id, name, slug, canonical=True)
def test_canonical_match(self):
name = 'list-item'
slug = 'slug-list-item'
feature = self.create(
Feature, parent=self.parent_feature, name={'zxx': name}, slug=slug)
cell = '<code>list-item</code>'
self.assert_feature(cell, feature.id, name, slug, canonical=True)
def test_ks_experimental(self):
cell = '<code>grid</code> {{experimental_inline}}'
feature_id = '_grid'
name = 'grid'
slug = 'web-css-background-size_grid'
self.assert_feature(
cell, feature_id, name, slug, canonical=True, experimental=True)
def test_ks_non_standard_inline(self):
# https://developer.mozilla.org/en-US/docs/Web/API/AnimationEvent
cell = '<code>initAnimationEvent()</code> {{non-standard_inline}}'
feature_id = '_initanimationevent()'
name = 'initAnimationEvent()'
slug = 'web-css-background-size_initanimationevent'
self.assert_feature(
cell, feature_id, name, slug, canonical=True, standardized=False)
def test_ks_deprecated_inline(self):
cell = '<code>initAnimationEvent()</code> {{deprecated_inline}}'
feature_id = '_initanimationevent()'
name = 'initAnimationEvent()'
slug = 'web-css-background-size_initanimationevent'
self.assert_feature(
cell, feature_id, name, slug, canonical=True, obsolete=True)
def test_ks_obsolete_inline(self):
# https://developer.mozilla.org/en-US/docs/Web/API/WebSockets_API
cell = 'Version -76 support {{obsolete_inline}}'
feature_id = '_version -76 support'
name = 'Version -76 support'
slug = 'web-css-background-size_version_-76_support'
self.assert_feature(cell, feature_id, name, slug, obsolete=True)
def test_ks_htmlelement(self):
cell = '{{ HTMLElement("progress") }}'
feature_id = '_progress'
name = '<progress>'
slug = 'web-css-background-size_progress'
self.assert_feature(cell, feature_id, name, slug, canonical=True)
def test_ks_domxref(self):
cell = '{{domxref("DeviceProximityEvent")}}'
feature_id = '_deviceproximityevent'
name = 'DeviceProximityEvent'
slug = 'web-css-background-size_deviceproximityevent'
self.assert_feature(cell, feature_id, name, slug, canonical=True)
def test_unknown_kumascript(self):
cell = 'feature foo {{bar}}'
feature_id = '_feature foo'
name = 'feature foo'
slug = 'web-css-background-size_feature_foo'
issue = ('unknown_kumascript', 16, 23,
{'name': 'bar', 'args': [], 'kumascript': '{{bar}}',
'scope': self.scope})
self.assert_feature(cell, feature_id, name, slug, issues=[issue])
def test_nonascii_name(self):
# https://developer.mozilla.org/en-US/docs/Web/CSS/font-variant
cell = '<code>ß</code> → <code>SS</code>'
feature_id = '_\xdf \u2192 ss'
name = '<code>\xdf</code> \u2192 <code>SS</code>'
slug = 'web-css-background-size_ss'
self.assert_feature(cell, feature_id, name, slug)
def test_footnote(self):
# https://developer.mozilla.org/en-US/docs/Web/CSS/text-align
cell = 'Block alignment values [1] {{not_standard_inline}}'
feature_id = '_block alignment values'
name = 'Block alignment values'
slug = 'web-css-background-size_block_alignment_values'
issue = ('footnote_feature', 27, 31, {})
self.assert_feature(
cell, feature_id, name, slug, standardized=False, issues=[issue])
def test_bracket(self):
# https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input
cell = 'accept=[file extension]'
feature_id = '_accept=[file extension]'
name = 'accept=[file extension]'
slug = 'web-css-background-size_accept_file_extension'
self.assert_feature(cell, feature_id, name, slug)
def test_digit(self):
# https://developer.mozilla.org/en-US/docs/Web/CSS/transform
cell = '3D Support'
feature_id = '_3d support'
name = '3D Support'
slug = 'web-css-background-size_3d_support'
self.assert_feature(cell, feature_id, name, slug)
def test_link(self):
# https://developer.mozilla.org/en-US/docs/Web/API/EventSource
cell = ('<a href="/En/HTTP_access_control">'
'Cross-Origin Resource Sharing</a><br>')
feature_id = '_cross-origin resource sharing'
name = 'Cross-Origin Resource Sharing'
slug = 'web-css-background-size_cross-origin_resource_shar'
issue = (
'tag_dropped', 4, 38, {'tag': 'a', 'scope': self.scope})
self.assert_feature(cell, feature_id, name, slug, issues=[issue])
def test_p(self):
# https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/const
cell = '<p>Reassignment fails</p>'
feature_id = '_reassignment fails'
name = 'Reassignment fails'
slug = 'web-css-background-size_reassignment_fails'
issue = ('tag_dropped', 4, 7, {'tag': 'p', 'scope': self.scope})
self.assert_feature(cell, feature_id, name, slug, issues=[issue])
def test_span(self):
cell = '<span class="strong">Strong</span>'
feature_id = '_strong'
name = 'Strong'
slug = 'web-css-background-size_strong'
issue = ('tag_dropped', 4, 25, {'tag': 'span', 'scope': self.scope})
self.assert_feature(cell, feature_id, name, slug, issues=[issue])
def test_table(self):
# https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamTrack
cell = """\
<table class="compat-table">
<tbody>
<tr>
<td><code>.stop()</code></td>
</tr>
</tbody>
</table>"""
feature_id = '_.stop()'
name = '.stop()'
slug = 'web-css-background-size_stop'
issues = [
('tag_dropped', 4, 32, {'tag': 'table', 'scope': self.scope}),
('tag_dropped', 35, 42, {'tag': 'tbody', 'scope': self.scope}),
('tag_dropped', 47, 51, {'tag': 'tr', 'scope': self.scope}),
('tag_dropped', 58, 62, {'tag': 'td', 'scope': self.scope})]
self.assert_feature(
cell, feature_id, name, slug, canonical=True, issues=issues)
class TestSupportGrammar(TestCase):
def assert_version(self, text, version, eng_version=None):
ws1, version_node, ws2 = (
compat_support_grammar['cell_version'].parse(text))
match = version_node.match.groupdict()
expected = {'version': version, 'eng_version': eng_version}
self.assertEqual(expected, match)
def test_version_number(self):
self.assert_version('1', version='1')
def test_cell_version_number_dotted(self):
self.assert_version('1.0', version='1.0')
def test_cell_version_number_spaces(self):
self.assert_version('1 ', version='1')
def test_cell_version_number_dotted_spaces(self):
self.assert_version('1.0\n\t', version='1.0')
def test_cell_version_number_with_engine(self):
self.assert_version('1.0 (85)', version='1.0', eng_version='85')
def test_cell_version_number_with_dotted_engine(self):
self.assert_version('5.0 (532.5)', version='5.0', eng_version='532.5')
def assert_no_prefix(self, text):
node = compat_support_grammar['cell_noprefix'].parse(text)
self.assertEqual(text, node.text)
def test_unprefixed(self):
# https://developer.mozilla.org/en-US/docs/Web/API/AudioContext.createBufferSource
self.assert_no_prefix(' (unprefixed) ')
def test_noprefix(self):
# https://developer.mozilla.org/en-US/docs/Web/API/Navigator.vibrate
self.assert_no_prefix(' (no prefix) ')
def test_without_prefix_naked(self):
# https://developer.mozilla.org/en-US/docs/Web/CSS/text-decoration-line
self.assert_no_prefix('without prefix')
def test_without_prefix(self):
# https://developer.mozilla.org/en-US/docs/Web/API/BatteryManager
self.assert_no_prefix(' (without prefix) ')
def assert_partial(self, text):
node = compat_support_grammar['cell_partial'].parse(text)
self.assertEqual(text, node.text)
def test_comma_partial(self):
# https://developer.mozilla.org/en-US/docs/Web/API/IDBCursor
self.assert_partial(', partial')
    def test_parens_partial(self):
# https://developer.mozilla.org/en-US/docs/Web/CSS/text-decoration
self.assert_partial('(partial)')
class TestCompatVersion(TestCase):
def test_dotted(self):
version = CellVersion(raw='1.0', version='1.0')
self.assertEqual('1.0', version.version)
self.assertEqual('1.0', text_type(version))
def test_plain(self):
version = CellVersion(raw='1', version='1')
self.assertEqual('1.0', version.version)
self.assertEqual('1.0', text_type(version))
def test_with_engine(self):
version = CellVersion(
raw='1.0 (85)', version='1.0', engine_version='85')
self.assertEqual('1.0', version.version)
self.assertEqual('1.0 (85)', text_type(version))
class TestSupportVisitor(TestCase):
scope = 'compatibility support'
def setUp(self):
self.feature_id = '_feature'
self.browser_id = '_browser'
self.browser_name = 'Browser'
self.browser_slug = 'browser'
def set_browser(self, browser):
self.browser_id = browser.id
self.browser_name = browser.name
self.browser_slug = browser.slug
def assert_support(
self, contents, expected_versions=None, expected_supports=None,
issues=None):
row_cell = '<td>%s</td>' % contents
parsed = compat_support_grammar['html'].parse(row_cell)
self.visitor = CompatSupportVisitor(
self.feature_id, self.browser_id, self.browser_name,
self.browser_slug)
self.visitor.visit(parsed)
expected_versions = expected_versions or []
expected_supports = expected_supports or []
self.assertEqual(len(expected_versions), len(expected_supports))
for version, support in zip(expected_versions, expected_supports):
if 'id' not in version:
version['id'] = '_{}-{}'.format(
self.browser_name, version['version'])
version['browser'] = self.browser_id
if 'id' not in support:
support['id'] = '_{}-{}'.format(self.feature_id, version['id'])
support['version'] = version['id']
support['feature'] = self.feature_id
self.assertEqual(expected_versions, self.visitor.versions)
self.assertEqual(expected_supports, self.visitor.supports)
self.assertEqual(issues or [], self.visitor.issues)
def test_version(self):
self.assert_support('1.0', [{'version': '1.0'}], [{'support': 'yes'}])
def test_version_matches(self):
version = self.get_instance('Version', ('firefox_desktop', '1.0'))
self.set_browser(version.browser)
self.assert_support(
'1.0', [{'version': '1.0', 'id': version.id}],
[{'support': 'yes'}])
def test_new_version_existing_browser(self):
browser = self.get_instance('Browser', 'firefox_desktop')
self.set_browser(browser)
issue = (
'unknown_version', 4, 7,
{'browser_id': browser.id,
'browser_name': {'en': 'Firefox for Desktop'},
'browser_slug': 'firefox_desktop', 'version': '2.0'})
self.assert_support(
'2.0', [{'version': '2.0'}], [{'support': 'yes'}], issues=[issue])
def test_support_matches(self):
version = self.get_instance('Version', ('firefox_desktop', '1.0'))
self.set_browser(version.browser)
feature = self.get_instance(
'Feature', 'web-css-background-size-contain_and_cover')
self.feature_id = feature.id
support = self.create(Support, version=version, feature=feature)
self.assert_support(
'1.0',
[{'version': '1.0', 'id': version.id}],
[{'support': 'yes', 'id': support.id}])
def test_compatno(self):
self.assert_support(
'{{CompatNo}}',
[{'version': 'current'}], [{'support': 'no'}])
def test_compatversionunknown(self):
self.assert_support(
'{{CompatVersionUnknown}}',
[{'version': 'current'}], [{'support': 'yes'}])
def test_compatunknown(self):
self.assert_support('{{CompatUnknown}}', [], [])
def test_compatgeckodesktop(self):
self.assert_support(
'{{CompatGeckoDesktop("1")}}',
[{'version': '1.0'}], [{'support': 'yes'}])
def test_compatgeckodesktop_bad_num(self):
self.assert_support(
'{{CompatGeckoDesktop("1.1")}}',
issues=[('compatgeckodesktop_unknown', 4, 33, {'version': '1.1'})])
def test_compatgeckofxos(self):
self.assert_support(
'{{CompatGeckoFxOS("7")}}',
[{'version': '1.0'}], [{'support': 'yes'}])
def test_compatgeckofxos_bad_version(self):
self.assert_support(
'{{CompatGeckoFxOS("999999")}}',
issues=[('compatgeckofxos_unknown', 4, 33, {'version': '999999'})])
def test_compatgeckofxos_bad_override(self):
self.assert_support(
'{{CompatGeckoFxOS("18","5.0")}}',
issues=[('compatgeckofxos_override', 4, 35,
{'override': '5.0', 'version': '18'})])
def test_compatgeckomobile(self):
self.assert_support(
'{{CompatGeckoMobile("1")}}',
[{'version': '1.0'}], [{'support': 'yes'}])
def test_compatandroid(self):
self.assert_support(
'{{CompatAndroid("3.0")}}',
[{'version': '3.0'}], [{'support': 'yes'}])
def test_compatnightly(self):
self.assert_support(
'{{CompatNightly}}',
[{'version': 'nightly'}], [{'support': 'yes'}])
def test_unknown_kumascript(self):
issues = [(
'unknown_kumascript', 4, 19,
{'name': 'UnknownKuma', 'args': [],
'scope': 'compatibility support',
'kumascript': '{{UnknownKuma}}'})]
self.assert_support('{{UnknownKuma}}', issues=issues)
def test_with_prefix_and_break(self):
self.assert_support(
('{{CompatVersionUnknown}}{{property_prefix("-webkit")}}<br>\n'
' 2.3'),
[{'version': 'current'}, {'version': '2.3'}],
[{'support': 'yes', 'prefix': '-webkit'}, {'support': 'yes'}])
def test_p_tags(self):
self.assert_support(
'<p>4.0</p><p>32</p>',
[{'version': '4.0'}, {'version': '32.0'}],
[{'support': 'yes'}, {'support': 'yes'}])
def test_two_line_note(self):
self.assert_support(
'18<br>\n(behind a pref) [1]',
[{'version': '18.0'}],
[{'support': 'yes', 'footnote_id': ('1', 27, 30)}],
issues=[('inline_text', 10, 27, {'text': '(behind a pref)'})])
def test_removed_in_gecko(self):
self.assert_support(
('{{ CompatGeckoMobile("6.0") }}<br>'
'Removed in {{ CompatGeckoMobile("23.0") }}'),
[{'version': '6.0'}, {'version': '23.0'}],
[{'support': 'yes'}, {'support': 'no'}])
def test_multi_br(self):
self.assert_support(
('{{ CompatGeckoMobile("6.0") }}<br><br>'
'Removed in {{ CompatGeckoMobile("23.0") }}'),
[{'version': '6.0'}, {'version': '23.0'}],
[{'support': 'yes'}, {'support': 'no'}])
def test_removed_in_version(self):
self.assert_support(
'Removed in 32',
[{'version': '32.0'}], [{'support': 'no'}])
def test_unprefixed(self):
# https://developer.mozilla.org/en-US/docs/Web/API/AudioContext.createBufferSource
self.assert_support(
'32 (unprefixed)',
[{'version': '32.0'}], [{'support': 'yes'}])
def test_partial(self):
# https://developer.mozilla.org/en-US/docs/Web/API/IDBCursor
self.assert_support(
'10, partial',
[{'version': '10.0'}], [{'support': 'partial'}])
def test_unmatched_free_text(self):
self.assert_support(
'32 (or earlier)',
[{'version': '32.0'}], [{'support': 'yes'}],
issues=[('inline_text', 7, 19, {'text': '(or earlier)'})])
def test_code_block(self):
# https://developer.mozilla.org/en-US/docs/Web/CSS/order
self.assert_support(
'32 with alt name <code>foobar</code>',
[{'version': '32.0'}], [{'support': 'yes'}],
issues=[
('inline_text', 7, 21, {'text': 'with alt name'}),
('inline_text', 21, 40, {'text': '<code>foobar</code>'})])
def test_spaces(self):
self.assert_support(' ')
def test_prefix_plus_footnote(self):
self.assert_support(
'18{{property_prefix("-webkit")}} [1]',
[{'version': '18.0'}],
[{'support': 'partial', 'prefix': '-webkit',
'footnote_id': ('1', 37, 40)}])
def test_prefix_double_footnote(self):
# https://developer.mozilla.org/en-US/docs/Web/API/CSSSupportsRule
self.assert_support(
'{{ CompatGeckoDesktop("17") }} [1][2]',
[{'version': '17.0'}],
[{'support': 'yes', 'footnote_id': ('1', 35, 38)}],
issues=[('footnote_multiple', 38, 41,
{'prev_footnote_id': '1', 'footnote_id': '2'})])
def test_double_footnote_link_sup(self):
# https://developer.mozilla.org/en-US/docs/Web/CSS/flex
self.assert_support(
'{{CompatGeckoDesktop("20.0")}} '
'<sup><a href="#bc2">[2]</a><a href="#bc3">[3]</a></sup>',
[{'version': '20.0'}],
[{'support': 'yes', 'footnote_id': ('2', 55, 58)}],
issues=[('footnote_multiple', 77, 80,
{'prev_footnote_id': '2', 'footnote_id': '3'})])
def test_star_footnote(self):
# TODO: use raw footnote once footnote section is converted
self.assert_support(
'{{CompatGeckoDesktop("20.0")}} [***]',
[{'version': '20.0'}],
[{'support': 'yes', 'footnote_id': ('3', 35, 40)}])
def test_nbsp(self):
self.assert_support(
'15 {{property_prefix("webkit")}}',
[{'version': '15.0'}], [{'support': 'yes', 'prefix': 'webkit'}])
def test_other_kumascript(self):
issue = (
'unexpected_kumascript', 7, 30,
{'kumascript': '{{experimental_inline}}',
'name': 'experimental_inline', 'args': [], 'scope': self.scope,
'expected_scopes': 'compatibility feature'})
self.assert_support(
'22 {{experimental_inline}}',
[{'version': '22.0'}], [{'support': 'yes'}], issues=[issue])
def test_multiversion_prefix_no(self):
# https://developer.mozilla.org/en-US/docs/Web/API/Text/replaceWholeText
self.assert_support(
'{{CompatVersionUnknown}} [1] <br> 30.0 <br> {{CompatNo}} 41.0',
[{'version': 'current'}, {'version': '30.0'}, {'version': '41.0'}],
[{'support': 'yes', 'footnote_id': ('1', 29, 33)},
{'support': 'yes'}, {'support': 'no'}])
def test_multiversion_suffix_no(self):
self.assert_support(
'{{CompatVersionUnknown}} [1] <br> 30.0 <br> 41.0 {{CompatNo}}',
[{'version': 'current'}, {'version': '30.0'}, {'version': '41.0'}],
[{'support': 'yes', 'footnote_id': ('1', 29, 33)},
{'support': 'yes'}, {'support': 'no'}])
class TestFootnoteGrammar(TestCase):
def test_footnote_paragraph(self):
footnotes = '<p>[2] A footnote</p>'
parsed = compat_footnote_grammar['html'].parse(footnotes)
self.assertEqual(footnotes, parsed.text)
class TestFootnoteVisitor(TestCase):
scope = 'compatibility footnote'
def setUp(self):
self.visitor = CompatFootnoteVisitor()
def assert_footnotes(self, content, expected, issues=None, embedded=None):
parsed = compat_footnote_grammar['html'].parse(content)
self.visitor.visit(parsed)
footnotes = self.visitor.finalize_footnotes()
self.assertEqual(expected, footnotes)
self.assertEqual(issues or [], self.visitor.issues)
self.assertEqual(embedded or [], self.visitor.embedded)
def test_empty(self):
footnotes = '\n'
expected = {}
self.assert_footnotes(footnotes, expected)
def test_simple(self):
footnotes = '<p>[1] A footnote.</p>'
expected = {'1': ('A footnote.', 0, 22)}
self.assert_footnotes(footnotes, expected)
def test_multi_paragraph(self):
footnotes = '<p>[1] Footnote line 1.</p><p>Footnote line 2.</p>'
expected = {
'1': ('<p>Footnote line 1.</p>\n<p>Footnote line 2.</p>', 0, 50)}
self.assert_footnotes(footnotes, expected)
def test_multiple_footnotes(self):
footnotes = '<p>[1] Footnote 1.</p><p>[2] Footnote 2.</p>'
expected = {'1': ('Footnote 1.', 0, 22), '2': ('Footnote 2.', 22, 44)}
self.assert_footnotes(footnotes, expected)
def test_kumascript_cssxref(self):
footnotes = '<p>[1] Use {{cssxref("-moz-border-image")}}</p>'
expected = {
'1': (
'Use <a href="https://developer.mozilla.org/en-US/docs/Web/'
'CSS/-moz-border-image"><code>-moz-border-image</code></a>',
0, 47)}
self.assert_footnotes(footnotes, expected)
    def test_unknown_kumascript(self):
footnotes = (
'<p>[1] Footnote {{UnknownKuma}} but the beat continues.</p>')
expected = {'1': ('Footnote but the beat continues.', 0, 59)}
issue = (
'unknown_kumascript', 16, 32,
{'name': 'UnknownKuma', 'args': [], 'scope': 'footnote',
'kumascript': '{{UnknownKuma}}'})
self.assert_footnotes(footnotes, expected, issues=[issue])
def test_pre_section(self):
footnotes = '<p>[1] Here\'s some code:</p><pre>foo = bar</pre>'
expected = {
'1': ("<p>Here's some code:</p>\n<pre>foo = bar</pre>", 0, 48)}
self.assert_footnotes(footnotes, expected)
def test_pre_without_footnotes(self):
footnotes = '<p>Here\'s some code:</p><pre>foo = bar</pre>'
issues = [
('footnote_no_id', 0, 24, {}),
('footnote_no_id', 24, 44, {})]
self.assert_footnotes(footnotes, {}, issues)
def test_pre_with_attrs_section(self):
# https://developer.mozilla.org/en-US/docs/Web/CSS/white-space
footnotes = (
'<p>[1] Here\'s some code:</p>\n'
'<pre class="brush:css">\n'
'.foo {background-image: url(bg-image.png);}\n'
'</pre>')
expected = {
'1': (
"<p>Here's some code:</p>\n<pre>\n"
'.foo {background-image: url(bg-image.png);}\n</pre>',
0, 103)}
issue = (
'unexpected_attribute', 34, 51,
{'ident': 'class', 'node_type': 'pre', 'value': 'brush:css',
'expected': 'no attributes'})
self.assert_footnotes(footnotes, expected, issues=[issue])
def test_asterisk(self):
footnotes = '<p>[*] A footnote</p>'
expected = {'1': ('A footnote', 0, 21)}
self.assert_footnotes(footnotes, expected)
def test_bad_footnote(self):
footnotes = '<p>A footnote.</p>'
issue = ('footnote_no_id', 0, 18, {})
self.assert_footnotes(footnotes, {}, issues=[issue])
def test_bad_footnote_prefix(self):
footnotes = '<p>Footnote [1] - The content.</p>'
expected = {'1': ('- The content.', 16, 30)}
issue = ('footnote_no_id', 3, 12, {})
self.assert_footnotes(footnotes, expected, issues=[issue])
def test_bad_footnote_unknown_kumascript(self):
# https://developer.mozilla.org/en-US/docs/Web/SVG/Element/color-profile
footnotes = '<p>{{SVGRef}}</p>'
issue = (
'unknown_kumascript', 3, 13,
{'name': 'SVGRef', 'args': [], 'kumascript': '{{SVGRef}}',
'scope': u'footnote'})
self.assert_footnotes(footnotes, {}, issues=[issue])
def test_empty_paragraph_no_footnotes(self):
footnotes = ('<p> </p>\n')
self.assert_footnotes(footnotes, {})
def test_empty_paragraph_invalid_footnote(self):
footnotes = (
'<p> </p>\n'
'<p>Invalid footnote.</p>\n'
'<p> </p>')
issue = ('footnote_no_id', 9, 33, {})
self.assert_footnotes(footnotes, {}, issues=[issue])
self.assertEqual(footnotes[9:33], '<p>Invalid footnote.</p>')
def test_empty_paragraphs_trimmed(self):
footnote = (
'<p> </p>\n'
'<p>[1] Valid footnote.</p>'
'<p> </p>'
'<p>Continues footnote 1.</p>')
expected = {
'1': (
'<p>Valid footnote.</p>\n<p>Continues footnote 1.</p>',
9, 73)}
self.assert_footnotes(footnote, expected)
def test_code(self):
footnote = (
'<p>[1] From Firefox 31 to 35, <code>will-change</code>'
' was available...</p>')
expected = {
'1': (
'From Firefox 31 to 35, <code>will-change</code>'
' was available...', 0, 75)}
self.assert_footnotes(footnote, expected)
def test_span(self):
# https://developer.mozilla.org/en-US/docs/Web/Events/DOMContentLoaded
footnote = (
'<p>[1]<span style="font-size: 14px; line-height: 18px;">'
'Bubbling for this event is supported by at least Gecko 1.9.2,'
' Chrome 6, and Safari 4.</span></p>')
expected = {
'1': ('Bubbling for this event is supported by at least Gecko'
' 1.9.2, Chrome 6, and Safari 4.', 0, 152)}
issue = ('tag_dropped', 6, 56, {'scope': 'footnote', 'tag': 'span'})
self.assert_footnotes(footnote, expected, issues=[issue])
def test_a(self):
# https://developer.mozilla.org/en-US/docs/Web/SVG/SVG_as_an_Image
footnote = (
'<p>[1] Compatibility data from'
'<a href="http://caniuse.com" title="http://caniuse.com">'
'caniuse.com</a>.</p>')
expected = {
'1': ('Compatibility data from <a href="http://caniuse.com">'
'caniuse.com</a>.', 0, 106)}
self.assert_footnotes(footnote, expected)
def test_br_start(self):
# https://developer.mozilla.org/en-US/docs/Web/API/VRFieldOfViewReadOnly/downDegrees
footnote = "<p><br>\n[1] To find information on Chrome's WebVR...</p>"
expected = {'1': ("To find information on Chrome's WebVR...", 0, 56)}
self.assert_footnotes(footnote, expected)
def test_br_end(self):
# https://developer.mozilla.org/en-US/docs/Web/Events/wheel
footnote = "<p>[1] Here's a footnote. <br></p>"
expected = {'1': ("Here's a footnote.", 0, 34)}
self.assert_footnotes(footnote, expected)
def test_br_footnotes(self):
# https://developer.mozilla.org/en-US/docs/Web/API/URLUtils/hash
footnote = '<p>[1] Footnote 1.<br>[2] Footnote 2.</p>'
expected = {'1': ('Footnote 1.', 7, 18), '2': ('Footnote 2.', 26, 37)}
self.assert_footnotes(footnote, expected)
def test_embedcompattable(self):
footnote = '<div>{{EmbedCompatTable("foo")}}</div>'
self.assert_footnotes(footnote, {}, embedded=['foo'])
|
mdn/browsercompat
|
mdn/tests/test_compatibility.py
|
Python
|
mpl-2.0
| 41,629
|
[
"VisIt"
] |
caffc8daa02c79a8bc02abb26a12184618911578624d2dc195d70f53672c5d27
|
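Several tests above (test_numeric, test_stars, test_asterisk) pin down one behaviour of footnote markers: numeric ids pass through, while a run of stars maps to its count ('[***]' becomes '3', '[*]' becomes '1'). A simplified regex-based sketch of just that mapping, not the grammar-driven implementation in mdn.compatibility:

import re

FOOTNOTE_RE = re.compile(r'^\[(\d+|\*+)\]$')

def footnote_id(raw):
    # numeric ids pass through; runs of stars map to their count
    match = FOOTNOTE_RE.match(raw)
    if match is None:
        return None
    ident = match.group(1)
    return ident if ident.isdigit() else str(len(ident))

assert footnote_id('[1]') == '1'
assert footnote_id('[***]') == '3'
assert footnote_id('[*]') == '1'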
# Copyright (C) 2017 Alex Nitz
#
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
from six.moves import range
import numpy, pycbc.psd
from pycbc.types import TimeSeries, FrequencySeries, complex_same_precision_as
from numpy.random import RandomState
# These need to be constant to be able to recover identical results.
# The hope is that nobody needs a higher resolution
SAMPLE_RATE = 16384
BLOCK_SIZE = 100
FILTER_LENGTH = 128
def block(seed):
""" Return block of normal random numbers
Parameters
----------
seed : {None, int}
        The seed to generate the noise.
Returns
--------
noise : numpy.ndarray
Array of random numbers
"""
num = SAMPLE_RATE * BLOCK_SIZE
rng = RandomState(seed % 2**32)
variance = SAMPLE_RATE / 2
return rng.normal(size=num, scale=variance**0.5)
def normal(start, end, seed=0):
""" Generate data with a white Gaussian (normal) distribution
Parameters
----------
start_time : int
Start time in GPS seconds to generate noise
end_time : int
        End time in GPS seconds to generate noise
seed : {None, int}
The seed to generate the noise.
Returns
--------
noise : TimeSeries
A TimeSeries containing gaussian noise
"""
    # This is reproducible because we use fixed seeds derived from known values
s = int(start / BLOCK_SIZE)
e = int(end / BLOCK_SIZE)
# The data evenly divides so the last block would be superfluous
if end % BLOCK_SIZE == 0:
e -= 1
sv = RandomState(seed).randint(-2**50, 2**50)
data = numpy.concatenate([block(i + sv) for i in numpy.arange(s, e + 1, 1)])
ts = TimeSeries(data, delta_t=1.0 / SAMPLE_RATE, epoch=start)
return ts.time_slice(start, end)
def colored_noise(psd, start_time, end_time, seed=0, low_frequency_cutoff=1.0):
""" Create noise from a PSD
Return noise from the chosen PSD. Note that if unique noise is desired
a unique seed should be provided.
Parameters
----------
psd : pycbc.types.FrequencySeries
PSD to color the noise
start_time : int
Start time in GPS seconds to generate noise
end_time : int
        End time in GPS seconds to generate noise
seed : {None, int}
The seed to generate the noise.
    low_frequency_cutoff : {1.0, float}
The low frequency cutoff to pass to the PSD generation.
Returns
--------
noise : TimeSeries
A TimeSeries containing gaussian noise colored by the given psd.
"""
psd = psd.copy()
    flen = int(SAMPLE_RATE / psd.delta_f) // 2 + 1  # floor division keeps flen an int on Python 3
oldlen = len(psd)
psd.resize(flen)
# Want to avoid zeroes in PSD.
max_val = psd.max()
for i in range(len(psd)):
if i >= (oldlen-1):
psd.data[i] = psd[oldlen - 2]
if psd[i] == 0:
psd.data[i] = max_val
wn_dur = int(end_time - start_time) + 2*FILTER_LENGTH
if psd.delta_f >= 1. / (2.*FILTER_LENGTH):
# If the PSD is short enough, this method is less memory intensive than
# resizing and then calling inverse_spectrum_truncation
psd = pycbc.psd.interpolate(psd, 1.0 / (2.*FILTER_LENGTH))
# inverse_spectrum_truncation truncates the inverted PSD. To truncate
# the non-inverted PSD we give it the inverted PSD to truncate and then
# invert the output.
psd = 1. / pycbc.psd.inverse_spectrum_truncation(1./psd,
FILTER_LENGTH * SAMPLE_RATE,
low_frequency_cutoff=low_frequency_cutoff,
trunc_method='hann')
psd = psd.astype(complex_same_precision_as(psd))
# Zero-pad the time-domain PSD to desired length. Zeroes must be added
# in the middle, so some rolling between a resize is used.
psd = psd.to_timeseries()
psd.roll(SAMPLE_RATE * FILTER_LENGTH)
psd.resize(wn_dur * SAMPLE_RATE)
psd.roll(-SAMPLE_RATE * FILTER_LENGTH)
        # As the time series is still mirrored, the complex frequency
        # components are 0; convert to real via abs, as in
        # inverse_spectrum_truncation.
psd = psd.to_frequencyseries()
else:
psd = pycbc.psd.interpolate(psd, 1.0 / wn_dur)
psd = 1. / pycbc.psd.inverse_spectrum_truncation(1./psd,
FILTER_LENGTH * SAMPLE_RATE,
low_frequency_cutoff=low_frequency_cutoff,
trunc_method='hann')
kmin = int(low_frequency_cutoff / psd.delta_f)
psd[:kmin].clear()
asd = (psd.real())**0.5
del psd
white_noise = normal(start_time - FILTER_LENGTH, end_time + FILTER_LENGTH,
seed=seed)
white_noise = white_noise.to_frequencyseries()
# Here we color. Do not want to duplicate memory here though so use '*='
white_noise *= asd
del asd
colored = white_noise.to_timeseries()
del white_noise
return colored.time_slice(start_time, end_time)
def noise_from_string(psd_name, start_time, end_time, seed=0, low_frequency_cutoff=1.0):
""" Create noise from an analytic PSD
Return noise from the chosen PSD. Note that if unique noise is desired
a unique seed should be provided.
Parameters
----------
psd_name : str
Name of the analytic PSD to use.
start_time : int
Start time in GPS seconds to generate noise
end_time : int
        End time in GPS seconds to generate noise
seed : {None, int}
The seed to generate the noise.
    low_frequency_cutoff : {1.0, float}
The low frequency cutoff to pass to the PSD generation.
Returns
--------
noise : TimeSeries
A TimeSeries containing gaussian noise colored by the given psd.
"""
delta_f = 1.0 / FILTER_LENGTH
    flen = int(SAMPLE_RATE / delta_f) // 2 + 1  # floor division keeps flen an int on Python 3
psd = pycbc.psd.from_string(psd_name, flen, delta_f, low_frequency_cutoff)
return colored_noise(psd, start_time, end_time,
seed=seed,
low_frequency_cutoff=low_frequency_cutoff)
|
cmbiwer/pycbc
|
pycbc/noise/reproduceable.py
|
Python
|
gpl-3.0
| 6,976
|
[
"Gaussian"
] |
1eafff912da454982ef923632404b6be199e30f7d4a708c2ba7ceb95a9c1bb2c
|
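Stripped of the PSD truncation and padding book-keeping, colored_noise() above colours white noise by multiplying its spectrum by the ASD (the square root of the PSD) and transforming back. A bare-bones numpy sketch of that one step, glossing over normalisation conventions:

import numpy as np

def color_white_noise(white_td, asd_fd):
    # asd_fd must have len(white_td)//2 + 1 bins (numpy rfft layout)
    spectrum = np.fft.rfft(white_td)
    spectrum *= asd_fd  # colour in place, mirroring the '*=' used above
    return np.fft.irfft(spectrum, n=len(white_td))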
import sys
sys.path.insert(1, "../../../")
import h2o
def link_correct_default(ip,port):
print("Reading in original prostate data.")
h2o_data = h2o.upload_file(path=h2o.locate("smalldata/prostate/prostate.csv.zip"))
print("Compare models with link unspecified and canonical link specified.")
print("GAUSSIAN: ")
h2o_model_unspecified = h2o.glm(x=h2o_data[1:8], y=h2o_data[8], family="gaussian")
h2o_model_specified = h2o.glm(x=h2o_data[1:8], y=h2o_data[8], family="gaussian", link="identity")
assert h2o_model_specified._model_json['output']['coefficients_table'].cell_values == \
        h2o_model_unspecified._model_json['output']['coefficients_table'].cell_values, "coefficients should be equal"
print("BINOMIAL: ")
h2o_model_unspecified = h2o.glm(x=h2o_data[2:9], y=h2o_data[1], family="binomial")
h2o_model_specified = h2o.glm(x=h2o_data[2:9], y=h2o_data[1], family="binomial", link="logit")
assert h2o_model_specified._model_json['output']['coefficients_table'].cell_values == \
h2o_model_unspecified._model_json['output']['coefficients_table'].cell_values, "coefficient should be equal"
print("POISSON: ")
h2o_model_unspecified = h2o.glm(x=h2o_data[2:9], y=h2o_data[1], family="poisson")
h2o_model_specified = h2o.glm(x=h2o_data[2:9], y=h2o_data[1], family="poisson", link="log")
assert h2o_model_specified._model_json['output']['coefficients_table'].cell_values == \
h2o_model_unspecified._model_json['output']['coefficients_table'].cell_values, "coefficient should be equal"
print("GAMMA: ")
h2o_model_unspecified = h2o.glm(x=h2o_data[3:9], y=h2o_data[2], family="gamma")
h2o_model_specified = h2o.glm(x=h2o_data[3:9], y=h2o_data[2], family="gamma", link="inverse")
assert h2o_model_specified._model_json['output']['coefficients_table'].cell_values == \
h2o_model_unspecified._model_json['output']['coefficients_table'].cell_values, "coefficient should be equal"
if __name__ == "__main__":
h2o.run_test(sys.argv, link_correct_default)
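
# Canonical links exercised above (family -> link): gaussian -> identity,
# binomial -> logit, poisson -> log, gamma -> inverse.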
|
PawarPawan/h2o-v3
|
h2o-py/tests/testdir_algos/glm/pyunit_link_correct_default_largeGLM.py
|
Python
|
apache-2.0
| 1,986
|
[
"Gaussian"
] |
b0759b726ec6288c3fe914f0504f781b688e91540542322b53f93eb2eba1bb41
|
#!/usr/bin/env python3
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
#
# LOLO: add an empty array for SSH in output netcdf file !!!
## + same for ssu and ssv !
## + add constant salinity sss of say 34 PSU
##
import sys
from os import path
import cdsapi
from netCDF4 import Dataset,num2date
from math import copysign
import numpy as nmp
yyyy = 2018
l_interp_daily = True
# Coordinates (point) we want to extract for STATION ASF:
plon = -36. ; plat = 84. ; # North Greenland...
#plon = 36.75 ; plat = 81. ; # East of Svalbard
#plon = -65.1 ; plat = 73.2 ; # Center of Baffin Bay
list_crd_expected = ['longitude', 'latitude', 'time']
# Their name in the downloaded file:
### dumping 'fal','forecast_albedo' since it's proportional to ice fraction...
list_var_expected = ['u10', 'v10', 'd2m', 't2m', 'istl1', \
'msl', 'skt','ssrd', 'strd', 'tp', 'sf' ]
# Their name in the cdsapi request:
lvdl_h = [ '10m_u_component_of_wind', '10m_v_component_of_wind', '2m_dewpoint_temperature', '2m_temperature', \
'ice_temperature_layer_1', 'mean_sea_level_pressure', 'skin_temperature', 'surface_solar_radiation_downwards', \
'surface_thermal_radiation_downwards', 'total_precipitation', 'snowfall' ]
# Daily fields:
list_var_daily_expected = [ 'siconc' , 'sst' ];#, ]
lvdl_d = [ 'sea_ice_cover', 'sea_surface_temperature' ];#, ]
# In output file:
cv_lon = 'nav_lon'
cv_lat = 'nav_lat'
cv_tim = 'time_counter'
# List of flux variables to convert to right unit (divide by rdt):
rdt = 3600.
list_flx = ['ssrd', 'strd', 'tp' , 'sf' ]
list_fnu = ['W m**-2', 'W m**-2', 'mm s**-1', 'mm s**-1' ]
fact_flx = [ 1./rdt, 1./rdt , 1000./rdt , 1000./rdt ] ; # tp and sf in 'm' ...
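# Worked example of the conversion above: an hourly-accumulated 'ssrd' value of
# 3.6e6 J m**-2 divided by rdt = 3600 s gives a mean flux of 1000 W m**-2.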
list_temp_to_degC = [ 'sst' , 'skt' ] ; # For some reasons SAS part of NEMO expects SSTs in deg. Celsius...
# Extra fields (for the SAS part) to add and their value (constant along time records...):
list_extra = [ 'sss' , 'ssh' , 'ssu' , 'ssv' , 'ialb' ]
rval_extra = [ 34. , 0. , 0. , 0. , 0.55 ]
cunt_extra = [ '' , 'm' , 'm s**-1' , 'm s**-1' , '' ]
clnm_extra = [ 'Sea surface salinity', 'Sea surface height', 'Zonal surface current', 'Meridional surface current', 'Sea-ice albedo' ]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def long_to_m180_p180(xx):
## Forces longitude to be in the -180:180 frame...
## xx: longitude
xx = xx % 360.
rlon = copysign(1.,180.-xx)*min(xx, abs(xx-360.)) ;
return rlon
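
# Quick sanity checks (worked out by hand):
#   long_to_m180_p180(350.) -> -10.0   (350E wraps to 10W)
#   long_to_m180_p180(-36.) -> -36.0   (already inside the -180:180 frame)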
# Coordinates of 10deg-wide box to download:
dw = 5. ; # degrees
irng_lon = [ int(round(long_to_m180_p180(plon-dw),0)) , int(round(long_to_m180_p180(plon+dw),0)) ]
irng_lat = [ int(round(max(plat-dw,-90.),0)) , int(round(min(plat+dw, 90.),0)) ]
plon = long_to_m180_p180(plon)
print(' * Longitude =', plon, ' =>', irng_lon[:])
print(' * Latitude =', plat, ' =>', irng_lat[:])
print('')
cinfo_box5x5 = str(irng_lat[1])+'-'+str(irng_lat[0])+'_'+str(irng_lon[0])+'-'+str(irng_lon[1])
cinfo_coord = str(plat)+'N_'+str(plon)+'E'
# Daily stuff:
nbfld_daily = len(list_var_daily_expected)
if len(lvdl_d) != nbfld_daily:
print(' ERROR: download list "lvdl_d" not the same size as "list_var_daily_expected"!!!', len(lvdl_d), nbfld_daily) ; sys.exit(0)
# Hourly stuff:
nbfld = len(list_var_expected)
if len(lvdl_h) != nbfld:
print(' ERROR: download list "lvdl_h" not the same size as "list_var_expected"!!!', len(lvdl_h), nbfld)
print(lvdl_h,'\n')
print(list_var_expected,'\n')
sys.exit(0)
nbfld_tot = nbfld
if l_interp_daily: nbfld_tot = nbfld + nbfld_daily
c = cdsapi.Client()
#################
# Daily fields #
#################
Nt_tot_daily = 0
for jm in range(12):
cm = '%2.2i'%(jm+1)
cf_fi_d = 'ERA5_arctic_surface__BOX-5x5deg_'+cinfo_box5x5+'__'+str(yyyy)+cm+'_daily.nc'
#cf_fo_d = 'ERA5_arctic_surface_'+cinfo_coord+'_1d_y'+str(yyyy)+'m'+cm+'.nc'
print(' *** cf_fi_d = '+cf_fi_d)
#print(' *** cf_fo_d = '+cf_fo_d)
if not path.exists(cf_fi_d):
print('\nDoing month '+cm+' !')
c.retrieve(
'reanalysis-era5-single-levels',
{
'product_type': 'reanalysis',
'format': 'netcdf',
'variable': lvdl_d,
'year': str(yyyy),
'month': [ cm, ],
'day': [
'01', '02', '03',
'04', '05', '06',
'07', '08', '09',
'10', '11', '12',
'13', '14', '15',
'16', '17', '18',
'19', '20', '21',
'22', '23', '24',
'25', '26', '27',
'28', '29', '30',
'31',
],
'time': [ '12:00' ],
'area': [
irng_lat[1], irng_lon[0], irng_lat[0],
irng_lon[1], #
],
},
cf_fi_d )
if cm == '01':
c.retrieve(
'reanalysis-era5-single-levels',
{
'product_type': 'reanalysis',
'format': 'netcdf',
'variable': lvdl_d,
'year': str(yyyy-1),
'month': [ '12', ],
'day': [ '31', ],
'time': [ '12:00' ],
'area': [
irng_lat[1], irng_lon[0], irng_lat[0],
irng_lon[1],
],
},
cf_fi_d+'.before' )
if cm == '12':
c.retrieve(
'reanalysis-era5-single-levels',
{
'product_type': 'reanalysis',
'format': 'netcdf',
'variable': lvdl_d,
'year': str(yyyy+1),
'month': [ '01', ],
'day': [ '31', ],
'time': [ '12:00' ],
'area': [
irng_lat[1], irng_lon[0], irng_lat[0],
irng_lon[1],
],
},
cf_fi_d+'.after' )
else:
print('\nAlready done month '+cm+' !')
print('')
# Gonna fix this crap! #lulu
list_fi_check = [ cf_fi_d ]
if cm == '01': list_fi_check = [ cf_fi_d+'.before', cf_fi_d ]
if cm == '12': list_fi_check = [ cf_fi_d, cf_fi_d+'.after' ]
for ff in list_fi_check:
print('\n Checking file '+ff+' !')
id_fi = Dataset(ff)
# 1/ populate variables and check it's what's expected:
list_var = list(id_fi.variables.keys())
print(' *** list_var =', list_var)
if list_var[:3] != list_crd_expected:
print(' ERROR this is not the list of coordinates we expected...') ; sys.exit(0)
if list_var[3:] != list_var_daily_expected:
print(' ERROR this is not the list of variables we expected...') ; sys.exit(0)
Ni = id_fi.dimensions['longitude'].size
Nj = id_fi.dimensions['latitude'].size
#if not id_fi.dimensions['time'].isunlimited(): print 'PROBLEM: the time dimension is not UNLIMITED! Bad!'; sys.exit(0)
Nt = id_fi.dimensions['time'].size ; # Not unlimited in downloaded files...
print(' *** Input file: Ni, Nj, Nt = ', Ni, Nj, Nt, '\n')
Nt_tot_daily = Nt_tot_daily + Nt
#vtime_d = id_fi.variables['time'][:]
id_fi.close()
# The 12 months of the daily fields have been downloaded
print('\n Number of days for year '+str(yyyy)+': '+str(Nt_tot_daily-2))
xdata_d = nmp.zeros((nbfld_daily,Nt_tot_daily)) ; # before and after
vtime_d = nmp.zeros( Nt_tot_daily )
# Reading the 12 files and filling the vtime_d and xdata_d (whole time-series for current year):
jt = 0
for jm in range(12):
cm = '%2.2i'%(jm+1)
cf_fi_d = 'ERA5_arctic_surface__BOX-5x5deg_'+cinfo_box5x5+'__'+str(yyyy)+cm+'_daily.nc'
if cm == '01':
id_fi = Dataset(cf_fi_d+'.before')
vt = id_fi.variables['time'][:]
if len(vt) != 1: print('ERROR #1!'); sys.exit(1)
if jt==0: cunit_t = id_fi.variables['time'].units
#
vlon = id_fi.variables['longitude'][:]
vlat = id_fi.variables['latitude'][:]
ip = nmp.argmin(nmp.abs(vlon-plon))
jp = nmp.argmin(nmp.abs(vlat-plat))
print(' *** ip, jp =', ip, jp)
#
vtime_d[jt] = vt[0]
jv = 0
for cv in list_var_daily_expected:
xdata_d[jv,jt] = id_fi.variables[cv][0,jp,ip]
jv=jv+1
id_fi.close()
jt = jt + 1
# Always:
id_fi = Dataset(cf_fi_d)
vt = id_fi.variables['time'][:]
Nt = len(vt)
vtime_d[jt:Nt+jt] = vt[:]
jv = 0
for cv in list_var_daily_expected:
xdata_d[jv,jt:Nt+jt] = id_fi.variables[cv][:,jp,ip]
jv=jv+1
id_fi.close()
jt = jt + Nt
if cm == '12':
id_fi = Dataset(cf_fi_d+'.after')
vt = id_fi.variables['time'][:]
if len(vt) != 1: print('ERROR #1!'); sys.exit(2)
vtime_d[jt] = vt[0]
jv = 0
for cv in list_var_daily_expected:
xdata_d[jv,jt] = id_fi.variables[cv][0,jp,ip]
jv=jv+1
id_fi.close()
jt = jt + 1
print('\n jt, Nt_tot_daily =', jt, Nt_tot_daily)
# Debug printout to check the daily time vector...
print('\n\n')
for jt in range(Nt_tot_daily):
print(jt, vtime_d[jt], num2date(vtime_d[jt], units=cunit_t))
print('\n\n')
## ===> so vtime_d[:] and xdata_d[:,:] is what needs to be interpolated later !
#sys.exit(0)
#################
# Hourly fields #
#################
for jm in range(12):
cm = '%2.2i'%(jm+1)
cf_fi = 'ERA5_arctic_surface__BOX-5x5deg_'+cinfo_box5x5+'__'+str(yyyy)+cm+'.nc'
cf_fo = 'ERA5_arctic_surface_'+cinfo_coord+'_1h_y'+str(yyyy)+'m'+cm+'.nc'
if not path.exists(cf_fi):
print('\nDoing month '+cm+' !')
c.retrieve(
'reanalysis-era5-single-levels',
{
'product_type': 'reanalysis',
'format': 'netcdf',
'variable': lvdl_h,
'year': str(yyyy),
'month': [ cm, ],
'day': [
'01', '02', '03',
'04', '05', '06',
'07', '08', '09',
'10', '11', '12',
'13', '14', '15',
'16', '17', '18',
'19', '20', '21',
'22', '23', '24',
'25', '26', '27',
'28', '29', '30',
'31',
],
'time': [
'00:00', '01:00', '02:00',
'03:00', '04:00', '05:00',
'06:00', '07:00', '08:00',
'09:00', '10:00', '11:00',
'12:00', '13:00', '14:00',
'15:00', '16:00', '17:00',
'18:00', '19:00', '20:00',
'21:00', '22:00', '23:00',
],
'area': [
irng_lat[1], irng_lon[0], irng_lat[0],
irng_lon[1],
],
},
cf_fi )
else:
print('\nAlready done month '+cm+' !')
print('')
# Gonna fix this crap!
id_fi = Dataset(cf_fi)
# 1/ populate variables and check it's what's expected:
list_var = list(id_fi.variables.keys())
print(' *** list_var =', list_var)
if list_var[:3] != list_crd_expected:
print(' ERROR this is not the list of coordinates we expected...') ; sys.exit(0)
if list_var[3:] != list_var_expected:
print(' ERROR this is not the list of variables we expected...') ; sys.exit(0)
Ni = id_fi.dimensions['longitude'].size
Nj = id_fi.dimensions['latitude'].size
#if not id_fi.dimensions['time'].isunlimited(): print 'PROBLEM: the time dimension is not UNLIMITED! Bad!'; sys.exit(0)
Nt = id_fi.dimensions['time'].size ; # Not unlimited in downloaded files...
print(' *** Input file: Ni, Nj, Nt = ', Ni, Nj, Nt, '\n')
vlon = id_fi.variables['longitude'][:] ; cunt_lon = id_fi.variables['longitude'].units ; clnm_lon = id_fi.variables['longitude'].long_name
vlat = id_fi.variables['latitude'][:] ; cunt_lat = id_fi.variables['latitude'].units ; clnm_lat = id_fi.variables['latitude'].long_name
vtime = id_fi.variables['time'][:] ; cunt_tim = id_fi.variables['time'].units ; clnm_tim = id_fi.variables['time'].long_name
ip = nmp.argmin(nmp.abs(vlon-plon))
jp = nmp.argmin(nmp.abs(vlat-plat))
print(' *** ip, jp =', ip, jp)
# Creating output file for ocean:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ni = 3 ; nj = 3
jr1 = 0 ; jr2 = 0
id_fo = Dataset(cf_fo, 'w', format='NETCDF4')
# Dimensions:
id_fo.createDimension('x' , ni )
id_fo.createDimension('y' , nj )
id_fo.createDimension(cv_tim, None)
# Variables
ido_lon = id_fo.createVariable(cv_lon, 'f4', ('y','x',), zlib=True) ; ido_lon.units = cunt_lon ; ido_lon.long_name = clnm_lon #
ido_lat = id_fo.createVariable(cv_lat, 'f4', ('y','x',), zlib=True) ; ido_lat.units = cunt_lat ; ido_lat.long_name = clnm_lat
ido_tim = id_fo.createVariable(cv_tim, 'f4', (cv_tim,) , zlib=True) ; ido_tim.units = cunt_tim ; ido_tim.long_name = clnm_tim
# Creating fields in output file:
ido_var = []
iv = 0
for cvar in list_var_expected:
ido_var.append(id_fo.createVariable(cvar, 'f4', (cv_tim,'y','x',), zlib=True))
if cvar in list_flx:
idx = list_flx.index(cvar)
ido_var[iv].units = list_fnu[idx]
elif cvar in list_temp_to_degC:
ido_var[iv].units = 'degC'
else:
ido_var[iv].units = id_fi.variables[cvar].units
ido_var[iv].long_name = id_fi.variables[cvar].long_name
iv = iv + 1
    # Daily fields that will be interpolated in here:
if l_interp_daily: #lulu
for cvar in list_var_daily_expected:
ido_var.append(id_fo.createVariable(cvar, 'f4', (cv_tim,'y','x',), zlib=True))
if cvar in list_temp_to_degC:
ido_var[iv].units = 'degC'
else:
ido_var[iv].units = 'boo'
ido_var[iv].long_name = 'boo'
iv = iv + 1
# Creating extra fields:
for cvar in list_extra:
ido_var.append(id_fo.createVariable(cvar, 'f4', (cv_tim,'y','x',), zlib=True))
ido_var[iv].units = cunt_extra[iv-nbfld_tot]
ido_var[iv].long_name = clnm_extra[iv-nbfld_tot]
iv = iv + 1
# Filling coordinates:
ido_lon[:,:] = vlon[ip]
ido_lat[:,:] = vlat[jp]
# Filling fields
for jt in range(Nt):
rt = vtime[jt] ; # current time!
ido_tim[jt] = rt
iv = 0
for cvar in list_var_expected:
ido_var[iv][jt,:,:] = id_fi.variables[cvar][jt,jp,ip]
#
# Flux conversion ???
if cvar in list_flx:
idx = list_flx.index(cvar)
ido_var[iv][jt,:,:] = ido_var[iv][jt,:,:] * fact_flx[idx]
if cvar in list_temp_to_degC:
ido_var[iv][jt,:,:] = ido_var[iv][jt,:,:] - 273.15
#
iv = iv + 1
        # Daily fields that will be interpolated in here:
if l_interp_daily:
#
#print('LOLO: current time is rt =', rt)
for jtd in range(jr1, Nt_tot_daily-1):
if vtime_d[jtd] <= rt and vtime_d[jtd+1] > rt:
jr1 = jtd ; jr2 = jtd + 1
break
#print('LOLO: what we found is:', vtime_d[jr1], vtime_d[jr2])
#
ivd = 0
for cvar in list_var_daily_expected:
# Linear interpolation !!!
rslope = (xdata_d[ivd,jr2] - xdata_d[ivd,jr1])/(vtime_d[jr2] - vtime_d[jr1])
ido_var[iv][jt,:,:] = xdata_d[ivd,jr1] + rslope*(rt - vtime_d[jr1])
if cvar in list_temp_to_degC: ido_var[iv][jt,:,:] = ido_var[iv][jt,:,:] - 273.15
iv = iv + 1 ; ivd = ivd + 1
for cvar in list_extra:
ido_var[iv][jt,:,:] = rval_extra[iv-nbfld_tot]
iv = iv + 1
id_fo.About = "Input file for 'STATION_ASF' NEMO test-case, generated with 'download_prepare_ERA5_for_SASF.py' of AeroBulk (https://github.com/brodeau/aerobulk)."
id_fi.close()
id_fo.close()
|
brodeau/aerobulk
|
python/misc/download_prepare_ERA5_for_SASF.py
|
Python
|
gpl-3.0
| 17,247
|
[
"NetCDF"
] |
d5e29cf044577ff71ed8910b1ab40bde3d4f7773dbe15298c4d21a4f14a48785
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 16 17:56:14 2016
@author: agiovann
"""
from __future__ import division
from __future__ import print_function
from past.builtins import basestring
from builtins import zip
from builtins import map
from builtins import str
from builtins import range
from past.utils import old_div
import os
import cv2
import h5py
import numpy as np
import pylab as pl
from glob import glob
import ca_source_extraction as cse  # re-enabled: cse.utilities is used below
import caiman as cb
from scipy import signal
import scipy
import sys
from ipyparallel import Client
from time import time
from scipy.sparse import csc, csr, coo_matrix
from scipy.spatial.distance import cdist
from scipy import ndimage
from scipy.optimize import linear_sum_assignment
from sklearn.utils.linear_assignment_ import linear_assignment
import re
import pickle
#%% Process triggers
def extract_triggers(file_list, read_dictionaries=False):
"""Extract triggers from Bens' tiff file and create readable dictionaries
Parameterskdkd
-----------
file_list: list of tif files or npz files containing the iage description
Returns
-------
triggers: list
[idx_CS, idx_US, trial_type, number_of_frames]. Trial types: 0 CS alone, 1 US alone, 2 CS US
trigger_names: list
file name associated (without extension)
Example:
fls=glob.glob('2016*.tif')
fls.sort()
triggers,trigger_names=extract_triggers(fls[:5],read_dictionaries=False)
np.savez('all_triggers.npz',triggers=triggers,trigger_names=trigger_names)
"""
triggers = []
trigger_names = []
for fl in file_list:
print(fl)
fn = fl[:-4] + '_ImgDescr.npz'
if read_dictionaries:
with np.load(fn) as idr:
image_descriptions = idr['image_descriptions']
else:
image_descriptions = cb.utils.get_image_description_SI(fl)
print('*****************')
np.savez(fn, image_descriptions=image_descriptions)
trig_vect = np.zeros(4) * np.nan
for idx, image_description in enumerate(image_descriptions):
i2cd = image_description['I2CData']
if isinstance(i2cd, basestring):
if i2cd.find('US_ON') >= 0:
trig_vect[1] = image_description['frameNumberAcquisition'] - 1
if i2cd.find('CS_ON') >= 0:
trig_vect[0] = image_description['frameNumberAcquisition'] - 1
if np.nansum(trig_vect > 0) == 2:
trig_vect[2] = 2
elif trig_vect[0] > 0:
trig_vect[2] = 0
elif trig_vect[1] > 0:
trig_vect[2] = 1
else:
raise Exception('No triggers present in trial')
trig_vect[3] = idx + 1
triggers.append(trig_vect)
trigger_names.append(fl[:-4])
print((triggers[-1]))
return triggers, trigger_names
#%%
def downsample_triggers(triggers, fraction_downsample=1):
""" downample triggers so as to make them in line with the movies
Parameters
----------
triggers: list=Ftraces[idx]
output of extract_triggers function
fraction_downsample: float
fraction the data is shrinked in the time axis
"""
triggers[:, [0, 1, 3]] = np.round(
triggers[:, [0, 1, 3]] * fraction_downsample)
# triggers[-1,[0,1,3]]=np.floor(triggers[-1,[0,1,3]]*fraction_downsample)
# triggers[-1]=np.cumsum(triggers[-1])
# real_triggers=triggers[:-1]+np.concatenate([np.atleast_1d(0), triggers[-1,:-1]])[np.newaxis,:]
#
# trg=real_triggers[1][triggers[-2]==2]+np.arange(-5,8)[:,np.newaxis]
#
# trg=np.int64(trg)
return triggers
#%%
def get_behavior_traces(fname, t0, t1, freq, ISI, draw_rois=False, plot_traces=False, mov_filt_1d=True, window_hp=201, window_lp=3, interpolate=True, EXPECTED_ISI=.25):
"""
From hdf5 movies extract eyelid closure and wheel movement
Parameters
----------
fname: str
file name of the hdf5 file
t0,t1: float.
Times of beginning and end of trials (in general 0 and 8 for our dataset) to build the absolute time vector
freq: float
frequency used to build the final time vector
ISI: float
        inter-stimulus interval
draw_rois: bool
whether to manually draw the eyelid contour
plot_traces: bool
whether to plot the traces during extraction
mov_filt_1d: bool
whether to filter the movie after extracting the average or ROIs. The alternative is a 3D filter that can be very computationally expensive
window_lp, window_hp: ints
number of frames to be used to median filter the data. It is needed because of the light IR artifact coming out of the eye
Returns
-------
res: dict
dictionary with fields
'eyelid': eyelid trace
'wheel': wheel trace
            'time': absolute time vector
'trials': corresponding indexes of the trials
'trial_info': for each trial it returns start trial, end trial, time CS, time US, trial type (CS:0 US:1 CS+US:2)
'idx_CS_US': idx trial CS US
'idx_US': idx trial US
'idx_CS': idx trial CS
"""
CS_ALONE = 0
US_ALONE = 1
CS_US = 2
meta_inf = fname[:-7] + 'data.h5'
time_abs = np.linspace(t0, t1, freq * (t1 - t0))
T = len(time_abs)
t_us = 0
t_cs = 0
n_samples_ISI = np.int(ISI * freq)
t_uss = []
ISIs = []
eye_traces = []
wheel_traces = []
trial_info = []
tims = []
with h5py.File(fname) as f:
with h5py.File(meta_inf) as dt:
rois = np.asarray(dt['roi'], np.float32)
trials = list(f.keys())
trials.sort(key=lambda x: np.int(x.replace('trial_', '')))
trials_idx = [np.int(x.replace('trial_', '')) - 1 for x in trials]
trials_idx_ = []
for tr, idx_tr in zip(trials[:], trials_idx[:]):
if plot_traces:
pl.cla()
print(tr)
trial = f[tr]
mov = np.asarray(trial['mov'])
if draw_rois:
pl.imshow(np.mean(mov, 0))
pl.xlabel('Draw eye')
pts = pl.ginput(-1)
pts = np.asarray(pts, dtype=np.int32)
data = np.zeros(np.shape(mov)[1:], dtype=np.int32)
# if CV_VERSION == 2:
#lt = cv2.CV_AA
# elif CV_VERSION == 3:
lt = cv2.LINE_AA
cv2.fillConvexPoly(data, pts, (1, 1, 1), lineType=lt)
rois[0] = data
pl.close()
pl.imshow(np.mean(mov, 0))
pl.xlabel('Draw wheel')
pts = pl.ginput(-1)
pts = np.asarray(pts, dtype=np.int32)
data = np.zeros(np.shape(mov)[1:], dtype=np.int32)
# if CV_VERSION == 2:
#lt = cv2.CV_AA
# elif CV_VERSION == 3:
lt = cv2.LINE_AA
cv2.fillConvexPoly(data, pts, (1, 1, 1), lineType=lt)
rois[1] = data
pl.close()
# eye_trace=np.mean(mov*rois[0],axis=(1,2))
# mov_trace=np.mean((np.diff(np.asarray(mov,dtype=np.float32),axis=0)**2)*rois[1],axis=(1,2))
mov = np.transpose(mov, [0, 2, 1])
mov = mov[:, :, ::-1]
if mov.shape[0] > 0:
ts = np.array(trial['ts'])
if np.size(ts) > 0:
assert np.std(
np.diff(ts)) < 0.005, 'Time stamps of behaviour are unreliable'
if interpolate:
new_ts = np.linspace(
0, ts[-1, 0] - ts[0, 0], np.shape(mov)[0])
if dt['trials'][idx_tr, -1] == US_ALONE:
t_us = np.maximum(
t_us, dt['trials'][idx_tr, 3] - dt['trials'][idx_tr, 0])
mmm = mov[:n_samples_ISI].copy()
mov = mov[:-n_samples_ISI]
mov = np.concatenate([mmm, mov])
elif dt['trials'][idx_tr, -1] == CS_US:
t_cs = np.maximum(
t_cs, dt['trials'][idx_tr, 2] - dt['trials'][idx_tr, 0])
t_us = np.maximum(
t_us, dt['trials'][idx_tr, 3] - dt['trials'][idx_tr, 0])
t_uss.append(t_us)
ISI = t_us - t_cs
ISIs.append(ISI)
n_samples_ISI = np.int(ISI * freq)
else:
t_cs = np.maximum(
t_cs, dt['trials'][idx_tr, 2] - dt['trials'][idx_tr, 0])
new_ts = new_ts
tims.append(new_ts)
else:
start, end, t_CS, t_US = dt['trials'][idx_tr,
:-1] - dt['trials'][idx_tr, 0]
f_rate = np.median(np.diff(ts[:, 0]))
ISI = t_US - t_CS
idx_US = np.int(old_div(t_US, f_rate))
idx_CS = np.int(old_div(t_CS, f_rate))
fr_before_US = np.int(
old_div((t_US - start - .1), f_rate))
fr_after_US = np.int(
old_div((end - .1 - t_US), f_rate))
idx_abs = np.arange(-fr_before_US, fr_after_US)
time_abs = idx_abs * f_rate
                            assert np.abs(ISI - EXPECTED_ISI) < .01, str(np.abs(ISI - EXPECTED_ISI)) + \
                                ': the distance from CS to US is different from what was expected'
# trig_US=
# new_ts=
mov_e = cb.movie(
mov * rois[0][::-1].T, fr=old_div(1, np.mean(np.diff(new_ts))))
mov_w = cb.movie(
mov * rois[1][::-1].T, fr=old_div(1, np.mean(np.diff(new_ts))))
x_max_w, y_max_w = np.max(np.nonzero(np.max(mov_w, 0)), 1)
x_min_w, y_min_w = np.min(np.nonzero(np.max(mov_w, 0)), 1)
x_max_e, y_max_e = np.max(np.nonzero(np.max(mov_e, 0)), 1)
x_min_e, y_min_e = np.min(np.nonzero(np.max(mov_e, 0)), 1)
mov_e = mov_e[:, x_min_e:x_max_e, y_min_e:y_max_e]
mov_w = mov_w[:, x_min_w:x_max_w, y_min_w:y_max_w]
# mpart=mov[:20].copy()
# md=cse.utilities.mode_robust(mpart.flatten())
# N=np.sum(mpart<=md)
# mpart[mpart>md]=md
# mpart[mpart==0]=md
# mpart=mpart-md
# std=np.sqrt(np.sum(mpart**2)/N)
# thr=md+10*std
#
# thr=np.minimum(255,thr)
# return mov
if mov_filt_1d:
mov_e = np.mean(mov_e, axis=(1, 2))
window_hp_ = window_hp
window_lp_ = window_lp
if plot_traces:
pl.plot(old_div((mov_e - np.mean(mov_e)),
(np.max(mov_e) - np.min(mov_e))))
else:
window_hp_ = (window_hp, 1, 1)
window_lp_ = (window_lp, 1, 1)
bl = signal.medfilt(mov_e, window_hp_)
mov_e = signal.medfilt(mov_e - bl, window_lp_)
if mov_filt_1d:
eye_ = np.atleast_2d(mov_e)
else:
eye_ = np.atleast_2d(np.mean(mov_e, axis=(1, 2)))
wheel_ = np.concatenate([np.atleast_1d(0), np.nanmean(
np.diff(mov_w, axis=0)**2, axis=(1, 2))])
if np.abs(new_ts[-1] - time_abs[-1]) > 1:
raise Exception(
'Time duration is significantly larger or smaller than reference time')
wheel_ = np.squeeze(wheel_)
eye_ = np.squeeze(eye_)
f1 = scipy.interpolate.interp1d(
new_ts, eye_, bounds_error=False, kind='linear')
eye_ = np.array(f1(time_abs))
f1 = scipy.interpolate.interp1d(
new_ts, wheel_, bounds_error=False, kind='linear')
wheel_ = np.array(f1(time_abs))
if plot_traces:
pl.plot(
old_div((eye_), (np.nanmax(eye_) - np.nanmin(eye_))), 'r')
pl.plot(old_div((wheel_ - np.nanmin(wheel_)),
np.nanmax(wheel_)), 'k')
pl.pause(.01)
trials_idx_.append(idx_tr)
eye_traces.append(eye_)
wheel_traces.append(wheel_)
trial_info.append(dt['trials'][idx_tr, :])
res = dict()
res['eyelid'] = eye_traces
res['wheel'] = wheel_traces
res['time'] = time_abs - np.median(t_uss)
res['trials'] = trials_idx_
res['trial_info'] = trial_info
res['idx_CS_US'] = np.where(
list(map(int, np.array(trial_info)[:, -1] == CS_US)))[0]
res['idx_US'] = np.where(
list(map(int, np.array(trial_info)[:, -1] == US_ALONE)))[0]
res['idx_CS'] = np.where(
list(map(int, np.array(trial_info)[:, -1] == CS_ALONE)))[0]
return res
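# Hypothetical usage sketch (the file name is illustrative; the companion
# metadata file '<fname[:-7]>data.h5' must sit next to the movie file):
# res = get_behavior_traces('20160714_behavior.h5', t0=0, t1=8, freq=100.,
#                           ISI=.25, plot_traces=False)
# eye, wheel, tm = res['eyelid'], res['wheel'], res['time']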
#%%
def process_eyelid_traces(traces, time_vect, idx_CS_US, idx_US, idx_CS, thresh_CR=.1, time_CR_on=-.1, time_US_on=.05):
"""
preprocess traces output of get_behavior_traces
Parameters:
----------
traces: ndarray (N trials X t time points)
eyelid traces output of get_behavior_traces.
thresh_CR: float
fraction of eyelid closure considered a CR
time_CR_on: float
time of alleged beginning of CRs
time_US_on: float
time when US is considered to induce have a UR
Returns:
-------
eye_traces: ndarray
normalized eyelid traces
trigs: dict
dictionary containing various subdivision of the triggers according to behavioral responses
'idxCSUSCR': index of trials with CS+US with CR
'idxCSUSNOCR': index of trials with CS+US without CR
'idxCSCR':
'idxCSNOCR':
'idxNOCR': index of trials with no CRs
'idxCR': index of trials with CRs
'idxUS':
"""
# normalize by max amplitudes at US
eye_traces = old_div(traces, np.nanmax(np.nanmedian(traces[np.hstack(
[idx_CS_US, idx_US])][:, np.logical_and(time_vect > time_US_on, time_vect < time_US_on + .4)], 0)))
amplitudes_at_US = np.mean(eye_traces[:, np.logical_and(
time_vect > time_CR_on, time_vect <= time_US_on)], 1)
trigs = dict()
trigs['idxCSUSCR'] = idx_CS_US[np.where(
amplitudes_at_US[idx_CS_US] > thresh_CR)[-1]]
trigs['idxCSUSNOCR'] = idx_CS_US[np.where(
amplitudes_at_US[idx_CS_US] < thresh_CR)[-1]]
trigs['idxCSCR'] = idx_CS[np.where(
amplitudes_at_US[idx_CS] > thresh_CR)[-1]]
trigs['idxCSNOCR'] = idx_CS[np.where(
amplitudes_at_US[idx_CS] < thresh_CR)[-1]]
trigs['idxNOCR'] = np.union1d(trigs['idxCSUSNOCR'], trigs['idxCSNOCR'])
trigs['idxCR'] = np.union1d(trigs['idxCSUSCR'], trigs['idxCSCR'])
trigs['idxUS'] = idx_US
return eye_traces, amplitudes_at_US, trigs
#%%
def process_wheel_traces(traces, time_vect, thresh_MOV_iqr=3, time_CS_on=-.25, time_US_on=0):
tmp = traces[:, time_vect < time_CS_on]
wheel_traces = old_div(
traces, (np.percentile(tmp, 75) - np.percentile(tmp, 25)))
movement_at_CS = np.max(wheel_traces[:, np.logical_and(
time_vect > time_CS_on, time_vect <= time_US_on)], 1)
trigs = dict()
trigs['idxMOV'] = np.where(movement_at_CS > thresh_MOV_iqr)[-1]
trigs['idxNO_MOV'] = np.where(movement_at_CS < thresh_MOV_iqr)[-1]
return wheel_traces, movement_at_CS, trigs
#%%
def process_wheel_traces_talmo(wheel_mms_TM_, timestamps_TM_, tm, thresh_MOV=.2, time_CS_on=-.25, time_US_on=0):
wheel_traces = []
for tr_, tm_ in zip(wheel_mms_TM_, timestamps_TM_):
if len(tm_) < len(tm):
#print ['Adjusting the samples:',len(tm)-len(tm_)]
wheel_traces.append(
np.pad(tr_, (0, len(tm) - len(tm_)), mode='edge'))
elif len(tm_) > len(tm):
wheel_traces.append(tr_[len(tm_) - len(tm):])
#print ['Removing the samples:',len(tm)-len(tm_)]
else:
wheel_traces.append(tr_)
# wheel_traces=np.abs(np.array(wheel_traces))/10 # to cm
# tmp = traces[:,time_vect<time_CS_on]
wheel_traces = np.abs(np.array(wheel_traces))
# wheel_traces=traces/(np.percentile(tmp,75)-np.percentile(tmp,25))
movement_at_CS = np.max(
wheel_traces[:, np.logical_and(tm > time_CS_on, tm <= time_US_on)], 1)
trigs = dict()
trigs['idxMOV'] = np.where(movement_at_CS > thresh_MOV)[-1]
trigs['idxNO_MOV'] = np.where(movement_at_CS < thresh_MOV)[-1]
return wheel_traces, movement_at_CS, trigs
#%%
def load_results(f_results):
"""
Load results from CNMF on various FOVs and merge them after some preprocessing
"""
# load data
i = 0
A_s = []
C_s = []
YrA_s = []
Cn_s = []
shape = None
b_s = []
f_s = []
for f_res in f_results:
print(f_res)
i += 1
with np.load(f_res) as ld:
A_s.append(csc.csc_matrix(ld['A2']))
C_s.append(ld['C2'])
YrA_s.append(ld['YrA'])
Cn_s.append(ld['Cn'])
b_s.append(ld['b2'])
f_s.append(ld['f2'])
if shape is not None:
shape_new = (ld['d1'], ld['d2'])
if shape_new != shape:
raise Exception('Shapes of FOVs not matching')
else:
shape = shape_new
else:
shape = (ld['d1'], ld['d2'])
return A_s, C_s, YrA_s, Cn_s, b_s, f_s, shape
#%% threshold and remove spurious components
def threshold_components(A_s, shape, min_size=5, max_size=np.inf, max_perc=.5, remove_unconnected_components=True):
"""
Threshold components output of a CNMF algorithm (A matrices)
Parameters:
----------
A_s: list
list of A matrice output from CNMF
min_size: int
min size of the component in pixels
max_size: int
max size of the component in pixels
max_perc: float
fraction of the maximum of each component used to threshold
remove_unconnected_components: boolean
whether to remove components that are fragmented in space
Returns:
-------
B_s: list of the thresholded components
lab_imgs: image representing the components in ndimage format
cm_s: center of masses of each components
"""
B_s = []
lab_imgs = []
cm_s = []
for A_ in A_s:
print('*')
max_comps = A_.max(0).todense().T
tmp = []
cm = []
lim = np.zeros(shape)
for idx, a in enumerate(A_.T):
# create mask by thresholding to 50% of the max
mask = np.reshape(a.todense() > (max_comps[idx] * max_perc), shape)
label_im, nb_labels = ndimage.label(mask)
sizes = ndimage.sum(mask, label_im, list(range(nb_labels + 1)))
if remove_unconnected_components:
l_largest = (label_im == np.argmax(sizes))
cm.append(scipy.ndimage.measurements.center_of_mass(
l_largest, l_largest))
lim[l_largest] = (idx + 1)
# #remove connected components that are too small
mask_size = np.logical_or(sizes < min_size, sizes > max_size)
if np.sum(mask_size[1:]) > 1:
print(
('removing ' + str(np.sum(mask_size[1:]) - 1) + ' components'))
remove_pixel = mask_size[label_im]
label_im[remove_pixel] = 0
label_im = (label_im > 0) * 1
tmp.append(label_im.flatten())
cm_s.append(cm)
lab_imgs.append(lim)
B_s.append(csc.csc_matrix(np.array(tmp)).T)
return B_s, lab_imgs, cm_s
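# Typical invocation (sketch, mirroring the commented-out call further below):
# B_s, lab_imgs, cm_s = threshold_components(A_s, shape, min_size=5,
#                                            max_size=50, max_perc=.5)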
#%% compute mask distances
def distance_masks(M_s, cm_s, max_dist):
"""
Compute distance matrix based on an intersection over union metric. Matrix are compared in order, with matrix i compared with matrix i+1
Parameters
----------
M_s: list of ndarrays
The thresholded A matrices (masks) to compare, output of threshold_components
cm_s: list of list of 2-ples
the centroids of the components in each M_s
max_dist: float
maximum distance among centroids allowed between components. This corresponds to a distance at which two components are surely disjoined
Returns:
--------
D_s: list of matrix distances
"""
D_s = []
for M1, M2, cm1, cm2 in zip(M_s[:-1], M_s[1:], cm_s[:-1], cm_s[1:]):
print('New Pair **')
M1 = M1.copy()[:, :]
M2 = M2.copy()[:, :]
d_1 = np.shape(M1)[-1]
d_2 = np.shape(M2)[-1]
D = np.ones((d_1, d_2))
cm1 = np.array(cm1)
cm2 = np.array(cm2)
for i in range(d_1):
if i % 100 == 0:
print(i)
k = M1[:, np.repeat(i, d_2)] + M2
# h=M1[:,np.repeat(i,d_2)].copy()
# h.multiply(M2)
for j in range(d_2):
dist = np.linalg.norm(cm1[i] - cm2[j])
if dist < max_dist:
union = k[:, j].sum()
# intersection = h[:,j].nnz
intersection = np.array(
M1[:, i].T.dot(M2[:, j]).todense()).squeeze()
## intersect= np.sum(np.logical_xor(M1[:,i],M2[:,j]))
# union=np.sum(np.logical_or(M1[:,i],M2[:,j]))
if union > 0:
D[i, j] = 1 - 1. * intersection / \
(union - intersection)
else:
# print 'empty component: setting distance to max'
D[i, j] = 1.
if np.isnan(D[i, j]):
raise Exception('Nan value produced. Error in inputs')
else:
D[i, j] = 1
D_s.append(D)
return D_s
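# Toy check of the metric (hypothetical 4-pixel binary masks): for
# m1 = [1, 1, 0, 0] and m2 = [0, 1, 1, 0], intersection = 1 and
# |union| = sum(m1) + sum(m2) - intersection = 3, so D = 1 - 1/3 ~ 0.667.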
#%% find matches
def find_matches(D_s, print_assignment=False):
matches = []
costs = []
t_start = time()
for ii, D in enumerate(D_s):
DD = D.copy()
if np.sum(np.where(np.isnan(DD))) > 0:
raise Exception('Distance Matrix contains NaN, not allowed!')
# indexes = m.compute(DD)
# indexes = linear_assignment(DD)
indexes = linear_sum_assignment(DD)
indexes2 = [(ind1, ind2) for ind1, ind2 in zip(indexes[0], indexes[1])]
matches.append(indexes)
DD = D.copy()
total = []
for row, column in indexes2:
value = DD[row, column]
if print_assignment:
print(('(%d, %d) -> %f' % (row, column, value)))
total.append(value)
print(('FOV: %d, shape: %d,%d total cost: %f' %
(ii, DD.shape[0], DD.shape[1], np.sum(total))))
print((time() - t_start))
costs.append(total)
return matches, costs
#%%
def link_neurons(matches, costs, max_cost=0.6, min_FOV_present=None):
"""
Link neurons from different FOVs given matches and costs obtained from the hungarian algorithm
Parameters
----------
matches: lists of list of tuple
output of the find_matches function
costs: list of lists of scalars
cost associated to each match in matches
max_cost: float
maximum allowed value of the 1- intersection over union metric
min_FOV_present: int
        number of FOVs that must consecutively contain the neuron starting from 0. If None,
        the neuron must be present in each FOV
Returns:
--------
neurons: list of arrays representing the indices of neurons in each FOV
"""
if min_FOV_present is None:
min_FOV_present = len(matches)
neurons = []
num_neurons = 0
# Yr_tot=[]
num_chunks = len(matches) + 1
for idx in range(len(matches[0][0])):
neuron = []
neuron.append(idx)
# Yr=YrA_s[0][idx]+C_s[0][idx]
for match, cost, chk in zip(matches, costs, list(range(1, num_chunks))):
rows, cols = match
m_neur = np.where(rows == neuron[-1])[0].squeeze()
if m_neur.size > 0:
if cost[m_neur] <= max_cost:
neuron.append(cols[m_neur])
# Yr=np.hstack([Yr,YrA_s[chk][idx]+C_s[chk][idx]])
else:
break
else:
break
if len(neuron) > min_FOV_present:
num_neurons += 1
neurons.append(neuron)
# Yr_tot.append(Yr)
neurons = np.array(neurons).T
print(('num_neurons:' + str(num_neurons)))
# Yr_tot=np.array(Yr_tot)
return neurons
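# Typical chaining of the registration steps (sketch; thresholds illustrative):
# D_s = distance_masks(B_s, cm_s, max_dist=30)
# matches, costs = find_matches(D_s)
# neurons = link_neurons(matches, costs, max_cost=.6)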
#%%
def generate_linked_traces(mov_names, chunk_sizes, A, b, f):
"""
Generate traces (DFF,BL and DF) for a group of movies that share the same A,b and f,
by applying the same transformation over a set of movies. This removes
the contamination of neuropil and then masks the components.
Parameters:
-----------
mov_names: list of path to movies associated with the same A,b,and f
chunk_sizes:list containing the number of frames in each movie
A,b and f: from CNMF
    Returns:
    --------
    traces, traces_DFF, traces_BL: lists of ndarrays, one entry per movie
"""
num_chunks = np.sum(chunk_sizes)
# A = A_s[idx][:,neurons[idx]]
nA = (A.power(2)).sum(0)
# bckg=cb.movie(cb.to_3D(b.dot(f).T,(-1,shape[0],shape[1])),fr=1)
f = np.array(f).squeeze()
# bckg=bckg.resize(1,1,1.*num_chunks/b_size)
b_size = f.shape[0]
# if num_chunks != b_size:
# raise Exception('The number of frames are not matching')
#
counter = 0
f_in = np.atleast_2d(scipy.signal.resample(f, num_chunks))
traces = []
traces_BL = []
traces_DFF = []
for jj, mv in enumerate(mov_names):
mov_chunk_name = os.path.splitext(os.path.split(mv)[-1])[0] + '.hdf5'
mov_chunk_name = os.path.join(os.path.dirname(mv), mov_chunk_name)
print(mov_chunk_name)
m = cb.load(mov_chunk_name).to_2D().T
bckg_1 = b.dot(f_in[:, counter:counter + chunk_sizes[jj]])
m = m - bckg_1
# (m).play(backend='opencv',gain=10.,fr=33)
# m=np.reshape(m,(-1,np.prod(shape)),order='F').T
# bckg_1=np.reshape(bckg_1,(-1,np.prod(shape)),order='F').T
counter += chunk_sizes[jj]
Y_r_sig = A.T.dot(m)
Y_r_sig = scipy.sparse.linalg.spsolve(
scipy.sparse.spdiags(np.sqrt(nA), 0, nA.size, nA.size), Y_r_sig)
traces.append(Y_r_sig)
Y_r_bl = A.T.dot(bckg_1)
Y_r_bl = scipy.sparse.linalg.spsolve(
scipy.sparse.spdiags(np.sqrt(nA), 0, nA.size, nA.size), Y_r_bl)
traces_BL.append(Y_r_bl)
Y_r_bl = cse.utilities.mode_robust(Y_r_bl, 1)
traces_DFF.append(old_div(Y_r_sig, Y_r_bl[:, np.newaxis]))
return traces, traces_DFF, traces_BL
#%%
def extract_traces_mat(traces, triggers_idx, f_rate, time_before=2.7, time_after=5.3):
"""
Equivalent of take for the input format we are using.
Parameters:
-----------
traces: list of ndarrays
each element is one trial, the dimensions are n_neurons x time
triggers_idx: list of ints
one for each element of traces, is the index of the trigger to align the traces to
f_rate: double
frame rate associated to the traces
time_before,time_after: double
time before and after the trigger establishing the boundary of the extracted subtraces
Returns:
--------
traces_mat: matrix containing traces with dimensions trials X cell X time
time_mat: associated time vector
"""
samples_before = np.int(time_before * f_rate)
samples_after = np.int(time_after * f_rate)
if traces[0].ndim > 1:
traces_mat = np.zeros(
[len(traces), len(traces[0]), samples_after + samples_before])
else:
traces_mat = np.zeros([len(traces), 1, samples_after + samples_before])
for idx, tr in enumerate(traces):
# print samples_before,samples_after
# print np.int(triggers_idx[idx]-samples_before),np.int(triggers_idx[idx]+samples_after)
traces_mat[idx] = traces[idx][:, np.int(
triggers_idx[idx] - samples_before):np.int(triggers_idx[idx] + samples_after)]
time_mat = old_div(np.arange(-samples_before, samples_after), f_rate)
return traces_mat, time_mat
#%%
def load_data_from_stored_results(base_folder, load_masks=False, thresh_CR=0.1, threshold_responsiveness=0.1,
is_blob=True, time_CR_on=-.1, time_US_on=.05, thresh_MOV_iqr=1000, time_CS_on_MOV=-.25, time_US_on_MOV=0):
"""
From the partial data stored retrieves variables of interest
"""
import calblitz as cb
import numpy as np
import scipy
import pylab as pl
import pickle
from glob import glob
# base_folder='/mnt/ceph/users/agiovann/ImagingData/eyeblink/b35/20160714143248/'
if is_blob:
with np.load(base_folder + 'distance_masks.npz') as ld:
D_s = ld['D_s']
with np.load(base_folder + 'neurons_matching.npz') as ld:
neurons = ld['neurons']
locals().update(ld)
with np.load(base_folder + 'all_triggers.npz') as at:
triggers_img = at['triggers']
trigger_names_img = at['trigger_names']
if load_masks:
f_results = glob(base_folder + '*results_analysis.npz')
f_results.sort()
for rs in f_results:
print(rs)
print('*****')
A_s, C_s, YrA_s, Cn_s, b_s, f_s, shape = load_results(f_results)
if is_blob:
remove_unconnected_components = True
else:
remove_unconnected_components = False
neurons = []
for xx in A_s:
neurons.append(np.arange(A_s[0].shape[-1]))
# B_s, lab_imgs, cm_s = threshold_components(A_s,shape, min_size=5,max_size=50,max_perc=.5,remove_unconnected_components=remove_unconnected_components)
tmpl_name = glob(base_folder + '*template_total.npz')[0]
with np.load(tmpl_name) as ld:
mov_names_each = ld['movie_names']
A_each = []
b_each = []
f_each = []
for idx, mov_names in enumerate(mov_names_each):
idx = 0
A_each.append(A_s[idx][:, neurons[idx]])
# C=C_s[idx][neurons[idx]]
# YrA=YrA_s[idx][neurons[idx]]
b_each.append(b_s[idx])
f_each.append(f_s[idx])
else:
A_each = []
b_each = []
f_each = []
with np.load(base_folder + 'behavioral_traces.npz') as ld:
res_bt = dict(**ld)
tm = res_bt['time']
f_rate_bh = old_div(1, np.median(np.diff(tm)))
ISI = res_bt['trial_info'][0][3] - res_bt['trial_info'][0][2]
eye_traces = np.array(res_bt['eyelid'])
idx_CS_US = res_bt['idx_CS_US']
idx_US = res_bt['idx_US']
idx_CS = res_bt['idx_CS']
idx_ALL = np.sort(np.hstack([idx_CS_US, idx_US, idx_CS]))
eye_traces, amplitudes_at_US, trig_CRs = process_eyelid_traces(
eye_traces, tm, idx_CS_US, idx_US, idx_CS, thresh_CR=thresh_CR, time_CR_on=time_CR_on, time_US_on=time_US_on)
idxCSUSCR = trig_CRs['idxCSUSCR']
idxCSUSNOCR = trig_CRs['idxCSUSNOCR']
idxCSCR = trig_CRs['idxCSCR']
idxCSNOCR = trig_CRs['idxCSNOCR']
idxNOCR = trig_CRs['idxNOCR']
idxCR = trig_CRs['idxCR']
idxUS = trig_CRs['idxUS']
idxCSCSUS = np.concatenate([idx_CS, idx_CS_US])
    with open(base_folder + 'traces.pk', 'rb') as f:  # binary mode for pickle
trdict = pickle.load(f)
traces_DFF = trdict['traces_DFF']
triggers_img = np.array(triggers_img)
idx_expected_US = np.zeros_like(triggers_img[:, 1])
idx_expected_US = triggers_img[:, 1]
idx_expected_US[idx_CS] = np.nanmedian(triggers_img[:, 1])
triggers_img = np.concatenate(
[triggers_img, idx_expected_US[:, np.newaxis].astype(np.int)], -1)
img_descr = cb.utils.get_image_description_SI(
glob(base_folder + '2016*.tif')[0])[0]
f_rate = img_descr['scanimage.SI.hRoiManager.scanFrameRate']
print(f_rate)
#%%
time_before = 3
time_after = 3
wheel, time_w = res_bt['wheel'], res_bt['time']
eye = eye_traces
time_e = tm
wheel_mat = np.array(
[wh[np.logical_and(time_w > -time_before, time_w < time_after)] for wh in wheel])
eye_mat = np.array(
[e[np.logical_and(time_e > -time_before, time_e < time_after)] for e in eye])
time_w_mat = time_w[np.logical_and(
time_w > -time_before, time_w < time_after)]
time_e_mat = time_e[np.logical_and(
time_e > -time_before, time_e < time_after)]
traces_mat, time_mat = extract_traces_mat(
traces_DFF, triggers_img[:, 1], f_rate, time_before=time_before, time_after=time_after)
# traces_mat,time_mat=scipy.signal.resample(traces_mat, len(time_w_mat),t=time_mat ,axis=-1)
#%
wheel_traces, movement_at_CS, trigs_mov = process_wheel_traces(np.array(
res_bt['wheel']), tm, thresh_MOV_iqr=thresh_MOV_iqr, time_CS_on=time_CS_on_MOV, time_US_on=time_US_on_MOV)
print('fraction with movement:')
print((len(trigs_mov['idxMOV']) * 1. / len(trigs_mov['idxNO_MOV'])))
#%%
triggers_out = dict()
triggers_out['mn_idx_CS_US'] = np.intersect1d(
idx_CS_US, trigs_mov['idxNO_MOV'])
triggers_out['nm_idx_US'] = np.intersect1d(idx_US, trigs_mov['idxNO_MOV'])
triggers_out['nm_idx_CS'] = np.intersect1d(idx_CS, trigs_mov['idxNO_MOV'])
triggers_out['nm_idxCSUSCR'] = np.intersect1d(
idxCSUSCR, trigs_mov['idxNO_MOV'])
triggers_out['nm_idxCSUSNOCR'] = np.intersect1d(
idxCSUSNOCR, trigs_mov['idxNO_MOV'])
triggers_out['nm_idxCSCR'] = np.intersect1d(
idxCSCR, trigs_mov['idxNO_MOV'])
triggers_out['nm_idxCSNOCR'] = np.intersect1d(
idxCSNOCR, trigs_mov['idxNO_MOV'])
triggers_out['nm_idxNOCR'] = np.intersect1d(
idxNOCR, trigs_mov['idxNO_MOV'])
triggers_out['nm_idxCR'] = np.intersect1d(idxCR, trigs_mov['idxNO_MOV'])
triggers_out['nm_idxUS'] = np.intersect1d(idxUS, trigs_mov['idxNO_MOV'])
triggers_out['nm_idxCSCSUS'] = np.intersect1d(
idxCSCSUS, trigs_mov['idxNO_MOV'])
#%%
newf_rate = old_div(1, np.median(np.diff(time_mat)))
ftraces = traces_mat.copy()
samples_before = np.int(time_before * newf_rate)
ISI_frames = np.int(ISI * newf_rate)
ftraces = ftraces - np.median(ftraces[:, :, np.logical_and(
time_mat > -1, time_mat < -ISI)], axis=(2))[:, :, np.newaxis]
amplitudes_responses = np.mean(
ftraces[:, :, np.logical_and(time_mat > -.03, time_mat < .04)], -1)
cell_responsiveness = np.median(
amplitudes_responses[triggers_out['nm_idxCSCSUS']], axis=0)
idx_responsive = np.where(cell_responsiveness >
threshold_responsiveness)[0]
fraction_responsive = len(np.where(cell_responsiveness > threshold_responsiveness)[
0]) * 1. / np.shape(ftraces)[1]
print('fraction responsive:')
print(fraction_responsive)
ftraces = ftraces[:, cell_responsiveness > threshold_responsiveness, :]
amplitudes_responses = np.mean(
ftraces[:, :, samples_before + ISI_frames - 1:samples_before + ISI_frames + 1], -1)
traces = dict()
traces['fluo_traces'] = ftraces
traces['eye_traces'] = eye_mat
traces['wheel_traces'] = wheel_mat
traces['time_fluo'] = time_mat
traces['time_eye'] = time_e_mat
traces['time_wheel'] = time_w_mat
amplitudes = dict()
amplitudes['amplitudes_fluo'] = amplitudes_responses
amplitudes['amplitudes_eyelid'] = amplitudes_at_US
masks = dict()
masks['A_each'] = [A[:, idx_responsive] for A in A_each]
masks['b_each'] = b_each
masks['f_each'] = f_each
return traces, masks, triggers_out, amplitudes, ISI
#%%
def fast_process_day(base_folder, min_radius=3, max_radius=4):
import pickle
import pylab as pl
try:
tmpl_name = glob(base_folder + '*template_total.npz')[0]
print(tmpl_name)
with np.load(tmpl_name) as ld:
mov_names_each = ld['movie_names']
f_results = glob(base_folder + '*results_analysis.npz')
f_results.sort()
A_s, C_s, YrA_s, Cn_s, b_s, f_s, shape = load_results(f_results)
# B_s, lab_imgs, cm_s = threshold_components(A_s,shape, min_size=10,max_size=50,max_perc=.5)
traces = []
traces_BL = []
traces_DFF = []
for idx, mov_names in enumerate(mov_names_each):
A = A_s[idx]
# C=C_s[idx][neurons[idx]]
# YrA=YrA_s[idx][neurons[idx]]
b = b_s[idx]
f = f_s[idx]
chunk_sizes = []
for mv in mov_names:
base_name = os.path.splitext(os.path.split(mv)[-1])[0]
with np.load(base_folder + base_name + '.npz') as ld:
TT = len(ld['shifts'])
chunk_sizes.append(TT)
masks_ws, pos_examples, neg_examples = cse.utilities.extract_binary_masks_blob(A, min_radius,
shape, num_std_threshold=1, minCircularity=0.5, minInertiaRatio=0.2, minConvexity=.8)
# sizes=np.sum(masks_ws,(1,2))
# pos_examples=np.intersect1d(pos_examples,np.where(sizes<max_radius**2*np.pi)[0])
print((len(pos_examples)))
# pl.close()
# pl.imshow(np.mean(masks_ws[pos_examples],0))
pl.pause(.1)
# A=A.tocsc()[:,pos_examples]
traces, traces_DFF, traces_BL = generate_linked_traces(
mov_names, chunk_sizes, A, b, f)
np.savez(f_results[idx][:-4] + '_masks.npz', masks_ws=masks_ws,
pos_examples=pos_examples, neg_examples=neg_examples, A=A.todense(), b=b, f=f)
            with open(f_results[idx][:-4] + '_traces.pk', 'wb') as f:  # binary mode for pickle
pickle.dump(dict(traces=traces, traces_BL=traces_BL,
traces_DFF=traces_DFF), f)
except:
print('Failed')
return False
return True
#%%
def process_fast_process_day(base_folders, save_name='temp_save.npz'):
"""
Use this after having used fast_process_day
Parameters:
----------
base_folders: list of path to base folders
Returns:
--------
triggers_chunk_fluo: triggers associated to fluorescence (one per chunk)
eyelid_chunk: eyelid (one per chunk)
wheel_chunk: wheel (one per chunk)
triggers_chunk_bh: triggers associated to behavior(one per chunk)
tm_behav: time of behavior (one per chunk)
names_chunks: names of the file associated to each chunk(one per chunk)
fluo_chunk: fluorescence traces (one per chunk)
pos_examples_chunks: indexes of examples that were classified as good by the blob detector (one per chunk)
A_chunks: masks associated (one per chunk)
"""
triggers_chunk_fluo = []
eyelid_chunk = []
wheel_chunk = []
triggers_chunk_bh = []
tm_behav = []
names_chunks = []
fluo_chunk = []
pos_examples_chunks = []
A_chunks = []
for base_folder in base_folders:
try:
print(base_folder)
with np.load(os.path.join(base_folder, 'all_triggers.npz')) as ld:
triggers = ld['triggers']
trigger_names = ld['trigger_names']
with np.load(glob(os.path.join(base_folder, '*-template_total.npz'))[0]) as ld:
movie_names = ld['movie_names']
template_each = ld['template_each']
idx_chunks = []
for name_chunk in movie_names:
idx_chunks.append([np.int(
re.search('_00[0-9][0-9][0-9]_0', nm).group(0)[2:6]) - 1 for nm in name_chunk])
with np.load(base_folder + 'behavioral_traces.npz') as ld:
res_bt = dict(**ld)
tm = res_bt['time']
f_rate_bh = old_div(1, np.median(np.diff(tm)))
ISI = np.median(
[rs[3] - rs[2] for rs in res_bt['trial_info'][res_bt['idx_CS_US']]])
trig_int = np.hstack([((res_bt['trial_info'][:, 2:4] - res_bt['trial_info'][:, 0][:, None])
* f_rate_bh), res_bt['trial_info'][:, -1][:, np.newaxis]]).astype(np.int)
trig_int[trig_int < 0] = -1
trig_int = np.hstack([trig_int, len(tm) + trig_int[:, :1] * 0])
trig_US = np.argmin(np.abs(tm))
trig_CS = np.argmin(np.abs(tm + ISI))
trig_int[res_bt['idx_CS_US'], 0] = trig_CS
trig_int[res_bt['idx_CS_US'], 1] = trig_US
trig_int[res_bt['idx_US'], 1] = trig_US
trig_int[res_bt['idx_CS'], 0] = trig_CS
eye_traces = np.array(res_bt['eyelid'])
wheel_traces = np.array(res_bt['wheel'])
fls = glob(os.path.join(
base_folder, '*.results_analysis_traces.pk'))
fls.sort()
fls_m = glob(os.path.join(
base_folder, '*.results_analysis_masks.npz'))
fls_m.sort()
for indxs, name_chunk, fl, fl_m in zip(idx_chunks, movie_names, fls, fls_m):
if np.all([nmc[:-4] for nmc in name_chunk] == trigger_names[indxs]):
triggers_chunk_fluo.append(triggers[indxs, :])
eyelid_chunk.append(eye_traces[indxs, :])
wheel_chunk.append(wheel_traces[indxs, :])
triggers_chunk_bh.append(trig_int[indxs, :])
tm_behav.append(tm)
names_chunks.append(fl)
                    with open(fl, 'rb') as f:  # binary mode for pickle
tr_dict = pickle.load(f)
print(fl)
fluo_chunk.append(tr_dict['traces_DFF'])
with np.load(fl_m) as ld:
A_chunks.append(scipy.sparse.coo_matrix(ld['A']))
pos_examples_chunks.append(ld['pos_examples'])
else:
raise Exception('Names of triggers not matching!')
except:
print(("ERROR in:" + base_folder))
# raise
import pdb
pdb.set_trace()
if save_name is not None:
np.savez(save_name, triggers_chunk_fluo=triggers_chunk_fluo, triggers_chunk_bh=triggers_chunk_bh, eyelid_chunk=eyelid_chunk, wheel_chunk=wheel_chunk,
tm_behav=tm_behav, fluo_chunk=fluo_chunk, names_chunks=names_chunks, pos_examples_chunks=pos_examples_chunks, A_chunks=A_chunks)
return triggers_chunk_fluo, eyelid_chunk, wheel_chunk, triggers_chunk_bh, tm_behav, names_chunks, fluo_chunk, pos_examples_chunks, A_chunks
|
simonsfoundation/CaImAn
|
use_cases/granule_cells/utils_granule.py
|
Python
|
gpl-2.0
| 44,321
|
[
"NEURON"
] |
e38fca532daf0a1d1f8a6da82e4ce2403d50549c9d3316ec1e91e2c8e56dab12
|
"""
Assessment of Generalized Estimating Equations using simulation.
This script checks Gaussian models.
See the generated file "gee_gaussian_simulation_check.txt" for
results.
"""
from statsmodels.compat.python import lrange
import scipy
import numpy as np
from itertools import product
from statsmodels.genmod.families import Gaussian
from statsmodels.genmod.generalized_estimating_equations import GEE
from statsmodels.genmod.cov_struct import Autoregressive, Nested
class GEE_simulator(object):
#
# Parameters that must be defined
#
# Number of groups
ngroups = None
# Standard deviation of the pure errors
error_sd = None
# The regression coefficients
params = None
# The parameters defining the dependence structure
dep_params = None
# The true scale parameter
scale = None
#
# Output parameters
#
    # Matrix of exogenous data (rows are cases, columns are
    # variables)
    exog = None
    # Matrix of endogenous data (len(endog) = exog.shape[0])
endog = None
# Matrix of time information (time.shape[0] = len(endog))
time = None
# Group labels (len(groups) = len(endog))
group = None
# Group sizes are random within this range
group_size_range = [4, 11]
# dparams_est is dparams with scale_inv appended
def print_dparams(self, dparams_est):
raise NotImplementedError
class AR_simulator(GEE_simulator):
# The distance function for determining AR correlations.
distfun = [lambda x, y: np.sqrt(np.sum((x-y)**2)),]
def print_dparams(self, dparams_est):
OUT.write("AR coefficient estimate: %8.4f\n" %
dparams_est[0])
OUT.write("AR coefficient truth: %8.4f\n" %
self.dep_params[0])
OUT.write("Error variance estimate: %8.4f\n" %
dparams_est[1])
OUT.write("Error variance truth: %8.4f\n" %
self.error_sd**2)
OUT.write("\n")
def simulate(self):
endog, exog, group, time = [], [], [], []
for i in range(self.ngroups):
gsize = np.random.randint(self.group_size_range[0],
self.group_size_range[1])
group.append([i,] * gsize)
time1 = np.random.normal(size=(gsize,2))
time.append(time1)
exog1 = np.random.normal(size=(gsize, 5))
exog1[:,0] = 1
exog.append(exog1)
# Pairwise distances within the cluster
distances = scipy.spatial.distance.cdist(time1, time1,
self.distfun[0])
# Pairwise correlations within the cluster
correlations = self.dep_params[0]**distances
correlations_sr = np.linalg.cholesky(correlations)
errors = np.dot(correlations_sr, np.random.normal(size=gsize))
endog1 = np.dot(exog1, self.params) + errors * self.error_sd
endog.append(endog1)
self.exog = np.concatenate(exog, axis=0)
self.endog = np.concatenate(endog)
self.time = np.concatenate(time, axis=0)
self.group = np.concatenate(group)
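
# Illustration of the AR dependence above (hypothetical numbers): with
# dep_params = [0.5], two observations whose time points lie at Euclidean
# distance 2 receive correlation 0.5**2 = 0.25.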
class Nested_simulator(GEE_simulator):
# Vector containing list of nest sizes (used instead of
# group_size_range).
nest_sizes = None
# Matrix of nest id's (an output parameter)
id_matrix = None
def print_dparams(self, dparams_est):
for j in range(len(self.nest_sizes)):
OUT.write("Nest %d variance estimate: %8.4f\n" % \
(j+1, dparams_est[j]))
OUT.write("Nest %d variance truth: %8.4f\n" % \
(j+1, self.dep_params[j]))
OUT.write("Error variance estimate: %8.4f\n" % \
(dparams_est[-1] - sum(dparams_est[0:-1])))
OUT.write("Error variance truth: %8.4f\n" %
self.error_sd**2)
OUT.write("\n")
def simulate(self):
group_effect_var = self.dep_params[0]
vcomp = self.dep_params[1:]
vcomp.append(0)
endog, exog, group, id_matrix = [], [], [], []
for i in range(self.ngroups):
iterators = [lrange(n) for n in self.nest_sizes]
# The random effects
variances = [np.sqrt(v)*np.random.normal(size=n)
for v,n in zip(vcomp, self.nest_sizes)]
gpe = np.random.normal() * np.sqrt(group_effect_var)
nest_all = []
for j in self.nest_sizes:
nest_all.append(set())
for nest in product(*iterators):
group.append(i)
# The sum of all random effects that apply to this
# unit
ref = gpe + sum([v[j] for v,j in zip(variances, nest)])
exog1 = np.random.normal(size=5)
exog1[0] = 1
exog.append(exog1)
error = ref + self.error_sd * np.random.normal()
endog1 = np.dot(exog1, self.params) + error
endog.append(endog1)
for j in range(len(nest)):
nest_all[j].add(tuple(nest[0:j+1]))
nest1 = [len(x)-1 for x in nest_all]
id_matrix.append(nest1[0:-1])
self.exog = np.array(exog)
self.endog = np.array(endog)
self.group = np.array(group)
self.id_matrix = np.array(id_matrix)
self.time = np.zeros_like(self.endog)
def gen_gendat_ar0(ar):
def gendat_ar0(msg = False):
ars = AR_simulator()
ars.ngroups = 200
ars.params = np.r_[0, -1, 1, 0, 0.5]
ars.error_sd = 2
ars.dep_params = [ar,]
ars.simulate()
return ars, Autoregressive()
return gendat_ar0
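# Usage sketch: gen_gendat_ar0 is a factory; the main loop below calls each
# returned function as 'da, va = gendat()' to obtain a fresh simulated
# dataset 'da' and a working Autoregressive covariance structure 'va'.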
def gen_gendat_ar1(ar):
def gendat_ar1():
ars = AR_simulator()
ars.ngroups = 200
ars.params = np.r_[0, -0.8, 1.2, 0, 0.5]
ars.error_sd = 2
ars.dep_params = [ar,]
ars.simulate()
return ars, Autoregressive()
return gendat_ar1
def gendat_nested0():
ns = Nested_simulator()
ns.error_sd = 1.
ns.params = np.r_[0., 1, 1, -1, -1]
ns.ngroups = 50
ns.nest_sizes = [10, 5]
ns.dep_params = [2., 1.]
ns.simulate()
return ns, Nested(ns.id_matrix)
def gendat_nested1():
ns = Nested_simulator()
ns.error_sd = 2.
ns.params = np.r_[0, 1, 1.3, -0.8, -1.2]
ns.ngroups = 50
ns.nest_sizes = [10, 5]
ns.dep_params = [1., 3.]
ns.simulate()
return ns, Nested(ns.id_matrix)
if __name__ == "__main__":
try:
np.set_printoptions(formatter={'all': lambda x: "%8.3f" % x},
suppress=True)
except TypeError:
# older numpy versions do not have formatter option
pass
OUT = open("gee_gaussian_simulation_check.txt", "w")
nrep = 100
gendats = [gen_gendat_ar0(ar) for ar in (0, 0.3, 0.6)]
gendats.extend([gen_gendat_ar1(ar) for ar in (0, 0.3, 0.6)])
gendats.extend([gendat_nested0, gendat_nested1])
lhs = np.array([[0., 1, 1, 0, 0],])
rhs = np.r_[0.,]
# Loop over data generating models
for gendat in gendats:
pvalues = []
params = []
std_errors = []
dep_params = []
for j in range(nrep):
da,va = gendat()
ga = Gaussian()
md = GEE(da.endog, da.exog, da.group, da.time, ga, va)
mdf = md.fit()
scale_inv = 1 / md.estimate_scale()
dep_params.append(np.r_[va.dep_params, scale_inv])
params.append(np.asarray(mdf.params))
std_errors.append(np.asarray(mdf.standard_errors()))
da,va = gendat()
ga = Gaussian()
md = GEE(da.endog, da.exog, da.group, da.time, ga, va,
constraint=(lhs, rhs))
mdf = md.fit()
score = md.score_test_results
pvalue = score["p-value"]
pvalues.append(pvalue)
dparams_mean = np.array(sum(dep_params) / len(dep_params))
OUT.write("Checking dependence parameters:\n")
da.print_dparams(dparams_mean)
params = np.array(params)
eparams = params.mean(0)
sdparams = params.std(0)
std_errors = np.array(std_errors)
std_errors = std_errors.mean(0)
OUT.write("Checking parameter values:\n")
OUT.write("Observed: ")
OUT.write(np.array_str(eparams) + "\n")
OUT.write("Expected: ")
OUT.write(np.array_str(da.params) + "\n")
OUT.write("Absolute difference: ")
OUT.write(np.array_str(eparams - da.params) + "\n")
OUT.write("Relative difference: ")
OUT.write(np.array_str((eparams - da.params) / da.params)
+ "\n")
OUT.write("\n")
OUT.write("Checking standard errors\n")
OUT.write("Observed: ")
OUT.write(np.array_str(sdparams) + "\n")
OUT.write("Expected: ")
OUT.write(np.array_str(std_errors) + "\n")
OUT.write("Absolute difference: ")
OUT.write(np.array_str(sdparams - std_errors) + "\n")
OUT.write("Relative difference: ")
OUT.write(np.array_str((sdparams - std_errors) / std_errors)
+ "\n")
OUT.write("\n")
pvalues.sort()
OUT.write("Checking constrained estimation:\n")
OUT.write("Left hand side:\n")
OUT.write(np.array_str(lhs) + "\n")
OUT.write("Right hand side:\n")
OUT.write(np.array_str(rhs) + "\n")
OUT.write("Observed p-values Expected Null p-values\n")
for q in np.arange(0.1, 0.91, 0.1):
OUT.write("%20.3f %20.3f\n" %
(pvalues[int(q*len(pvalues))], q))
OUT.write("=" * 80 + "\n\n")
OUT.close()
|
bashtage/statsmodels
|
statsmodels/genmod/tests/gee_gaussian_simulation_check.py
|
Python
|
bsd-3-clause
| 9,902
|
[
"Gaussian"
] |
de4f0460ae3a0fab5607f7f30099172196207f97703c0913e59d5a32ed0244a6
|