text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
from __future__ import division, absolute_import
from six.moves import range
from itertools import permutations
import numpy as np
import unittest
from numpy.testing import (assert_allclose, assert_equal, assert_almost_equal,
assert_array_equal)
from MDAnalysis.lib import transformations as t
"""
Testing transformations is weird because there are 2 versions of many of
these functions. This is because both python and Cython versions of
these functions exist. To test therefore, each test has to be done twice,
once for each backend.
The general pattern for this is,
1) Create tests which call self.f (the function)
2) Create mixins which define self.f (one of the two backends)
Eg:
class _ClipMatrix(object):
def test_this(self):
result = self.f(stuff)
assert_awesome(me)
class TestClipMatrixNP(_ClipMatrix):
f = staticmethod(MDAnalysis.lib.transformations._py_clip_matrix)
class TestClipMatrixCY(_ClipMatrix):
f = staticmethod(MDAnalysis.lib.transformations.clip_matrix)
Note that the function to be tested needs to be defined as a static method!
This should ensure that both versions work and are covered!
"""
# tolerance for tests
_ATOL = 1e-06
class _IdentityMatrix(object):
    def test_identity_matrix(self):
        """The identity transform is idempotent, has trace equal to its
        element sum, and matches numpy's 4x4 float64 identity."""
        eye = self.f()
        assert_allclose(eye, np.dot(eye, eye))
        assert_equal(np.sum(eye), np.trace(eye))
        assert_allclose(eye, np.identity(4, dtype=np.float64))
class TestIdentityMatrixNP(_IdentityMatrix):
f = staticmethod(t._py_identity_matrix)
class TestIdentityMatrixCy(_IdentityMatrix):
f = staticmethod(t.identity_matrix)
class _TranslationMatrix(object):
    def test_translation_matrix(self):
        """The last column of a translation matrix holds the offset."""
        offset = np.array([0.2, 0.2, 0.2])
        matrix = self.f(offset)
        assert_allclose(offset, matrix[:3, 3])
class TestTranslationMatrixNP(_TranslationMatrix):
f = staticmethod(t._py_translation_matrix)
class TestTranslationMatrixCy(_TranslationMatrix):
f = staticmethod(t.translation_matrix)
def test_translation_from_matrix():
    """Building a translation matrix and extracting the offset again must
    round-trip exactly."""
    # doesn't seem to have a Cython backend
    offset = np.array([0.2, 0.2, 0.2])
    recovered = t.translation_from_matrix(t.translation_matrix(offset))
    assert_allclose(offset, recovered)
class _ReflectionMatrix(object):
    def test_reflection_matrix(self):
        """Reflection about the plane through ``v0`` with normal ``v1``.

        Checks three analytic properties: the trace of a 4x4 reflection is
        2 (eigenvalues -1, 1, 1, 1), points on the plane are fixed, and a
        point offset by +normal reflects onto the point offset by -normal.
        """
        v0 = np.array([0.2, 0.2, 0.2, 1.0])  # point on the plane (homogeneous)
        v1 = np.array([0.4, 0.4, 0.4])       # plane normal
        R = self.f(v0, v1)
        assert_allclose(2., np.trace(R))
        # Points on the mirror plane are invariant.
        assert_allclose(v0, np.dot(R, v0))
        v2 = v0.copy()
        v2[:3] += v1
        v3 = v0.copy()
        # BUG FIX: the original decremented v2 here instead of v3, so v3
        # stayed equal to v0 and the final assertion only re-checked the
        # plane-invariance property tested above.  With the fix,
        # R @ (v0 - v1) == v0 + v1 is actually exercised.
        v3[:3] -= v1
        assert_allclose(v2, np.dot(R, v3))
class TestReflectionMatrixNP(_ReflectionMatrix):
f = staticmethod(t._py_reflection_matrix)
class TestReflectionMatrixCy(_ReflectionMatrix):
f = staticmethod(t.reflection_matrix)
def test_reflection_from_matrix():
    # Round-trip: the (point, normal) pair extracted from a reflection
    # matrix must rebuild an equivalent transform.
    v0 = np.array([0.2, 0.2, 0.2])  # arbitrary values
    v1 = np.array([0.4, 0.4, 0.4])
    M0 = t.reflection_matrix(v0, v1)
    point, normal = t.reflection_from_matrix(M0)
    M1 = t.reflection_matrix(point, normal)
    assert_equal(t.is_same_transform(M0, M1), True)
class _RotationMatrix(object):
    def test_rotation_matrix(self):
        # 90 degree turn about the z axis through (1, 0, 0): the origin
        # maps to (1, -1, 0).
        R = self.f(np.pi / 2.0, [0, 0, 1], [1, 0, 0])
        assert_allclose(np.dot(R, [0, 0, 0, 1]), [1., -1., 0., 1.])
        angle = 0.2 * 2 * np.pi  # arbitrary value
        direc = np.array([0.2, 0.2, 0.2])
        point = np.array([0.4, 0.4, 0.4])
        # Rotations are periodic in 2*pi.
        R0 = self.f(angle, direc, point)
        R1 = self.f(angle - 2 * np.pi, direc, point)
        assert_equal(t.is_same_transform(R0, R1), True)
        # Negating both the angle and the axis gives the same rotation.
        R0 = self.f(angle, direc, point)
        R1 = self.f(-angle, -direc, point)
        assert_equal(t.is_same_transform(R0, R1), True)
        I = np.identity(4, np.float64)
        # A full turn is the identity.
        assert_allclose(I, self.f(np.pi * 2, direc), atol=_ATOL)
        # trace = 2*cos(angle) + 2 -> 2 for a quarter turn.
        assert_allclose(2., np.trace(self.f(np.pi / 2, direc, point)))
class TestRotationMatrixNP(_RotationMatrix):
f = staticmethod(t._py_rotation_matrix)
class TestRotationMatrixCy(_RotationMatrix):
f = staticmethod(t.rotation_matrix)
def test_rotation_from_matrix():
    """Round-trip a rotation through rotation_from_matrix."""
    theta = 0.2 * 2 * np.pi  # arbitrary values
    axis = np.array([0.2, 0.2, 0.2])
    origin = np.array([0.4, 0.4, 0.4])
    original = t.rotation_matrix(theta, axis, origin)
    theta, axis, origin = t.rotation_from_matrix(original)
    rebuilt = t.rotation_matrix(theta, axis, origin)
    assert_equal(t.is_same_transform(original, rebuilt), True)
class _ScaleMatrix(object):
    def test_scale_matrix(self):
        """Uniform scaling multiplies the cartesian part of a vector,
        including for a negative factor."""
        vec = np.array([14.1, 15.1, 16.1, 1])
        matrix = self.f(-1.234)
        scaled = np.dot(matrix, vec)
        assert_allclose(scaled[:3], -1.234 * vec[:3])
class TestScaleMatrixNP(_ScaleMatrix):
f = staticmethod(t._py_scale_matrix)
class TestScaleMatrixCy(_ScaleMatrix):
f = staticmethod(t.scale_matrix)
def test_scale_from_matrix():
    """Round-trip scale_matrix <-> scale_from_matrix, both for uniform
    scaling about an origin and scaling along an explicit direction."""
    factor = 7
    origin = np.array([0.2, 0.2, 0.2])  # arbitrary values
    direct = np.array([0.4, 0.4, 0.4])
    # uniform scaling about an origin
    S0 = t.scale_matrix(factor, origin)
    factor, origin, direction = t.scale_from_matrix(S0)
    S1 = t.scale_matrix(factor, origin, direction)
    assert_equal(t.is_same_transform(S0, S1), True)
    # non-uniform scaling along a direction
    S0 = t.scale_matrix(factor, origin, direct)
    factor, origin, direction = t.scale_from_matrix(S0)
    S1 = t.scale_matrix(factor, origin, direction)
    assert_equal(t.is_same_transform(S0, S1), True)
class _ProjectionMatrix(object):
    def test_projection_matrix_1(self):
        # Projecting onto the x=0 plane leaves the y/z/w block untouched.
        P = self.f((0, 0, 0), (1, 0, 0))
        assert_allclose(P[1:, 1:], np.identity(4)[1:, 1:], atol=_ATOL)
    def test_projection_matrix_2(self):
        point = np.array([0.2, 0.2, 0.2])  # arbitrary values
        normal = np.array([0.4, 0.4, 0.4])
        direct = np.array([0.6, 0.6, 0.6])
        persp = np.array([0.8, 0.8, 0.8])
        # The perspective projection factors into the orthogonal
        # projection composed with the pseudo-perspective one.
        P0 = self.f(point, normal)
        # TODO: why isn't this used anymore?
        P1 = self.f(point, normal, direction=direct)
        P2 = self.f(point, normal, perspective=persp)
        P3 = self.f(point, normal, perspective=persp, pseudo=True)
        assert_equal(t.is_same_transform(P2, np.dot(P0, P3)), True)
    def test_projection_matrix_3(self):
        # Oblique projection onto the plane through (3, 0, 0) with normal
        # (1, 1, 0), projecting along the direction (1, 0, 0):
        # y is preserved and x + y == 3 on the target plane.
        P = self.f((3, 0, 0), (1, 1, 0), (1, 0, 0))
        v0 = np.array([14.1, 15.1, 16.1, 1])  # arbitrary values
        v1 = np.dot(P, v0)
        assert_allclose(v1[1], v0[1], atol=_ATOL)
        assert_allclose(v1[0], 3.0 - v1[1], atol=_ATOL)
class TestProjectionMatrixNP(_ProjectionMatrix):
f = staticmethod(t._py_projection_matrix)
class TestProjectionMatrixCy(_ProjectionMatrix):
f = staticmethod(t.projection_matrix)
class TestProjectionFromMatrix(object):
    """Round-trip projection_matrix <-> projection_from_matrix.

    Fixture data lives in class attributes instead of a ``setUp`` method:
    ``setUp`` on a plain (non-``unittest.TestCase``) class is a nose-only
    convention, so under other runners it would never run and every test
    would fail with ``AttributeError``.  The arrays are only read here,
    so sharing them across tests is safe.
    """
    point = np.array([0.2, 0.2, 0.2])  # arbitrary values
    normal = np.array([0.4, 0.4, 0.4])
    direct = np.array([0.6, 0.6, 0.6])
    persp = np.array([0.8, 0.8, 0.8])
    def test_projection_from_matrix_1(self):
        # orthogonal projection
        P0 = t.projection_matrix(self.point, self.normal)
        result = t.projection_from_matrix(P0)
        P1 = t.projection_matrix(*result)
        assert_equal(t.is_same_transform(P0, P1), True)
    def test_projection_from_matrix_2(self):
        # oblique projection along an explicit direction
        P0 = t.projection_matrix(self.point, self.normal, self.direct)
        result = t.projection_from_matrix(P0)
        P1 = t.projection_matrix(*result)
        assert_equal(t.is_same_transform(P0, P1), True)
    def test_projection_from_matrix_3(self):
        # true perspective projection
        P0 = t.projection_matrix(
            self.point, self.normal, perspective=self.persp, pseudo=False)
        result = t.projection_from_matrix(P0, pseudo=False)
        P1 = t.projection_matrix(*result)
        assert_equal(t.is_same_transform(P0, P1), True)
    def test_projection_from_matrix_4(self):
        # pseudo-perspective projection
        P0 = t.projection_matrix(
            self.point, self.normal, perspective=self.persp, pseudo=True)
        result = t.projection_from_matrix(P0, pseudo=True)
        P1 = t.projection_matrix(*result)
        assert_equal(t.is_same_transform(P0, P1), True)
class _ClipMatrix(unittest.TestCase):
    """Tests for clip_matrix; subclasses bind ``f`` to a backend.

    NOTE(review): unlike the other mixins in this file this one derives
    from unittest.TestCase — the ``assertRaises`` context managers below
    need it.
    """
    def test_clip_matrix_1(self):
        # Orthographic frustum: the near-bottom-left corner maps to
        # (-1, -1, -1) and the far-top-right corner to (1, 1, 1).
        frustrum = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])  # arbitrary values
        frustrum[1] += frustrum[0]
        frustrum[3] += frustrum[2]
        frustrum[5] += frustrum[4]
        M = self.f(perspective=False, *frustrum)
        assert_allclose(
            np.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1.0]),
            np.array([-1., -1., -1., 1.]))
        assert_allclose(
            np.dot(M, [frustrum[1], frustrum[3], frustrum[5], 1.0]),
            np.array([1., 1., 1., 1.]))
    def test_clip_matrix_2(self):
        # Perspective frustum: results must be dehomogenized (divide by w)
        # before comparison.
        frustrum = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])  # arbitrary values
        frustrum[1] += frustrum[0]
        frustrum[3] += frustrum[2]
        frustrum[5] += frustrum[4]
        M = self.f(perspective=True, *frustrum)
        v = np.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1.0])
        assert_allclose(v / v[3], np.array([-1., -1., -1., 1.]))
        v = np.dot(M, [frustrum[1], frustrum[3], frustrum[4], 1.0])
        assert_allclose(v / v[3], np.array([1., 1., -1., 1.]))
    def test_clip_matrix_frustrum_left_right_bounds(self):
        '''ValueError should be raised if left > right.'''
        frustrum = np.array([0.4, 0.3, 0.3, 0.7, 0.5, 1.1])
        with self.assertRaises(ValueError):
            self.f(*frustrum)
    def test_clip_matrix_frustrum_bottom_top_bounds(self):
        '''ValueError should be raised if bottom > top.'''
        frustrum = np.array([0.1, 0.3, 0.71, 0.7, 0.5, 1.1])
        with self.assertRaises(ValueError):
            self.f(*frustrum)
    def test_clip_matrix_frustrum_near_far_bounds(self):
        '''ValueError should be raised if near > far.'''
        frustrum = np.array([0.1, 0.3, 0.3, 0.7, 1.5, 1.1])
        with self.assertRaises(ValueError):
            self.f(*frustrum)
class TestClipMatrixNP(_ClipMatrix):
f = staticmethod(t._py_clip_matrix)
class TestClipMatrixCy(_ClipMatrix):
f = staticmethod(t.clip_matrix)
class _ShearMatrix(object):
    def test_shear_matrix(self):
        """A shear transform preserves volume, i.e. det(S) == 1."""
        angle = 0.2 * 4 * np.pi  # arbitrary values
        direction = np.array([0.2, 0.2, 0.2])
        point = np.array([0.3, 0.4, 0.5])
        # normal is guaranteed perpendicular to the shear direction
        normal = np.cross(direction, np.array([0.8, 0.6, 0.4]))
        matrix = self.f(angle, direction, point, normal)
        assert_allclose(1.0, np.linalg.det(matrix), atol=_ATOL)
class TestShearMatrixNP(_ShearMatrix):
f = staticmethod(t._py_shear_matrix)
class TestShearMatrixCy(_ShearMatrix):
f = staticmethod(t.shear_matrix)
def test_shear_from_matrix():
    """Round-trip shear_matrix <-> shear_from_matrix with fixed inputs."""
    # This seems to fail sometimes if the random numbers
    # roll certain values....
    # angle = (random.random() - 0.5) * 4*np.pi
    # direct = np.random.random(3) - 0.5
    # point = np.random.random(3) - 0.5
    # normal = np.cross(direct, np.random.random(3))
    # In this random configuration the test will fail about 0.05% of all times.
    # Then we hit some edge-cases of the algorithm. The edge cases for these
    # values are slightly different for the linalg library used (MKL/LAPACK).
    # So here are some of my random numbers
    angle = 2.8969075413405783  # arbitrary values
    direct = np.array([-0.31117458, -0.41769518, -0.01188556])
    point = np.array([-0.0035982, -0.40997482, 0.42241425])
    normal = np.cross(direct, np.array([0.08122421, 0.4747914, 0.19851859]))
    S0 = t.shear_matrix(angle, direct, point, normal)
    angle, direct, point, normal = t.shear_from_matrix(S0)
    S1 = t.shear_matrix(angle, direct, point, normal)
    assert_equal(t.is_same_transform(S0, S1), True)
class TestDecomposeMatrix(object):
    def test_decompose_matrix_1(self):
        # the translation component round-trips
        T0 = t.translation_matrix((1, 2, 3))
        scale, shear, angles, trans, persp = t.decompose_matrix(T0)
        T1 = t.translation_matrix(trans)
        assert_allclose(T0, T1)
    def test_decompose_matrix_2(self):
        # the uniform scale factor is recovered
        S = t.scale_matrix(0.123)
        scale, shear, angles, trans, persp = t.decompose_matrix(S)
        assert_equal(scale[0], 0.123)
    def test_decompose_matrix_3(self):
        # Euler angles are recovered up to an equivalent rotation
        R0 = t.euler_matrix(1, 2, 3)
        scale, shear, angles, trans, persp = t.decompose_matrix(R0)
        R1 = t.euler_matrix(*angles)
        assert_allclose(R0, R1)
def test_compose_matrix():
    """compose_matrix and decompose_matrix must round-trip."""
    scale = np.array([0.2, 0.2, 0.2])  # arbitrary values
    shear = np.array([0.4, 0.4, 0.4])
    angles = np.array([0.6, 0.6, 0.6]) * 2 * np.pi
    trans = np.array([0.8, 0.8, 0.8])
    persp = np.array([0.9, 0.9, 0.9, 0.9])
    composed = t.compose_matrix(scale, shear, angles, trans, persp)
    recomposed = t.compose_matrix(*t.decompose_matrix(composed))
    assert_equal(t.is_same_transform(composed, recomposed), True)
class _OrthogonalizationMatrix(object):
    def test_orthogonalization_matrix_1(self):
        # A cubic cell (all angles 90 degrees) gives a diagonal matrix.
        O = self.f((10., 10., 10.), (90., 90., 90.))
        assert_allclose(O[:3, :3], np.identity(3, float) * 10, atol=_ATOL)
    def test_orthogonalization_matrix_2(self):
        # Regression value for a triclinic cell.
        O = self.f([9.8, 12.0, 15.5], [87.2, 80.7, 69.7])
        assert_allclose(np.sum(O), 43.063229, atol=_ATOL)
class TestOrthogonalizationMatrixNP(_OrthogonalizationMatrix):
f = staticmethod(t._py_orthogonalization_matrix)
class TestOrthogonalizationMatrixCy(_OrthogonalizationMatrix):
f = staticmethod(t.orthogonalization_matrix)
class _SuperimpositionMatrix(object):
    def test_superimposition_matrix(self):
        # Superimposing a point set onto itself yields the identity.
        v0 = np.sin(np.linspace(0, 0.99, 30)).reshape(3,
                                                      10)  # arbitrary values
        M = self.f(v0, v0)
        assert_allclose(M, np.identity(4), atol=_ATOL)
        # A pure rotation of cartesian points is recovered exactly.
        R = t.random_rotation_matrix(np.array([0.3, 0.4, 0.5]))
        v0 = ((1, 0, 0), (0, 1, 0), (0, 0, 1), (1, 1, 1))
        v1 = np.dot(R, v0)
        M = self.f(v0, v1)
        assert_allclose(v1, np.dot(M, v0), atol=_ATOL)
        # Same with a larger homogeneous point cloud.
        v0 = np.sin(np.linspace(-1, 1, 400)).reshape(4, 100)
        v0[3] = 1.0
        v1 = np.dot(R, v0)
        M = self.f(v0, v1)
        assert_allclose(v1, np.dot(M, v0), atol=_ATOL)
        # Full similarity transform (scale + rotation + translation),
        # with tiny noise added to the source points.
        S = t.scale_matrix(0.45)
        T = t.translation_matrix(np.array([0.2, 0.2, 0.2]) - 0.5)
        M = t.concatenate_matrices(T, R, S)
        v1 = np.dot(M, v0)
        v0[:3] += np.sin(np.linspace(0.0, 1e-9, 300)).reshape(3, -1)
        M = self.f(v0, v1, scaling=True)
        assert_allclose(v1, np.dot(M, v0), atol=_ATOL)
        # usesvd=False exercises the quaternion-based code path.
        M = self.f(v0, v1, scaling=True, usesvd=False)
        assert_allclose(v1, np.dot(M, v0), atol=_ATOL)
        # Points stored as one slice of a higher-dimensional array.
        v = np.empty((4, 100, 3), dtype=np.float64)
        v[:, :, 0] = v0
        M = self.f(v0, v1, scaling=True, usesvd=False)
        assert_allclose(v1, np.dot(M, v[:, :, 0]), atol=_ATOL)
class TestSuperimpositionMatrixNP(_SuperimpositionMatrix):
f = staticmethod(t._py_superimposition_matrix)
class TestSuperimpositionMatrixCy(_SuperimpositionMatrix):
f = staticmethod(t.superimposition_matrix)
class _EulerMatrix(object):
    def test_euler_matrix_1(self):
        """Regression value for the string axis spec 'syxz'."""
        matrix = self.f(1, 2, 3, 'syxz')
        assert_allclose(np.sum(matrix[0]), -1.34786452)
    def test_euler_matrix_2(self):
        """Regression value for an equivalent tuple axis spec."""
        matrix = self.f(1, 2, 3, (0, 1, 0, 1))
        assert_allclose(np.sum(matrix[0]), -0.383436184)
class TestEulerMatrixNP(_EulerMatrix):
f = staticmethod(t._py_euler_matrix)
class TestEulerMatrixCy(_EulerMatrix):
f = staticmethod(t.euler_matrix)
class _EulerFromMatrix(object):
    def test_euler_from_matrix_1(self):
        # round-trip for one axis convention
        R0 = t.euler_matrix(1, 2, 3, 'syxz')
        al, be, ga = self.f(R0, 'syxz')
        R1 = t.euler_matrix(al, be, ga, 'syxz')
        assert_allclose(R0, R1)
    def test_euler_from_matrix_2(self):
        # round-trip for every supported axis convention
        angles = 4.0 * np.pi * np.array([-0.3, -0.3, -0.3])  # arbitrary values
        for axes in t._AXES2TUPLE.keys():
            R0 = t.euler_matrix(axes=axes, *angles)
            R1 = t.euler_matrix(axes=axes, *self.f(R0, axes))
            assert_allclose(R0, R1, err_msg=("{0} failed".format(axes)))
class TestEulerFromMatrixNP(_EulerFromMatrix):
f = staticmethod(t._py_euler_from_matrix)
class TestEulerFromMatrixCy(_EulerFromMatrix):
f = staticmethod(t.euler_from_matrix)
def test_euler_from_quaternion():
    """A quaternion for a 0.123 rad rotation about x yields exactly that
    Euler angle triple."""
    recovered = t.euler_from_quaternion([0.99810947, 0.06146124, 0, 0])
    assert_allclose(recovered, [0.123, 0, 0], atol=_ATOL)
class _QuaternionFromEuler(object):
    def test_quaternion_from_euler(self):
        # Regression value for the 'ryxz' rotating-frame convention.
        q = self.f(1, 2, 3, 'ryxz')
        assert_allclose(
            q, [0.435953, 0.310622, -0.718287, 0.444435], atol=_ATOL)
class TestQuaternionFromEulerNP(_QuaternionFromEuler):
f = staticmethod(t._py_quaternion_from_euler)
class TestQuaternionFromEulerCy(_QuaternionFromEuler):
f = staticmethod(t.quaternion_from_euler)
class _QuaternionAboutAxis(object):
    def test_quaternion_about_axis(self):
        # 0.123 rad about x -> (cos(0.0615), sin(0.0615), 0, 0).
        q = self.f(0.123, (1, 0, 0))
        assert_allclose(q, [0.99810947, 0.06146124, 0, 0], atol=_ATOL)
class TestQuaternionAboutAxisNP(_QuaternionAboutAxis):
f = staticmethod(t._py_quaternion_about_axis)
class TestQuaternionAboutAxisCy(_QuaternionAboutAxis):
f = staticmethod(t.quaternion_about_axis)
class _QuaternionMatrix(object):
    def test_quaternion_matrix_1(self):
        """A small rotation about x matches rotation_matrix."""
        result = self.f([0.99810947, 0.06146124, 0, 0])
        assert_allclose(result, t.rotation_matrix(0.123, (1, 0, 0)), atol=_ATOL)
    def test_quaternion_matrix_2(self):
        """The unit quaternion maps to the identity transform."""
        result = self.f([1, 0, 0, 0])
        assert_allclose(result, t.identity_matrix(), atol=_ATOL)
    def test_quaternion_matrix_3(self):
        """A pure-x quaternion is a half turn about the x axis."""
        result = self.f([0, 1, 0, 0])
        assert_allclose(result, np.diag([1, -1, -1, 1]), atol=_ATOL)
class TestQuaternionMatrixNP(_QuaternionMatrix):
f = staticmethod(t._py_quaternion_matrix)
class TestQuaternionMatrixCy(_QuaternionMatrix):
f = staticmethod(t.quaternion_matrix)
class _QuaternionFromMatrix(object):
    def test_quaternion_from_matrix_1(self):
        # identity matrix -> unit quaternion (isprecise=True path)
        q = self.f(t.identity_matrix(), True)
        assert_allclose(q, [1., 0., 0., 0.], atol=_ATOL)
    def test_quaternion_from_matrix_2(self):
        # A half turn about x; q and -q encode the same rotation, so
        # either sign is acceptable.
        q = self.f(np.diag([1., -1., -1., 1.]))
        check = (np.allclose(
            q, [0, 1, 0, 0], atol=_ATOL) or np.allclose(
                q, [0, -1, 0, 0], atol=_ATOL))
        assert_equal(check, True)
    def test_quaternion_from_matrix_3(self):
        # precise rotation matrix -> exact regression quaternion
        R = t.rotation_matrix(0.123, (1, 2, 3))
        q = self.f(R, True)
        assert_allclose(
            q, [0.9981095, 0.0164262, 0.0328524, 0.0492786], atol=_ATOL)
    def test_quaternion_from_matrix_4(self):
        # non-precise matrix with a negative trace exercises the
        # eigenvector-based branch
        R = [[-0.545, 0.797, 0.260, 0], [0.733, 0.603, -0.313, 0],
             [-0.407, 0.021, -0.913, 0], [0, 0, 0, 1]]
        q = self.f(R)
        assert_allclose(q, [0.19069, 0.43736, 0.87485, -0.083611], atol=_ATOL)
    def test_quaternion_from_matrix_5(self):
        # non-precise matrix with a positive trace
        R = [[0.395, 0.362, 0.843, 0], [-0.626, 0.796, -0.056, 0],
             [-0.677, -0.498, 0.529, 0], [0, 0, 0, 1]]
        q = self.f(R)
        assert_allclose(
            q, [0.82336615, -0.13610694, 0.46344705, -0.29792603], atol=_ATOL)
    def test_quaternion_from_matrix_6(self):
        # round-trip through quaternion_matrix for a random rotation
        R = t.random_rotation_matrix()
        q = self.f(R)
        assert_equal(t.is_same_transform(R, t.quaternion_matrix(q)), True)
class TestQuaternionFromMatrixNP(_QuaternionFromMatrix):
f = staticmethod(t._py_quaternion_from_matrix)
class TestQuaternionFromMatrixCy(_QuaternionFromMatrix):
f = staticmethod(t.quaternion_from_matrix)
class _QuaternionMultiply(object):
    def test_quaternion_multiply(self):
        """Hamilton product of two fixed quaternions."""
        product = self.f([4, 1, -2, 3], [8, -5, 6, 7])
        assert_allclose(product, [28, -44, -14, 48])
class TestQuaternionMultiplyNP(_QuaternionMultiply):
f = staticmethod(t._py_quaternion_multiply)
class TestQuaternionMultiplyCy(_QuaternionMultiply):
f = staticmethod(t.quaternion_multiply)
class _QuaternionConjugate(object):
    def test_quaternion_conjugate(self):
        # Conjugation keeps the scalar part and negates the vector part.
        q0 = t.random_quaternion()
        q1 = self.f(q0)
        check = q1[0] == q0[0] and all(q1[1:] == -q0[1:])
        assert_equal(check, True)
class TestQuaternionConjugateNP(_QuaternionConjugate):
f = staticmethod(t._py_quaternion_conjugate)
class TestQuaternionConjugateCy(_QuaternionConjugate):
f = staticmethod(t.quaternion_conjugate)
class _QuaternionInverse(object):
    def test_quaternion_inverse(self):
        # q * q^-1 must be the identity quaternion (1, 0, 0, 0).
        q0 = t.random_quaternion()
        q1 = self.f(q0)
        assert_allclose(
            t.quaternion_multiply(q0, q1), [1, 0, 0, 0], atol=_ATOL)
class TestQuaternionInverseNP(_QuaternionInverse):
f = staticmethod(t._py_quaternion_inverse)
class TestQuaternionInverseCy(_QuaternionInverse):
f = staticmethod(t.quaternion_inverse)
def test_quaternion_real():
    # The scalar (w) component is the first element.
    assert_allclose(t.quaternion_real([3.0, 0.0, 1.0, 2.0]), 3.0)
def test_quaternion_imag():
    # The vector (x, y, z) components are the last three elements.
    assert_allclose(t.quaternion_imag([3.0, 0.0, 1.0, 2.0]), [0.0, 1.0, 2.0])
class _QuaternionSlerp(object):
    def test_quaternion_slerp(self):
        q0 = t.random_quaternion()
        q1 = t.random_quaternion()
        # the endpoints of the interpolation are the inputs themselves
        q = self.f(q0, q1, 0.0)
        assert_allclose(q, q0, atol=_ATOL)
        q = self.f(q0, q1, 1.0, 1)
        assert_allclose(q, q1, atol=_ATOL)
        # the midpoint bisects the arc: the full angle is twice the
        # half angle (either sign of the dot product may occur because
        # q and -q represent the same rotation)
        q = self.f(q0, q1, 0.5)
        angle = np.arccos(np.dot(q0, q))
        check = (np.allclose(2.0, np.arccos(np.dot(q0, q1)) / angle) or
                 np.allclose(2.0, np.arccos(-np.dot(q0, q1)) / angle))
        assert_equal(check, True)
class TestQuaternionSlerpNP(_QuaternionSlerp):
f = staticmethod(t._py_quaternion_slerp)
class TestQuaternionSlerpCy(_QuaternionSlerp):
f = staticmethod(t.quaternion_slerp)
class _RandomQuaternion(object):
    def test_random_quaternion_1(self):
        # random quaternions are unit quaternions
        q = self.f()
        assert_allclose(1.0, t.vector_norm(q))
    def test_random_quaternion_2(self):
        # an explicit 3-vector seed still yields a flat 4-vector
        q = self.f(np.array([0.2, 0.2, 0.2]))
        assert_equal(len(q.shape), 1)
        assert_equal(q.shape[0] == 4, True)
class TestRandomQuaternionNP(_RandomQuaternion):
f = staticmethod(t._py_random_quaternion)
class TestRandomQuaternionCy(_RandomQuaternion):
f = staticmethod(t.random_quaternion)
class _RandomRotationMatrix(object):
    def test_random_rotation_matrix(self):
        # a rotation matrix is orthogonal: R^T R == I
        R = self.f()
        assert_allclose(np.dot(R.T, R), np.identity(4), atol=_ATOL)
class TestRandomRotationMatrixNP(_RandomRotationMatrix):
f = staticmethod(t._py_random_rotation_matrix)
class TestRandomRotationMatrixCy(_RandomRotationMatrix):
f = staticmethod(t.random_rotation_matrix)
class _InverseMatrix(object):
    def _check_inverse(self, size):
        """Compare self.f against numpy.linalg.inv on a size x size matrix."""
        # Create a known random state to generate numbers from
        # these numbers will then be uncorrelated but deterministic
        rs = np.random.RandomState(1234)
        M0 = rs.randn(size, size)
        M1 = self.f(M0)
        assert_allclose(M1, np.linalg.inv(M0), err_msg=str(size), atol=_ATOL)
    def test_inverse_matrix(self):
        """inverse_matrix agrees with numpy.linalg.inv for several sizes."""
        M0 = t.random_rotation_matrix()
        M1 = self.f(M0.T)
        assert_allclose(M1, np.linalg.inv(M0.T))
        # BUG FIX: this used to `yield self._check_inverse, size`
        # (a nose-style generator test).  A function containing `yield`
        # is a generator, so nothing in this method's body — including
        # the assertions above — ever ran outside of nose.  Call the
        # check directly instead.
        for size in range(1, 7):
            self._check_inverse(size)
class TestInverseMatrixNP(_InverseMatrix):
f = staticmethod(t._py_inverse_matrix)
class TestInverseMatrixCy(_InverseMatrix):
f = staticmethod(t.inverse_matrix)
class _IsSameTransform(object):
    def test_is_same_transform_1(self):
        # a matrix equals itself
        assert_equal(self.f(np.identity(4), np.identity(4)), True)
    def test_is_same_transform_2(self):
        # a random rotation is not the identity
        assert_equal(self.f(t.random_rotation_matrix(), np.identity(4)), False)
class TestIsSameTransformNP(_IsSameTransform):
f = staticmethod(t._py_is_same_transform)
class TestIsSameTransformCy(_IsSameTransform):
f = staticmethod(t.is_same_transform)
class _RandomVector(object):
    def test_random_vector_1(self):
        """Samples lie in the half-open interval [0, 1)."""
        sample = self.f(1000)
        in_range = np.all(sample >= 0.0) and np.all(sample < 1.0)
        assert_equal(in_range, True)
    def test_random_vector_2(self):
        """Two consecutive draws should not repeat any value."""
        first = self.f(10)
        second = self.f(10)
        assert_equal(np.any(first == second), False)
class TestRandomVectorNP(_RandomVector):
f = staticmethod(t._py_random_vector)
class TestRandomVectorCy(_RandomVector):
f = staticmethod(t.random_vector)
class _UnitVector(object):
    def test_unit_vector_1(self):
        # simple 1D normalization
        v0 = np.array([0.2, 0.2, 0.2])
        v1 = self.f(v0)
        assert_allclose(v1, v0 / np.linalg.norm(v0), atol=_ATOL)
    def test_unit_vector_2(self):
        # normalization along the last axis of a 3D array
        v0 = np.sin(np.linspace(0, 10, 5 * 4 * 3)).reshape(5, 4, 3)
        v1 = self.f(v0, axis=-1)
        v2 = v0 / np.expand_dims(np.sqrt(np.sum(v0 * v0, axis=2)), 2)
        assert_allclose(v1, v2, atol=_ATOL)
    def test_unit_vector_3(self):
        # normalization along a middle axis
        v0 = np.sin(np.linspace(0, 10, 5 * 4 * 3)).reshape(5, 4, 3)
        v1 = self.f(v0, axis=1)
        v2 = v0 / np.expand_dims(np.sqrt(np.sum(v0 * v0, axis=1)), 1)
        assert_allclose(v1, v2, atol=_ATOL)
    def test_unit_vector_4(self):
        # writing into a preallocated output array
        v0 = np.sin(np.linspace(0, 10, 5 * 4 * 3)).reshape(5, 4, 3)
        v1 = np.empty((5, 4, 3), dtype=np.float64)
        v2 = v0 / np.expand_dims(np.sqrt(np.sum(v0 * v0, axis=1)), 1)
        self.f(v0, axis=1, out=v1)
        assert_allclose(v1, v2, atol=_ATOL)
    def test_unit_vector_5(self):
        # edge case: empty input stays empty
        assert_equal(list(self.f([])), [])
    def test_unit_vector_6(self):
        # edge case: a single component normalizes to 1
        assert_equal(list(self.f([1.0])), [1.0])
class TestUnitVectorNP(_UnitVector):
f = staticmethod(t._py_unit_vector)
class TestUnitVectorCy(_UnitVector):
f = staticmethod(t.unit_vector)
class _VectorNorm(object):
    def test_vector_norm_1(self):
        # simple 1D norm matches numpy.linalg.norm
        v = np.array([0.2, 0.2, 0.2])
        n = self.f(v)
        assert_allclose(n, np.linalg.norm(v), atol=_ATOL)
    def test_vector_norm_2(self):
        # norm along the last axis of a 3D array
        v = np.sin(np.linspace(0, 10, 6 * 5 * 3)).reshape(6, 5, 3)
        n = self.f(v, axis=-1)
        assert_allclose(n, np.sqrt(np.sum(v * v, axis=2)), atol=_ATOL)
    def test_vector_norm_3(self):
        # norm along a middle axis
        v = np.sin(np.linspace(0, 10, 6 * 5 * 3)).reshape(6, 5, 3)
        n = self.f(v, axis=1)
        assert_allclose(n, np.sqrt(np.sum(v * v, axis=1)), atol=_ATOL)
    def test_vector_norm_4(self):
        # writing into a preallocated output array
        v = np.sin(np.linspace(0, 10, 5 * 4 * 3)).reshape(5, 4, 3)
        n = np.empty((5, 3), dtype=np.float64)
        self.f(v, axis=1, out=n)
        assert_allclose(n, np.sqrt(np.sum(v * v, axis=1)), atol=_ATOL)
    def test_vector_norm_5(self):
        # edge case: empty input has norm 0
        assert_equal(self.f([]), 0.0)
    def test_vector_norm_6(self):
        # edge case: a single component is its own norm
        assert_equal(self.f([1.0]), 1.0)
class TestVectorNormNP(_VectorNorm):
f = staticmethod(t._py_vector_norm)
class TestVectorNormCy(_VectorNorm):
f = staticmethod(t.vector_norm)
class TestArcBall(object):
    def test_arcball_1(self):
        ball = t.Arcball()
        # re-create with an explicit initial orientation matrix
        ball = t.Arcball(initial=np.identity(4))
        ball.place([320, 320], 320)
        ball.down([500, 250])
        ball.drag([475, 275])
        R = ball.matrix()
        # regression value for this exact drag
        assert_allclose(np.sum(R), 3.90583455, atol=_ATOL)
    def test_arcball_2(self):
        # constrained drag along fixed axes, initial state as a quaternion
        ball = t.Arcball(initial=[1, 0, 0, 0])
        ball.place([320, 320], 320)
        ball.setaxes([1, 1, 0], [-1, 1, 0])
        ball.setconstrain(True)
        ball.down([400, 200])
        ball.drag([200, 400])
        R = ball.matrix()
        assert_allclose(np.sum(R), 0.2055924)
def test_transformations_old_module():
    """test that MDAnalysis.core.transformations is still importable
    (deprecated for 1.0)
    """
    # NameError is caught too because the deprecated stub may fail while
    # re-exporting names rather than at import time.
    try:
        import MDAnalysis.core.transformations
    except (ImportError, NameError):
        raise AssertionError("MDAnalysis.core.transformations not importable. "
                             "Only remove for 1.0")
# NOTE: removed this test with release 1.0 when we remove the stub
def test_rotaxis_equal_vectors():
    """rotaxis of a vector with itself falls back to the x unit vector."""
    vec = np.arange(3)
    axis = t.rotaxis(vec, vec)
    assert_array_equal(axis, [1, 0, 0])
def test_rotaxis_different_vectors():
    # use random coordinate system
    e = np.eye(3)
    r = np.array([[0.69884766, 0.59804425, -0.39237102],
                  [0.18784672, 0.37585347, 0.90744023],
                  [0.69016342, -0.7078681, 0.15032367]])
    re = np.dot(r, e)
    # For every ordered pair of basis vectors the rotation axis must be
    # (anti)parallel to the remaining third basis vector.
    for i, j, l in permutations(range(3)):
        x = t.rotaxis(re[i], re[j])
        # use abs since direction doesn't matter
        assert_almost_equal(np.abs(np.dot(x, re[l])), 1)
| kain88-de/mdanalysis | testsuite/MDAnalysisTests/test_transformations.py | Python | gpl-2.0 | 29,282 | [
"MDAnalysis"
] | 12df592643f495306bb1a13e36e7a8a144e7050a6a53af84d6a72a24f1fbbc3b |
"""
Pyperclip
A cross-platform clipboard module for Python,
with copy & paste functions for plain text.
By Al Sweigart al@inventwithpython.com
BSD License
Usage:
import pyperclip
pyperclip.copy('The text to be copied to the clipboard.')
spam = pyperclip.paste()
if not pyperclip.is_available():
print("Copy functionality unavailable!")
On Windows, no additional modules are needed.
On Mac, the pyobjc module is used, falling back to the pbcopy and pbpaste cli
commands. (These commands should come with OS X.).
On Linux, install xclip or xsel via package manager. For example, in Debian:
sudo apt-get install xclip
sudo apt-get install xsel
Otherwise on Linux, you will need the PyQt5 modules installed.
This module does not work with PyGObject yet.
Cygwin is currently not supported.
Security Note: This module runs programs with these names:
- which
- where
- pbcopy
- pbpaste
- xclip
- xsel
- klipper
- qdbus
A malicious user could rename or add programs with these names, tricking
Pyperclip into running them with whatever permissions the Python process has.
"""
__version__ = "1.7.0"
import contextlib
import ctypes
from ctypes import c_size_t, c_wchar, c_wchar_p, get_errno, sizeof
import os
import platform
import subprocess
import time
import warnings
# `import PyQt4` sys.exit()s if DISPLAY is not in the environment.
# Thus, we need to detect the presence of $DISPLAY manually
# and not load PyQt4 if it is absent.
HAS_DISPLAY = os.getenv("DISPLAY", False)
EXCEPT_MSG = """
Pyperclip could not find a copy/paste mechanism for your system.
For more information, please visit
https://pyperclip.readthedocs.io/en/latest/introduction.html#not-implemented-error
"""
ENCODING = "utf-8"
# The "which" unix command finds where a command is.
if platform.system() == "Windows":
WHICH_CMD = "where"
else:
WHICH_CMD = "which"
def _executable_exists(name):
    """Return True if ``name`` resolves to a runnable command on PATH.

    Uses the platform's lookup command (``where`` on Windows, ``which``
    elsewhere) and treats a zero exit status as success; all output is
    discarded.
    """
    status = subprocess.call(
        [WHICH_CMD, name], stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    return status == 0
# Exceptions
class PyperclipException(RuntimeError):
    """Base exception for all errors raised by pyperclip."""
    pass
class PyperclipWindowsException(PyperclipException):
    """Windows-specific error that appends the last Win32 error.

    NOTE(review): ctypes.WinError() only exists on Windows, so this class
    must not be instantiated on other platforms.
    """
    def __init__(self, message):
        message += f" ({ctypes.WinError()})"
        super().__init__(message)
def _stringifyText(text) -> str:
acceptedTypes = (str, int, float, bool)
if not isinstance(text, acceptedTypes):
raise PyperclipException(
f"only str, int, float, and bool values "
f"can be copied to the clipboard, not {type(text).__name__}"
)
return str(text)
def init_osx_pbcopy_clipboard():
    """Return (copy, paste) functions backed by the macOS pbcopy/pbpaste
    command-line tools."""
    def copy_osx_pbcopy(text):
        text = _stringifyText(text)  # Converts non-str values to str.
        p = subprocess.Popen(["pbcopy", "w"], stdin=subprocess.PIPE, close_fds=True)
        p.communicate(input=text.encode(ENCODING))
    def paste_osx_pbcopy():
        p = subprocess.Popen(["pbpaste", "r"], stdout=subprocess.PIPE, close_fds=True)
        stdout, stderr = p.communicate()
        return stdout.decode(ENCODING)
    return copy_osx_pbcopy, paste_osx_pbcopy
def init_osx_pyobjc_clipboard():
    """Return (copy, paste) functions backed by the PyObjC AppKit bindings.

    NOTE(review): ``AppKit`` and ``Foundation`` are not imported in this
    chunk — presumably the caller imports them before selecting this
    backend; verify against the module's backend-detection code.
    """
    def copy_osx_pyobjc(text):
        """Copy string argument to clipboard"""
        text = _stringifyText(text)  # Converts non-str values to str.
        newStr = Foundation.NSString.stringWithString_(text).nsstring()
        newData = newStr.dataUsingEncoding_(Foundation.NSUTF8StringEncoding)
        board = AppKit.NSPasteboard.generalPasteboard()
        board.declareTypes_owner_([AppKit.NSStringPboardType], None)
        board.setData_forType_(newData, AppKit.NSStringPboardType)
    def paste_osx_pyobjc():
        """Returns contents of clipboard"""
        board = AppKit.NSPasteboard.generalPasteboard()
        content = board.stringForType_(AppKit.NSStringPboardType)
        return content
    return copy_osx_pyobjc, paste_osx_pyobjc
def init_qt_clipboard():
    """Return (copy, paste) functions backed by a Qt QApplication clipboard."""
    global QApplication
    # $DISPLAY should exist
    # Try to import from qtpy, but if that fails try PyQt5 then PyQt4
    try:
        from qtpy.QtWidgets import QApplication
    except ImportError:
        try:
            from PyQt5.QtWidgets import QApplication
        except ImportError:
            from PyQt4.QtGui import QApplication
    # Reuse an existing application object if the host program made one.
    app = QApplication.instance()
    if app is None:
        app = QApplication([])
    def copy_qt(text):
        text = _stringifyText(text)  # Converts non-str values to str.
        cb = app.clipboard()
        cb.setText(text)
    def paste_qt() -> str:
        cb = app.clipboard()
        return str(cb.text())
    return copy_qt, paste_qt
def init_xclip_clipboard():
    """Return (copy, paste) functions backed by the xclip command.

    Both functions accept ``primary=True`` to target the X11 PRIMARY
    selection instead of the regular CLIPBOARD selection.
    """
    DEFAULT_SELECTION = "c"
    PRIMARY_SELECTION = "p"
    def copy_xclip(text, primary=False):
        text = _stringifyText(text)  # Converts non-str values to str.
        selection = DEFAULT_SELECTION
        if primary:
            selection = PRIMARY_SELECTION
        p = subprocess.Popen(
            ["xclip", "-selection", selection], stdin=subprocess.PIPE, close_fds=True
        )
        p.communicate(input=text.encode(ENCODING))
    def paste_xclip(primary=False):
        selection = DEFAULT_SELECTION
        if primary:
            selection = PRIMARY_SELECTION
        p = subprocess.Popen(
            ["xclip", "-selection", selection, "-o"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
        )
        stdout, stderr = p.communicate()
        # Intentionally ignore extraneous output on stderr when clipboard is empty
        return stdout.decode(ENCODING)
    return copy_xclip, paste_xclip
def init_xsel_clipboard():
    """Return (copy, paste) functions backed by the xsel command.

    Both functions accept ``primary=True`` to target the X11 PRIMARY
    selection (``-p``) instead of the CLIPBOARD selection (``-b``).
    """
    DEFAULT_SELECTION = "-b"
    PRIMARY_SELECTION = "-p"
    def copy_xsel(text, primary=False):
        text = _stringifyText(text)  # Converts non-str values to str.
        selection_flag = DEFAULT_SELECTION
        if primary:
            selection_flag = PRIMARY_SELECTION
        p = subprocess.Popen(
            ["xsel", selection_flag, "-i"], stdin=subprocess.PIPE, close_fds=True
        )
        p.communicate(input=text.encode(ENCODING))
    def paste_xsel(primary=False):
        selection_flag = DEFAULT_SELECTION
        if primary:
            selection_flag = PRIMARY_SELECTION
        p = subprocess.Popen(
            ["xsel", selection_flag, "-o"], stdout=subprocess.PIPE, close_fds=True
        )
        stdout, stderr = p.communicate()
        return stdout.decode(ENCODING)
    return copy_xsel, paste_xsel
def init_klipper_clipboard():
    """Return (copy, paste) functions backed by KDE's klipper via qdbus."""
    def copy_klipper(text):
        text = _stringifyText(text)  # Converts non-str values to str.
        p = subprocess.Popen(
            [
                "qdbus",
                "org.kde.klipper",
                "/klipper",
                "setClipboardContents",
                text.encode(ENCODING),
            ],
            stdin=subprocess.PIPE,
            close_fds=True,
        )
        p.communicate(input=None)
    def paste_klipper(): 
        p = subprocess.Popen(
            ["qdbus", "org.kde.klipper", "/klipper", "getClipboardContents"],
            stdout=subprocess.PIPE,
            close_fds=True,
        )
        stdout, stderr = p.communicate()
        # Workaround for https://bugs.kde.org/show_bug.cgi?id=342874
        # TODO: https://github.com/asweigart/pyperclip/issues/43
        clipboardContents = stdout.decode(ENCODING)
        # even if blank, Klipper will append a newline at the end
        assert len(clipboardContents) > 0
        # make sure that newline is there
        assert clipboardContents.endswith("\n")
        if clipboardContents.endswith("\n"):
            clipboardContents = clipboardContents[:-1]
        return clipboardContents
    return copy_klipper, paste_klipper
def init_dev_clipboard_clipboard():
    """Build (copy, paste) functions backed by Cygwin's /dev/clipboard device."""

    def copy_dev_clipboard(text):
        """Write *text* to /dev/clipboard, warning about known Cygwin limits."""
        text = _stringifyText(text)  # Converts non-str values to str.
        if text == "":
            warnings.warn(
                "Pyperclip cannot copy a blank string to the clipboard on Cygwin."
                "This is effectively a no-op."
            )
        if "\r" in text:
            warnings.warn("Pyperclip cannot handle \\r characters on Cygwin.")
        with open("/dev/clipboard", "wt") as fo:
            fo.write(text)

    def paste_dev_clipboard() -> str:
        """Return the current contents of /dev/clipboard."""
        with open("/dev/clipboard", "rt") as fo:
            return fo.read()

    return copy_dev_clipboard, paste_dev_clipboard
def init_no_clipboard():
    """Return a (copy, paste) pair of falsy callables that always raise."""

    class ClipboardUnavailable:
        """Sentinel: calling it raises PyperclipException; truth-tests False."""

        def __call__(self, *args, **kwargs):
            raise PyperclipException(EXCEPT_MSG)

        def __bool__(self) -> bool:
            return False

    # Two distinct instances, mirroring the real (copy, paste) pair.
    copy_stub = ClipboardUnavailable()
    paste_stub = ClipboardUnavailable()
    return copy_stub, paste_stub
# Windows-related clipboard functions:
class CheckedCall:
    """Wrapper around a ctypes function pointer that raises
    PyperclipWindowsException when the call returns a falsy value while the
    thread's errno (set by ctypes' use_errno machinery) is non-zero.
    """

    def __init__(self, f):
        # Bypass our own __setattr__ below so `f` is stored on THIS instance
        # instead of being forwarded to the wrapped function.
        super().__setattr__("f", f)

    def __call__(self, *args):
        ret = self.f(*args)
        # A falsy return alone is not an error; only raise when errno was set.
        if not ret and get_errno():
            raise PyperclipWindowsException("Error calling " + self.f.__name__)
        return ret

    def __setattr__(self, key, value):
        # Forward attribute assignments (argtypes, restype, ...) to the
        # underlying ctypes function object.
        setattr(self.f, key, value)
def init_windows_clipboard():
    """Return (copy, paste) functions backed by the Win32 clipboard API.

    Uses ctypes against user32/kernel32/msvcrt. Each API handle is wrapped
    in CheckedCall so a falsy return with errno set raises
    PyperclipWindowsException rather than failing silently.
    """
    global HGLOBAL, LPVOID, DWORD, LPCSTR, INT
    global HWND, HINSTANCE, HMENU, BOOL, UINT, HANDLE
    from ctypes.wintypes import (
        HGLOBAL,
        LPVOID,
        DWORD,
        LPCSTR,
        INT,
        HWND,
        HINSTANCE,
        HMENU,
        BOOL,
        UINT,
        HANDLE,
    )

    windll = ctypes.windll
    msvcrt = ctypes.CDLL("msvcrt")

    safeCreateWindowExA = CheckedCall(windll.user32.CreateWindowExA)
    safeCreateWindowExA.argtypes = [
        DWORD,
        LPCSTR,
        LPCSTR,
        DWORD,
        INT,
        INT,
        INT,
        INT,
        HWND,
        HMENU,
        HINSTANCE,
        LPVOID,
    ]
    safeCreateWindowExA.restype = HWND

    safeDestroyWindow = CheckedCall(windll.user32.DestroyWindow)
    safeDestroyWindow.argtypes = [HWND]
    safeDestroyWindow.restype = BOOL

    # OpenClipboard is deliberately NOT CheckedCall-wrapped: failure is
    # expected when another app holds the clipboard, and is retried below.
    OpenClipboard = windll.user32.OpenClipboard
    OpenClipboard.argtypes = [HWND]
    OpenClipboard.restype = BOOL

    safeCloseClipboard = CheckedCall(windll.user32.CloseClipboard)
    safeCloseClipboard.argtypes = []
    safeCloseClipboard.restype = BOOL

    safeEmptyClipboard = CheckedCall(windll.user32.EmptyClipboard)
    safeEmptyClipboard.argtypes = []
    safeEmptyClipboard.restype = BOOL

    safeGetClipboardData = CheckedCall(windll.user32.GetClipboardData)
    safeGetClipboardData.argtypes = [UINT]
    safeGetClipboardData.restype = HANDLE

    safeSetClipboardData = CheckedCall(windll.user32.SetClipboardData)
    safeSetClipboardData.argtypes = [UINT, HANDLE]
    safeSetClipboardData.restype = HANDLE

    safeGlobalAlloc = CheckedCall(windll.kernel32.GlobalAlloc)
    safeGlobalAlloc.argtypes = [UINT, c_size_t]
    safeGlobalAlloc.restype = HGLOBAL

    safeGlobalLock = CheckedCall(windll.kernel32.GlobalLock)
    safeGlobalLock.argtypes = [HGLOBAL]
    safeGlobalLock.restype = LPVOID

    safeGlobalUnlock = CheckedCall(windll.kernel32.GlobalUnlock)
    safeGlobalUnlock.argtypes = [HGLOBAL]
    safeGlobalUnlock.restype = BOOL

    # wcslen gives the character count of the text to size the allocation.
    wcslen = CheckedCall(msvcrt.wcslen)
    wcslen.argtypes = [c_wchar_p]
    wcslen.restype = UINT

    GMEM_MOVEABLE = 0x0002
    CF_UNICODETEXT = 13

    @contextlib.contextmanager
    def window():
        """
        Context that provides a valid Windows hwnd.
        """
        # we really just need the hwnd, so setting "STATIC"
        # as predefined lpClass is just fine.
        hwnd = safeCreateWindowExA(
            0, b"STATIC", None, 0, 0, 0, 0, 0, None, None, None, None
        )
        try:
            yield hwnd
        finally:
            safeDestroyWindow(hwnd)

    @contextlib.contextmanager
    def clipboard(hwnd):
        """
        Context manager that opens the clipboard and prevents
        other applications from modifying the clipboard content.
        """
        # We may not get the clipboard handle immediately because
        # some other application is accessing it (?)
        # We try for at least 500ms to get the clipboard.
        t = time.time() + 0.5
        success = False
        while time.time() < t:
            success = OpenClipboard(hwnd)
            if success:
                break
            time.sleep(0.01)
        if not success:
            raise PyperclipWindowsException("Error calling OpenClipboard")
        try:
            yield
        finally:
            safeCloseClipboard()

    def copy_windows(text):
        # This function is heavily based on
        # http://msdn.com/ms649016#_win32_Copying_Information_to_the_Clipboard
        text = _stringifyText(text)  # Converts non-str values to str.
        with window() as hwnd:
            # http://msdn.com/ms649048
            # If an application calls OpenClipboard with hwnd set to NULL,
            # EmptyClipboard sets the clipboard owner to NULL;
            # this causes SetClipboardData to fail.
            # => We need a valid hwnd to copy something.
            with clipboard(hwnd):
                safeEmptyClipboard()
                if text:
                    # http://msdn.com/ms649051
                    # If the hMem parameter identifies a memory object,
                    # the object must have been allocated using the
                    # function with the GMEM_MOVEABLE flag.
                    count = wcslen(text) + 1
                    handle = safeGlobalAlloc(GMEM_MOVEABLE, count * sizeof(c_wchar))
                    locked_handle = safeGlobalLock(handle)
                    ctypes.memmove(
                        c_wchar_p(locked_handle),
                        c_wchar_p(text),
                        count * sizeof(c_wchar),
                    )
                    safeGlobalUnlock(handle)
                    # After SetClipboardData the system owns the handle.
                    safeSetClipboardData(CF_UNICODETEXT, handle)

    def paste_windows():
        with clipboard(None):
            handle = safeGetClipboardData(CF_UNICODETEXT)
            if not handle:
                # GetClipboardData may return NULL with errno == NO_ERROR
                # if the clipboard is empty.
                # (Also, it may return a handle to an empty buffer,
                # but technically that's not empty)
                return ""
            return c_wchar_p(handle).value

    return copy_windows, paste_windows
def init_wsl_clipboard():
    """Build (copy, paste) functions for WSL using clip.exe / powershell.exe."""

    def copy_wsl(text):
        """Pipe *text* into Windows' clip.exe."""
        text = _stringifyText(text)  # Converts non-str values to str.
        proc = subprocess.Popen(["clip.exe"], stdin=subprocess.PIPE, close_fds=True)
        proc.communicate(input=text.encode(ENCODING))

    def paste_wsl():
        """Read the Windows clipboard through PowerShell's Get-Clipboard."""
        proc = subprocess.Popen(
            ["powershell.exe", "-command", "Get-Clipboard"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
        )
        out, _err = proc.communicate()
        # WSL appends "\r\n" to the contents.
        return out[:-2].decode(ENCODING)

    return copy_wsl, paste_wsl
# Automatic detection of clipboard mechanisms
# and importing is done in determine_clipboard():
def determine_clipboard():
    """
    Determine the OS/platform and set the copy() and paste() functions
    accordingly.

    Returns a (copy, paste) pair from one of the init_*_clipboard()
    factories, falling back to init_no_clipboard() when no mechanism works.
    """
    global Foundation, AppKit, qtpy, PyQt4, PyQt5

    # Setup for the CYGWIN platform:
    if (
        "cygwin" in platform.system().lower()
    ):  # Cygwin has a variety of values returned by platform.system(),
        # such as 'CYGWIN_NT-6.1'
        # FIXME: pyperclip currently does not support Cygwin,
        # see https://github.com/asweigart/pyperclip/issues/55
        if os.path.exists("/dev/clipboard"):
            warnings.warn(
                "Pyperclip's support for Cygwin is not perfect,"
                "see https://github.com/asweigart/pyperclip/issues/55"
            )
            return init_dev_clipboard_clipboard()
    # Setup for the WINDOWS platform:
    elif os.name == "nt" or platform.system() == "Windows":
        return init_windows_clipboard()

    if platform.system() == "Linux":
        # BUGFIX: /proc/version is absent when procfs is not mounted
        # (minimal containers, chroots); the unguarded open() used to raise
        # FileNotFoundError and break clipboard detection entirely.
        try:
            with open("/proc/version", "r") as f:
                if "Microsoft" in f.read():
                    return init_wsl_clipboard()
        except OSError:
            pass  # not WSL; fall through to regular Linux detection below

    # Setup for the MAC OS X platform:
    if os.name == "mac" or platform.system() == "Darwin":
        try:
            import Foundation  # check if pyobjc is installed
            import AppKit
        except ImportError:
            return init_osx_pbcopy_clipboard()
        else:
            return init_osx_pyobjc_clipboard()

    # Setup for the LINUX platform:
    if HAS_DISPLAY:
        if _executable_exists("xsel"):
            return init_xsel_clipboard()
        if _executable_exists("xclip"):
            return init_xclip_clipboard()
        if _executable_exists("klipper") and _executable_exists("qdbus"):
            return init_klipper_clipboard()

        try:
            # qtpy is a small abstraction layer that lets you write applications
            # using a single api call to either PyQt or PySide.
            # https://pypi.python.org/project/QtPy
            import qtpy  # check if qtpy is installed
        except ImportError:
            # If qtpy isn't installed, fall back on importing PyQt4.
            try:
                import PyQt5  # check if PyQt5 is installed
            except ImportError:
                try:
                    import PyQt4  # check if PyQt4 is installed
                except ImportError:
                    pass  # We want to fail fast for all non-ImportError exceptions.
                else:
                    return init_qt_clipboard()
            else:
                return init_qt_clipboard()
        else:
            return init_qt_clipboard()

    return init_no_clipboard()
def set_clipboard(clipboard):
    """
    Explicitly sets the clipboard mechanism. The "clipboard mechanism" is how
    the copy() and paste() functions interact with the operating system to
    implement the copy/paste feature. The clipboard parameter must be one of:
        - pbcopy
        - pyobjc (default on Mac OS X)
        - qt
        - xclip
        - xsel
        - klipper
        - windows (default on Windows)
        - no (this is what is set when no clipboard mechanism can be found)

    Raises ValueError for any other mechanism name.
    """
    global copy, paste

    # Maps each accepted name to the factory returning its (copy, paste) pair.
    clipboard_types = {
        "pbcopy": init_osx_pbcopy_clipboard,
        "pyobjc": init_osx_pyobjc_clipboard,
        "qt": init_qt_clipboard,  # TODO - split this into 'qtpy', 'pyqt4', and 'pyqt5'
        "xclip": init_xclip_clipboard,
        "xsel": init_xsel_clipboard,
        "klipper": init_klipper_clipboard,
        "windows": init_windows_clipboard,
        "no": init_no_clipboard,
    }

    if clipboard not in clipboard_types:
        allowed_clipboard_types = [repr(_) for _ in clipboard_types.keys()]
        raise ValueError(
            f"Argument must be one of {', '.join(allowed_clipboard_types)}"
        )

    # Sets pyperclip's copy() and paste() functions:
    copy, paste = clipboard_types[clipboard]()
def lazy_load_stub_copy(text):
    """
    Lazy-loading stand-in for copy().

    On first use it resolves the real clipboard mechanism through
    determine_clipboard(), rebinds the module-level copy/paste pair to the
    real implementations, and forwards this call to the real copy().

    This lets users import pyperclip without triggering automatic detection
    (which might pull in a heavyweight backend such as PyQt4); calling
    set_clipboard() first picks a mechanism explicitly, otherwise the first
    copy()/paste() call falls back on whatever determine_clipboard() chooses.
    """
    global copy, paste
    copy, paste = determine_clipboard()
    return copy(text)
def lazy_load_stub_paste():
    """
    Lazy-loading stand-in for paste().

    On first use it resolves the real clipboard mechanism through
    determine_clipboard(), rebinds the module-level copy/paste pair to the
    real implementations, and forwards this call to the real paste().

    This lets users import pyperclip without triggering automatic detection
    (which might pull in a heavyweight backend such as PyQt4); calling
    set_clipboard() first picks a mechanism explicitly, otherwise the first
    copy()/paste() call falls back on whatever determine_clipboard() chooses.
    """
    global copy, paste
    copy, paste = determine_clipboard()
    return paste()
def is_available() -> bool:
    """Return True once copy()/paste() point at real clipboard functions."""
    return not (copy == lazy_load_stub_copy or paste == lazy_load_stub_paste)
# Initially, copy() and paste() are set to lazy loading wrappers which will
# set `copy` and `paste` to real functions the first time they're used, unless
# set_clipboard() or determine_clipboard() is called first.
copy, paste = lazy_load_stub_copy, lazy_load_stub_paste
__all__ = ["copy", "paste", "set_clipboard", "determine_clipboard"]
# pandas aliases
# NOTE(review): these stay bound to the lazy stubs even after the stubs
# rebind the module-level copy/paste, so every call through clipboard_get/
# clipboard_set re-runs determine_clipboard() — confirm this is intended.
clipboard_get = paste
clipboard_set = copy
| TomAugspurger/pandas | pandas/io/clipboard/__init__.py | Python | bsd-3-clause | 21,554 | [
"VisIt"
] | e9125b5e8180640f9d9686967bfda57419984e24c079010ea4575208fbdc0558 |
#!/usr/bin/env python3
from __future__ import division
import sys
import csv
import random
# import required modules
import numpy as np
from scipy import signal
import pylab as plt
import math
# ------------------------------------------------------------
# Some elementary functions
# ------------------------------------------------------------
def filter_nan(s, o):
    """Drop every index where either series holds NaN.

    Returns the cleaned (simulated, observed) pair as 1-D arrays.
    """
    paired = np.transpose(np.array([s, o]))
    paired = paired[~np.isnan(paired).any(1)]
    return paired[:, 0], paired[:, 1]
def pc_bias(s, o):
    """Percent bias of simulated s versus observed o (NaN pairs removed)."""
    s, o = filter_nan(s, o)
    return 100.0 * np.sum(s - o) / np.sum(o)
def apb(s, o):
    """Absolute percent bias of s versus o (NaN pairs removed)."""
    s, o = filter_nan(s, o)
    return 100.0 * np.sum(np.abs(s - o)) / np.sum(o)
def rmse(s, o):
    """Root mean squared error between s and o (NaN pairs removed)."""
    s, o = filter_nan(s, o)
    return np.sqrt(np.mean(np.square(s - o)))
def mae(s, o):
    """Mean absolute error between s and o (NaN pairs removed)."""
    s, o = filter_nan(s, o)
    return np.mean(np.abs(s - o))
def bias(s, o):
    """Mean error (simulated minus observed), NaN pairs removed."""
    s, o = filter_nan(s, o)
    return np.mean(s - o)
def NS(s, o):
    """Nash-Sutcliffe efficiency of s against o (NaN pairs removed)."""
    s, o = filter_nan(s, o)
    residual = sum((s - o) ** 2)
    variance = sum((o - np.mean(o)) ** 2)
    return 1 - residual / variance
def correlation(s, o):
    """Pearson correlation of observed and simulated (NaN pairs removed)."""
    s, o = filter_nan(s, o)
    return np.corrcoef(o, s)[0, 1]
def monoton_i(o, f=0.0):
    """Flag monotone-increasing steps; f is a noise reducing factor.

    Returns an array with len(o)-1 entries:
    0 : non monoton ; 1 : monoton
    """
    flags = np.ones(len(o) - 1)
    for idx, (cur, nxt) in enumerate(zip(o, o[1:])):
        if (cur - nxt) > f:
            flags[idx] = 0
    return flags
def monoton_d(o, f=0.0):
    """Flag monotone-decreasing steps; f is a noise reducing factor.

    Returns an array with len(o)-1 entries:
    0 : non monoton ; 1 : monoton
    """
    flags = np.ones(len(o) - 1)
    for idx, (cur, nxt) in enumerate(zip(o, o[1:])):
        if (nxt - cur) > f:
            flags[idx] = 0
    return flags
# ------------------------------------------------------------
# Bootstrap as a method to estimate the mean and its
# coinfidence; without Gaussian distribution
# ------------------------------------------------------------
def resample(S, size=None):
    """Draw a bootstrap sample (with replacement) from S.

    A falsy size (None or 0) falls back to len(S).
    """
    count = size or len(S)
    return [random.choice(S) for _ in range(count)]
def bootstrap(x, coinfidence=0.95, nsample=100):
    """Bootstrap errors of the mean of list x.

    Returns (lower_tail_mean, mean(x), upper_tail_mean); resamples of at
    least 100 elements are used, nsample times.
    """
    ns = max(len(x), 100)
    means = sorted(np.mean(resample(x, ns)) for _ in range(nsample))
    left_tail = int(((1 - 0 - coinfidence) / 2) * nsample)
    right_tail = nsample - 1 - left_tail
    return means[left_tail], np.mean(x), means[right_tail]
# ------------------------------------------------------------
# Functions written by Rainer Bruggemann and supplemented using
# http://www.pisces-conservation.com/sdrhelp/index.html
# ------------------------------------------------------------
def pt_pr(o):
    """Return (total abundance, relative abundances) for population o.

    A zero total yields an all-zero relative vector.
    """
    total = float(sum(o))
    values = np.array(o)
    if total == 0:
        return total, np.zeros(len(o))
    return total, values / total
def simpson(o):
    """Simpson index (inverse form 1/sum(p_i^2)); NaN when degenerate."""
    _, rel = pt_pr(o)
    concentration = float(sum(rel * rel))
    if concentration > 0:
        return 1.0 / concentration  # 1.0-s see simvar
    return np.nan
def simvar(o):
    """Simpson variant (see Salomon paper): 1 - sum(p_i^2).

    o: population of algae, normalized (see pt_pr)
    """
    _, rel = pt_pr(o)
    return 1.0 - sum(rel * rel)
def richness(o, limit):
    """Count species whose relative abundance exceeds `limit`.

    o: population of algae, normalized (see pt_pr)
    limit: if a relative abundance is <= limit that species is ignored
    """
    _, rel = pt_pr(o)
    return sum(1 for share in rel if share > limit)
def shannon(o):
    """Shannon index of population o (normalized via pt_pr)."""
    _, rel = pt_pr(o)
    positive = rel[rel > 0]
    return sum(-positive * np.log(positive))
def shannons(o):
    """Shannon index of o normalized by log of the species count."""
    _, rel = pt_pr(o)
    positive = rel[rel > 0]
    return sum(-positive * np.log(positive)) / np.log(len(o))
def boltzmann(o):
    """Normalized Boltzmann entropy of a list of unsigned int values.

    W = N! / PI(Ni!) with N = length of the list and Ni = multiplicity of
    each distinct value; log(W)/log(N!) is returned for normalization.
    Lists longer than 100 are rejected (returns -1).
    """
    if len(o) > 100:
        print('error in blotzmann:', len(o), ' should be <=100')
        return -1
    counts = {}  # histogram of value multiplicities, first-occurrence order
    for value in o:
        counts[value] = counts.get(value, 0) + 1
    denom = np.double(np.prod([math.factorial(c) for c in counts.values()]))
    numer = np.double(math.factorial(len(o)))
    return np.log(numer / denom) / np.log(numer)
def margalefo(o):
    """Margalef index using len(o) as richness.

    r is the number of species (richness), n is total of individuals.
    Returns 0 for None input or degenerate populations (r == 0 or n <= 1).
    """
    # BUGFIX: `o == None` compares elementwise on numpy arrays and raised
    # "truth value is ambiguous"; identity comparison is correct here.
    if o is None:
        return 0
    r = len(o)
    n = sum(o)
    if r == 0 or n <= 1:
        return 0
    return (r - 1) / np.log(n)
def margalef(o):
    """Margalef index using richness(o, 0.0001) as the species count.

    r is the number of species (richness), n is total of individuals.
    Returns 0 for None input or degenerate populations (r == 0 or n <= 1).
    """
    # BUGFIX: `o == None` compares elementwise on numpy arrays and raised
    # "truth value is ambiguous"; identity comparison is correct here.
    if o is None:
        return 0
    r = richness(o, 0.0001)
    n = sum(o)
    if r == 0 or n <= 1:
        return 0
    return (r - 1) / np.log(n)
def sheven(shact, richact):
    """Shannon evenness: Shannon index divided by log(richness).

    shact: Shannon index value
    richact: richness index value (evenness is 0 for richness <= 1)
    """
    return 0.0 if richact <= 1.0 else shact / np.log(float(richact))
def partial_sum(o):
    """Sort o ascending IN PLACE, then return the running sums of the
    descending-ordered values as a float array."""
    o.sort()
    o = o[::-1]  # local rebind; the caller's container stays sorted ascending
    return np.cumsum(np.asarray(o, dtype=np.float64))
def berger_parker(o):
    """Berger-Parker index: relative abundance of the most abundant species.

    Considered by May (1975) to be one of the best simple measures.
    """
    return float(np.max(o)) / np.sum(o)
def McIntosh(o):
    """McIntosh (1967) dominance D = (N-U)/(N-sqrt(N)) with U = sqrt(sum(ni^2))."""
    total = float(np.sum(o))
    u = np.sqrt(float(np.sum(o ** 2)))
    return (total - u) / (total - np.sqrt(total))
def menhinick(o):
    """Menhinick's index Dmn (Whittaker, 1977): S/sqrt(N).

    N is the total number of individuals and S the species number.
    """
    return float(len(o)) / np.sqrt(np.sum(o))
def strong(o):
    """Strong's dominance index Dw (Strong, 2002): max_i(b_i/Q - i/R).

    b_i: cumulative totals of abundances ranked largest to smallest;
    Q: total individuals; R: number of species.
    NOTE: o is sorted descending in place.
    """
    o.sort()          # sort it
    o[:] = o[::-1]    # reverse it (in place)
    cumulative = np.cumsum(o)
    Q = float(np.sum(o))
    R = float(len(o))
    best = 0.0
    for i in range(len(o)):
        gap = cumulative[i] / Q - i / R
        if gap > best:
            best = gap
    return best
# Evenness
def simpson_e(o):
    """Simpson evenness E = (1/D)/S, with D the Simpson index and S the
    number of species."""
    D = simpson(o)
    S = float(len(o))
    return (1.0 / D) / S
def McIntosh_e(o):
    """McIntosh equitability (Pielou, 1975): (N-U)/(N-N/sqrt(S)).

    N: total individuals, U: sqrt(sum(ni^2)), S: species count.
    """
    N = float(np.sum(o))
    U = np.sqrt(float(np.sum(o ** 2)))
    S = float(len(o))
    return (N - U) / (N - N / np.sqrt(S))
def camargo_e(o):
    """Camargo evenness (Camargo, 1993): 1 - sum_i sum_{j>=i} (p_i - p_j)/S.

    p_i: proportion of species i; S: total number of species.
    """
    S = float(len(o))
    total = float(np.sum(o))
    spread = 0.0
    for i in range(len(o)):
        for j in range(i, len(o)):
            spread += (o[i] / total - o[j] / total) / S
    return 1 - spread
def smith_wilson1_e(o):
    """Smith & Wilson (1996) evenness variant based on the Simpson index D:
    (D-1)/(1-1/S), S being the species count."""
    D = simpson(o)
    S = len(o)
    return (D - 1.0) / (1.0 - 1.0 / S)
def smith_wilson2_e(o):
    """Smith & Wilson (1996) evenness variant: ln(D)/ln(S), with D the
    Simpson index and S the species count."""
    D = simpson(o)
    S = float(len(o))
    return np.log(D) / np.log(S)
def gini(o):
    """Gini coefficient of the values in o (0 = perfect equality)."""
    running, area = 0, 0
    for value in sorted(o):
        running += value
        area += running - value / 2.
    fair_area = running * len(o) / 2
    return (fair_area - area) / fair_area
# ------------------------------------------------------------
# some function for Mixin and Complexity added by William Seitz
# ------------------------------------------------------------
class complexity(object):
    """Partition-lattice complexity measure (after W. Seitz).

    On construction, all integer partitions of n (5 <= n <= 50) are
    generated twice: `self.partition` holds cumulative-sum rows used for
    dominance comparison, `self.part` the raw partitions.  `self.trys`
    maps n to the partition count, `self.maxi` to a precomputed maximum
    number of incomparable partitions used to normalize comp().
    """

    def __init__(self, n=30):
        """Generate the partition tables for a given number n (5..50)."""
        if n < 5 or n > 50:
            # NOTE: leaves the instance half-initialized on bad input.
            print('error in complexity nmust be 5>= n <=50')
            return None
        self.n = n
        # number of integer partitions for each n in 5..50
        self.trys = {5: 7,
                     6: 11,
                     7: 15,
                     8: 22,
                     9: 30,
                     10: 42,
                     11: 56,
                     12: 77,
                     13: 101,
                     14: 135,
                     15: 176,
                     16: 231,
                     17: 297,
                     18: 385,
                     19: 490,
                     20: 627,
                     21: 792,
                     22: 1002,
                     23: 1255,
                     24: 1575,
                     25: 1958,
                     26: 2436,
                     27: 3010,
                     28: 3718,
                     29: 4565,
                     30: 5604,
                     31: 6842,
                     32: 8349,
                     33: 10143,
                     34: 12310,
                     35: 14883,
                     36: 17977,
                     37: 21637,
                     38: 26015,
                     39: 31185,
                     40: 37338,
                     41: 44583,
                     42: 53174,
                     43: 63261,
                     44: 75175,
                     45: 89134,
                     46: 105558,
                     47: 124754,
                     48: 147273,
                     49: 173525,
                     50: 204226}
        # precomputed maximum incomparable-partition counts (normalizer)
        self.maxi = {5: 0,
                     6: 1,
                     7: 2,
                     8: 4,
                     9: 7,
                     10: 12,
                     11: 19,
                     12: 29,
                     13: 42,
                     14: 61,
                     15: 87,
                     16: 120,
                     17: 164,
                     18: 222,
                     19: 297,
                     20: 392,
                     21: 515,
                     22: 669,
                     23: 866,
                     24: 1109,
                     25: 1415,
                     26: 1792,
                     27: 2265,
                     28: 2838,
                     29: 3550,
                     30: 4413,
                     31: 5475,
                     32: 6751,
                     33: 8314,
                     34: 10043,
                     35: 12460,
                     36: 15169,
                     37: 18444,
                     38: 22332,
                     39: 27012,
                     40: 32538,
                     41: 39156,
                     42: 46955,
                     43: 56250,
                     44: 67162,
                     45: 80119,
                     46: 95288,
                     47: 113229,
                     48: 134173,
                     49: 158850,
                     50: 187593}
        self.partition = np.zeros((self.trys[n], n))
        a = [0 for i in range(n)]
        self.counter = 0  # counter to fill the self.partition
        self.gen_partition(n, a, 0)
        self.part = np.zeros((self.trys[n], n))
        a = [0 for i in range(n)]
        self.counter = 0  # counter to fill the self.part
        self.gen_partitions(n, a, 0)
        print(self.part)
        self.mixin1 = None

    def max_complexity(self):
        """Approximate solution for tests only.

        NOTE(review): overwrites the `self.maxi` dict with an int — later
        comp() calls on the same instance will misbehave; confirm intended.
        """
        self.maxi = 0  # maximum incomparable
        # build a mid-range reference partition
        p = self.n * np.ones(self.n, dtype=int)
        p[0] = int(np.round(self.n / 2 + 0.1))
        for i in range(1, self.n):
            p[i] = 1 + p[i - 1]
            if p[i] == self.n:
                break
        clow = 0
        chigh = 0
        icomp = 0
        for i in range(self.trys[self.n]):
            f1 = 0
            f2 = 0
            for j in range(self.n):
                if self.partition[i, j] > p[j]:
                    f1 = 1
                    break
            for j in range(self.n):
                if self.partition[i, j] < p[j]:
                    f2 = 1
                    break
            if f1 == 1 and f2 == 1:
                icomp += 1  # incomparable with p
            if f1 == 0 and f2 == 1:
                clow += 1
            if f1 == 1 and f2 == 0:
                chigh += 1
        self.maxi = icomp
        print(self.maxi)

    def gen_partition(self, n, a, level):
        """Recursively generate partitions of n, storing CUMULATIVE sums."""
        a = np.copy(a)
        if n < 1:
            return
        a[level] = n
        swp = np.zeros(self.n)
        # reverse the filled prefix so parts are ordered largest-first
        swp[0:(level + 1)] = (a[::-1])[(self.n - level - 1):self.n]
        self.partition[self.counter] = np.cumsum(swp)
        self.counter += 1
        first = 1 if level == 0 else a[level - 1]
        for i in range(first, int(n / 2 + 1.0)):
            a[level] = i
            self.gen_partition(n - i, a, level + 1)

    def gen_partitions(self, n, a, level):
        """Recursively generate partitions of n without the cumulative sum."""
        a = np.copy(a)
        if n < 1:
            return
        a[level] = n
        swp = np.zeros(self.n)
        swp[0:(level + 1)] = (a[::-1])[(self.n - level - 1):self.n]
        self.part[self.counter] = swp
        self.counter += 1
        first = 1 if level == 0 else a[level - 1]
        for i in range(first, int(n / 2 + 1.0)):
            a[level] = i
            self.gen_partitions(n - i, a, level + 1)

    def histogram(self, mx):
        """Bin mx into self.n integer bins after normalizing to 0..1.

        Returns the bin occupation counts (dict values view).
        """
        m = np.array(mx)      # transform it to an ndarray
        m = m.astype(float)   # make sure that is float
        # normalize it; check max != min
        if float(np.max(m) - np.min(m)) != 0:
            m = (m - float(np.min(m))) / (float(np.max(m) - np.min(m)))
        else:
            print('error in histogram: Min==Max')
        m *= (self.n - 1)     # put in range 0..self.n-1
        m = m.astype(int)     # transform it to int
        h = {}                # generates the hist
        for i in range(len(m)):
            if m[i] in h:
                h[m[i]] += 1
            else:
                h[m[i]] = 1
        return h.values()

    def mixin(self, mx, dis=0):
        """Build the mixing vector self.mixin1 from data set mx.

        If dis != 0 the data are treated as discrete counts directly,
        otherwise a histogram is built first.  Returns True on success,
        None when len is outside 5..50.
        """
        if dis == 0:
            d1 = self.histogram(mx)  # build an histogram
            d1 = np.array(d1)
        else:
            d1 = np.array(mx)
        n = len(d1)
        if n < 5 or n > 50:
            print('error in mixin, len(mx):', n, 'is wrong!')
            self.mixin1 = None
            return None
        self.n = n
        d1 = d1.astype(float)  # transform it in a ndarray
        df = np.sort(float(self.n) * d1 / np.sum(d1))[::-1]  # nomalize the float
        d1 = np.round(df)
        d2 = self.n * np.ones(self.n, dtype=int)
        l = self.n
        dx = 0  # store the rounding rest
        # BUGFIX: np.int was removed in NumPy 1.20; plain int() is equivalent.
        d2[0] = int(np.round(d1[0]))  # start with the first of d1[0]
        for i in range(1, l):  # l is necessary
            if i >= len(d1):
                delta = 1.0
            else:
                delta = int(np.round(d1[i] + dx))  # transform the next of d1
                dx += (df[i] - delta)
            if delta >= 1:
                d2[i] = d2[i - 1] + delta
            else:
                d2[i] = d2[i - 1] + 1
            if d2[i] >= l:
                d2[i] = l
        self.mixin1 = d2
        return True

    def comp(self, mx, dis=0):
        """Complexity of mx: fraction of partitions incomparable with its
        mixing vector, normalized by the precomputed maximum."""
        self.mixin(mx, dis)
        if self.mixin1 is None:
            return -1
        print(self.mixin1)
        clow = 0
        chigh = 0
        icomp = 0
        for i in range(self.trys[self.n]):
            f1 = 0
            f2 = 0
            for j in range(self.n):
                if self.partition[i, j] > self.mixin1[j]:
                    f1 = 1
                    break
            for j in range(self.n):
                if self.partition[i, j] < self.mixin1[j]:
                    f2 = 1
                    break
            if f1 == 1 and f2 == 1:
                icomp += 1
            if f1 == 0 and f2 == 1:
                clow += 1
            if f1 == 1 and f2 == 0:
                chigh += 1
        print('icomp:', icomp, ' clow:', clow, ' chigh:', chigh)
        if self.maxi[self.n] == 0:
            return 0
        return float(icomp) / float(self.maxi[self.n])

    def plot(self, data, ws=32):
        """Plot the signal, its complexity, and a continuous wavelet image."""
        N = len(data)
        x = np.arange(N)
        # wavelet part
        widths = np.arange(1, ws)
        cwtmatr = signal.cwt(data, signal.ricker, widths)
        # define the multiple plot
        plt.subplot(2, 1, 1)
        c = self.comp(data)
        plt.title('Signal complexity=' + str(c))
        plt.xlabel('x')
        plt.ylabel('y')
        plt.grid(True)
        plt.plot(x, data)
        plt.subplot(2, 1, 2)
        cax = plt.imshow(cwtmatr, aspect='auto')
        cbar = plt.colorbar(cax)
        plt.xlabel('dt')
        plt.ylabel('dF')
        plt.grid(True)
        plt.show()
def majorization_org(l1, l2):
    """Majorization comparison of two lists via running partial sums.

        a_1 >= b_1
        a_1 + a_2 >= b_1 + b_2
        ...
        a_1 + ... + a_n >= b_1 + ... + b_n

    Collects the three-way comparison (-1/0/1) of each partial-sum pair
    into a set and returns its sum: 1 if l1 strictly dominates somewhere
    and is never dominated, -1 for the converse, 0 otherwise.
    """
    a = 0
    b = 0
    r = {0}
    for (x, y) in zip(l1, l2):
        a += x
        b += y
        # BUGFIX: the cmp() builtin was removed in Python 3;
        # (a > b) - (a < b) is the documented replacement.
        r |= {(a > b) - (a < b)}
    return sum(r)
def majorization(l1, l2):
    """Majorization between two lists via cumulative sums.

        a_1 >= b_1
        a_1 + a_2 >= b_1 + b_2
        ...
        a_1 + ... + a_n-1 >= b_1 + ... + b_n-1

    Returns 1 if l1 majorizes l2, -1 if l2 majorizes l1, 0 if the two are
    incomparable.  Only the first len-1 partial sums are compared.
    """
    a = np.cumsum(l1)
    b = np.cumsum(l2)
    prefix = range(len(a) - 1)
    if all(a[i] >= b[i] for i in prefix):
        return 1
    if all(b[i] >= a[i] for i in prefix):
        return -1
    return 0
# ------------------------------------------------------------
# some functions added by Ralf Wieland
# ------------------------------------------------------------
def fivenum(v):
    """Returns Tukey's five number summary (minimum, lower-hinge,
    median, upper-hinge, maximum) for the input vector,
    a list or array of numbers based on 1.5 times the
    interquartile distance.
    """
    try:
        np.sum(v)
    except TypeError:
        # original behavior: warn and fall through (will fail below)
        print('Error: you must provide a list or array of only numbers')
    # BUGFIX: the original called scoreatpercentile(), which was never
    # imported (NameError at runtime).  np.percentile with its default
    # linear interpolation matches scipy.stats.scoreatpercentile.
    q1 = np.percentile(v, 25)
    q3 = np.percentile(v, 75)
    iqd = q3 - q1
    md = np.median(v)
    whisker = 1.5 * iqd
    return np.min(v), md - whisker, md, md + whisker, np.max(v)
def sevennum(v):
    """Seven-number summary of v: minimum, the 10/25/50/75/90 percentiles
    (midpoint interpolation), and maximum, as a list."""
    # The `interpolation=` keyword was deprecated in NumPy 1.22 in favour
    # of `method=`; the values produced are identical.
    return [
        np.min(v),
        np.percentile(v, 10, method='midpoint'),
        np.percentile(v, 25, method='midpoint'),
        np.median(v),
        np.percentile(v, 75, method='midpoint'),
        np.percentile(v, 90, method='midpoint'),
        np.max(v),
    ]
def sensitivity(TP, FN):
    """True positive rate TP/(TP+FN); NaN when undefined."""
    denom = TP + FN
    if denom > 0:
        return TP / denom
    return np.nan
def precision(TP, FP):
    """TP/(TP+FP); NaN when undefined. See: Machine Learning - A gentle introduction."""
    denom = TP + FP
    if denom > 0:
        return TP / denom
    return np.nan
def recall(TP, FN):
    """TP/(TP+FN); NaN when undefined. See: Machine Learning - A gentle introduction."""
    denom = TP + FN
    if denom > 0:
        return TP / denom
    return np.nan
def specificity(FP, TN):
    """True negative rate TN/(TN+FP); NaN when undefined."""
    denom = FP + TN
    if denom > 0:
        return TN / denom
    return np.nan
def accuracy(TP, TN, FP, FN):
    """(TP+TN)/(TP+TN+FP+FN); NaN for an empty confusion matrix.

    BUGFIX: previously the function fell off the end (implicitly returned
    None) when the total was zero; np.nan matches the sibling metrics
    (sensitivity, precision, recall, specificity).
    """
    total = TP + TN + FP + FN
    if total > 0:
        return (TP + TN) / total
    return np.nan
def f1_score(TP, FP, FN):
    """Harmonic mean of precision and recall. See: Machine Learning - A gentle introduction."""
    p = precision(TP, FP)
    r = recall(TP, FN)
    return 2.0 * p * r / (p + r)
# after: benhamner:
# https://github.com/benhamner/Metrics/blob/master/Python/ml_metrics/auc.py
def tied_rank(x):
    """
    Computes the tied rank of elements in x.

    Equal values share the average of the ranks they span (1-based).

    Parameters
    ----------
    x : list of numbers, numpy array

    Returns
    -------
    score : list of numbers
        The tied rank of each element in x
    """
    order = sorted(zip(x, range(len(x))))
    ranks = [0 for _ in x]
    current = order[0][0]
    start = 0  # index where the current run of ties began
    for i, (value, _) in enumerate(order):
        if current != value:
            current = value
            for j in range(start, i):
                ranks[order[j][1]] = float(start + 1 + i) / 2.0
            start = i
        if i == len(order) - 1:
            # flush the final run of ties
            for j in range(start, i + 1):
                ranks[order[j][1]] = float(start + i + 2) / 2.0
    return ranks
def auc(actual, posterior):
    """
    Computes the area under the receiver-operator characteristic (AUC)
    for binary classification, via the rank-sum formulation.

    Parameters
    ----------
    actual : list of binary numbers, numpy array
        The ground truth value
    posterior : same type as actual
        Defines a ranking on the binary numbers, from most likely to
        be positive to least likely to be positive.

    Returns
    -------
    score : double
    """
    ranks = tied_rank(posterior)
    num_positive = len([0 for x in actual if x == 1])
    num_negative = len(actual) - num_positive
    positive_rank_sum = sum(ranks[i] for i in range(len(ranks)) if actual[i] == 1)
    return ((positive_rank_sum - num_positive * (num_positive + 1) / 2.0) /
            (num_negative * num_positive))
# ------------------------------------------------------------
# some useful definitions from pyeeg
# ------------------------------------------------------------
# definition of the hurst exponent
def hurst(p):
    """Hurst exponent via rescaled-range (R/S) regression (pyeeg variant).

    Rough interpretation:
        trendy H > 0.5, jumping H < 0.5, deterministic H ~ 1.0,
        oscillating/random H ~ 0.5
    """
    N = len(p)
    T = np.arange(1.0, N + 1)
    Y = np.cumsum(p)
    running_mean = Y / T
    S_T = np.zeros(N)
    R_T = np.zeros(N)
    for i in range(N):
        S_T[i] = np.std(p[:i + 1])
        deviations = Y - T * running_mean[i]
        R_T[i] = np.max(deviations[:i + 1]) - np.min(deviations[:i + 1])
    R_S = np.log(R_T / S_T)
    n = np.log(T).reshape(N, 1)
    # index 0 is excluded: S_T[0] is zero (single-sample std)
    H = np.linalg.lstsq(n[1:], R_S[1:])[0]
    return H[0]
def embed_seq(X, Tau, D):
    """Delay-embedding matrix of X with lag Tau and dimension D.

    Row r holds [X[r], X[r+Tau], ..., X[r+(D-1)*Tau]]; there are
    N-(D-1)*Tau rows.  Invalid arguments print a message and call exit()
    (original behavior preserved).

    Example
    -------
    >>> embed_seq(range(9), 2, 3)[0]
    array([0., 2., 4.])
    """
    N = len(X)
    if D * Tau > N:
        print("Cannot build such a matrix, because D * Tau > N")
        exit()
    if Tau < 1:
        print("Tau has to be at least 1")
        exit()
    rows = N - (D - 1) * Tau
    Y = np.zeros((rows, D))
    for r in range(rows):
        for c in range(D):
            Y[r][c] = X[r + c * Tau]
    return Y
def first_order_diff(X):
    """First-order differences of X.

    X = [x(1), x(2), ... , x(N)]
    returns [x(2) - x(1), x(3) - x(2), ..., x(N) - x(N-1)]
    """
    return [b - a for a, b in zip(X, X[1:])]
def pfd(X):
    """
    Compute the Petrosian Fractal Dimension of a time series.

    PFD = log10(n) / (log10(n) + log10(n / (n + 0.4 * N_delta)))
    where n = len(X) and N_delta counts sign changes in the first
    derivative (Petrosian, 1995).
    """
    # np.diff replaces the sibling first_order_diff() with the same values.
    D = np.diff(X)
    N_delta = 0
    for i in range(1, len(D)):
        if D[i] * D[i - 1] < 0:
            N_delta += 1
    n = len(X)
    # BUGFIX: the original evaluated log10(n/n + 0.4*N_delta), i.e.
    # log10(1 + 0.4*N_delta) — the denominator parentheses were missing.
    return np.log10(n) / (np.log10(n) + np.log10(n / (n + 0.4 * N_delta)))
def hfd(X, Kmax):
    """Fractal dimension of time series X with parameter Kmax.

    NOTE(review): the original docstring said "Hjorth Fractal Dimension";
    the k/m curve-length construction looks like Higuchi's method —
    confirm against the intended reference.
    """
    log_lengths = []
    design = []
    N = len(X)
    for k in range(1, Kmax):
        Lk = []
        for m in range(k):
            Lmk = 0
            for i in range(1, int(np.floor((N - m) / k))):
                Lmk += np.abs(X[m + i * k] - X[m + i * k - k])
            Lmk = Lmk * (N - 1) / np.floor((N - m) / float(k)) / k
            Lk.append(Lmk)
        log_lengths.append(np.log(np.mean(Lk)))
        design.append([np.log(float(1) / k), 1])
    p, r1, r2, s = np.linalg.lstsq(design, log_lengths)
    return p[0]
def hjorth(X, D=None):
    """
    Compute Hjorth mobility and complexity of a time series.

    If D (the first-order difference sequence) is not supplied, it is
    derived from X and padded in front with X[0].
    Returns (mobility, complexity).
    """
    if D is None:
        D = first_order_diff(X)
        D.insert(0, X[0])  # pad the first difference
    D = np.array(D)
    n = len(X)
    M2 = float(np.sum(D ** 2)) / n
    TP = np.sum(np.array(X) ** 2)
    M4 = 0
    for prev, cur in zip(D, D[1:]):
        M4 += (cur - prev) ** 2
    M4 = M4 / n
    return np.sqrt(M2 / TP), np.sqrt(float(M4) * TP / M2 / M2)
# ------------------------------------------------------------
# Apen after Pinucs
# ------------------------------------------------------------
def apen(x, mm=2, r=1.0):
    """Approximate entropy (ApEn) after Pincus.

    x  : 1-D numpy array of data
    mm : pattern depth 1, 2, 3, ...
    r  : similarity threshold (city-block distance of the pattern)
    """

    def _phi(depth, win):
        # Accumulated (negative) entropy contribution of all patterns of
        # the given depth over `win` window start positions.
        pairs = win * (win - 1) / 2
        total = 0.0
        for a in range(win):
            hits = 0.0
            for b in range(a, win):
                dist = 0.0
                for off in range(depth):
                    dist += np.abs(x[a + off] - x[b + off])
                # A pattern pair counts as similar when its summed
                # absolute deviation stays below r (a == b always hits).
                hits += 1.0 if r > dist else 0.0
            frac = hits / pairs
            total -= frac * np.log(frac)
        return total

    win = x.shape[0] - mm + 1
    # phi at depth mm minus phi at depth mm+1 (one fewer window).
    return _phi(mm, win) - _phi(mm + 1, win - 1)
def main1():
    """Demo: print hurst/pfd/hfd/hjorth statistics for several
    synthetic series (uniform noise, Gaussian noise, a random walk,
    alternating jumps and a linear ramp)."""
    # Different types of time series for testing
    # p = np.log10(np.cumsum(np.random.randn(50000)+1)+1000)
    # trending, hurst ~ 1
    # p = np.log10((np.random.randn(50000))+1000)
    # mean reverting, hurst ~ 0
    # p = np.log10(np.cumsum(np.random.randn(50000))+1000)
    # random walk, hurst ~ 0.5
    series = np.random.rand(2000)
    print('rand', hurst(series), pfd(series), hfd(series, 5), hjorth(series)[1])
    series = np.zeros(2000)
    for idx in range(2000):
        series[idx] = np.random.normal()
    print('norm', hurst(series), pfd(series), hfd(series, 5), hjorth(series)[1])
    series = np.zeros(2000)
    series[0] = np.random.rand() - 0.5
    for idx in range(2000 - 1):
        # accumulate uniform steps into a random walk
        series[idx + 1] = (np.random.rand() - 0.5) + series[idx]
    print('walk', hurst(series), pfd(series), hfd(series, 5), hjorth(series)[1])
    series = np.zeros(2000)
    for idx in range(2000):
        # alternate around +1 / -1 with small uniform jitter
        series[idx] = (1 if idx % 2 == 0 else -1) + 0.1 * np.random.rand()
    print('jump', hurst(series), pfd(series), hfd(series, 5), hjorth(series)[1])
    series = np.arange(2000)
    print('ramp', hurst(series), pfd(series), hfd(series, 5), hjorth(series)[1])
# main1()
def test():
    """Demo: exercise the diversity measures (pt_pr, simpson, simvar,
    richness, shannon, sheven), the bootstrap helper and partial_sum,
    printing the results and showing a plot of partial sums."""
    def report(obs, eps, label):
        # Shared reporting for one observation vector (the original
        # repeated this section verbatim for two data sets).
        print('o=', obs)
        total, rel = pt_pr(obs)
        print('pt=', total)
        print('pr=', rel)
        print('simpson(o)=', simpson(obs))
        print('simpson(pr)=', simpson(rel))
        print('simvar(pr)=', simvar(rel))
        rich = richness(rel, eps)
        print('richness(pr,' + label + ')=', rich)
        shan = shannon(rel)
        print('shannon(pr)=', shan)
        print('sheven(shan,rich)=', sheven(shan, rich))
    report(np.random.rand(10), 0.1, '0.1')
    print('****** test after Bruggemann ******')
    report([0.1, 0.2, 0, 0, 0.3, 0, 2, 0.1], 0.05, '0.05')
    print('*************partial sum*******************')
    np.random.seed(None)
    sample = np.random.normal(0.5, 1.0 / 12, 1000)
    left, mid, right = bootstrap(sample, nsample=100)
    print("bootstrap normal:", left, mid, right)
    sample = np.random.rand(1000)
    left, mid, right = bootstrap(sample, nsample=100)
    print("bootstrap uniform:", left, mid, right)
    xs = np.arange(20)
    for _ in range(5):
        weight = np.random.rand()
        # weight = 1.0
        label = str(round(weight, 3))
        obs = weight * np.random.rand(20)
        obs /= np.sum(obs)
        plt.plot(xs, partial_sum(obs), label=label)
    plt.legend(loc='upper left')
    plt.grid(True)
    plt.xlabel('samples')
    plt.ylabel('partial sum')
    plt.show()
# test()
# main1()
| Ralf3/samt2 | stats/stat_bw3.py | Python | gpl-3.0 | 30,209 | [
"Gaussian"
] | c4d34f9799e2e4e838d0eb05f48e357771d04a3d41a370293fc1c02cbd6f1aa5 |
"""
Implement -f aka looponfailing for py.test.
NOTE that we try to avoid loading and depending on application modules
within the controlling process (the one that starts repeatedly test
processes) otherwise changes to source code can crash
the controlling process which should best never happen.
"""
import py, pytest
import sys
import execnet
def looponfail_main(config):
    """Drive the looponfail cycle: run the (failing) tests in a fresh
    subprocess, report the failure set, then block until a file under
    the configured roots changes.  Stops on Ctrl-C."""
    control = RemoteControl(config)
    roots = config.getini("looponfailroots")
    recorder = StatRecorder(roots)
    try:
        while True:
            control.loop_once()
            if control.failures or not control.wasfailing:
                repr_pytest_looponfailinfo(
                    failreports=control.failures,
                    rootdirs=roots)
                recorder.waitonchange(checkinterval=2.0)
            # else: the previous failures just passed -> rerun everything
            # immediately without waiting for a change.
    except KeyboardInterrupt:
        print()
class RemoteControl(object):
    """Owns the execnet gateway/channel used to run one test session in
    a fresh slave process and to collect its failure set."""
    def __init__(self, config):
        # config: pytest Config of the controlling process.
        self.config = config
        self.failures = []
    def trace(self, *args):
        """Print a debug message when --debug is enabled."""
        if self.config.option.debug:
            msg = " ".join([str(x) for x in args])
            py.builtin.print_("RemoteControl:", msg)
    def initgateway(self):
        """Spawn a fresh python subprocess gateway."""
        return execnet.makegateway("popen")
    def setup(self, out=None):
        """Start a slave session: spawn the gateway, remote-exec the
        bootstrap code and wire the slave's output stream into ``out``.

        Raises ValueError if a gateway is already active.
        """
        if out is None:
            out = py.io.TerminalWriter()
        if hasattr(self, 'gateway'):
            raise ValueError("already have gateway %r" % self.gateway)
        self.trace("setting up slave session")
        self.gateway = self.initgateway()
        self.channel = channel = self.gateway.remote_exec(init_slave_session,
            args=self.config.args,
            option_dict=vars(self.config.option),
        )
        # The slave first sends back a channel carrying its combined
        # stdout/stderr; forward everything to the local terminal.
        remote_outchannel = channel.receive()
        def write(s):
            out._file.write(s)
            out._file.flush()
        remote_outchannel.setcallback(write)
    def ensure_teardown(self):
        """Close the channel and exit the gateway, if present; safe to
        call repeatedly (attributes are deleted once torn down)."""
        if hasattr(self, 'channel'):
            if not self.channel.isclosed():
                self.trace("closing", self.channel)
                self.channel.close()
            del self.channel
        if hasattr(self, 'gateway'):
            self.trace("exiting", self.gateway)
            self.gateway.exit()
            del self.gateway
    def runsession(self):
        """Send the current failure set to the slave, wait for its
        result triple, and always tear the slave down afterwards."""
        try:
            self.trace("sending", self.failures)
            self.channel.send(self.failures)
            try:
                return self.channel.receive()
            except self.channel.RemoteError:
                e = sys.exc_info()[1]
                self.trace("ERROR", e)
                raise
        finally:
            self.ensure_teardown()
    def loop_once(self):
        """Run one full session and update ``self.failures``.

        When collection failed in the slave the previous failure set is
        kept so the same tests are retried after the next change.
        """
        self.setup()
        self.wasfailing = self.failures and len(self.failures)
        result = self.runsession()
        failures, reports, collection_failed = result
        if collection_failed:
            pass # "Collection failed, keeping previous failure set"
        else:
            # De-duplicate while preserving order.
            uniq_failures = []
            for failure in failures:
                if failure not in uniq_failures:
                    uniq_failures.append(failure)
            self.failures = uniq_failures
def repr_pytest_looponfailinfo(failreports, rootdirs):
    """Print the current failure set (if any) and the directories being
    watched for changes."""
    tw = py.io.TerminalWriter()
    if failreports:
        tw.sep("#", "LOOPONFAILING", bold=True)
        for rep in failreports:
            if rep:
                tw.line(rep, red=True)
    tw.sep("#", "waiting for changes", bold=True)
    for root in rootdirs:
        tw.line("### Watching: %s" % (root,), bold=True)
def init_slave_session(channel, args, option_dict):
    """Bootstrap executed inside the slave process via remote_exec.

    Sends back an output-forwarding channel, makes sys.path absolute,
    rebuilds a pytest Config from the controlling process' options and
    runs a SlaveFailSession on it.
    """
    import os, sys
    outchannel = channel.gateway.newchannel()
    # Redirect all slave output to the controlling process.
    sys.stdout = sys.stderr = outchannel.makefile('w')
    channel.send(outchannel)
    # prune sys.path to not contain relative paths
    newpaths = []
    for p in sys.path:
        if p:
            if not os.path.isabs(p):
                p = os.path.abspath(p)
            newpaths.append(p)
    sys.path[:] = newpaths
    #fullwidth, hasmarkup = channel.receive()
    from _pytest.config import Config
    config = Config.fromdictargs(option_dict, list(args))
    config.args = args
    from xdist.looponfail import SlaveFailSession
    SlaveFailSession(config, channel).main()
class SlaveFailSession:
    """Pytest plugin run inside the slave process: restricts collection
    to the previously failing node ids and sends the resulting failure
    set back over the channel."""
    def __init__(self, config, channel):
        self.config = config
        self.channel = channel
        self.recorded_failures = []
        self.collection_failed = False
        config.pluginmanager.register(self)
        # Disable looponfail/pdb inside the slave to avoid recursion
        # and interactive prompts in a non-interactive subprocess.
        config.option.looponfail = False
        config.option.usepdb = False
    def DEBUG(self, *args):
        """Print a debug message when --debug is enabled."""
        if self.config.option.debug:
            print(" ".join(map(str, args)))
    def pytest_collection(self, session):
        """Collect only the trails received from the controller; fall
        back to a full collection when those node ids are unusable."""
        self.session = session
        self.trails = self.current_command
        hook = self.session.ihook
        try:
            items = session.perform_collect(self.trails or None)
        except pytest.UsageError:
            # Stale node ids (e.g. a renamed test) -> collect everything.
            items = session.perform_collect(None)
        hook.pytest_collection_modifyitems(session=session, config=session.config, items=items)
        hook.pytest_collection_finish(session=session)
        return True
    def pytest_runtest_logreport(self, report):
        # Remember every failing setup/call/teardown report.
        if report.failed:
            self.recorded_failures.append(report)
    def pytest_collectreport(self, report):
        # A collection error means the failure set may be incomplete.
        if report.failed:
            self.recorded_failures.append(report)
            self.collection_failed = True
    def main(self):
        """Receive the failure trails, run the session and send back
        (trails, failure representations, collection_failed)."""
        self.DEBUG("SLAVE: received configuration, waiting for command trails")
        try:
            command = self.channel.receive()
        except KeyboardInterrupt:
            return # in the slave we can't do much about this
        self.DEBUG("received", command)
        self.current_command = command
        self.config.hook.pytest_cmdline_main(config=self.config)
        trails, failreports = [], []
        for rep in self.recorded_failures:
            trails.append(rep.nodeid)
            loc = rep.longrepr
            # Prefer the short crash line when a full repr is available.
            loc = str(getattr(loc, 'reprcrash', loc))
            failreports.append(loc)
        self.channel.send((trails, failreports, self.collection_failed))
class StatRecorder:
    """Watches a list of root directories and detects file changes by
    comparing mtime/size snapshots between check() calls."""
    def __init__(self, rootdirlist):
        self.rootdirlist = rootdirlist
        # Maps path -> stat result from the previous snapshot.
        self.statcache = {}
        self.check() # snapshot state
    def fil(self, p):
        # File filter: regular, non-dot files, excluding compiled .pyc.
        return p.check(file=1, dotfile=0) and p.ext != ".pyc"
    def rec(self, p):
        # Recursion filter: skip dot-directories.
        return p.check(dotfile=0)
    def waitonchange(self, checkinterval=1.0):
        """Block, polling every ``checkinterval`` seconds, until a
        change under the watched roots is detected."""
        while 1:
            changed = self.check()
            if changed:
                return
            py.std.time.sleep(checkinterval)
    def check(self, removepycfiles=True):
        """Take a new snapshot; return True when any file was added,
        removed or modified since the previous snapshot.

        When a .py file changed, its stale .pyc sibling is removed
        (if removepycfiles) so outdated bytecode cannot be imported.
        """
        changed = False
        statcache = self.statcache
        newstat = {}
        for rootdir in self.rootdirlist:
            for path in rootdir.visit(self.fil, self.rec):
                # Pop so that entries left in statcache afterwards
                # correspond to deleted files.
                oldstat = statcache.pop(path, None)
                try:
                    newstat[path] = curstat = path.stat()
                except py.error.ENOENT:
                    # Vanished between visit() and stat().
                    if oldstat:
                        changed = True
                else:
                    if oldstat:
                        if oldstat.mtime != curstat.mtime or \
                           oldstat.size != curstat.size:
                            changed = True
                            py.builtin.print_("# MODIFIED", path)
                            if removepycfiles and path.ext == ".py":
                                pycfile = path + "c"
                                if pycfile.check():
                                    pycfile.remove()
                    else:
                        # Not seen before -> new file.
                        changed = True
        if statcache:
            # Leftover entries are files that disappeared.
            changed = True
        self.statcache = newstat
        return changed
| mattias-lundell/pytest-xdist | xdist/looponfail.py | Python | mit | 7,925 | [
"VisIt"
] | b6fcd046344fad5952f2d15518fad086e12e901eeda090289079d34915d18221 |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import pytest
import logging
import pytest
import uuid
import datetime
import functools
import msrest
from azure.servicebus.management import ServiceBusAdministrationClient, QueueProperties, ApiVersion
from azure.servicebus._common.utils import utc_now
from utilities import get_logger
from azure.core.exceptions import HttpResponseError, ServiceRequestError, ResourceNotFoundError, ResourceExistsError
from azure.servicebus._base_handler import ServiceBusSharedKeyCredential
from devtools_testutils import AzureMgmtTestCase, CachedResourceGroupPreparer
from servicebus_preparer import (
CachedServiceBusNamespacePreparer,
ServiceBusNamespacePreparer
)
from mgmt_test_utilities import (
MgmtQueueListTestHelper,
MgmtQueueListRuntimeInfoTestHelper,
run_test_mgmt_list_with_parameters,
run_test_mgmt_list_with_negative_parameters,
clear_queues,
clear_topics
)
_logger = get_logger(logging.DEBUG)
class ServiceBusAdministrationClientQueueTests(AzureMgmtTestCase):
    @CachedResourceGroupPreparer(name_prefix='servicebustest')
    @CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
    def test_mgmt_queue_list_basic(self, servicebus_namespace_connection_string, servicebus_namespace,
                                    servicebus_namespace_key_name, servicebus_namespace_primary_key):
        """list_queues reflects create/delete, using both connection-string
        and shared-key-credential authentication."""
        mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
        clear_queues(mgmt_service)
        queues = list(mgmt_service.list_queues())
        assert len(queues) == 0
        mgmt_service.create_queue("test_queue")
        queues = list(mgmt_service.list_queues())
        assert len(queues) == 1 and queues[0].name == "test_queue"
        mgmt_service.delete_queue("test_queue")
        queues = list(mgmt_service.list_queues())
        assert len(queues) == 0
        # Repeat the same round-trip with shared-key credential auth.
        fully_qualified_namespace = servicebus_namespace.name + '.servicebus.windows.net'
        mgmt_service = ServiceBusAdministrationClient(
            fully_qualified_namespace,
            credential=ServiceBusSharedKeyCredential(servicebus_namespace_key_name, servicebus_namespace_primary_key)
        )
        queues = list(mgmt_service.list_queues())
        assert len(queues) == 0
        mgmt_service.create_queue("test_queue")
        queues = list(mgmt_service.list_queues())
        assert len(queues) == 1 and queues[0].name == "test_queue"
        mgmt_service.delete_queue("test_queue")
        queues = list(mgmt_service.list_queues())
        assert len(queues) == 0
    @CachedResourceGroupPreparer(name_prefix='servicebustest')
    @CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
    def test_mgmt_queue_list_with_special_chars(self, servicebus_namespace_connection_string):
        """Create/list/delete a queue whose name uses every allowed special
        character class (slash, period, hyphen, underscore, digits)."""
        # Queue names can contain letters, numbers, periods (.), hyphens (-), underscores (_), and slashes (/), up to 260 characters. Queue names are also case-insensitive.
        queue_name = 'txt/.-_123'
        mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
        clear_queues(mgmt_service)
        queues = list(mgmt_service.list_queues())
        assert len(queues) == 0
        mgmt_service.create_queue(queue_name)
        queues = list(mgmt_service.list_queues())
        assert len(queues) == 1 and queues[0].name == queue_name
        mgmt_service.delete_queue(queue_name)
        queues = list(mgmt_service.list_queues())
        assert len(queues) == 0
    @CachedResourceGroupPreparer(name_prefix='servicebustest')
    @CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
    def test_mgmt_queue_list_with_parameters(self, servicebus_namespace_connection_string):
        """Parameterized listing (start_idx/max_count); currently skipped
        because those parameters were removed from the API."""
        pytest.skip("start_idx and max_count are currently removed, they might come back in the future.")
        mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
        run_test_mgmt_list_with_parameters(MgmtQueueListTestHelper(mgmt_service))
    @CachedResourceGroupPreparer(name_prefix='servicebustest')
    @CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
    def test_mgmt_queue_list_with_negative_credential(self, servicebus_namespace, servicebus_namespace_key_name,
                                                servicebus_namespace_primary_key):
        """Listing with bad credentials raises HttpResponseError (bad-host
        variants are commented out as they break test recording/replay)."""
        # invalid_conn_str = 'Endpoint=sb://invalid.servicebus.windows.net/;SharedAccessKeyName=invalid;SharedAccessKey=invalid'
        # mgmt_service = ServiceBusAdministrationClient.from_connection_string(invalid_conn_str)
        # with pytest.raises(ServiceRequestError):
        #     list(mgmt_service.list_queues())
        # TODO: This negative test makes replay test fail. Need more investigation. Live test has no problem.
        invalid_conn_str = 'Endpoint=sb://{}.servicebus.windows.net/;SharedAccessKeyName=invalid;SharedAccessKey=invalid'.format(servicebus_namespace.name)
        mgmt_service = ServiceBusAdministrationClient.from_connection_string(invalid_conn_str)
        with pytest.raises(HttpResponseError):
            list(mgmt_service.list_queues())
        # fully_qualified_namespace = 'invalid.servicebus.windows.net'
        # mgmt_service = ServiceBusAdministrationClient(
        #     fully_qualified_namespace,
        #     credential=ServiceBusSharedKeyCredential(servicebus_namespace_key_name, servicebus_namespace_primary_key)
        # )
        # with pytest.raises(ServiceRequestError):
        #     list(mgmt_service.list_queues())
        # Valid host, invalid shared key -> server rejects the request.
        fully_qualified_namespace = servicebus_namespace.name + '.servicebus.windows.net'
        mgmt_service = ServiceBusAdministrationClient(
            fully_qualified_namespace,
            credential=ServiceBusSharedKeyCredential("invalid", "invalid")
        )
        with pytest.raises(HttpResponseError):
            list(mgmt_service.list_queues())
    @CachedResourceGroupPreparer(name_prefix='servicebustest')
    @CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
    def test_mgmt_queue_list_with_negative_parameters(self, servicebus_namespace_connection_string):
        """Negative parameterized listing; currently skipped because
        start_idx/max_count were removed from the API."""
        pytest.skip("start_idx and max_count are currently removed, they might come back in the future.")
        mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
        run_test_mgmt_list_with_negative_parameters(MgmtQueueListTestHelper(mgmt_service))
    @CachedResourceGroupPreparer(name_prefix='servicebustest')
    @CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
    def test_mgmt_queue_delete_basic(self, servicebus_namespace_connection_string):
        """Deleting one of two queues removes only that queue."""
        mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
        clear_queues(mgmt_service)
        mgmt_service.create_queue("test_queue")
        queues = list(mgmt_service.list_queues())
        assert len(queues) == 1
        mgmt_service.create_queue('txt/.-_123')
        queues = list(mgmt_service.list_queues())
        assert len(queues) == 2
        mgmt_service.delete_queue("test_queue")
        queues = list(mgmt_service.list_queues())
        assert len(queues) == 1 and queues[0].name == 'txt/.-_123'
        mgmt_service.delete_queue('txt/.-_123')
        queues = list(mgmt_service.list_queues())
        assert len(queues) == 0
    @CachedResourceGroupPreparer(name_prefix='servicebustest')
    @CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
    def test_mgmt_queue_delete_one_and_check_not_existing(self, servicebus_namespace_connection_string):
        """Of 10 queues, delete one and verify only it disappears; then
        clean up the rest."""
        mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
        clear_queues(mgmt_service)
        for i in range(10):
            mgmt_service.create_queue("queue{}".format(i))
        delete_idx = 0
        to_delete_queue_name = "queue{}".format(delete_idx)
        mgmt_service.delete_queue(to_delete_queue_name)
        queue_names = [queue.name for queue in list(mgmt_service.list_queues())]
        assert len(queue_names) == 9 and to_delete_queue_name not in queue_names
        for name in queue_names:
            mgmt_service.delete_queue(name)
        queues = list(mgmt_service.list_queues())
        assert len(queues) == 0
    @CachedResourceGroupPreparer(name_prefix='servicebustest')
    @CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
    def test_mgmt_queue_delete_negtive(self, servicebus_namespace_connection_string):
        """Deleting a missing queue raises ResourceNotFoundError; empty or
        None names raise ValueError/TypeError.  (Method name typo kept
        for recorded-test compatibility.)"""
        mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
        clear_queues(mgmt_service)
        mgmt_service.create_queue("test_queue")
        queues = list(mgmt_service.list_queues())
        assert len(queues) == 1
        mgmt_service.delete_queue("test_queue")
        queues = list(mgmt_service.list_queues())
        assert len(queues) == 0
        with pytest.raises(ResourceNotFoundError):
            mgmt_service.delete_queue("test_queue")
        with pytest.raises(ResourceNotFoundError):
            mgmt_service.delete_queue("non_existing_queue")
        with pytest.raises(ValueError):
            mgmt_service.delete_queue("")
        with pytest.raises(TypeError):
            mgmt_service.delete_queue(queue_name=None)
    @CachedResourceGroupPreparer(name_prefix='servicebustest')
    @CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
    def test_mgmt_queue_create_by_name(self, servicebus_namespace_connection_string, **kwargs):
        """A queue created with default settings is retrievable and shows
        Active/Available status."""
        mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
        clear_queues(mgmt_service)
        queue_name = "queue_testaddf"
        mgmt_service.create_queue(queue_name)
        created_at_utc = utc_now()
        try:
            queue = mgmt_service.get_queue(queue_name)
            assert queue.name == queue_name
            assert queue.availability_status == 'Available'
            assert queue.status == 'Active'
            # assert created_at_utc < queue.created_at_utc < utc_now() + datetime.timedelta(minutes=10)
        finally:
            mgmt_service.delete_queue(queue_name)
    @CachedResourceGroupPreparer(name_prefix='servicebustest')
    @CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
    def test_mgmt_queue_create_with_invalid_name(self, servicebus_namespace_connection_string, **kwargs):
        """Non-string or empty queue names fail client-side validation."""
        mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
        with pytest.raises(msrest.exceptions.ValidationError):
            mgmt_service.create_queue(Exception())
        with pytest.raises(msrest.exceptions.ValidationError):
            mgmt_service.create_queue('')
    @CachedResourceGroupPreparer(name_prefix='servicebustest')
    @CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
    def test_mgmt_queue_create_with_queue_description(self, servicebus_namespace_connection_string, **kwargs):
        """Create queues with the full keyword-argument surface (timedelta
        and ISO8601-string duration forms) and verify each property on
        read-back; max_message_size_in_kilobytes is rejected on the
        standard tier."""
        mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
        clear_queues(mgmt_service)
        queue_name = "iweidk"
        queue_name_2 = "vladsk"
        queue_name_3 = "famviq"
        topic_name = "aghadh"
        #TODO: Why don't we have an input model (queueOptions? as superclass of QueueProperties?) and output model to not show these params?
        #TODO: This fails with the following: E msrest.exceptions.DeserializationError: Find several XML 'prefix:DeadLetteringOnMessageExpiration' where it was not expected .tox\whl\lib\site-packages\msrest\serialization.py:1262: DeserializationError
        mgmt_service.create_topic(topic_name)
        # Durations as datetime.timedelta objects.
        mgmt_service.create_queue(
            queue_name,
            auto_delete_on_idle=datetime.timedelta(minutes=10),
            dead_lettering_on_message_expiration=True,
            default_message_time_to_live=datetime.timedelta(minutes=11),
            duplicate_detection_history_time_window=datetime.timedelta(minutes=12),
            enable_batched_operations=True,
            enable_express=True,
            enable_partitioning=True,
            forward_dead_lettered_messages_to=topic_name,
            forward_to=topic_name,
            lock_duration=datetime.timedelta(seconds=13),
            max_delivery_count=14,
            max_size_in_megabytes=3072,
            #requires_duplicate_detection=True,
            requires_session=True
        )
        # Same settings, durations as ISO 8601 duration strings.
        mgmt_service.create_queue(
            queue_name_2,
            auto_delete_on_idle="PT10M1S",
            dead_lettering_on_message_expiration=True,
            default_message_time_to_live="PT11M2S",
            duplicate_detection_history_time_window="PT12M3S",
            enable_batched_operations=True,
            enable_express=True,
            enable_partitioning=True,
            forward_dead_lettered_messages_to=topic_name,
            forward_to=topic_name,
            lock_duration="PT13S",
            max_delivery_count=14,
            max_size_in_megabytes=3072,
            requires_session=True
        )
        with pytest.raises(HttpResponseError):
            mgmt_service.create_queue(
                queue_name_3,
                max_message_size_in_kilobytes=1024 # basic/standard ties does not support
            )
        try:
            queue = mgmt_service.get_queue(queue_name)
            assert queue.name == queue_name
            assert queue.auto_delete_on_idle == datetime.timedelta(minutes=10)
            assert queue.dead_lettering_on_message_expiration == True
            assert queue.default_message_time_to_live == datetime.timedelta(minutes=11)
            assert queue.duplicate_detection_history_time_window == datetime.timedelta(minutes=12)
            assert queue.enable_batched_operations == True
            assert queue.enable_express == True
            assert queue.enable_partitioning == True
            # Service expands entity names to full endpoint URLs.
            assert queue.forward_dead_lettered_messages_to.endswith(".servicebus.windows.net/{}".format(topic_name))
            assert queue.forward_to.endswith(".servicebus.windows.net/{}".format(topic_name))
            assert queue.lock_duration == datetime.timedelta(seconds=13)
            assert queue.max_delivery_count == 14
            assert queue.max_size_in_megabytes % 3072 == 0  # TODO: In my local test, I don't see a multiple of the input number. To confirm
            # This is disabled due to the following error:
            # azure.core.exceptions.HttpResponseError: SubCode=40000. Both DelayedPersistence property and RequiresDuplicateDetection property cannot be enabled together.
            # To know more visit https://aka.ms/sbResourceMgrExceptions.
            #assert queue.requires_duplicate_detection == True
            assert queue.requires_session == True
            queue2 = mgmt_service.get_queue(queue_name_2)
            assert queue2.name == queue_name_2
            assert queue2.auto_delete_on_idle == datetime.timedelta(minutes=10, seconds=1)
            assert queue2.dead_lettering_on_message_expiration == True
            assert queue2.default_message_time_to_live == datetime.timedelta(minutes=11, seconds=2)
            assert queue2.duplicate_detection_history_time_window == datetime.timedelta(minutes=12, seconds=3)
            assert queue2.enable_batched_operations == True
            assert queue2.enable_express == True
            assert queue2.enable_partitioning == True
            assert queue2.forward_dead_lettered_messages_to.endswith(".servicebus.windows.net/{}".format(topic_name))
            assert queue2.forward_to.endswith(".servicebus.windows.net/{}".format(topic_name))
            assert queue2.lock_duration == datetime.timedelta(seconds=13)
            assert queue2.max_delivery_count == 14
            assert queue2.max_size_in_megabytes % 3072 == 0
            assert queue2.requires_session == True
        finally:
            mgmt_service.delete_queue(queue_name)
            mgmt_service.delete_queue(queue_name_2)
            mgmt_service.delete_topic(topic_name)
            mgmt_service.close()
    @CachedResourceGroupPreparer(name_prefix='servicebustest')
    @CachedServiceBusNamespacePreparer(name_prefix='servicebustest', sku='Premium')
    def test_mgmt_queue_premium_create_with_queue_description(self, servicebus_namespace_connection_string, **kwargs):
        """Premium-tier queue creation: max_message_size_in_kilobytes is
        supported (range 1024-102400, default 1024) and updatable;
        express/partitioning are unavailable on this tier."""
        mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
        clear_queues(mgmt_service)
        queue_name = "iweidk"
        queue_name_2 = "cpqmva"
        queue_name_3 = "rekocd"
        mgmt_service.create_queue(
            queue_name,
            auto_delete_on_idle=datetime.timedelta(minutes=10),
            dead_lettering_on_message_expiration=True,
            default_message_time_to_live=datetime.timedelta(minutes=11),
            duplicate_detection_history_time_window=datetime.timedelta(minutes=12),
            enable_batched_operations=True,
            #enable_express=True, # not enabled on premium
            #enable_partitioning=True, # not enabled on premium
            lock_duration=datetime.timedelta(seconds=13),
            max_delivery_count=14,
            max_size_in_megabytes=3072,
            #requires_duplicate_detection=True, # not enabled on premium
            requires_session=True,
            max_message_size_in_kilobytes=12345
        )
        mgmt_service.create_queue(
            queue_name_2,
            auto_delete_on_idle="PT10M1S",
            dead_lettering_on_message_expiration=True,
            default_message_time_to_live="PT11M2S",
            duplicate_detection_history_time_window="PT12M3S",
            enable_batched_operations=True,
            lock_duration="PT13S",
            max_delivery_count=14,
            max_size_in_megabytes=3072,
            requires_session=True
        ) # default max_message_size_in_kilobytes is 1024
        # Out-of-range message sizes are rejected server-side.
        with pytest.raises(HttpResponseError):
            mgmt_service.create_queue(
                queue_name_3,
                max_message_size_in_kilobytes=1023 # min allowed is 1024
            )
        with pytest.raises(HttpResponseError):
            mgmt_service.create_queue(
                queue_name_3,
                max_message_size_in_kilobytes=102401 # max allowed is 102400
            )
        try:
            queue = mgmt_service.get_queue(queue_name)
            assert queue.name == queue_name
            assert queue.auto_delete_on_idle == datetime.timedelta(minutes=10)
            assert queue.dead_lettering_on_message_expiration == True
            assert queue.default_message_time_to_live == datetime.timedelta(minutes=11)
            assert queue.duplicate_detection_history_time_window == datetime.timedelta(minutes=12)
            assert queue.enable_batched_operations == True
            # enable_express is not supported for the premium sku, see doc
            # https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-premium-messaging#express-entities
            # assert queue.enable_express == True
            # partitioning is not available for the the premium sku, see doc
            # https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-partitioning
            # assert queue.enable_partitioning == True
            assert queue.lock_duration == datetime.timedelta(seconds=13)
            assert queue.max_delivery_count == 14
            assert queue.max_size_in_megabytes % 3072 == 0  # TODO: In my local test, I don't see a multiple of the input number. To confirm
            # This is disabled due to the following error:
            # azure.core.exceptions.HttpResponseError: SubCode=40000. Both DelayedPersistence property and RequiresDuplicateDetection property cannot be enabled together.
            # To know more visit https://aka.ms/sbResourceMgrExceptions.
            #assert queue.requires_duplicate_detection == True
            assert queue.requires_session == True
            assert queue.max_message_size_in_kilobytes == 12345
            queue_2 = mgmt_service.get_queue(queue_name_2)
            assert queue_2.name == queue_name_2
            assert queue_2.auto_delete_on_idle == datetime.timedelta(minutes=10, seconds=1)
            assert queue_2.dead_lettering_on_message_expiration == True
            assert queue_2.default_message_time_to_live == datetime.timedelta(minutes=11, seconds=2)
            assert queue_2.duplicate_detection_history_time_window == datetime.timedelta(minutes=12, seconds=3)
            assert queue_2.enable_batched_operations == True
            assert queue_2.lock_duration == datetime.timedelta(seconds=13)
            assert queue_2.max_delivery_count == 14
            assert queue_2.max_size_in_megabytes % 3072 == 0
            assert queue_2.requires_session == True
            assert queue_2.max_message_size_in_kilobytes == 1024
            # The property is mutable after creation on premium.
            queue_2.max_message_size_in_kilobytes = 54321
            mgmt_service.update_queue(queue_2)
            queue_2_new = mgmt_service.get_queue(queue_name_2)
            assert queue_2_new.max_message_size_in_kilobytes == 54321
        finally:
            mgmt_service.delete_queue(queue_name)
            mgmt_service.delete_queue(queue_name_2)
            mgmt_service.close()
    @CachedResourceGroupPreparer(name_prefix='servicebustest')
    @CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
    def test_mgmt_queue_create_duplicate(self, servicebus_namespace_connection_string, **kwargs):
        """Creating a queue whose name already exists raises
        ResourceExistsError."""
        mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
        clear_queues(mgmt_service)
        queue_name = "rtofdk"
        mgmt_service.create_queue(queue_name)
        try:
            with pytest.raises(ResourceExistsError):
                mgmt_service.create_queue(queue_name)
        finally:
            mgmt_service.delete_queue(queue_name)
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
def test_mgmt_queue_update_success(self, servicebus_namespace_connection_string, servicebus_namespace, **kwargs):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
clear_queues(mgmt_service)
queue_name = "fjrui"
topic_name = "sagho"
queue_description = mgmt_service.create_queue(queue_name)
mgmt_service.create_topic(topic_name)
try:
# Try updating one setting.
queue_description.lock_duration = datetime.timedelta(minutes=2)
mgmt_service.update_queue(queue_description)
queue_description = mgmt_service.get_queue(queue_name)
assert queue_description.lock_duration == datetime.timedelta(minutes=2)
# Update forwarding settings with entity name.
queue_description.forward_to = topic_name
queue_description.forward_dead_lettered_messages_to = topic_name
mgmt_service.update_queue(queue_description)
queue_description = mgmt_service.get_queue(queue_name)
assert queue_description.forward_dead_lettered_messages_to.endswith(".servicebus.windows.net/{}".format(topic_name))
assert queue_description.forward_to.endswith(".servicebus.windows.net/{}".format(topic_name))
# Update forwarding settings with None.
queue_description.forward_to = None
queue_description.forward_dead_lettered_messages_to = None
mgmt_service.update_queue(queue_description)
queue_description = mgmt_service.get_queue(queue_name)
assert queue_description.forward_dead_lettered_messages_to is None
assert queue_description.forward_to is None
# Now try updating all settings.
queue_description.auto_delete_on_idle = datetime.timedelta(minutes=10)
queue_description.dead_lettering_on_message_expiration = True
queue_description.default_message_time_to_live = datetime.timedelta(minutes=11)
queue_description.duplicate_detection_history_time_window = datetime.timedelta(minutes=12)
queue_description.enable_batched_operations = True
queue_description.enable_express = True
#queue_description.enable_partitioning = True # Cannot be changed after creation
queue_description.lock_duration = datetime.timedelta(seconds=13)
queue_description.max_delivery_count = 14
queue_description.max_size_in_megabytes = 3072
queue_description.forward_to = "sb://{}.servicebus.windows.net/{}".format(servicebus_namespace.name, queue_name)
queue_description.forward_dead_lettered_messages_to = "sb://{}.servicebus.windows.net/{}".format(servicebus_namespace.name, queue_name)
#queue_description.requires_duplicate_detection = True # Read only
#queue_description.requires_session = True # Cannot be changed after creation
mgmt_service.update_queue(queue_description)
queue_description = mgmt_service.get_queue(queue_name)
assert queue_description.auto_delete_on_idle == datetime.timedelta(minutes=10)
assert queue_description.dead_lettering_on_message_expiration == True
assert queue_description.default_message_time_to_live == datetime.timedelta(minutes=11)
assert queue_description.duplicate_detection_history_time_window == datetime.timedelta(minutes=12)
assert queue_description.enable_batched_operations == True
assert queue_description.enable_express == True
#assert queue_description.enable_partitioning == True
assert queue_description.lock_duration == datetime.timedelta(seconds=13)
assert queue_description.max_delivery_count == 14
assert queue_description.max_size_in_megabytes == 3072
# Note: We endswith to avoid the fact that the servicebus_namespace_name is replacered locally but not in the properties bag, and still test this.
assert queue_description.forward_to.endswith(".servicebus.windows.net/{}".format(queue_name))
assert queue_description.forward_dead_lettered_messages_to.endswith(".servicebus.windows.net/{}".format(queue_name))
#assert queue_description.requires_duplicate_detection == True
#assert queue_description.requires_session == True
queue_description.auto_delete_on_idle = "PT10M1S"
queue_description.default_message_time_to_live = "PT11M2S"
queue_description.duplicate_detection_history_time_window = "PT12M3S"
mgmt_service.update_queue(queue_description)
queue_description = mgmt_service.get_queue(queue_name)
assert queue_description.auto_delete_on_idle == datetime.timedelta(minutes=10, seconds=1)
assert queue_description.default_message_time_to_live == datetime.timedelta(minutes=11, seconds=2)
assert queue_description.duplicate_detection_history_time_window == datetime.timedelta(minutes=12, seconds=3)
# updating all settings with keyword arguments.
mgmt_service.update_queue(
queue_description,
auto_delete_on_idle=datetime.timedelta(minutes=15),
dead_lettering_on_message_expiration=False,
default_message_time_to_live=datetime.timedelta(minutes=16),
duplicate_detection_history_time_window=datetime.timedelta(minutes=17),
enable_batched_operations=False,
enable_express=False,
lock_duration=datetime.timedelta(seconds=18),
max_delivery_count=15,
max_size_in_megabytes=2048,
forward_to=None,
forward_dead_lettered_messages_to=None
)
queue_description = mgmt_service.get_queue(queue_name)
assert queue_description.auto_delete_on_idle == datetime.timedelta(minutes=15)
assert queue_description.dead_lettering_on_message_expiration == False
assert queue_description.default_message_time_to_live == datetime.timedelta(minutes=16)
assert queue_description.duplicate_detection_history_time_window == datetime.timedelta(minutes=17)
assert queue_description.enable_batched_operations == False
assert queue_description.enable_express == False
#assert queue_description.enable_partitioning == True
assert queue_description.lock_duration == datetime.timedelta(seconds=18)
assert queue_description.max_delivery_count == 15
assert queue_description.max_size_in_megabytes == 2048
# Note: We endswith to avoid the fact that the servicebus_namespace_name is replacered locally but not in the properties bag, and still test this.
assert queue_description.forward_to == None
assert queue_description.forward_dead_lettered_messages_to == None
#assert queue_description.requires_duplicate_detection == True
#assert queue_description.requires_session == True
finally:
mgmt_service.delete_queue(queue_name)
mgmt_service.delete_topic(topic_name)
mgmt_service.close()
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
def test_mgmt_queue_update_invalid(self, servicebus_namespace_connection_string, **kwargs):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
clear_queues(mgmt_service)
queue_name = "dfjfj"
queue_description = mgmt_service.create_queue(queue_name)
try:
# handle a null update properly.
with pytest.raises(TypeError):
mgmt_service.update_queue(None)
# handle an invalid type update properly.
with pytest.raises(TypeError):
mgmt_service.update_queue(Exception("test"))
# change a setting we can't change; should fail.
queue_description.requires_session = True
with pytest.raises(HttpResponseError):
mgmt_service.update_queue(queue_description)
queue_description.requires_session = False
#change the name to a queue that doesn't exist; should fail.
queue_description.name = "iewdm"
with pytest.raises(HttpResponseError):
mgmt_service.update_queue(queue_description)
queue_description.name = queue_name
#change the name to a queue with an invalid name exist; should fail.
queue_description.name = ''
with pytest.raises(msrest.exceptions.ValidationError):
mgmt_service.update_queue(queue_description)
queue_description.name = queue_name
#change to a setting with an invalid value; should still fail.
queue_description.lock_duration = datetime.timedelta(days=25)
with pytest.raises(HttpResponseError):
mgmt_service.update_queue(queue_description)
queue_description.lock_duration = datetime.timedelta(minutes=5)
finally:
mgmt_service.delete_queue(queue_name)
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
def test_mgmt_queue_list_runtime_properties_basic(self, servicebus_namespace_connection_string):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
clear_queues(mgmt_service)
queues = list(mgmt_service.list_queues())
queues_infos = list(mgmt_service.list_queues_runtime_properties())
assert len(queues) == len(queues_infos) == 0
mgmt_service.create_queue("test_queue")
queues = list(mgmt_service.list_queues())
queues_infos = list(mgmt_service.list_queues_runtime_properties())
assert len(queues) == 1 and len(queues_infos) == 1
assert queues[0].name == queues_infos[0].name == "test_queue"
info = queues_infos[0]
assert info.size_in_bytes == 0
assert info.accessed_at_utc is not None
assert info.updated_at_utc is not None
assert info.total_message_count == 0
assert info.active_message_count == 0
assert info.dead_letter_message_count == 0
assert info.transfer_dead_letter_message_count == 0
assert info.transfer_message_count == 0
assert info.scheduled_message_count == 0
mgmt_service.delete_queue("test_queue")
queues_infos = list(mgmt_service.list_queues_runtime_properties())
assert len(queues_infos) == 0
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
def test_mgmt_queue_list_runtime_properties_with_negative_parameters(self, servicebus_namespace_connection_string):
pytest.skip("start_idx and max_count are currently removed, they might come back in the future.")
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
run_test_mgmt_list_with_negative_parameters(MgmtQueueListRuntimeInfoTestHelper(mgmt_service))
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
def test_mgmt_queue_list_runtime_properties_with_parameters(self, servicebus_namespace_connection_string):
pytest.skip("start_idx and max_count are currently removed, they might come back in the future.")
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
run_test_mgmt_list_with_parameters(MgmtQueueListRuntimeInfoTestHelper(mgmt_service))
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
def test_mgmt_queue_get_runtime_properties_basic(self, servicebus_namespace_connection_string):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
clear_queues(mgmt_service)
mgmt_service.create_queue("test_queue")
try:
queue_runtime_properties = mgmt_service.get_queue_runtime_properties("test_queue")
assert queue_runtime_properties
assert queue_runtime_properties.name == "test_queue"
assert queue_runtime_properties.size_in_bytes == 0
assert queue_runtime_properties.created_at_utc is not None
assert queue_runtime_properties.accessed_at_utc is not None
assert queue_runtime_properties.updated_at_utc is not None
assert queue_runtime_properties.total_message_count == 0
assert queue_runtime_properties.active_message_count == 0
assert queue_runtime_properties.dead_letter_message_count == 0
assert queue_runtime_properties.transfer_dead_letter_message_count == 0
assert queue_runtime_properties.transfer_message_count == 0
assert queue_runtime_properties.scheduled_message_count == 0
finally:
mgmt_service.delete_queue("test_queue")
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
def test_mgmt_queue_get_runtime_properties_negative(self, servicebus_namespace_connection_string):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
with pytest.raises(TypeError):
mgmt_service.get_queue_runtime_properties(None)
with pytest.raises(msrest.exceptions.ValidationError):
mgmt_service.get_queue_runtime_properties("")
with pytest.raises(ResourceNotFoundError):
mgmt_service.get_queue_runtime_properties("non_existing_queue")
    def test_queue_properties_constructor(self):
        """Constructing QueueProperties directly with a positional name must raise TypeError."""
        with pytest.raises(TypeError):
            QueueProperties("randomname")
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
def test_mgmt_queue_update_dict_success(self, servicebus_namespace_connection_string, servicebus_namespace, **kwargs):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
clear_queues(mgmt_service)
queue_name = "fjruid"
queue_description = mgmt_service.create_queue(queue_name)
queue_description_dict = dict(queue_description)
try:
# Try updating one setting.
queue_description_dict["lock_duration"] = datetime.timedelta(minutes=2)
mgmt_service.update_queue(queue_description_dict)
queue_description = mgmt_service.get_queue(queue_name)
assert queue_description.lock_duration == datetime.timedelta(minutes=2)
# Now try updating all settings.
queue_description_dict = dict(queue_description)
queue_description_dict["auto_delete_on_idle"] = datetime.timedelta(minutes=10)
queue_description_dict["dead_lettering_on_message_expiration"] = True
queue_description_dict["default_message_time_to_live"] = datetime.timedelta(minutes=11)
queue_description_dict["duplicate_detection_history_time_window"] = datetime.timedelta(minutes=12)
queue_description_dict["enable_batched_operations"] = True
queue_description_dict["enable_express"] = True
#queue_description_dict["enable_partitioning"] = True # Cannot be changed after creation
queue_description_dict["lock_duration"] = datetime.timedelta(seconds=13)
queue_description_dict["max_delivery_count"] = 14
queue_description_dict["max_size_in_megabytes"] = 3072
queue_description_dict["forward_to"] = "sb://{}.servicebus.windows.net/{}".format(servicebus_namespace.name, queue_name)
queue_description_dict["forward_dead_lettered_messages_to"] = "sb://{}.servicebus.windows.net/{}".format(servicebus_namespace.name, queue_name)
#queue_description_dict["requires_duplicate_detection"] = True # Read only
#queue_description_dict["requires_session"] = True # Cannot be changed after creation
mgmt_service.update_queue(queue_description_dict)
queue_description = mgmt_service.get_queue(queue_name)
assert queue_description.auto_delete_on_idle == datetime.timedelta(minutes=10)
assert queue_description.dead_lettering_on_message_expiration == True
assert queue_description.default_message_time_to_live == datetime.timedelta(minutes=11)
assert queue_description.duplicate_detection_history_time_window == datetime.timedelta(minutes=12)
assert queue_description.enable_batched_operations == True
assert queue_description.enable_express == True
#assert queue_description.enable_partitioning == True
assert queue_description.lock_duration == datetime.timedelta(seconds=13)
assert queue_description.max_delivery_count == 14
assert queue_description.max_size_in_megabytes == 3072
# Note: We endswith to avoid the fact that the servicebus_namespace_name is replacered locally but not in the properties bag, and still test this.
assert queue_description.forward_to.endswith(".servicebus.windows.net/{}".format(queue_name))
assert queue_description.forward_dead_lettered_messages_to.endswith(".servicebus.windows.net/{}".format(queue_name))
#assert queue_description.requires_duplicate_detection == True
#assert queue_description.requires_session == True
# updating all settings with keyword arguments.
mgmt_service.update_queue(
dict(queue_description),
auto_delete_on_idle=datetime.timedelta(minutes=15),
dead_lettering_on_message_expiration=False,
default_message_time_to_live=datetime.timedelta(minutes=16),
duplicate_detection_history_time_window=datetime.timedelta(minutes=17),
enable_batched_operations=False,
enable_express=False,
lock_duration=datetime.timedelta(seconds=18),
max_delivery_count=15,
max_size_in_megabytes=2048,
forward_to=None,
forward_dead_lettered_messages_to=None
)
queue_description = mgmt_service.get_queue(queue_name)
assert queue_description.auto_delete_on_idle == datetime.timedelta(minutes=15)
assert queue_description.dead_lettering_on_message_expiration == False
assert queue_description.default_message_time_to_live == datetime.timedelta(minutes=16)
assert queue_description.duplicate_detection_history_time_window == datetime.timedelta(minutes=17)
assert queue_description.enable_batched_operations == False
assert queue_description.enable_express == False
#assert queue_description.enable_partitioning == True
assert queue_description.lock_duration == datetime.timedelta(seconds=18)
assert queue_description.max_delivery_count == 15
assert queue_description.max_size_in_megabytes == 2048
# Note: We endswith to avoid the fact that the servicebus_namespace_name is replacered locally but not in the properties bag, and still test this.
assert queue_description.forward_to == None
assert queue_description.forward_dead_lettered_messages_to == None
#assert queue_description.requires_duplicate_detection == True
#assert queue_description.requires_session == True
finally:
mgmt_service.delete_queue(queue_name)
mgmt_service.close()
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
def test_mgmt_queue_update_dict_error(self, servicebus_namespace_connection_string, **kwargs):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string)
clear_queues(mgmt_service)
queue_name = "dfjdfj"
queue_description = mgmt_service.create_queue(queue_name)
# send in queue dict without non-name keyword args
queue_description_only_name = {"name": queue_name}
try:
with pytest.raises(TypeError):
mgmt_service.update_queue(queue_description_only_name)
finally:
mgmt_service.delete_queue(queue_name)
@CachedResourceGroupPreparer(name_prefix='servicebustest')
@CachedServiceBusNamespacePreparer(name_prefix='servicebustest')
def test_mgmt_queue_basic_v2017_04(self, servicebus_namespace_connection_string, servicebus_namespace,
servicebus_namespace_key_name, servicebus_namespace_primary_key):
mgmt_service = ServiceBusAdministrationClient.from_connection_string(servicebus_namespace_connection_string, api_version=ApiVersion.V2017_04)
clear_queues(mgmt_service)
mgmt_service.create_queue("test_queue")
queues = list(mgmt_service.list_queues())
assert len(queues) == 1 and queues[0].name == "test_queue"
queue = mgmt_service.get_queue("test_queue")
assert queue.name == "test_queue"
mgmt_service.delete_queue("test_queue")
queues = list(mgmt_service.list_queues())
assert len(queues) == 0
with pytest.raises(HttpResponseError):
mgmt_service.create_queue("queue_can_not_be_created", max_message_size_in_kilobytes=1024)
fully_qualified_namespace = servicebus_namespace.name + '.servicebus.windows.net'
mgmt_service = ServiceBusAdministrationClient(
fully_qualified_namespace,
credential=ServiceBusSharedKeyCredential(servicebus_namespace_key_name, servicebus_namespace_primary_key),
api_version=ApiVersion.V2017_04
)
mgmt_service.create_queue("test_queue")
queues = list(mgmt_service.list_queues())
assert len(queues) == 1 and queues[0].name == "test_queue"
queue = mgmt_service.get_queue("test_queue")
assert queue.name == "test_queue"
mgmt_service.delete_queue("test_queue")
queues = list(mgmt_service.list_queues())
assert len(queues) == 0
with pytest.raises(HttpResponseError):
mgmt_service.create_queue("queue_can_not_be_created", max_message_size_in_kilobytes=1024)
| Azure/azure-sdk-for-python | sdk/servicebus/azure-servicebus/tests/mgmt_tests/test_mgmt_queues.py | Python | mit | 45,547 | [
"VisIt"
] | be3f4bf3d4d1d08585944048b61951ef75891f70779f8bafd00e8a981533deae |
# PYTHON 3
#
# Author: Kate Willett
# Created: 4 March 2019
# Last update: 15 April 2019
# Location: /data/local/hadkw/HADCRUH2/UPDATE2014/PROGS/PYTHON/
# GitHub: https://github.com/Kate-Willett/PYTHON
# -----------------------
# CODE PURPOSE AND OUTPUT
# -----------------------
# This code reads in monthly mean gridded (5by5) netCDF files and produces area average time series
# in netCDF and ASCII
#
# Note that the mdi (-1e30) is different between IDL (float?) and Python (double?) and at the moment
# I have netCDF files created in both IDL and Python. So - first thing is to reset all missing values to
# the Python mdi used here.
# Actually now I make netCDF files with -999. as the missing data!
#
# This code was originally IDL written by Kate Willett make_area_avg_ts.pro and used
# globalmean.pro to do the area averaging which was written in IDL by Tim Osborn
#
# <references to related published material, e.g. that describes data set>
#
# -----------------------
# LIST OF MODULES
# -----------------------
## Modules
#from datetime import datetime
#import numpy as np
#from matplotlib.dates import date2num,num2date
#import sys, os
#from scipy.optimize import curve_fit,fsolve,leastsq
#from scipy import pi,sqrt,exp
#from scipy.special import erf
#import scipy.stats
#from math import sqrt,pi,radians,sin,cos,acos
#import struct
#from netCDF4 import Dataset
#from netCDF4 import stringtoarr # for putting strings in as netCDF variables
#import pdb
#
## Kates:
#import TestLeap
#from ReadNetCDF import GetGrid4
#from ReadNetCDF import GetGrid4Slice
#from GetNiceTimes import MakeDaysSince
#
# -----------------------
# DATA
# -----------------------
# HadISDH-land:
# /data/local/hadkw/HADCRUH2/UPDATE2016/STATISTICS/GRIDS/
# HadISDH.landq.3.0.0.2016p_FLATgridIDPHA5by5_anoms7605_JAN2017_cf.nc
# HadISDH-marine
# /data/local/hadkw/HADCRUH2/UPDATE2016/STATISTICS/GRIDS/
# HadISDH.marineq.1.0.0.2016p_OBSclim2BClocal_anoms8110_JAN2017_cf.nc
# HadISDH.marineq.1.0.0.2016p_OBSclim2BClocalship_anoms8110_JAN2017_cf.nc
# HadISDH-blend:
# /data/local/hadkw/HADCRUH2/UPDATE2016/STATISTICS/GRIDS/
# HadISDH.blendq.1.0.0.2016p_FULL_anoms8110_JAN2017_cf.nc
# HadISDH.blendq.1.0.0.2016p_FULLship_anoms8110_JAN2017_cf.nc
# Other:
#
# -----------------------
# HOW TO RUN THE CODE
# -----------------------
# Make sure all of the EDITABLES are correct
# module load scitools/default-current
# python MakeAreaAvgTS.py
#
# NOT ANYMORE: if you want era5 data masked to HadISDH then set MaskIt = True internally
# if you want different years or regions then reset internally
#> module load scitools/default-current
#> python MakeAreaAvgTS.py --var <var> --typee <type> --year1 <yyyy> --year2 <yyyy>
#
## Which variable?
# var = 'dpd' #'dpd','td','t','tw','e','q','rh'
#
## Which homog type?
# typee = 'LAND', 'RAW','OTHER', 'BLEND', 'BLENDSHIP', 'MARINE','MARINESHIP', 'ERA5','ERA5MASK','ERA5LAND','ERA5MARINE','ERA5LANDMASK','ERA5MARINEMASK'
#
# year1 and year2 are start and end year of trends
#
# -----------------------
# OUTPUT
# -----------------------
# /data/local/hadkw/HADCRUH2/UPDATE2016/STATISTICS/TIMESERIES/
# HadISDH.landq.3.0.0.2016p_FLATgridIDPHA5by5_anoms7605_JAN2017_areaTS_19732016.nc
# HadISDH.blendq.1.0.0.2016p_FULL_anoms8110_JAN2017_areaTS_19732016.nc
# HadISDH.blendq.1.0.0.2016p_FULLship_anoms8110_JAN2017_areaTS_19732016.nc
# HadISDH.marineq.1.0.0.2016p_OBSclim2BClocal_anoms8110_JAN2017_areaTS_19732016.nc
# HadISDH.marineq.1.0.0.2016p_OBSclim2BClocalship_anoms8110_JAN2017_areaTS_19732016.nc
#
# -----------------------
# VERSION/RELEASE NOTES
# -----------------------
#
# Version 2 (5 November 2020)
# ---------
#
# Enhancements
# Now runs from command line
# Now works with ERA5 anoms and masks if desired.
#
# Changes
#
# Bug fixes
#
#
# Version 1 (15 April 2019)
# ---------
#
# Enhancements
#
# Changes
#
# Bug fixes
#
# -----------------------
# OTHER INFORMATION
# -----------------------
# Based on original IDL code by Kate Willett make_area_avg_ts.pro
############################################################################
# Modules
from datetime import datetime
import numpy as np
from matplotlib.dates import date2num,num2date
import sys, os, getopt
from scipy.optimize import curve_fit,fsolve,leastsq
from scipy import pi,sqrt,exp
from scipy.special import erf
import scipy.stats
from math import sqrt,pi,radians,sin,cos,acos
import struct
from netCDF4 import Dataset
from netCDF4 import stringtoarr # for putting strings in as netCDF variables
import pdb
# Kates:
import TestLeap
from ReadNetCDF import GetGrid4
from ReadNetCDF import GetGrid4Slice
from GetNiceTimes import MakeDaysSince
# Start and end years of complete dataset if HardWire = 1
startyr = 1973 # Candidate dataset start year - reset later if ERA or by HardWire=0
styrh = 1973 # HadISDH start year (for masking) reset by HardWire=0
edyr = 2020 # reset by HardWire=0
# Which climatology period to work with?
climST = str(1981) #1976 or 1981
climED = str(2010) #2005 or 2010
# Filename fragment identifying the climatology, e.g. 'anoms8110'.
climBIT = 'anoms'+climST[2:4]+climED[2:4]
# Dataset version if HardWire = 1
lversion = '4.3.0.2020f' # land
mversion = '1.1.0.2020f' # marine
bversion = '1.1.0.2020f' # blend
# HARDWIRED SET UP!!!
# If HardWire = 1 then program reads from the above run choices
# If HardWire = 0 then program reads in from F1_HadISDHBuildConfig.txt
HardWire = 0
if (HardWire == 0):
    # Read in the config file to get all of the info.
    # Each line is KEY=VALUE; split on the first '=' only so values may contain '='.
    with open('F1_HadISDHBuildConfig.txt') as f:
        ConfigDict = dict(x.rstrip().split('=', 1) for x in f)
    lversion = ConfigDict['VersionDots']
    mversion = ConfigDict['MVersionDots']
    bversion = ConfigDict['BVersionDots']
    startyr = int(ConfigDict['StartYear'])
    edyr = int(ConfigDict['EndYear'])
# Note that ConfigDict is still held in memory and contains all the Global Attribute Elements for the output NetCDF File
mdi = -1e30 # missing data indicator
#
############################################################################
# SUBROUTINES #
############################################################################
# AreaMean
def AreaMean(DataField,TheLats,MaskField=None,Cover=None,mdi=-1e30):
    '''
    Compute cosine(latitude) weighted area averages of a gridded field,
    ignoring missing data. Based on original IDL code by Tim Osborn
    (globalmean.pro).

    DOES NOT ASSUME MASK IS IDENTICAL FOR EACH TIME STEP!!!

    INPUTS:
    DataField - np.array of data, either (time, lat, lon) or a single
                (lat, lon) field; missing values are marked with mdi
    TheLats[:] - np.array of gridbox latitudes in degrees
    Optional:
    MaskField - (time, lat, lon) or (lat, lon) np.array; gridboxes where the
                mask equals mdi are excluded. If not supplied the average is
                computed over the entire DataField.
    Cover - if an array is supplied then the number of non-missing boxes
            contributing per time step is also returned
    mdi - missing data indicator (defaults to -1e30, matching the module-wide value)
    OUTPUTS:
    DataTS[:] - np.array time series of area averages (a scalar if the
                input was a single 2D field)
    Cover[:] - only returned if Cover was supplied

    Fixes over the original:
    - the no-mask path used np.empty, whose uninitialised contents could
      accidentally equal mdi and silently exclude boxes; now uses np.ones
    - len(kl) tested the np.where tuple length (always true); now len(kl[0])
    - a single 2D field is promoted to a 1-element time axis so the
      documented 2D case actually works
    '''
    # Check and normalise dimensions: promote a single field to a
    # one-element time axis so one code path serves both cases.
    fullsize = np.shape(DataField)
    if (len(fullsize) < 2) | (len(fullsize) > 3):
        print('DataField must be 2D or 3D')
        pdb.set_trace()
    if (len(fullsize) == 2):
        DataField = np.reshape(DataField,(1,fullsize[0],fullsize[1]))
    Ntims, Nlats, Nlons = np.shape(DataField)

    # Validate the supplied mask, or build an all-valid mask covering everything.
    masksize = np.shape(MaskField)
    if (len(masksize) == 0):
        MaskField = np.ones((Nlats,Nlons),dtype = float) # every box valid (ones can never equal mdi)
        masksize = np.shape(MaskField)
    if (len(masksize) == 3):
        if (masksize[0] != Ntims) | (masksize[1] != Nlats) | (masksize[2] != Nlons):
            print('Mask is wrong size')
            pdb.set_trace()
    elif (len(masksize) == 2):
        if (masksize[0] != Nlats) | (masksize[1] != Nlons):
            print('Mask is wrong size')
            pdb.set_trace()
    else:
        print('Mask is wrong size')
        pdb.set_trace()

    # Accumulators for the weighted sum and total weight per time step.
    sumval = np.zeros(Ntims,dtype = float)
    sumarea = np.zeros(Ntims,dtype = float)
    # Only fill Cover if the caller supplied something to fill.
    WantCover = (len(np.shape(Cover)) > 0)
    if (WantCover):
        Cover = np.zeros(Ntims,dtype = float)

    for ln in range(Nlons):
        for lt in range(Nlats):
            temp_data = np.copy(DataField[:,lt,ln])
            if (len(masksize) == 3):
                # Time-varying mask: blank out masked time steps for this box.
                temp_data[np.where(MaskField[:,lt,ln] == mdi)] = mdi
            elif (MaskField[lt,ln] == mdi):
                # Static mask excludes this gridbox entirely.
                continue
            kl = np.where(temp_data != mdi)
            if (len(kl[0]) > 0): # original tested len(kl), which is always true for a np.where tuple
                weight = cos(radians(TheLats[lt]))
                sumval[kl] = sumval[kl] + temp_data[kl]*weight
                sumarea[kl] = sumarea[kl] + weight
                if (WantCover):
                    Cover[kl] = Cover[kl] + 1.

    # Normalise; time steps with no contributing boxes become mdi.
    gots = np.where(sumarea > 0)
    if (len(gots[0]) > 0):
        sumval[gots] = sumval[gots] / sumarea[gots]
    misses = np.where(sumarea == 0)
    if (len(misses[0]) > 0):
        sumval[misses] = mdi
    if (Ntims == 1): # convert to a scalar for a single 2D field
        sumval = sumval[0]

    if (WantCover):
        return sumval, Cover
    else:
        return sumval
############################################################################
# WriteNetCDF
def WriteNetCDF(Filename,TheGArray,TheNHArray,TheTArray,TheSHArray,TheTimes,TheStYr, TheEdYr, TheClimStart, TheClimEnd, TheName, TheStandardName, TheLongName, TheUnit, TheRegions):
    '''
    Write the four regional area-average time series out to a NetCDF4 file.
    INPUTS:
    Filename - string file name (without the .nc extension, which is appended)
    TheGArray[:], TheNHArray[:], TheTArray[:], TheSHArray[:] - time series of
        global, northern hemisphere, tropical and southern hemisphere averages
    TheTimes[:] - times in days since TheStYr, Jan 1st
    TheStYr, TheEdYr - integer start/end years (Jan start, Dec end)
    TheClimStart, TheClimEnd - integer climatology start/end years
    TheName - string short name of var, e.g. q2m
    TheStandardName - string standard name of variable (currently unused)
    TheLongName - string long name of variable
    TheUnit - string unit of variable
    TheRegions - dictionary with G, NH, T and SH [lower lat, upper lat] boundaries
    OUTPUTS:
    None
    '''
    # NETCDF4_CLASSIC with zlib compression; least_significant_digit=4 because
    # we do not need precision beyond four significant figures.
    ncfw = Dataset(Filename+'.nc','w',format='NETCDF4_CLASSIC')
    ncfw.createDimension('time',len(TheTimes))
    # Time coordinate variable and its attributes.
    timevar = ncfw.createVariable('time','f4',('time',))
    timevar.standard_name = 'time'
    timevar.long_name = 'time'
    timevar.units = 'days since '+str(TheStYr)+'-1-1 00:00:00'
    timevar.start_year = str(TheStYr)
    timevar.end_year = str(TheEdYr)
    timevar[:] = TheTimes
    # The four regional series differ only in name, descriptive text, data
    # array and region key, so write them data-driven from one table.
    series = (
        ('glob', 'global', TheGArray, 'G'),
        ('nhem', 'northern hemisphere', TheNHArray, 'NH'),
        ('trop', 'tropical', TheTArray, 'T'),
        ('shem', 'southern hemisphere', TheSHArray, 'SH'),
    )
    for prefix, words, dataarr, key in series:
        var = ncfw.createVariable(prefix+'_'+TheName+'_anoms','f4',('time',),fill_value = mdi,zlib=True,least_significant_digit=4)
        var.long_name = TheLongName+' '+words+' average anomalies '+'%5.1f' % (TheRegions[key][0])+' to '+'%5.1f' % (TheRegions[key][1])
        var.units = TheUnit
        var[:] = dataarr[:]
    ncfw.close()
    return
############################################################################
# WriteText
def WriteText(Filename, TheGArray, TheNHArray, TheTArray, TheSHArray, TheTimes, TheStYr, TheEdYr):
    '''
    Append two text files with the regional average time series:
        <Filename>_monthly.dat : one row per month (YYYYMM)
        <Filename>_annual.dat  : one row per complete year
    Each row holds the Global, N Hemisphere, Tropics and S Hemisphere values.
    An annual value is only computed when at least MinThresh (11) months of
    that year are present; otherwise the missing value Amdi is written.

    Filename:   path stem for the output files (suffixes added here)
    The*Array:  monthly regional average series (may contain mdi missing data)
    TheTimes:   time axis; only its length is used to drive the loop
    TheStYr:    first year of the series, used to label output rows
    TheEdYr:    last year of the series (unused; kept for interface stability)

    NOTE(review): relies on the module-level global `mdi` defined earlier in
    the file.  Files are opened in append mode, so re-running adds new header
    lines after the existing data (original behaviour, preserved).
    '''
    # Minimum number of valid months required to form an annual mean
    MinThresh = 11

    # Swap mdi (currently -1e30) for a text-friendly missing value.
    # Exact equality with mdi is safe because the arrays were filled with
    # precisely that constant (-1e30 + 1 is still a huge negative number).
    Amdi = -99.99
    TheGArray[np.where(TheGArray == mdi)] = Amdi
    TheNHArray[np.where(TheNHArray == mdi)] = Amdi
    TheTArray[np.where(TheTArray == mdi)] = Amdi
    TheSHArray[np.where(TheSHArray == mdi)] = Amdi

    def _annual_mean(TheArray, tt):
        ''' Mean of the 12 months ending at index tt, or Amdi if too few are valid. '''
        TmpArr = TheArray[tt - 11:tt + 1]
        gots = np.where(TmpArr > Amdi)
        if (len(gots[0]) >= MinThresh):
            return np.mean(TmpArr[gots])
        return Amdi

    # Context managers guarantee both files are closed even if a write fails
    with open(Filename + '_annual.dat', "a") as ann, open(Filename + '_monthly.dat', "a") as mon:
        # Write the file headers
        ann.write("DATE GLOBAL N_HEMI TROPICS S_HEMI\n")
        mon.write(" DATE GLOBAL N_HEMI TROPICS S_HEMI\n")
        # Loop through each year and month and write out
        yy = 0
        mm = 0
        for tt in range(len(TheTimes)):
            # Write monthlies to file
            m = '%02i' % (mm + 1)
            mon.write('{:4d}{:2s} {:6.2f} {:6.2f} {:6.2f} {:6.2f}\n'.format(yy + TheStYr, m, TheGArray[tt], TheNHArray[tt], TheTArray[tt], TheSHArray[tt]))
            mm = mm + 1
            if (mm == 12):
                # Complete year reached: compute the four annual means via the
                # shared helper (was four copies of identical code) and write out
                TheGVal = _annual_mean(TheGArray, tt)
                TheNHVal = _annual_mean(TheNHArray, tt)
                TheTVal = _annual_mean(TheTArray, tt)
                TheSHVal = _annual_mean(TheSHArray, tt)
                ann.write('{:4d} {:6.2f} {:6.2f} {:6.2f} {:6.2f}\n'.format(yy + TheStYr, TheGVal, TheNHVal, TheTVal, TheSHVal))
                yy = yy + 1
                mm = 0
    return
############################################################################
# MAIN #
############################################################################
def main(argv):
    '''
    Build large-scale area-average time series (global 70S-70N, N Hemisphere,
    Tropics, S Hemisphere) from gridded monthly HadISDH / ERA5 fields and
    write them out as netCDF (WriteNetCDF) and text (WriteText) files.

    argv: command line arguments, e.g.
          --var q --typee LAND --year1 1973 --year2 2018

    NOTE(review): depends on module-level globals defined earlier in the file
    and not visible here (mdi, edyr, startyr, styrh, climST, climED, climBIT,
    lversion, mversion, bversion, GetGrid4, AreaMean, WriteNetCDF, WriteText,
    getopt, sys, pdb) - confirm against the file header.
    '''
    # INPUT PARAMETERS AS STRINGS!!!!
    var = 'q'       # 'q','rh','e','td','tw','t','dpd'
    typee = 'LAND'  # 'LAND','RAW','OTHER', 'BLEND', 'BLENDSHIP', 'MARINE', 'MARINESHIP' # domain does not need to be set correctly!!!
    # can also be 'ERA5' 'ERA5LAND','ERA5MARINE' 'ERA5MARINEMASK' ERA5LANDMASK'
    year1 = '1973'  # Start year of trend
    year2 = '2018'  # End year of trend

    # Parse the command line; any option overrides the defaults above
    try:
        opts, args = getopt.getopt(argv, "hi:",
                                   ["var=","typee=","year1=","year2="])
    except getopt.GetoptError:
        print('Usage (as strings) MakeGridTrends.py --var <q> --typee <IDPHA> --year1 <1973> --year2 <2018>')
        sys.exit(2)

    for opt, arg in opts:
        if opt == "--var":
            try:
                var = arg
            except:
                sys.exit("Failed: var not a string")
        elif opt == "--typee":
            try:
                typee = arg
            except:
                sys.exit("Failed: typee not a string")
        elif opt == "--year1":
            try:
                year1 = arg
            except:
                sys.exit("Failed: year1 not an integer")
        elif opt == "--year2":
            try:
                year2 = arg
            except:
                sys.exit("Failed: year2 not an integer")

    assert year1 != -999 and year2 != -999, "Year not specified."

    print(var,typee,year1, year2)

    # *** CHOOSE WHETHER TO WORK WITH ANOMALIES OR ACTUALS - COULD ADD RENORMALISATION IF DESIRED ***
    isanom = True # 'false' for actual values, 'true' for anomalies

    # What domain?  Pick the matching dataset version for the filename.
    if (typee == 'MARINE') | (typee == 'MARINESHIP') | (typee == 'ERA5MARINE') | (typee == 'ERA5MARINEMASK'):
        domain = 'marine'
        version = mversion
    elif (typee == 'BLEND') | (typee == 'BLENDSHIP') | (typee == 'ERA5') | (typee == 'ERA5MASK'):
        domain = 'blend'
        version = bversion
    else:
        domain = 'land'
        version = lversion

    # Set up the trend years
    sttrd = int(year1)
    edtrd = int(year2)

    # Latitude and Longitude gridbox width and variable names
    latlg = 5.
    lonlg = 5.
    #latlg = 1.
    #lonlg = 1.
    LatInfo = ['latitude']
    LonInfo = ['longitude']

    # Set up area average masks: [south edge, north edge] in degrees latitude
    MaskDict = dict([('G',[-70.,70.]),
                     ('NH',[20.,70.]),
                     ('T',[-20.,20.]),
                     ('SH',[-70.,-20.])])

    # Time and dimension variables
    # nyrs = (edyr+1)-styr
    # nmons = nyrs*12
    nyrs = (edtrd+1)-sttrd
    nmons = nyrs*12
    # Gridbox centres of the first (south-westernmost) box
    stlt = -90+(latlg/2.)
    stln = -180+(lonlg/2.)
    nlats = int(180/latlg)
    nlons = int(360/lonlg)
    lats = (np.arange(nlats)*latlg) + stlt
    lons = (np.arange(nlons)*lonlg) + stln

    WORKINGDIR = '/scratch/hadkw/UPDATE20'+str(edyr)[2:4]
    # WORKINGDIR = '/data/users/hadkw/WORKING_HADISDH/UPDATE20'+str(edyr)[2:4]
    indir = WORKINGDIR+'/STATISTICS/GRIDS/'
    outdir = WORKINGDIR+'/STATISTICS/TIMESERIES/'
    # If we're working with ERA5 then set INDIR to OTHERDATA
    # (indirH points at the HadISDH grids used when masking ERA5)
    if (typee.find('ERA5') >= 0):
        indir = WORKINGDIR+'/OTHERDATA/'
        indirH = WORKINGDIR+'/STATISTICS/GRIDS/'

    # END OF EDITABLES**********************************************************

    # Dictionaries for filename and other things:
    # var -> [filename element, 2m variable prefix, units]
    ParamDict = dict([('q',['q','q2m','g/kg']),
                      ('rh',['RH','rh2m','%rh']),
                      ('t',['T','t2m','deg C']),
                      ('td',['Td','td2m','deg C']),
                      ('tw',['Tw','tw2m','deg C']),
                      ('e',['e','e2m','hPa']),
                      ('dpd',['DPD','dpd2m','deg C']),
                      ('evap',['q','evap','cm w.e.'])])

    # Dictionary for looking up variable standard (not actually always standard!!!) names for netCDF output of variables
    NameDict = dict([('q',['specific_humidity',' decadal trend in specific humidity anomaly ('+climST+' to '+climED+' base period)']),
                     ('rh',['relative_humidity',' decadal trend in relative humidity anomaly ('+climST+' to '+climED+' base period)']),
                     ('e',['vapour_pressure',' decadal trend in vapour pressure anomaly ('+climST+' to '+climED+' base period)']),
                     ('tw',['wetbulb_temperature',' decadal trend in wetbulb temperature anomaly ('+climST+' to '+climED+' base period)']),
                     ('t',['drybulb_temperature',' decadal trend in dry bulb temperature anomaly ('+climST+' to '+climED+' base period)']),
                     ('td',['dewpoint_temperature',' decadal trend in dew point temperature anomaly ('+climST+' to '+climED+' base period)']),
                     ('dpd',['dewpoint depression',' decadal trend in dew point depression anomaly ('+climST+' to '+climED+' base period)']),
                     ('evap',['evaporation',' decadal trend in evaporation anomaly ('+climST+' to '+climED+' base period)'])])

    # unitees = ParamDict[param][2]
    # varname = param
    unitees = ParamDict[var][2]
    varname = var

    # Build the homogenisation/bias-correction filename element for each domain
    if domain == 'land':
        DatTyp = 'IDPHA'
        if (var == 'dpd'):
            DatTyp = 'PHA'
        if (var == 'td'):
            DatTyp = 'PHADPD'
        fileblurb = 'FLATgridHOM5by5'
        # fileblurb = 'FLATgrid'+DatTyp+'5by5'
    elif domain == 'marine':
        if (typee == 'MARINE'):
            fileblurb = 'BClocal5by5both'
        elif (typee == 'MARINESHIP') | (typee == 'ERA5MARINEMASK') | (typee == 'ERA5MARINE'):
            fileblurb = 'BClocalSHIP5by5both'
    elif domain == 'blend':
        DatTyp = 'IDPHA'
        if (var == 'dpd'):
            DatTyp = 'PHA'
        if (var == 'td'):
            DatTyp = 'PHADPD'
        if (typee == 'BLEND'):
            fileblurb = 'FLATgridHOMBClocalboth5by5'
            # fileblurb = 'FLATgrid'+DatTyp+'BClocalboth5by5'
        elif (typee == 'BLENDSHIP') | (typee == 'ERA5MASK') | (typee == 'ERA5'):
            fileblurb = 'FLATgridHOMBClocalSHIPboth5by5'
            # fileblurb = 'FLATgrid'+DatTyp+'BClocalSHIPboth5by5'

    inlandcover = WORKINGDIR+'/OTHERDATA/HadCRUT.4.3.0.0.land_fraction.nc'

    infile = 'HadISDH.'+domain+ParamDict[var][0]+'.'+version+'_'+fileblurb+'_'+climBIT
    # infile = 'HadISDH.'+domain+ParamDict[var][0]+'.'+version+'_'+fileblurb+'_'+climBIT+'_'+thenmon+thenyear+'_cf'
    outfile = 'HadISDH.'+domain+ParamDict[var][0]+'.'+version+'_'+fileblurb+'_'+climBIT+'_areaTS_'+str(sttrd)+str(edtrd) #70S-70N
    # For ERA5 runs the input/output names differ; the *H names are the
    # HadISDH equivalents used when masking ERA5 to HadISDH coverage
    if (typee.find('ERA5') >= 0):
        infile = var+'2m_monthly_5by5_ERA5_1979'+str(edyr)
        outfile = var+'2m_monthly_5by5_ERA5_'+climBIT+'_areaTS_'+str(sttrd)+str(edtrd) #70S-70N
        infileH = 'HadISDH.'+domain+ParamDict[var][0]+'.'+version+'_'+fileblurb+'_'+climBIT
        # infileH = 'HadISDH.'+domain+ParamDict[var][0]+'.'+version+'_'+fileblurb+'_'+climBIT+'_'+thenmon+thenyear+'_cf'
        outfileH = 'HadISDH.'+domain+ParamDict[var][0]+'.'+version+'_'+fileblurb+'_'+climBIT+'_areaTS_'+str(sttrd)+str(edtrd) #70S-70N

    # Removed the nowmonnowyear thenmonthenyear bits
    # infile = 'HadISDH.'+domain+ParamDict[var][0]+'.'+version+'_'+fileblurb+'_'+climBIT+'_'+thenmon+thenyear+'_cf'
    # outfile = 'HadISDH.'+domain+ParamDict[var][0]+'.'+version+'_'+fileblurb+'_'+climBIT+'_'+nowmon+nowyear+'_areaTS_'+str(sttrd)+str(edtrd) #70S-70N

    if (isanom == False):
        outfile = outfile+'_ABS'

    # Get Data: pick the netCDF variable names to read depending on source
    styr = startyr
    if (typee.find('ERA') >= 0):
        styr = 1979
        if (isanom == True):
            if (domain == 'land'):
                ReadInfo = [var+'2m_anoms_land','time']
                outfile = outfile+'_land'
            if (domain == 'marine'):
                ReadInfo = [var+'2m_anoms_ocean','time']
                outfile = outfile+'_marine'
            if (domain == 'blend'):
                ReadInfo = [var+'2m_anoms','time']
                ReadInfoH = [var+'_anoms','time']
        else:
            ReadInfo = [var+'2m','time']
            ReadInfoH = [var+'_abs','time']
    else:
        if (isanom == True):
            ReadInfo = [var+'_anoms','time']
        else:
            ReadInfo = [var+'_abs','time']

    ## read in files
    #LatInfo = ['latitude']
    #LonInfo = ['longitude']
    #
    #if (isanom == True):
    #    if (homogtype == 'ERA-Interim') | (homogtype == 'ERA5'):
    #        if (domain == 'land'):
    #            ReadInfo = [varname+'_anoms_land','time']
    #            outfile = outfile+'_land'
    #        if (domain == 'marine'):
    #            ReadInfo = [varname+'_anoms_ocean','time']
    #            outfile = outfile+'_marine'
    #    else:
    #        ReadInfo = [varname+'_anoms','time']
    #else:
    #    if (homogtype == 'ERA-Interim') | (homogtype == 'ERA5'):
    #        if (domain == 'land'):
    #            ReadInfo = [varname+'_land','time']
    #            outfile = outfile+'_land'
    #        if (domain == 'marine'):
    #            ReadInfo = [varname+'_ocean','time']
    #            outfile = outfile+'_land'
    #    else:
    #        ReadInfo = [varname+'_abs','time']#
    #

    print('Reading in the data for :',typee)
    #print('Reading in the data for :',homogtype)
    # TmpVals,Latitudes,Longitudes = GetGrid4(infile,ReadInfo,LatInfo,LonInfo)
    TmpVals,Latitudes,Longitudes = GetGrid4(indir+infile+'.nc',ReadInfo,LatInfo,LonInfo)

    # Seperate out data and times
    TheData = TmpVals[0]
    Times = TmpVals[1]
    TmpVals = []

    # Check the mdis = IDL output netCDF differs from Python output
    bads = np.where(TheData < -10000)
    if (len(bads[0]) > 0):
        TheData[bads] = mdi

    # If we're masking ERA then read in HadISDH and blank out ERA gridboxes
    # wherever HadISDH has missing data
    if (typee.find('MASK') >= 0):
        print('Masking ERA5')
        outfile = outfile+'_mask'

        TmpValsH,LatitudesH,LongitudesH = GetGrid4(indirH+infileH+'.nc',ReadInfoH,LatInfo,LonInfo)

        # Seperate out data and times
        TheDataH = TmpValsH[0]
        TimesH = TmpValsH[1]
        TmpValsH = []

        # Check the mdis = IDL output netCDF differs from Python output
        bads = np.where(TheDataH < -10000)
        if (len(bads[0]) > 0):
            TheDataH[bads] = mdi

        # Make HadISDH start in the same years (slice off the pre-styr months)
        TheDataH = TheDataH[(styr-styrh)*12:((edyr-styrh) + 1)*12,:,:]

        # Now mask the ERA data with HadISDH missing data
        TheData[np.where(TheDataH == mdi)] = mdi

    ## Now if we're masking then read in the mask for the time slice of ERA-Interim
    #if (mask == True):
    #
    #    SliceInfo = dict([('TimeSlice',[mskstpt,mskedpt]),
    #                      ('LatSlice',[0,nlats]),
    #                      ('LonSlice',[0,nlons])])
    #
    #    if (isanom == True):
    #        ReadInfo = [param+'_anoms']
    #    else:
    #        ReadInfo = [param+'_abs']#
    #
    #    print('Reading in the mask data for :',typee)
    #    print('Reading in the mask data for :',homogtype)
    #    TmpVals,Latitudes,Longitudes = GetGrid4Slice(maskfile,ReadInfo,SliceInfo,LatInfo,LonInfo)
    #
    #    # Seperate out data and times
    #    MSKTheData = TmpVals
    ##    MSKTimes = TmpVals[1]
    #    TmpVals = []
    #
    #    # Check the mdis = IDL output netCDF differs from Python output
    #    bads = np.where(MSKTheData < -10000)
    #    if (len(bads[0]) > 0):
    #        MSKTheData[bads] = mdi
    #
    #    # mask out points in candidate that do not have data in the mask
    #    bads = np.where(MSKTheData <= mdi)
    ##    pdb.set_trace()
    #    if (len(bads[0]) > 0):
    #        TheData[bads] = mdi

    ## # make anomalies from the monthlies if you want to be precise about anomalising with same coverage as HadISDH
    ## newq_values=make_array(nlons,nlats,nmons,/float,value=mdi)
    ## FOR ltt=0,nlats-1 DO BEGIN
    ##   FOR lnn=0,nlons-1 DO BEGIN
    ##     subarr=REFORM(q_values(lnn,ltt,*),12,nyrs)
    ##     FOR mm=0,11 DO BEGIN
    ##       gots=WHERE(subarr(mm,*) NE mdi,count)
    ##       climsub=subarr(mm,mclimst-styr:mclimst-styr)
    ##       gotsC=WHERE(climsub NE mdi,countC)
    ##       IF (countC GE 15) THEN subarr(mm,gots)=subarr(mm,gots)-MEAN(climsub(gotsC)) ELSE subarr(mm,*)=mdi
    ##     ENDFOR
    ##     newq_values(lnn,ltt,*)=REFORM(subarr,nmons)
    ##   ENDFOR
    ## ENDFOR
    ## #stop
    ## q_values=newq_values

    # make spatial area masks - set anything greater than 70 deg lat to mdi
    global_mask = np.zeros((nlats,nlons),dtype = float)
    global_mask.fill(1)
    nhem_mask = np.copy(global_mask)
    shem_mask = np.copy(global_mask)
    trop_mask = np.copy(global_mask)
    # Each region keeps 1 inside its latitude band and mdi outside it
    for deg in range(nlats):
        if (lats[deg] < MaskDict['G'][0]) | (lats[deg] > MaskDict['G'][1]):
            global_mask[deg,:] = mdi
        if (lats[deg] < MaskDict['NH'][0]) | (lats[deg] > MaskDict['NH'][1]):
            nhem_mask[deg,:] = mdi
        if (lats[deg] < MaskDict['T'][0]) | (lats[deg] > MaskDict['T'][1]):
            trop_mask[deg,:] = mdi
        if (lats[deg] < MaskDict['SH'][0]) | (lats[deg] > MaskDict['SH'][1]):
            shem_mask[deg,:] = mdi

    # Broadcast each 2-D mask over the full time axis
    global_mask_3d = np.repeat(global_mask[np.newaxis,:,:],nmons, axis = 0)
    nhem_mask_3d = np.repeat(nhem_mask[np.newaxis,:,:],nmons, axis = 0)
    shem_mask_3d = np.repeat(shem_mask[np.newaxis,:,:],nmons, axis = 0)
    trop_mask_3d = np.repeat(trop_mask[np.newaxis,:,:],nmons, axis = 0)

    ##CoverTS = np.empty(nmons,dtype = float)
    ##CoverTS.fill(mdi)
    ##glob_avg_ts,CoverTS = AreaMean(TheData,lats,global_mask_3d,CoverTS)
    glob_avg_ts = AreaMean(TheData,lats,global_mask_3d)
    print(len(glob_avg_ts),np.max(glob_avg_ts),np.min(glob_avg_ts))
    #pdb.set_trace()
    nhem_avg_ts = AreaMean(TheData,lats,nhem_mask_3d)
    print(len(nhem_avg_ts),np.max(nhem_avg_ts),np.min(nhem_avg_ts))
    trop_avg_ts = AreaMean(TheData,lats,trop_mask_3d)
    print(len(trop_avg_ts),np.max(trop_avg_ts),np.min(trop_avg_ts))
    shem_avg_ts = AreaMean(TheData,lats,shem_mask_3d)
    print(len(shem_avg_ts),np.max(shem_avg_ts),np.min(shem_avg_ts))

    # save to file as netCDF and .dat
    # WriteNetCDF(outfile,glob_avg_ts,nhem_avg_ts,trop_avg_ts,shem_avg_ts,Times,styr, edyr, climst, climed, ParamDict[param][0], StandardNameDict[param], LongNameDict[param], unitees, MaskDict)
    WriteNetCDF(outdir+outfile,glob_avg_ts,nhem_avg_ts,trop_avg_ts,shem_avg_ts,Times,styr, edyr, climST, climED, ParamDict[var][0], NameDict[var][0], NameDict[var][1], unitees, MaskDict)
    # WriteText(outfile,glob_avg_ts,nhem_avg_ts,trop_avg_ts,shem_avg_ts,Times,styr, edyr)
    WriteText(outdir+outfile,glob_avg_ts,nhem_avg_ts,trop_avg_ts,shem_avg_ts,Times,styr, edyr)

    # Note if any of the series have missing data because at these large scales they should not
    # (drops into the debugger so the run can be inspected interactively)
    if (len(np.where(glob_avg_ts <= mdi)[0]) > 0):
        print('Missing months for Global average: ',len(np.where(glob_avg_ts <= mdi)[0]))
        pdb.set_trace()
    if (len(np.where(nhem_avg_ts <= mdi)[0]) > 0):
        print('Missing months for NHemi average: ',len(np.where(nhem_avg_ts <= mdi)[0]))
        pdb.set_trace()
    if (len(np.where(trop_avg_ts <= mdi)[0]) > 0):
        print('Missing months for Tropics average: ',len(np.where(trop_avg_ts <= mdi)[0]))
        pdb.set_trace()
    if (len(np.where(shem_avg_ts <= mdi)[0]) > 0):
        print('Missing months for Shemi average: ',len(np.where(shem_avg_ts <= mdi)[0]))
        # pdb.set_trace()

    print('And we are done!')
# Script entry point: pass the command line (minus the program name) to main()
if __name__ == '__main__':
    main(sys.argv[1:])
| Kate-Willett/HadISDH_Build | MakeAreaAvgTS.py | Python | cc0-1.0 | 34,336 | [
"NetCDF"
] | d4069e84682da43719eee17e6dd88b466d79fa93c6f0622e5cd834d85202a3cb |
#!/usr/bin/env python
# A simple example of a three-dimensional noise pattern:
# sample an implicit Perlin-noise field on a grid and render
# the zero-level isosurface.
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()

# Implicit Perlin noise function with anisotropic frequency
noise = vtk.vtkPerlinNoise()
noise.SetFrequency(2,1.25,1.5)
noise.SetPhase(0,0,0)

# Sample the implicit function on a regular grid; normals are not needed
sampler = vtk.vtkSampleFunction()
sampler.SetImplicitFunction(noise)
sampler.SetSampleDimensions(65,65,20)
sampler.ComputeNormalsOff()

# Extract the zero-valued contour of the sampled field
iso_surface = vtk.vtkContourFilter()
iso_surface.SetInputConnection(sampler.GetOutputPort())
iso_surface.SetValue(0,0.0)

iso_mapper = vtk.vtkPolyDataMapper()
iso_mapper.SetInputConnection(iso_surface.GetOutputPort())
iso_mapper.ScalarVisibilityOff()

iso_actor = vtk.vtkActor()
iso_actor.SetMapper(iso_mapper)
iso_actor.GetProperty().SetColor(0.2,0.4,0.6)

# Standard renderer / window / interactor setup
renderer = vtk.vtkRenderer()
render_window = vtk.vtkRenderWindow()
render_window.AddRenderer(renderer)
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetRenderWindow(render_window)

# Add the actor to the renderer, set the background and size
renderer.AddActor(iso_actor)
renderer.SetBackground(1,1,1)
render_window.SetSize(300,300)

# Frame the scene with a small dolly-in
renderer.ResetCamera()
renderer.GetActiveCamera().Dolly(1.35)
renderer.ResetCameraClippingRange()

interactor.Initialize()
# render the image
render_window.Render()
# prevent the tk window from showing up then start the event loop
# --- end of script --
| hlzz/dotfiles | graphics/VTK-7.0.0/Common/DataModel/Testing/Python/PerlinNoise.py | Python | bsd-3-clause | 1,332 | [
"VTK"
] | b03f738d9053b553567ff8b7b1ff44329deb78cf70b501f6932f6e1a79ea9ce2 |
"""
Browser set up for acceptance tests.
"""
# pylint: disable=no-member
# pylint: disable=unused-argument
from lettuce import before, after, world
from splinter.browser import Browser
from logging import getLogger
from django.core.management import call_command
from django.conf import settings
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import requests
from base64 import encodestring
from json import dumps
import xmodule.modulestore.django
from xmodule.contentstore.django import _CONTENTSTORE
LOGGER = getLogger(__name__)
LOGGER.info("Loading the lettuce acceptance testing terrain file...")
MAX_VALID_BROWSER_ATTEMPTS = 20
GLOBAL_SCRIPT_TIMEOUT = 60
def get_saucelabs_username_and_key():
    """
    Return a dict holding the Sauce Labs username and access key,
    as configured in the Django settings' SAUCE dictionary.
    """
    sauce_settings = settings.SAUCE
    return {
        "username": sauce_settings.get('USERNAME'),
        "access-key": sauce_settings.get('ACCESS_ID'),
    }
def set_saucelabs_job_status(jobid, passed=True):
    """
    Set the pass/fail status of the job `jobid` on Sauce Labs.

    jobid: the Sauce Labs job identifier to update
    passed: whether the job should be marked as passed

    Returns True if the Sauce Labs REST API acknowledged the update
    (HTTP 200), False otherwise.
    """
    config = get_saucelabs_username_and_key()
    # Bug fix: update the job that was passed in rather than world.jobid,
    # so the function honours its own parameter instead of silently using
    # the current session's job id.
    url = 'http://saucelabs.com/rest/v1/{}/jobs/{}'.format(config['username'], jobid)
    body_content = dumps({"passed": passed})
    base64string = encodestring('{}:{}'.format(config['username'], config['access-key']))[:-1]
    headers = {"Authorization": "Basic {}".format(base64string)}
    result = requests.put(url, data=body_content, headers=headers)
    return result.status_code == 200
def make_saucelabs_desired_capabilities():
    """
    Build the Selenium DesiredCapabilities dict for a Sauce Labs session,
    combining environment-driven SAUCE settings with fixed session options.
    """
    capabilities = settings.SAUCE.get('BROWSER', DesiredCapabilities.CHROME)
    # Values taken from the environment / Django settings
    for cap_key, settings_key in [('platform', 'PLATFORM'),
                                  ('version', 'VERSION'),
                                  ('device-type', 'DEVICE'),
                                  ('name', 'SESSION'),
                                  ('build', 'BUILD')]:
        capabilities[cap_key] = settings.SAUCE.get(settings_key)
    # Fixed options applied to every run
    capabilities.update({
        'video-upload-on-pass': False,
        'sauce-advisor': False,
        'capture-html': True,
        'record-screenshots': True,
        'selenium-version': "2.34.0",
        'max-duration': 3600,
        'public': 'public restricted',
    })
    return capabilities
@before.harvest
def initial_setup(server):
    """
    Launch the browser once before executing the tests.

    Chooses a backend from settings.LETTUCE_SELENIUM_CLIENT:
    'local' (with retry for flaky ChromeDriver sessions), 'saucelabs',
    or 'grid'; anything else is an error.  The acquired browser, the
    implicit wait and the session id are absorbed into `world`.
    """
    world.absorb(settings.LETTUCE_SELENIUM_CLIENT, 'LETTUCE_SELENIUM_CLIENT')
    if world.LETTUCE_SELENIUM_CLIENT == 'local':
        browser_driver = getattr(settings, 'LETTUCE_BROWSER', 'chrome')
        if browser_driver == 'chrome':
            # Capture all browser console output so it can be saved on failure
            desired_capabilities = DesiredCapabilities.CHROME
            desired_capabilities['loggingPrefs'] = {
                'browser': 'ALL',
            }
        else:
            desired_capabilities = {}

        # There is an issue with ChromeDriver2 r195627 on Ubuntu
        # in which we sometimes get an invalid browser session.
        # This is a work-around to ensure that we get a valid session.
        success = False
        num_attempts = 0
        while (not success) and num_attempts < MAX_VALID_BROWSER_ATTEMPTS:
            # Load the browser and try to visit the main page
            # If the browser couldn't be reached or
            # the browser session is invalid, this will
            # raise a WebDriverException
            try:
                if browser_driver == 'firefox':
                    # Lettuce initializes differently for firefox, and sending
                    # desired_capabilities will not work. So initialize without
                    # sending desired_capabilities.
                    world.browser = Browser(browser_driver)
                else:
                    world.browser = Browser(browser_driver, desired_capabilities=desired_capabilities)
                world.browser.driver.set_script_timeout(GLOBAL_SCRIPT_TIMEOUT)
                world.visit('/')
            except WebDriverException:
                LOGGER.warn("Error acquiring %s browser, retrying", browser_driver, exc_info=True)
                # Clean up the half-initialized browser before retrying
                if hasattr(world, 'browser'):
                    world.browser.quit()
                num_attempts += 1
            else:
                success = True

        # If we were unable to get a valid session within the limit of attempts,
        # then we cannot run the tests.
        if not success:
            raise IOError("Could not acquire valid {driver} browser session.".format(driver=browser_driver))
        world.absorb(0, 'IMPLICIT_WAIT')
        world.browser.driver.set_window_size(1280, 1024)

    elif world.LETTUCE_SELENIUM_CLIENT == 'saucelabs':
        config = get_saucelabs_username_and_key()
        world.browser = Browser(
            'remote',
            url="http://{}:{}@ondemand.saucelabs.com:80/wd/hub".format(config['username'], config['access-key']),
            **make_saucelabs_desired_capabilities()
        )
        world.absorb(30, 'IMPLICIT_WAIT')
        world.browser.set_script_timeout(GLOBAL_SCRIPT_TIMEOUT)

    elif world.LETTUCE_SELENIUM_CLIENT == 'grid':
        world.browser = Browser(
            'remote',
            url=settings.SELENIUM_GRID.get('URL'),
            browser=settings.SELENIUM_GRID.get('BROWSER'),
        )
        world.absorb(30, 'IMPLICIT_WAIT')
        world.browser.driver.set_script_timeout(GLOBAL_SCRIPT_TIMEOUT)

    else:
        raise Exception("Unknown selenium client '{}'".format(world.LETTUCE_SELENIUM_CLIENT))

    world.browser.driver.implicitly_wait(world.IMPLICIT_WAIT)
    # Remember the session id so the Sauce Labs job status can be set later
    world.absorb(world.browser.driver.session_id, 'jobid')
@before.each_scenario
def reset_data(scenario):
    """
    Clean out the django test database defined in the
    envs/acceptance.py file: edx-platform/db/test_edx.db
    and reset the per-scenario data dictionary.
    """
    LOGGER.debug("Flushing the test database...")
    call_command('flush', interactive=False, verbosity=0)
    # Fresh dict for data that steps want to share within one scenario
    world.absorb({}, 'scenario_dict')
@before.each_scenario
def configure_screenshots(scenario):
    """
    Before each scenario, turn off automatic screenshots.

    Scenarios opt back in via the "I enable auto screenshots" step.

    Args: str, scenario. Name of current scenario.
    """
    world.auto_capture_screenshots = False
@after.each_scenario
def clear_data(scenario):
    # Discard the per-scenario data dictionary absorbed into `world`
    world.spew('scenario_dict')
@after.each_scenario
def reset_databases(scenario):
    '''
    After each scenario, all databases are cleared/dropped. Contentstore data are stored in unique databases
    whereas modulestore data is in unique collection names. This data is created implicitly during the scenarios.
    If no data is created during the test, these lines equivalently do nothing.
    '''
    # _drop_database is private but is the supported way to wipe test data here
    xmodule.modulestore.django.modulestore()._drop_database()  # pylint: disable=protected-access
    xmodule.modulestore.django.clear_existing_modulestores()
    _CONTENTSTORE.clear()
@world.absorb
def capture_screenshot(image_name):
    """
    Capture a screenshot outputting it to a defined directory.
    This function expects only the name of the file. It will generate
    the full path of the output screenshot.
    If the name contains spaces, they ill be converted to underscores.
    """
    target_path = '{}/log/auto_screenshots/{}.png'.format(
        settings.TEST_ROOT, image_name.replace(' ', '_'))
    try:
        world.browser.driver.save_screenshot(target_path)
    except WebDriverException:
        LOGGER.error("Could not capture a screenshot '{}'".format(target_path))
@after.each_scenario
def screenshot_on_error(scenario):
    """
    When a scenario fails, save a screenshot to help with debugging.
    """
    if not scenario.failed:
        return
    try:
        target_path = '{}/log/{}.png'.format(
            settings.TEST_ROOT, scenario.name.replace(' ', '_'))
        world.browser.driver.save_screenshot(target_path)
    except WebDriverException:
        LOGGER.error('Could not capture a screenshot')
@after.each_scenario
def capture_console_log(scenario):
    """
    When a scenario fails, save the browser console log to help with
    debugging.  The log is written to <TEST_ROOT>/log/<scenario name>.log
    with one JSON-encoded entry per line.
    """
    if scenario.failed:
        try:
            # Bug fix: fetching the log can itself raise WebDriverException
            # (e.g. the browser session is already gone), so it must live
            # inside the try block instead of crashing the hook.
            log = world.browser.driver.get_log('browser')
            output_dir = '{}/log'.format(settings.TEST_ROOT)
            file_name = '{}/{}.log'.format(output_dir, scenario.name.replace(' ', '_'))
            with open(file_name, 'w') as output_file:
                for line in log:
                    output_file.write("{}{}".format(dumps(line), '\n'))
        except WebDriverException:
            LOGGER.error('Could not capture the console log')
def capture_screenshot_for_step(step, when):
    """
    Useful method for debugging acceptance tests that are run in Vagrant.
    This method runs automatically before and after each step of an acceptance
    test scenario. The variable:

        world.auto_capture_screenshots

    either enables or disabled the taking of screenshots. To change the
    variable there is a convenient step defined:

        I (enable|disable) auto screenshots

    If you just want to capture a single screenshot at a desired point in code,
    you should use the method:

        world.capture_screenshot("image_name")
    """
    if not world.auto_capture_screenshots:
        return
    scenario = step.scenario
    scenario_index = scenario.feature.scenarios.index(scenario) + 1
    step_index = scenario.steps.index(step) + 1
    image_name = "{prefix:03d}__{num:03d}__{name}__{postfix}".format(
        prefix=scenario_index,
        num=step_index,
        name=step.defined_at.function.func_name,
        postfix=when
    )
    world.capture_screenshot(image_name)
@before.each_step
def before_each_step(step):
    # Optionally snapshot the page state just before the step runs
    capture_screenshot_for_step(step, '1_before')
@after.each_step
def after_each_step(step):
    # Optionally snapshot the page state just after the step runs
    capture_screenshot_for_step(step, '2_after')
@after.harvest
def saucelabs_status(total):
    """
    After the full harvest, report the overall pass/fail result to
    Sauce Labs (only when running against the saucelabs client).
    """
    if world.LETTUCE_SELENIUM_CLIENT != 'saucelabs':
        return
    all_passed = total.scenarios_ran == total.scenarios_passed
    set_saucelabs_job_status(world.jobid, all_passed)
| caesar2164/edx-platform | common/djangoapps/terrain/browser.py | Python | agpl-3.0 | 10,329 | [
"VisIt"
] | 6d3207f9cc47092b9de70591aa67dd8a524145ab6a3af2a6b93f6faa45d34236 |
##Copyright 2008-2013 Jelle Feringa (jelleferinga@gmail.com)
##
##This file is part of pythonOCC.
##
##pythonOCC is free software: you can redistribute it and/or modify
##it under the terms of the GNU Lesser General Public License as published by
##the Free Software Foundation, either version 3 of the License, or
##(at your option) any later version.
##
##pythonOCC is distributed in the hope that it will be useful,
##but WITHOUT ANY WARRANTY; without even the implied warranty of
##MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
##GNU Lesser General Public License for more details.
##
##You should have received a copy of the GNU Lesser General Public License
##along with pythonOCC. If not, see <http://www.gnu.org/licenses/>
from OCC.Core.BRep import BRep_Tool_Surface, BRep_Tool
from OCC.Core.BRepTopAdaptor import BRepTopAdaptor_FClass2d
from OCC.Core.Geom import Geom_Curve
from OCC.Core.GeomAPI import GeomAPI_ProjectPointOnSurf
from OCC.Core.GeomLib import GeomLib_IsPlanarSurface
from OCC.Core.TopAbs import TopAbs_IN
from OCC.Core.TopExp import topexp
from OCC.Core.TopoDS import TopoDS_Vertex, TopoDS_Face, TopoDS_Edge
from OCC.Core.GeomLProp import GeomLProp_SLProps
from OCC.Core.BRepTools import breptools_UVBounds
from OCC.Core.BRepAdaptor import BRepAdaptor_Surface, BRepAdaptor_HSurface
from OCC.Core.ShapeAnalysis import ShapeAnalysis_Surface
from OCC.Core.GeomProjLib import geomprojlib
from OCC.Core.Adaptor3d import Adaptor3d_IsoCurve
from OCC.Core.gp import gp_Pnt2d, gp_Dir
from .base import BaseObject
from .edge import Edge
from .Construct import TOLERANCE, to_adaptor_3d
from .Topology import Topo, WireExplorer
class DiffGeomSurface(object):
    """Differential-geometry helper for a Face: curvature, normal,
    tangent and radius-of-curvature queries on the face's surface."""

    def __init__(self, instance):
        # instance: the Face whose surface_handle is interrogated
        self.instance = instance
        self._curvature = None           # cached GeomLProp_SLProps evaluator
        self._curvature_initiated = False

    def curvature(self, u, v):
        '''returns the curvature evaluator at the u,v parameter

        The returned GeomLProp_SLProps object exposes the curvature types:
            gaussian
            minimum
            maximum
            mean
        '''
        if not self._curvature_initiated:
            # Build the evaluator lazily, once, then reuse it
            self._curvature = GeomLProp_SLProps(self.instance.surface_handle, u, v, 2, 1e-7)
            self._curvature_initiated = True
        _domain = self.instance.domain()
        if u in _domain or v in _domain:
            # u or v lies exactly on the domain boundary, where the local
            # properties may be undefined: nudge the parameter slightly inward
            print('<<<CORRECTING DOMAIN...>>>')
            div = 1000
            delta_u, delta_v = (_domain[0] - _domain[1])/div, (_domain[2] - _domain[3])/div
            if u in _domain:
                low, hi = u-_domain[0], u-_domain[1]
                if low < hi:
                    u = u - delta_u
                else:
                    u = u + delta_u
            if v in _domain:
                low, hi = v-_domain[2], v-_domain[3]
                if low < hi:
                    v = v - delta_v
                else:
                    v = v + delta_v
        # Bug fix: always move the cached evaluator to the requested point.
        # Previously SetParameters was only reached on the very first call,
        # so every later query at a different (u, v) silently returned the
        # properties of the first point.
        self._curvature.SetParameters(u, v)
        return self._curvature

    def gaussian_curvature(self, u, v):
        '''Gaussian curvature (product of principal curvatures) at u, v.'''
        return self.curvature(u, v).GaussianCurvature()

    def min_curvature(self, u, v):
        '''Minimum principal curvature at u, v.'''
        return self.curvature(u, v).MinCurvature()

    def mean_curvature(self, u, v):
        '''Mean curvature (average of principal curvatures) at u, v.'''
        return self.curvature(u, v).MeanCurvature()

    def max_curvature(self, u, v):
        '''Maximum principal curvature at u, v.'''
        return self.curvature(u, v).MaxCurvature()

    def normal(self, u, v):
        '''Surface normal at u, v; raises ValueError where it is undefined.'''
        # TODO: should make this return a gp_Vec
        curv = self.curvature(u, v)
        if curv.IsNormalDefined():
            return curv.Normal()
        else:
            raise ValueError('normal is not defined at u,v: {0}, {1}'.format(u, v))

    def tangent(self, u, v):
        '''U and V tangent directions at u, v, or (None, None) if undefined.'''
        dU, dV = gp_Dir(), gp_Dir()
        curv = self.curvature(u, v)
        if curv.IsTangentUDefined() and curv.IsTangentVDefined():
            curv.TangentU(dU), curv.TangentV(dV)
            return dU, dV
        else:
            return None, None

    def radius(self, u, v):
        '''Mean absolute radius of curvature at u, v.

        A zero principal curvature corresponds to an infinite radius and is
        represented as 0. before averaging (original behaviour, preserved).
        '''
        # TODO: SHOULD WE RETURN A SIGNED RADIUS? ( get rid of abs() )?
        try:
            _crv_min = 1./self.min_curvature(u, v)
        except ZeroDivisionError:
            _crv_min = 0.
        try:
            _crv_max = 1./self.max_curvature(u, v)
        except ZeroDivisionError:
            _crv_max = 0.
        return abs((_crv_min+_crv_max)/2.)
class Face(TopoDS_Face, BaseObject):
"""high level surface API
object is a Face if part of a Solid
otherwise the same methods do apply, apart from the topology obviously
"""
def __init__(self, face):
'''
'''
assert isinstance(face, TopoDS_Face), 'need a TopoDS_Face, got a %s' % face.__class__
assert not face.IsNull()
super(Face, self).__init__()
BaseObject.__init__(self, 'face')
# we need to copy the base shape using the following three
# lines
assert self.IsNull()
self.TShape(face.TShape())
self.Location(face.Location())
self.Orientation(face.Orientation())
assert not self.IsNull()
# cooperative classes
self.DiffGeom = DiffGeomSurface(self)
# STATE; whether cooperative classes are yet initialized
self._curvature_initiated = False
self._geometry_lookup_init = False
#===================================================================
# properties
#===================================================================
self._h_srf = None
self._srf = None
self._adaptor = None
self._adaptor_handle = None
self._classify_uv = None # cache the u,v classifier, no need to rebuild for every sample
self._topo = None
# aliasing of useful methods
def is_u_periodic(self):
return self.adaptor.IsUPeriodic()
def is_v_periodic(self):
return self.adaptor.IsVPeriodic()
def is_u_closed(self):
return self.adaptor.IsUClosed()
def is_v_closed(self):
return self.adaptor.IsVClosed()
def is_u_rational(self):
return self.adaptor.IsURational()
def is_v_rational(self):
return self.adaptor.IsVRational()
def u_degree(self):
return self.adaptor.UDegree()
def v_degree(self):
return self.adaptor.VDegree()
def u_continuity(self):
return self.adaptor.UContinuity()
def v_continuity(self):
return self.adaptor.VContinuity()
def domain(self):
'''the u,v domain of the curve
:return: UMin, UMax, VMin, VMax
'''
return breptools_UVBounds(self)
def mid_point(self):
"""
:return: the parameter at the mid point of the face,
and its corresponding gp_Pnt
"""
u_min, u_max, v_min, v_max = self.domain()
u_mid = (u_min + u_max) / 2.
v_mid = (v_min + v_max) / 2.
return ((u_mid, v_mid), self.adaptor.Value(u_mid, v_mid))
@property
def topo(self):
if self._topo is not None:
return self._topo
else:
self._topo = Topo(self)
return self._topo
@property
def surface(self):
if self._srf is None or self.is_dirty:
self._h_srf = BRep_Tool_Surface(self)
self._srf = self._h_srf#.GetObject()
return self._srf
    @property
    def surface_handle(self):
        """Handle of the Geom surface; delegates (re)building to `surface`."""
        if self._h_srf is None or self.is_dirty:
            self.surface  # force building handle
        return self._h_srf
@property
def adaptor(self):
if self._adaptor is not None and not self.is_dirty:
pass
else:
self._adaptor = BRepAdaptor_Surface(self)
self._adaptor_handle = BRepAdaptor_HSurface()
self._adaptor_handle.Set(self._adaptor)
return self._adaptor
@property
def adaptor_handle(self):
if self._adaptor_handle is not None and not self.is_dirty:
pass
else:
self.adaptor
return self._adaptor_handle
    def is_closed(self):
        """Return a (u_closed, v_closed) pair of booleans for the surface."""
        sa = ShapeAnalysis_Surface(self.surface_handle)
        # sa.GetBoxUF()
        return sa.IsUClosed(), sa.IsVClosed()
def is_planar(self, tol=TOLERANCE):
'''checks if the surface is planar within a tolerance
:return: bool, gp_Pln
'''
print(self.surface_handle)
is_planar_surface = GeomLib_IsPlanarSurface(self.surface_handle, tol)
return is_planar_surface.IsPlanar()
def is_trimmed(self):
"""
:return: True if the Wire delimiting the Face lies on the bounds
of the surface
if this is not the case, the wire represents a contour that delimits
the face [ think cookie cutter ]
and implies that the surface is trimmed
"""
_round = lambda x: round(x, 3)
a = map(_round, breptools_UVBounds(self))
b = map(_round, self.adaptor.Surface().Surface().GetObject().Bounds())
if a != b:
print('a,b', a, b)
return True
return False
def on_trimmed(self, u, v):
'''tests whether the surface at the u,v parameter has been trimmed
'''
if self._classify_uv is None:
self._classify_uv = BRepTopAdaptor_FClass2d(self, 1e-9)
uv = gp_Pnt2d(u, v)
if self._classify_uv.Perform(uv) == TopAbs_IN:
return True
else:
return False
    def parameter_to_point(self, u, v):
        '''returns the coordinate (gp_Pnt) at parameter u,v
        '''
        return self.surface.Value(u, v)
    def point_to_parameter(self, pt):
        '''
        returns the uv value of a point on a surface

        @param pt: a gp_Pnt assumed to lie on (or near) the surface;
            inverted within self.tolerance
        '''
        sas = ShapeAnalysis_Surface(self.surface_handle)
        uv = sas.ValueOfUV(pt, self.tolerance)
        return uv.Coord()
def continuity_edge_face(self, edge, face):
"""
compute the continuity between two faces at :edge:
:param edge: an Edge or TopoDS_Edge from :face:
:param face: a Face or TopoDS_Face
:return: bool, GeomAbs_Shape if it has continuity, otherwise
False, None
"""
bt = BRep_Tool()
if bt.HasContinuity(edge, self, face):
continuity = bt.Continuity(edge, self, face)
return True, continuity
else:
return False, None
#===========================================================================
# Surface.project
# project curve, point on face
#===========================================================================
    def project_vertex(self, pnt, tol=TOLERANCE):
        '''projects self with a point, curve, edge, face, solid
        method wraps dealing with the various topologies

        :param pnt: a gp_Pnt or TopoDS_Vertex to project onto the surface
        :param tol: projection tolerance
        :return: (uv, point) -- the lower-distance parameters and the
            nearest point on the surface
        '''
        if isinstance(pnt, TopoDS_Vertex):
            # unwrap the vertex into its underlying gp_Pnt
            pnt = BRep_Tool.Pnt(pnt)
        proj = GeomAPI_ProjectPointOnSurf(pnt, self.surface_handle, tol)
        uv = proj.LowerDistanceParameters()
        proj_pnt = proj.NearestPoint()
        return uv, proj_pnt
def project_curve(self, other):
# this way Geom_Circle and alike are valid too
if (isinstance(other, TopoDS_Edge) or
isinstance(other, Geom_Curve) or
issubclass(other, Geom_Curve)):
# convert edge to curve
first, last = topexp.FirstVertex(other), topexp.LastVertex(other)
lbound, ubound = BRep_Tool().Parameter(first, other), BRep_Tool().Parameter(last, other)
other = BRep_Tool.Curve(other, lbound, ubound).GetObject()
return geomprojlib.Project(other, self.surface_handle)
def project_edge(self, edg):
if hasattr(edg, 'adaptor'):
return self.project_curve(self, self.adaptor)
return self.project_curve(self, to_adaptor_3d(edg))
def iso_curve(self, u_or_v, param):
"""
get the iso curve from a u,v + parameter
:param u_or_v:
:param param:
:return:
"""
uv = 0 if u_or_v == 'u' else 1
iso = Adaptor3d_IsoCurve(self.adaptor_handle.GetHandle(), uv, param)
return iso
    def edges(self):
        # Ordered edges of the face's first wire, wrapped as Edge objects.
        return [Edge(i) for i in WireExplorer(next(self.topo.wires())).ordered_edges()]
    def __repr__(self):
        # name is assigned by BaseObject.__init__ ('face')
        return self.name

    def __str__(self):
        return self.__repr__()
if __name__ == "__main__":
    # smoke test: build a spherical face and exercise a couple of predicates
    from OCC.Core.BRepPrimAPI import BRepPrimAPI_MakeSphere
    sph = BRepPrimAPI_MakeSphere(1, 1).Face()
    fc = Face(sph)
    print(fc.is_trimmed())
    print(fc.is_planar())
| chenkianwee/envuo | py4design/py3dmodel/OCCUtils/face.py | Python | gpl-3.0 | 12,756 | [
"Gaussian"
] | 00b41ec0533aa39e5e65d9f552f1c2617acd552c35ba74b4269c0064c07e6cf0 |
""" A computing element class that uses sudo
"""
import os
import pwd
import stat
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities import DErrno
from DIRAC.Core.Utilities.Subprocess import shellCall
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
from DIRAC.Resources.Computing.ComputingElement import ComputingElement
__RCSID__ = "$Id$"
class SudoComputingElement(ComputingElement):
  """Computing element that runs each payload as a separate local user via sudo.

  The pilot user hands the payload proxy to the payload user through a
  per-user group (no root sudo needed), then launches the wrapper with
  ``sudo -u <payloadUser>``.
  """

  #############################################################################
  def __init__(self, ceUniqueID):
    """ Standard constructor.
    """
    super(SudoComputingElement, self).__init__(ceUniqueID)
    # counter of jobs submitted so far; also selects the next payload username
    self.submittedJobs = 0

  #############################################################################
  def _addCEConfigDefaults(self):
    """Method to make sure all necessary Configuration Parameters are defined
    """
    # Assure that any global parameters are loaded
    super(SudoComputingElement, self)._addCEConfigDefaults()

  #############################################################################
  def submitJob(self, executableFile, proxy, **kwargs):
    """ Method to submit job, overridden from super-class.

    :param executableFile: path of the wrapper script to execute
    :param proxy: payload proxy string, written to a file for the payload user
    :return: S_OK() on success, S_ERROR otherwise
    """
    self.log.verbose('Setting up proxy for payload')
    result = self.writeProxyToFile(proxy)
    if not result['OK']:
      return result
    payloadProxy = result['Value']
    if 'X509_USER_PROXY' not in os.environ:
      self.log.error('X509_USER_PROXY variable for pilot proxy not found in local environment')
      return S_ERROR(DErrno.EPROXYFIND, "X509_USER_PROXY not found")
    pilotProxy = os.environ['X509_USER_PROXY']
    self.log.info('Pilot proxy X509_USER_PROXY=%s' % pilotProxy)
    # See if a fixed value has been given
    payloadUsername = self.ceParameters.get('PayloadUser')
    if payloadUsername:
      self.log.info('Payload username %s from PayloadUser in ceParameters' % payloadUsername)
    else:
      # First username in the sequence to use when running payload job
      # If first is pltXXp00 then have pltXXp01, pltXXp02, ...
      try:
        baseUsername = self.ceParameters.get('BaseUsername')
        baseCounter = int(baseUsername[-2:])
        self.log.info("Base username from BaseUsername in ceParameters : %s" % baseUsername)
      except Exception:
        # BaseUsername missing or malformed: fall back to $USER + fixed suffix
        baseUsername = os.environ['USER'] + '00p00'
        baseCounter = 0
        self.log.info('Base username from $USER + 00p00 : %s' % baseUsername)
      # Next one in the sequence
      payloadUsername = baseUsername[:-2] + ('%02d' % (baseCounter + self.submittedJobs))
      self.log.info('Payload username set to %s using jobs counter' % payloadUsername)
    try:
      payloadUID = pwd.getpwnam(payloadUsername).pw_uid
      payloadGID = pwd.getpwnam(payloadUsername).pw_gid
    except KeyError:
      error = S_ERROR('User "' + str(payloadUsername) + '" does not exist!')
      return error
    self.log.verbose('Starting process for monitoring payload proxy')
    # executions=0 / elapsedTime=0 -> run indefinitely on proxyCheckPeriod
    gThreadScheduler.addPeriodicTask(self.proxyCheckPeriod, self.monitorProxy,
                                     taskArgs=(pilotProxy, payloadProxy, payloadUsername, payloadUID, payloadGID),
                                     executions=0, elapsedTime=0)
    # Submit job
    self.log.info('Changing permissions of executable (%s) to 0755' % executableFile)
    try:
      os.chmod(os.path.abspath(executableFile), stat.S_IRWXU |
               stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
    except OSError as x:
      # best effort: log and continue, sudoExecute will surface a real failure
      self.log.error('Failed to change permissions of executable to 0755 with exception',
                     '\n%s' % (x))
    result = self.sudoExecute(os.path.abspath(executableFile), payloadProxy, payloadUsername, payloadUID, payloadGID)
    if not result['OK']:
      self.log.error('Failed sudoExecute', result)
      return result
    self.log.debug('Sudo CE result OK')
    self.submittedJobs += 1
    return S_OK()

  #############################################################################
  def sudoExecute(self, executableFile, payloadProxy, payloadUsername, payloadUID, payloadGID):
    """Run sudo with checking of the exit status code.

    :param executableFile: absolute path of the wrapper to run
    :param payloadProxy: path of the pilot-owned proxy file to hand over
    :param payloadUsername: local account to run the payload as
    :param payloadUID: uid of that account (used for /tmp/x509up_uNNNN)
    :param payloadGID: per-user group gid used for the file giveaway
    """
    # We now implement a file giveaway using groups, to avoid any need to sudo to root.
    # Each payload user must have their own group. The pilot user must be a member
    # of all of these groups. This allows the pilot user to set the group of the
    # payloadProxy file to be that of the payload user. The payload user can then
    # read it and make a copy of it (/tmp/x509up_uNNNN) that it owns. Some grid
    # commands check that the proxy is owned by the current user so the copy stage
    # is necessary.
    # 1) Make sure the payload user can read its proxy via its per-user group
    os.chown(payloadProxy, -1, payloadGID)
    os.chmod(payloadProxy, stat.S_IRUSR + stat.S_IWUSR + stat.S_IRGRP)
    # 2) Now create a copy of the proxy owned by the payload user
    result = shellCall(0, '/usr/bin/sudo -u %s sh -c "cp -f %s /tmp/x509up_u%d ; chmod 0400 /tmp/x509up_u%d"' %
                       (payloadUsername, payloadProxy, payloadUID, payloadUID), callbackFunction=self.sendOutput)
    # 3) Make sure the current directory is +rwx by the pilot's group
    #    (needed for InstallDIRAC but not for LHCbInstallDIRAC, for example)
    os.chmod('.', os.stat('.').st_mode | stat.S_IRWXG)
    # Run the executable (the wrapper in fact)
    # environment is passed explicitly because sudo resets it
    cmd = "/usr/bin/sudo -u %s " % payloadUsername
    cmd += "PATH=$PATH "
    cmd += "DIRACSYSCONFIG=/scratch/%s/pilot.cfg " % os.environ['USER']
    cmd += "LD_LIBRARY_PATH=$LD_LIBRARY_PATH "
    cmd += "PYTHONPATH=$PYTHONPATH "
    cmd += "X509_CERT_DIR=$X509_CERT_DIR "
    cmd += "X509_USER_PROXY=/tmp/x509up_u%d sh -c '%s'" % (payloadUID, executableFile)
    self.log.info('CE submission command is: %s' % cmd)
    result = shellCall(0, cmd, callbackFunction=self.sendOutput)
    if not result['OK']:
      result['Value'] = (0, '', '')
      return result
    resultTuple = result['Value']
    status = resultTuple[0]
    stdOutput = resultTuple[1]
    stdError = resultTuple[2]
    self.log.info("Status after the sudo execution is %s" % str(status))
    if status > 128:
      # statuses > 128 conventionally mean "killed by signal (status-128)"
      error = S_ERROR(status)
      error['Value'] = (status, stdOutput, stdError)
      return error
    return result

  #############################################################################
  def getCEStatus(self):
    """ Method to return information on running and pending jobs.
    """
    # this inner CE does not track jobs itself; report empty counters
    result = S_OK()
    result['SubmittedJobs'] = 0
    result['RunningJobs'] = 0
    result['WaitingJobs'] = 0
    return result

  #############################################################################
  def monitorProxy(self, pilotProxy, payloadProxy, payloadUsername, payloadUID, payloadGID):
    """ Monitor the payload proxy and renew as necessary.
    """
    retVal = self._monitorProxy(pilotProxy, payloadProxy)
    if not retVal['OK']:
      # Failed to renew the proxy, nothing else to be done
      return retVal
    if not retVal['Value']:
      # No need to renew the proxy, nothing else to be done
      return retVal
    self.log.info('Re-executing sudo to make renewed payload proxy available as before')
    # New version of the proxy file, so we have to do the copy again
    # 1) Make sure the payload user can read its proxy via its per-user group
    os.chown(payloadProxy, -1, payloadGID)
    os.chmod(payloadProxy, stat.S_IRUSR + stat.S_IWUSR + stat.S_IRGRP)
    # 2) Now recreate the copy of the proxy owned by the payload user
    cmd = '/usr/bin/sudo -u %s sh -c "cp -f %s /tmp/x509up_u%d ; chmod 0400 /tmp/x509up_u%d"' % (payloadUsername,
                                                                                                 payloadProxy,
                                                                                                 payloadUID,
                                                                                                 payloadUID)
    result = shellCall(0,
                       cmd,
                       callbackFunction=self.sendOutput)
    if not result['OK']:
      # best-effort: log the failure, the next periodic run will retry
      self.log.error('Could not recreate the copy of the proxy', "CMD: %s; %s" % (cmd, result['Message']))
    return S_OK('Proxy checked')
| fstagni/DIRAC | Resources/Computing/SudoComputingElement.py | Python | gpl-3.0 | 8,268 | [
"DIRAC"
] | e05c4ab6bbbc1691551b05cd971fc079126ee119a260437d100e967988eafd37 |
from __future__ import print_function
from astrometry.util.fits import *
import pylab as plt
import numpy as np
from glob import glob
from astrometry.util.plotutils import *
from astrometry.libkd.spherematch import *
from astrometry.util.resample import *
from astrometry.util.util import *
# Compare two reductions (DR5 subsets 60 and 67) of the COSMOS field.
ps = PlotSequence('cosmos')
baseA = 'cosmos-dr5-60/'
baseB = 'cosmos-dr5-67/'
Atxt = '60'
Btxt = '67'
# Read and merge all per-brick tractor catalogs, keeping primary sources only.
TA = merge_tables([fits_table(fn) for fn in glob(baseA + 'tractor/*/tractor-*.fits')])
print('Total of', len(TA), 'sources in 60')
TA.cut(TA.brick_primary)
print(len(TA), 'brick primary')
TB = merge_tables([fits_table(fn) for fn in glob(baseB + 'tractor/*/tractor-*.fits')])
print('Total of', len(TB), 'sources in 67')
TB.cut(TB.brick_primary)
print(len(TB), 'brick primary')
# Bounding box covering both catalogs, used for the low-res depth maps below.
ramin = min(TA.ra.min(), TB.ra.min())
ramax = max(TA.ra.max(), TB.ra.max())
decmin = min(TA.dec.min(), TB.dec.min())
decmax = max(TA.dec.max(), TB.dec.max())
# Create low-res depth maps
# 10x binned pixel scale (native 0.262 arcsec/pix), in degrees per pixel.
pixsc = 10. * 0.262/3600.
rc,dc = (ramin+ramax)/2., (decmin+decmax)/2.
# cos(dec) corrects the RA span to an angular width on the sky
w = int((ramax - ramin) * np.cos(np.deg2rad(dc)) / pixsc)
h = int((decmax - decmin) / pixsc)
wcs = Tan(rc, dc, w/2., h/2., -pixsc, 0., 0., pixsc, float(w), float(h))
#print('WCS:', wcs)
# Resample each reduction's PSF/galaxy depth maps onto the common low-res
# WCS, convert inverse-variance to 5-sigma AB magnitudes, and plot them.
#for band in ['g','r','z']:
for band in ['g']:
    psfdepthA = np.zeros(wcs.shape, np.float32)
    psfdepthB = np.zeros(wcs.shape, np.float32)
    for fn in glob(baseA + 'coadd/*/*/legacysurvey-*-depth-%s.fits*' % band):
        print('Reading', fn)
        iwcs = Tan(fn, 1)
        Yo,Xo,Yi,Xi,nil = resample_with_wcs(wcs, iwcs)
        dmap = fitsio.read(fn)
        #I = np.flatnonzero(np.isfinite(dmap) * (dmap > 0))
        #print(len(I), 'finite & positive values')
        psfdepthA[Yo,Xo] = dmap[Yi,Xi]
    for fn in glob(baseB + 'coadd/*/*/legacysurvey-*-depth-%s.fits*' % band):
        print('Reading', fn)
        iwcs = Tan(fn, 1)
        Yo,Xo,Yi,Xi,nil = resample_with_wcs(wcs, iwcs)
        dmap = fitsio.read(fn)
        #I = np.flatnonzero(np.isfinite(dmap) * (dmap > 0))
        #print(len(I), 'finite & positive values')
        psfdepthB[Yo,Xo] = dmap[Yi,Xi]
    galdepthA = np.zeros(wcs.shape, np.float32)
    galdepthB = np.zeros(wcs.shape, np.float32)
    for fn in glob(baseA + 'coadd/*/*/legacysurvey-*-galdepth-%s.fits*' % band):
        print('Reading', fn)
        iwcs = Tan(fn, 1)
        Yo,Xo,Yi,Xi,nil = resample_with_wcs(wcs, iwcs)
        dmap = fitsio.read(fn)
        #I = np.flatnonzero(np.isfinite(dmap) * (dmap > 0))
        #print(len(I), 'finite & positive values')
        galdepthA[Yo,Xo] = dmap[Yi,Xi]
    for fn in glob(baseB + 'coadd/*/*/legacysurvey-*-galdepth-%s.fits*' % band):
        print('Reading', fn)
        iwcs = Tan(fn, 1)
        Yo,Xo,Yi,Xi,nil = resample_with_wcs(wcs, iwcs)
        dmap = fitsio.read(fn)
        #I = np.flatnonzero(np.isfinite(dmap) * (dmap > 0))
        #print(len(I), 'finite & positive values')
        galdepthA[Yo,Xo] = dmap[Yi,Xi]
    print('PsfdepthA (iv)', psfdepthA.min(), psfdepthA.max())
    print('PsfdepthB (iv)', psfdepthB.min(), psfdepthB.max())
    # convert inverse-variance depth to 5-sigma AB magnitude
    psfdepthA = -2.5 * (np.log10(5./np.sqrt(psfdepthA)) - 9)
    psfdepthB = -2.5 * (np.log10(5./np.sqrt(psfdepthB)) - 9)
    print('PsfdepthA', psfdepthA.min(), psfdepthA.max())
    print('PsfdepthB', psfdepthB.min(), psfdepthB.max())
    galdepthA = -2.5 * (np.log10(5./np.sqrt(galdepthA)) - 9)
    galdepthB = -2.5 * (np.log10(5./np.sqrt(galdepthB)) - 9)
    print('GaldepthA', galdepthA.min(), galdepthA.max())
    print('GaldepthB', galdepthB.min(), galdepthB.max())
    ima = dict(interpolation='nearest', origin='lower',
               extent=[ramax,ramin,decmin,decmax], vmin=20.0, vmax=24.5)
    plt.clf()
    plt.subplot(1,2,1)
    plt.imshow(psfdepthA, **ima)
    plt.title(Atxt)
    plt.subplot(1,2,2)
    plt.imshow(psfdepthB, **ima)
    plt.title(Btxt)
    plt.suptitle('PSF Depth maps (%s)' % band)
    ps.savefig()
    plt.clf()
    plt.subplot(1,2,1)
    plt.imshow(galdepthA, **ima)
    plt.title(Atxt)
    plt.subplot(1,2,2)
    plt.imshow(galdepthB, **ima)
    plt.title(Btxt)
    plt.suptitle('Galaxy Depth maps (%s)' % band)
    ps.savefig()
# dd = np.append(galdepthA.ravel(), galdepthB.ravel())
# dd = dd[np.isfinite(dd)]
# thresh = np.percentile(dd, 10)
# print('Depth threshold:', thresh)
# fixed g-band galaxy-depth threshold (mag) for the good-region cut below
thresh = 24.0
# Keep only sources that fall on pixels where BOTH reductions are deeper
# than `thresh`, so the comparison is not biased by uneven coverage.
hh,ww = wcs.shape
ok,xx,yy = wcs.radec2pixelxy(TA.ra, TA.dec)
# FITS-convention 1-indexed pixel coords -> clipped 0-indexed array indices
xx = np.clip((np.round(xx) - 1), 0, ww-1).astype(int)
yy = np.clip((np.round(yy) - 1), 0, hh-1).astype(int)
I = np.flatnonzero((galdepthA[yy,xx] > thresh) * (galdepthB[yy,xx] > thresh))
print(len(I), 'of', len(TA), 'sources in A are in good-depth regions')
TA.cut(I)
ok,xx,yy = wcs.radec2pixelxy(TB.ra, TB.dec)
xx = np.clip((np.round(xx) - 1), 0, ww-1).astype(int)
yy = np.clip((np.round(yy) - 1), 0, hh-1).astype(int)
I = np.flatnonzero((galdepthA[yy,xx] > thresh) * (galdepthB[yy,xx] > thresh))
print(len(I), 'of', len(TB), 'sources in B are in good-depth regions')
TB.cut(I)
# Histograms comparing the two reductions: depth-map values, source
# magnitudes, and per-source depths.
ha = dict(range=(18,27), bins=50, histtype='stepfilled', alpha=0.1)
hb = dict(range=(18,27), bins=50, histtype='stepfilled', alpha=0.1)
plt.clf()
# clamp at 18 so the underflow piles into the first bin instead of vanishing
plt.hist(np.maximum(psfdepthA.ravel(), 18), color='b', label=Atxt, **ha)
plt.hist(np.maximum(psfdepthB.ravel(), 18), color='r', label=Btxt, **hb)
plt.xlim(18,27)
plt.legend()
plt.title('PSF depth map values (g mag)')
ps.savefig()
plt.clf()
plt.hist(np.maximum(galdepthA.ravel(), 18), color='b', label=Atxt, **ha)
plt.hist(np.maximum(galdepthB.ravel(), 18), color='r', label=Btxt, **hb)
plt.xlim(18,27)
plt.legend()
plt.title('Galaxy depth map values (g mag)')
ps.savefig()
# nanomaggies -> AB magnitudes for fluxes and per-source depths
TA.mag_g = -2.5 * (np.log10(TA.flux_g) - 9)
TB.mag_g = -2.5 * (np.log10(TB.flux_g) - 9)
TA.psfdepth_mag_g = -2.5 * (np.log10(5./np.sqrt(TA.psfdepth_g)) - 9)
TB.psfdepth_mag_g = -2.5 * (np.log10(5./np.sqrt(TB.psfdepth_g)) - 9)
TA.galdepth_mag_g = -2.5 * (np.log10(5./np.sqrt(TA.galdepth_g)) - 9)
TB.galdepth_mag_g = -2.5 * (np.log10(5./np.sqrt(TB.galdepth_g)) - 9)
ha = dict(range=(18,27), bins=50, histtype='stepfilled', alpha=0.1)
hb = dict(range=(18,27), bins=50, histtype='stepfilled', alpha=0.1)
ha2 = dict(range=(18,27), bins=50, histtype='step', alpha=0.5)
hb2 = dict(range=(18,27), bins=50, histtype='step', alpha=0.5)
plt.clf()
plt.hist(TA.mag_g, color='b', label=Atxt, **ha)
plt.hist(TA.mag_g, color='b', **ha2)
plt.hist(TB.mag_g, color='r', label=Btxt, **hb)
plt.hist(TB.mag_g, color='r', **hb2)
plt.xlim(18,27)
plt.legend()
plt.xlabel('All sources: g mag')
ps.savefig()
ha = dict(range=(23,25), bins=50, histtype='stepfilled', alpha=0.1)
hb = dict(range=(23,25), bins=50, histtype='stepfilled', alpha=0.1)
plt.clf()
plt.hist(TA.psfdepth_mag_g, color='b', label=Atxt, **ha)
plt.hist(TB.psfdepth_mag_g, color='r', label=Btxt, **hb)
plt.xlim(23,25)
plt.legend()
plt.title('PSF depth for sources (g mag)')
ps.savefig()
plt.clf()
plt.hist(TA.galdepth_mag_g, color='b', label=Atxt, **ha)
plt.hist(TB.galdepth_mag_g, color='r', label=Btxt, **hb)
plt.xlim(23,25)
plt.legend()
plt.title('Gal depth for sources (g mag)')
ps.savefig()
# Sky-density maps of all sources, then cross-match the two catalogs at 1"
# and map the sources that have no counterpart in the other reduction.
ha = dict(range=((ramin,ramax),(decmin,decmax)), doclf=False,
          docolorbar=False, imshowargs=dict(vmin=0, vmax=14))
plt.clf()
plt.subplot(1,2,1)
plothist(TA.ra, TA.dec, 200, **ha)
plt.title(Atxt)
plt.subplot(1,2,2)
plothist(TB.ra, TB.dec, 200, **ha)
plt.title(Btxt)
plt.suptitle('All sources')
ps.savefig()
# 1-arcsecond match radius
I,J,d = match_radec(TA.ra, TA.dec, TB.ra, TB.dec, 1./3600.)
unmatchedA = np.ones(len(TA), bool)
unmatchedB = np.ones(len(TB), bool)
unmatchedA[I] = False
unmatchedB[J] = False
ha = dict(range=((ramin,ramax),(decmin,decmax)), doclf=False,
          docolorbar=False, imshowargs=dict(vmin=0, vmax=5))
plt.clf()
plt.subplot(1,2,1)
plothist(TA.ra[unmatchedA], TA.dec[unmatchedA], 200, **ha)
plt.title(Atxt)
plt.subplot(1,2,2)
plothist(TB.ra[unmatchedB], TB.dec[unmatchedB], 200, **ha)
plt.title(Btxt)
plt.suptitle('Un-matched sources')
ps.savefig()
| legacysurvey/pipeline | py/legacyanalysis/cosmos-6x.py | Python | gpl-2.0 | 7,883 | [
"Galaxy"
] | dd12a0bc88be022b54d670765b9fc4730daac3c36ea80d175113da19310a8309 |
#!/usr/bin/env python
# CREATED:2014-01-18 14:09:05 by Brian McFee <brm2132@columbia.edu>
# unit tests for util routines
# Disable cache: drop the env var if present.  Using ``pop`` with a default
# replaces the original bare ``except:`` clause, which would also have
# swallowed SystemExit/KeyboardInterrupt.
import os

os.environ.pop("LIBROSA_CACHE_DIR", None)
import platform
import numpy as np
import scipy.sparse
import pytest
import warnings
import librosa
from test_core import srand
np.set_printoptions(precision=3)
# TODO: remove at 0.9
def test_example_audio_file():
    """Deprecated example audio file must still exist on disk."""
    assert os.path.exists(librosa.util.example_audio_file())
@pytest.mark.parametrize("frame_length", [4, 8])
@pytest.mark.parametrize("hop_length", [2, 4])
@pytest.mark.parametrize("y", [np.random.randn(32)])
@pytest.mark.parametrize("axis", [0, -1])
def test_frame1d(frame_length, hop_length, axis, y):
    """Each frame of a 1-d signal must equal the matching signal slice."""
    y_frame = librosa.util.frame(
        y, frame_length=frame_length, hop_length=hop_length, axis=axis
    )
    # normalize orientation so frames are always rows
    if axis == -1:
        y_frame = y_frame.T
    for i in range(y_frame.shape[0]):
        assert np.allclose(
            y_frame[i], y[i * hop_length : (i * hop_length + frame_length)]
        )
@pytest.mark.parametrize("frame_length", [4, 8])
@pytest.mark.parametrize("hop_length", [2, 4])
@pytest.mark.parametrize(
    "y, axis",
    [
        (np.asfortranarray(np.random.randn(16, 32)), -1),
        (np.ascontiguousarray(np.random.randn(16, 32)), 0),
    ],
)
def test_frame2d(frame_length, hop_length, axis, y):
    """Framing a 2-d array must slice along the requested axis."""
    y_frame = librosa.util.frame(
        y, frame_length=frame_length, hop_length=hop_length, axis=axis
    )
    # normalize orientation so frames index the first dimension
    if axis == -1:
        y_frame = y_frame.T
        y = y.T
    for i in range(y_frame.shape[0]):
        assert np.allclose(
            y_frame[i], y[i * hop_length : (i * hop_length + frame_length)]
        )
def test_frame_0stride():
    """Framing a 1-d signal and its singleton-row 2-d views must agree."""
    signal = np.arange(10)
    reference = librosa.util.frame(signal, 3, 1)
    for view in (signal[np.newaxis], np.atleast_2d(signal)):
        assert np.allclose(reference, librosa.util.frame(view, 3, 1))
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_frame_badtype():
    """Framing a plain list (not an ndarray) must raise ParameterError."""
    librosa.util.frame([1, 2, 3, 4], frame_length=2, hop_length=1)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("axis", [0, -1])
@pytest.mark.parametrize("x", [np.arange(16)])
def test_frame_too_short(x, axis):
    """frame_length longer than the signal must raise ParameterError."""
    librosa.util.frame(x, frame_length=17, hop_length=1, axis=axis)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_frame_bad_hop():
    """hop_length of zero must raise ParameterError."""
    librosa.util.frame(np.arange(16), frame_length=4, hop_length=0)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("axis", [1, 2])
def test_frame_bad_axis(axis):
    """Framing along a non-leading/non-trailing axis must raise."""
    librosa.util.frame(np.zeros((3, 3, 3)), frame_length=2, hop_length=1, axis=axis)
@pytest.mark.parametrize(
    "x_bad, axis",
    [(np.zeros((4, 10), order="C"), -1), (np.zeros((4, 10), order="F"), 0)],
)
def test_frame_bad_contiguity(x_bad, axis):
    """Wrong memory layout must warn, but still produce the same frames."""
    # Populate fixture with random data
    x_bad += np.random.randn(*x_bad.shape)
    # And make a contiguous copy of it
    if axis == 0:
        x_good = np.ascontiguousarray(x_bad)
    else:
        x_good = np.asfortranarray(x_bad)
    # Verify that the aligned data is good
    assert np.allclose(x_bad, x_good)
    # The test here checks two things:
    #   1) that output is identical if we provide properly contiguous input
    #   2) that a warning is issued if the input is not properly contiguous
    x_good_f = librosa.util.frame(x_good, frame_length=2, hop_length=1, axis=axis)
    with pytest.warns(UserWarning):
        x_bad_f = librosa.util.frame(x_bad, frame_length=2, hop_length=1, axis=axis)
    assert np.allclose(x_good_f, x_bad_f)
@pytest.mark.parametrize("y", [np.ones((16,)), np.ones((16, 16))])
@pytest.mark.parametrize("m", [0, 10])
@pytest.mark.parametrize("axis", [0, -1])
@pytest.mark.parametrize("mode", ["constant", "edge", "reflect"])
def test_pad_center(y, m, axis, mode):
    """The original data must sit centered inside the padded output."""
    n = m + y.shape[axis]
    y_out = librosa.util.pad_center(y, n, axis=axis, mode=mode)
    n_len = y.shape[axis]
    n_pad = int((n - n_len) / 2)
    # slice out the center of the padded array and compare to the input
    eq_slice = [slice(None)] * y.ndim
    eq_slice[axis] = slice(n_pad, n_pad + n_len)
    assert np.allclose(y, y_out[tuple(eq_slice)])
@pytest.mark.parametrize("y", [np.ones((16,)), np.ones((16, 16))])
@pytest.mark.parametrize("n", [0, 10])
@pytest.mark.parametrize("axis", [0, -1])
@pytest.mark.parametrize("mode", ["constant", "edge", "reflect"])
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_pad_center_fail(y, n, axis, mode):
    """Target size smaller than the input must raise ParameterError."""
    librosa.util.pad_center(y, n, axis=axis, mode=mode)
@pytest.mark.parametrize("y", [np.ones((16,)), np.ones((16, 16))])
@pytest.mark.parametrize("m", [-5, 0, 5])
@pytest.mark.parametrize("axis", [0, -1])
def test_fix_length(y, m, axis):
    """fix_length must pad (growing) or truncate (shrinking) along axis."""
    n = m + y.shape[axis]
    y_out = librosa.util.fix_length(y, n, axis=axis)
    eq_slice = [slice(None)] * y.ndim
    eq_slice[axis] = slice(y.shape[axis])
    if n > y.shape[axis]:
        # grown: the prefix must equal the original input
        assert np.allclose(y, y_out[tuple(eq_slice)])
    else:
        # shrunk (or equal): the output is a prefix of the original
        assert np.allclose(y[tuple(eq_slice)], y)
@pytest.mark.parametrize("frames", [np.arange(20, 100, step=15)])
@pytest.mark.parametrize("x_min", [0, 20])
@pytest.mark.parametrize("x_max", [20, 70, 120])
@pytest.mark.parametrize("pad", [False, True])
def test_fix_frames(frames, x_min, x_max, pad):
    """Output must be clipped to [x_min, x_max], with endpoints when pad."""
    f_fix = librosa.util.fix_frames(frames, x_min=x_min, x_max=x_max, pad=pad)
    if x_min is not None:
        if pad:
            assert f_fix[0] == x_min
        assert np.all(f_fix >= x_min)
    if x_max is not None:
        if pad:
            assert f_fix[-1] == x_max
        assert np.all(f_fix <= x_max)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("frames", [np.arange(-20, 100)])
@pytest.mark.parametrize("x_min", [None, 0, 20])
@pytest.mark.parametrize("x_max", [None, 0, 20])
@pytest.mark.parametrize("pad", [False, True])
def test_fix_frames_fail_negative(frames, x_min, x_max, pad):
    """Negative frame indices must raise ParameterError."""
    librosa.util.fix_frames(frames, x_min, x_max, pad)
@pytest.mark.parametrize("norm", [np.inf, -np.inf, 0, 0.5, 1.0, 2.0, None])
@pytest.mark.parametrize(
    "ndims,axis",
    [(1, 0), (1, -1), (2, 0), (2, 1), (2, -1), (3, 0), (3, 1), (3, 2), (3, -1)],
)
def test_normalize(ndims, norm, axis):
    """Normalized slices must have unit norm of the requested order."""
    srand()
    X = np.random.randn(*([4] * ndims))
    X_norm = librosa.util.normalize(X, norm=norm, axis=axis)
    # Shape and dtype checks
    assert X_norm.dtype == X.dtype
    assert X_norm.shape == X.shape
    if norm is None:
        # norm=None is a no-op
        assert np.allclose(X, X_norm)
        return
    X_norm = np.abs(X_norm)
    if norm == np.inf:
        values = np.max(X_norm, axis=axis)
    elif norm == -np.inf:
        values = np.min(X_norm, axis=axis)
    elif norm == 0:
        # XXX: normalization here isn't quite right
        values = np.ones(1)
    else:
        values = np.sum(X_norm ** norm, axis=axis) ** (1.0 / norm)
    assert np.allclose(values, np.ones_like(values))
@pytest.mark.parametrize("norm", ["inf", -0.5, -2])
@pytest.mark.parametrize("X", [np.ones((3, 3))])
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_normalize_badnorm(X, norm):
    """Unsupported norm orders must raise ParameterError."""
    librosa.util.normalize(X, norm=norm)
@pytest.mark.parametrize("badval", [np.nan, np.inf, -np.inf])
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_normalize_bad_input(badval):
    """Non-finite values in the input must raise ParameterError."""
    X = np.ones((3, 3))
    X[0] = badval
    librosa.util.normalize(X, norm=np.inf, axis=0)
@pytest.mark.parametrize("fill", [7, "foo"])
@pytest.mark.parametrize("X", [np.ones((2, 2))])
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_normalize_badfill(X, fill):
    """Non-boolean fill values must raise ParameterError."""
    librosa.util.normalize(X, fill=fill)
@pytest.mark.parametrize("x", [np.asarray([[0, 1, 2, 3]])])
@pytest.mark.parametrize(
    "threshold, result",
    [
        (None, [[0, 1, 1, 1]]),
        (1, [[0, 1, 1, 1]]),
        (2, [[0, 1, 1, 1]]),
        (3, [[0, 1, 2, 1]]),
        (4, [[0, 1, 2, 3]]),
    ],
)
def test_normalize_threshold(x, threshold, result):
    """Columns whose norm is below threshold must pass through unchanged."""
    assert np.allclose(librosa.util.normalize(x, threshold=threshold), result)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("x", [np.asarray([[0, 1, 2, 3]])])
@pytest.mark.parametrize("threshold", [0, -1])
def test_normalize_threshold_fail(x, threshold):
    """Non-positive thresholds must raise ParameterError."""
    librosa.util.normalize(x, threshold=threshold)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_normalize_fill_l0():
    """fill=True combined with the l0 pseudo-norm must raise."""
    X = np.ones((2, 2))
    librosa.util.normalize(X, fill=True, norm=0)
@pytest.mark.parametrize("norm", [1, 2, np.inf])
@pytest.mark.parametrize("X", [np.zeros((3, 3))])
def test_normalize_fill_allaxes(X, norm):
    """With axis=None and fill, a zero matrix must be filled to unit norm."""
    Xn = librosa.util.normalize(X, fill=True, axis=None, norm=norm)
    if norm is np.inf:
        assert np.allclose(Xn, 1)
    else:
        assert np.allclose(np.sum(Xn ** norm) ** (1.0 / norm), 1)
@pytest.mark.parametrize("norm", [1, 2, np.inf])
@pytest.mark.parametrize("X", [np.zeros((3, 3))])
def test_normalize_nofill(X, norm):
    """With fill disabled, an all-zero input must remain all-zero."""
    result = librosa.util.normalize(X, fill=False, norm=norm)
    assert np.allclose(result, 0)
@pytest.mark.parametrize("X", [np.asarray([[0.0, 1], [0, 1]])])
@pytest.mark.parametrize("norm,value", [(1, 0.5), (2, np.sqrt(2) / 2), (np.inf, 1)])
@pytest.mark.parametrize("threshold", [0.5, 2])
def test_normalize_fill(X, threshold, norm, value):
    """Below-threshold columns must be replaced by the unit-norm fill value."""
    Xn = librosa.util.normalize(X, fill=True, norm=norm, threshold=threshold)
    assert np.allclose(Xn, value)
@pytest.mark.parametrize("ndim", [1, 3])
@pytest.mark.parametrize("axis", [0, 1, -1])
@pytest.mark.parametrize("index", [False, True])
@pytest.mark.parametrize("value", [None, np.min, np.mean, np.max])
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_axis_sort_badndim(ndim, axis, index, value):
    """axis_sort only supports 2-d input; other ranks must raise."""
    data = np.zeros([2] * ndim)
    librosa.util.axis_sort(data, axis=axis, index=index, value=value)
@pytest.mark.parametrize("ndim", [2])
@pytest.mark.parametrize("axis", [0, 1, -1])
@pytest.mark.parametrize("index", [False, True])
@pytest.mark.parametrize("value", [None, np.min, np.mean, np.max])
def test_axis_sort(ndim, axis, index, value):
    """Sorting must reorder slices so the summary values are ascending."""
    srand()
    data = np.random.randn(*([10] * ndim))
    if index:
        # the returned index must reproduce the sorted array from the input
        Xsorted, idx = librosa.util.axis_sort(data, axis=axis, index=index, value=value)
        cmp_slice = [slice(None)] * ndim
        cmp_slice[axis] = idx
        assert np.allclose(data[tuple(cmp_slice)], Xsorted)
    else:
        Xsorted = librosa.util.axis_sort(data, axis=axis, index=index, value=value)
    compare_axis = np.mod(1 - axis, 2)
    if value is None:
        # default summary statistic used by axis_sort
        value = np.argmax
    sort_values = value(Xsorted, axis=compare_axis)
    assert np.allclose(sort_values, np.sort(sort_values))
@pytest.mark.parametrize(
    "int_from, int_to",
    [
        (np.asarray([[0, 2], [0, 4], [3, 6]]), np.zeros((0, 2), dtype=int)),
        (np.zeros((0, 2), dtype=int), np.asarray([[0, 2], [0, 4], [3, 6]])),
    ],
)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_match_intervals_empty(int_from, int_to):
    """Matching with an empty interval set on either side must raise."""
    librosa.util.match_intervals(int_from, int_to)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_match_intervals_strict_fail():
    """Strict matching must raise when an interval has no overlap."""
    int_from = np.asarray([[0, 3], [2, 4], [5, 7]])
    int_to = np.asarray([[0, 2], [0, 4]])
    librosa.util.match_intervals(int_from, int_to, strict=True)
@pytest.mark.parametrize("int_from", [np.asarray([[0, 3], [2, 4], [5, 7]])])
@pytest.mark.parametrize("int_to", [np.asarray([[0, 2], [0, 4], [3, 6]])])
@pytest.mark.parametrize("matches", [np.asarray([1, 1, 2])])
def test_match_intervals_strict(int_from, int_to, matches):
    """Strict matching must produce the expected index mapping."""
    test_matches = librosa.util.match_intervals(int_from, int_to, strict=True)
    assert np.array_equal(matches, test_matches)
@pytest.mark.parametrize("int_from", [np.asarray([[0, 3], [2, 4], [5, 7]])])
@pytest.mark.parametrize(
    "int_to,matches",
    [
        (np.asarray([[0, 2], [0, 4], [3, 6]]), np.asarray([1, 1, 2])),
        # non-overlapping intervals fall back to the nearest candidate
        (np.asarray([[0, 2], [0, 4]]), np.asarray([1, 1, 1])),
    ],
)
def test_match_intervals_nonstrict(int_from, int_to, matches):
    """Non-strict matching must map every interval, overlapping or not."""
    test_matches = librosa.util.match_intervals(int_from, int_to, strict=False)
    assert np.array_equal(matches, test_matches)
@pytest.mark.parametrize("n", [1, 5, 20, 100])
@pytest.mark.parametrize("m", [1, 5, 20, 100])
def test_match_events(n, m):
    """Every event must be matched to its nearest neighbor in the target."""
    srand()
    ev1 = np.abs(np.random.randn(n))
    ev2 = np.abs(np.random.randn(m))
    match = librosa.util.match_events(ev1, ev2)
    for i in range(len(match)):
        # no candidate may be strictly closer than the chosen match
        values = np.asarray([np.abs(ev1[i] - e2) for e2 in ev2])
        assert not np.any(values < values[match[i]])
@pytest.mark.parametrize(
    "ev1,ev2", [(np.array([]), np.arange(5)), (np.arange(5), np.array([]))]
)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_match_events_failempty(ev1, ev2):
    """Matching with an empty event array on either side must raise."""
    librosa.util.match_events(ev1, ev2)
@pytest.mark.parametrize("events_from", [np.asarray([5, 15, 25])])
@pytest.mark.parametrize("events_to", [np.asarray([0, 10, 20, 30])])
@pytest.mark.parametrize(
    "left,right,target", [(False, True, [10, 20, 30]), (True, False, [0, 10, 20])]
)
def test_match_events_onesided(events_from, events_to, left, right, target):
    """One-sided matching must only pick targets on the allowed side."""
    events_from = np.asarray(events_from)
    events_to = np.asarray(events_to)
    match = librosa.util.match_events(events_from, events_to, left=left, right=right)
    assert np.allclose(target, events_to[match])
def test_match_events_twosided():
    """Two-sided matching must pair each event with its exact counterpart."""
    src = np.asarray([5, 15, 25])
    dst = np.asarray([5, 15, 25, 30])
    result = librosa.util.match_events(src, dst, left=False, right=False)
    assert np.allclose(result, [0, 1, 2])
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize(
    "events_from,events_to,left,right",
    [
        ([40, 15, 25], [0, 10, 20, 30], False, True),  # right-sided fail
        ([-1, 15, 25], [0, 10, 20, 30], True, False),  # left-sided fail
        ([-1, 15, 25], [0, 10, 20, 30], False, False),  # two-sided fail
    ],
)
def test_match_events_onesided_fail(events_from, events_to, left, right):
    """Events with no admissible target on the allowed side must raise."""
    events_from = np.asarray(events_from)
    events_to = np.asarray(events_to)
    librosa.util.match_events(events_from, events_to, left=left, right=right)
@pytest.mark.parametrize("ndim, axis", [(n, m) for n in range(1, 5) for m in range(n)])
def test_localmax(ndim, axis):
    # A flagged local maximum must strictly beat its left neighbour and
    # at least tie its right neighbour along the chosen axis.
    srand()
    data = np.random.randn(*([7] * ndim))
    lm = librosa.util.localmax(data, axis=axis)
    for point in np.argwhere(lm):
        for step in (-1, 1):
            neighbor = point.copy()
            neighbor[axis] += step
            # Skip out-of-bounds comparisons at the array edges
            if not (0 <= neighbor[axis] < data.shape[axis]):
                continue
            if step == -1:
                assert data[tuple(point)] > data[tuple(neighbor)]
            else:
                assert data[tuple(point)] >= data[tuple(neighbor)]
@pytest.mark.parametrize("ndim, axis", [(n, m) for n in range(1, 5) for m in range(n)])
def test_localmin(ndim, axis):
    # A flagged local minimum must strictly undercut its left neighbour
    # and at most tie its right neighbour along the chosen axis.
    srand()
    data = np.random.randn(*([7] * ndim))
    lm = librosa.util.localmin(data, axis=axis)
    for point in np.argwhere(lm):
        for step in (-1, 1):
            neighbor = point.copy()
            neighbor[axis] += step
            # Skip out-of-bounds comparisons at the array edges
            if not (0 <= neighbor[axis] < data.shape[axis]):
                continue
            if step == -1:
                assert data[tuple(point)] < data[tuple(neighbor)]
            else:
                assert data[tuple(point)] <= data[tuple(neighbor)]
# NOTE(review): the "x" fixtures draw from the global RNG at collection time
# (no srand()), so exact values differ between runs; the assertions below
# are property-based, so this is harmless but non-reproducible.
@pytest.mark.parametrize("x", [np.random.randn(_) ** 2 for _ in [1, 5, 10, 100]])
@pytest.mark.parametrize("pre_max", [0, 1, 10])
@pytest.mark.parametrize("post_max", [1, 10])
@pytest.mark.parametrize("pre_avg", [0, 1, 10])
@pytest.mark.parametrize("post_avg", [1, 10])
@pytest.mark.parametrize("wait", [0, 1, 10])
@pytest.mark.parametrize("delta", [0.05, 100.0])
def test_peak_pick(x, pre_max, post_max, pre_avg, post_avg, delta, wait):
    # Verify the three defining properties of every reported peak.
    peaks = librosa.util.peak_pick(x, pre_max, post_max, pre_avg, post_avg, delta, wait)
    for i in peaks:
        # Test 1: is it a peak in this window?
        s = i - pre_max
        if s < 0:
            s = 0
        t = i + post_max
        # x[i] must equal the window maximum up to numerical tolerance
        diff = x[i] - np.max(x[s:t])
        assert diff > 0 or np.isclose(diff, 0, rtol=1e-3, atol=1e-4)
        # Test 2: is it a big enough peak to count?
        s = i - pre_avg
        if s < 0:
            s = 0
        t = i + post_avg
        # x[i] must exceed the local mean by at least delta
        diff = x[i] - (delta + np.mean(x[s:t]))
        assert diff > 0 or np.isclose(diff, 0, rtol=1e-3, atol=1e-4)
        # Test 3: peak separation
        # consecutive peaks must be more than `wait` samples apart
        assert not np.any(np.diff(peaks) <= wait)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("x", [np.random.randn(_) ** 2 for _ in [1, 5, 10, 100]])
@pytest.mark.parametrize(
    "pre_max,post_max,pre_avg,post_avg,delta,wait",
    [
        (-1, 1, 1, 1, 0.05, 1),  # negative pre-max
        (1, -1, 1, 1, 0.05, 1),  # negative post-max
        (1, 0, 1, 1, 0.05, 1),  # 0 post-max
        (1, 1, -1, 1, 0.05, 1),  # negative pre-avg
        (1, 1, 1, -1, 0.05, 1),  # negative post-avg
        (1, 1, 1, 0, 0.05, 1),  # zero post-avg
        (1, 1, 1, 1, -0.05, 1),  # negative delta
        (1, 1, 1, 1, 0.05, -1),  # negative wait
    ],
)
def test_peak_pick_fail(x, pre_max, post_max, pre_avg, post_avg, delta, wait):
    # Invalid window/threshold parameters must raise ParameterError
    librosa.util.peak_pick(x, pre_max, post_max, pre_avg, post_avg, delta, wait)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_peak_pick_shape_fail():
    # Can't pick peaks on 2d inputs
    librosa.util.peak_pick(np.eye(2), 1, 1, 1, 1, 0.5, 1)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("ndim", [3, 4])
def test_sparsify_rows_ndimfail(ndim):
    # sparsify_rows only accepts 1- or 2-dimensional input
    X = np.zeros([2] * ndim)
    librosa.util.sparsify_rows(X)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("quantile", [1.0, -1, 2.0])
@pytest.mark.parametrize("X", [np.ones((3, 3))])
def test_sparsify_rows_badquantile(X, quantile):
    # quantile must lie in [0, 1); anything else raises ParameterError
    librosa.util.sparsify_rows(X, quantile=quantile)
@pytest.mark.parametrize("dtype", [None, np.float32, np.float64])
@pytest.mark.parametrize("ref_dtype", [np.float32, np.float64])
def test_sparsify_rows_dtype(dtype, ref_dtype):
    # dtype=None preserves the input dtype; otherwise the request wins.
    dense = np.ones(10, dtype=ref_dtype)
    sparse = librosa.util.sparsify_rows(dense, dtype=dtype)
    expected = dense.dtype if dtype is None else dtype
    assert sparse.dtype == expected
@pytest.mark.parametrize("ndim", [1, 2])
@pytest.mark.parametrize("d", [1, 5, 10, 100])
@pytest.mark.parametrize("q", [0.0, 0.01, 0.25, 0.5, 0.99])
def test_sparsify_rows(ndim, d, q):
    # Sparsified output must match the input on retained entries and keep
    # at least a (1-q) fraction of each row's total magnitude.
    srand()
    # Fourth power exaggerates the dynamic range, making sparsification
    # meaningful
    X = np.random.randn(*([d] * ndim)) ** 4
    X = np.asarray(X)
    xs = librosa.util.sparsify_rows(X, quantile=q)
    if ndim == 1:
        # 1-d input is promoted to a single-row matrix
        X = X.reshape((1, -1))
    assert np.allclose(xs.shape, X.shape)
    # And make sure that xs matches X on nonzeros
    xsd = np.asarray(xs.todense())
    for i in range(xs.shape[0]):
        assert np.allclose(xsd[i, xs[i].indices], X[i, xs[i].indices])
    # Compute row-wise magnitude marginals
    v_in = np.sum(np.abs(X), axis=-1)
    v_out = np.sum(np.abs(xsd), axis=-1)
    # Ensure that v_out retains 1-q fraction of v_in
    assert np.all(v_out >= (1.0 - q) * v_in)
@pytest.mark.parametrize(
    "searchdir",
    [
        os.path.join(os.path.curdir, "tests"),
        os.path.join(os.path.curdir, "tests", "data"),
    ],
)
@pytest.mark.parametrize("ext", [None, "wav", "WAV", ["wav"], ["WAV"]])
@pytest.mark.parametrize("recurse", [True])
# Case-sensitivity cannot be exercised on Windows, so only test it elsewhere
@pytest.mark.parametrize(
    "case_sensitive", list({False} | {platform.system() != "Windows"})
)
@pytest.mark.parametrize("limit", [None, 1, 2])
@pytest.mark.parametrize("offset", [0, 1, -1])
@pytest.mark.parametrize(
    "output",
    [
        [
            os.path.join(os.path.abspath(os.path.curdir), "tests", "data", s)
            for s in [
                "test1_22050.mp3",
                "test1_22050.wav",
                "test1_44100.wav",
                "test2_8000.wav",
            ]
        ]
    ],
)
def test_find_files(searchdir, ext, recurse, case_sensitive, limit, offset, output):
    # Recursive search must find exactly the known fixture files, honouring
    # extension filters, case sensitivity, limit and offset.
    files = librosa.util.find_files(
        searchdir,
        ext=ext,
        recurse=recurse,
        case_sensitive=case_sensitive,
        limit=limit,
        offset=offset,
    )
    targets = output
    if ext is not None:
        # If we're only seeking wavs, bump off the mp3 file
        targets = targets[1:]
    # Apply offset first, then limit, mirroring find_files' semantics
    s1 = slice(offset, None)
    s2 = slice(limit)
    if case_sensitive and ext not in (None, "wav", ["wav"]):
        # Upper-case extensions match nothing on case-sensitive filesystems
        assert len(files) == 0
    else:
        assert set(files) == set(targets[s1][s2])
def test_find_files_nonrecurse():
    # All fixture audio lives under tests/data, so a non-recursive search
    # of tests/ itself must come back empty.
    found = librosa.util.find_files(
        os.path.join(os.path.curdir, "tests"), recurse=False
    )
    assert len(found) == 0
# A case-sensitive search for upper-case "WAV" must match nothing, since
# the fixture files all use lower-case extensions.
@pytest.mark.parametrize("ext", ["WAV", ["WAV"]])
def test_find_files_case_sensitive(ext):
    found = librosa.util.find_files(
        os.path.join(os.path.curdir, "tests"), ext=ext, case_sensitive=True
    )
    # Windows filesystems are case-insensitive, so skip the assertion there
    if platform.system() != "Windows":
        assert len(found) == 0
@pytest.mark.parametrize("x_in", np.linspace(-2, 2, num=6))
@pytest.mark.parametrize("cast", [None, np.floor, np.ceil])
def test_valid_int(x_in, cast):
    # valid_int always produces a python int; the default cast is floor.
    result = librosa.util.valid_int(x_in, cast)
    assert isinstance(result, int)
    expected = np.floor(x_in) if cast is None else cast(x_in)
    assert result == int(expected)
@pytest.mark.parametrize("x", np.linspace(-2, 2, num=3))
@pytest.mark.parametrize("cast", [7])
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_valid_int_fail(x, cast):
    # Test with a non-callable cast operator
    librosa.util.valid_int(x, cast)
@pytest.mark.parametrize(
    "ivals",
    [
        np.asarray([[0, 1], [1, 2]]),
        np.asarray([[0, 0], [1, 1]]),
        np.asarray([[0, 2], [1, 2]]),
    ],
)
def test_valid_intervals(ivals):
    # Well-formed (n, 2) interval arrays must validate without error
    librosa.util.valid_intervals(ivals)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize(
    "ivals",
    [
        np.asarray([]),  # ndim=1, empty
        np.arange(2),  # ndim=1
        np.ones((2, 2, 2)),  # ndim=3
        np.ones((2, 3)),  # ndim=2, shape[1] != 2
    ],
)
def test_valid_intervals_badshape(ivals):
    # fail if ndim != 2 or shape[1] != 2
    librosa.util.valid_intervals(ivals)
@pytest.mark.xfail(raises=librosa.ParameterError)
@pytest.mark.parametrize("intval", [np.asarray([[0, 1], [2, 1]])])
def test_valid_intervals_fail(intval):
    # Test for issue #712: intervals must have non-negative duration
    librosa.util.valid_intervals(intval)
def test_warning_deprecated():
    # The @deprecated decorator must warn without altering the return value.
    @librosa.util.decorators.deprecated("old_version", "new_version")
    def __dummy():
        return True

    with warnings.catch_warnings(record=True) as caught:
        result = __dummy()

    # The wrapped function still returns its original value
    assert result is True
    # Exactly the expected warning was emitted
    assert len(caught) > 0
    assert caught[0].category is DeprecationWarning
    assert "deprecated" in str(caught[0].message).lower()
def test_warning_moved():
    # The @moved decorator must warn without altering the return value.
    @librosa.util.decorators.moved("from", "old_version", "new_version")
    def __dummy():
        return True

    with warnings.catch_warnings(record=True) as caught:
        result = __dummy()

    # The wrapped function still returns its original value
    assert result is True
    # Exactly the expected warning was emitted
    assert len(caught) > 0
    assert caught[0].category is DeprecationWarning
    assert "moved" in str(caught[0].message).lower()
def test_warning_rename_kw_pass():
    # When the deprecated keyword is left unset (Deprecated sentinel),
    # the new value passes through silently.
    warnings.resetwarnings()
    warnings.simplefilter("always")
    old_value = librosa.util.Deprecated()
    new_value = 23
    with warnings.catch_warnings(record=True) as caught:
        result = librosa.util.rename_kw("old", old_value, "new", new_value, "0", "1")

    assert result == new_value
    # No warning may fire in the pass-through case
    assert len(caught) == 0
def test_warning_rename_kw_fail():
    # When the deprecated keyword IS set, its value wins and a
    # DeprecationWarning about the rename is emitted.
    warnings.resetwarnings()
    warnings.simplefilter("always")
    old_value = 27
    new_value = 23
    with warnings.catch_warnings(record=True) as caught:
        result = librosa.util.rename_kw("old", old_value, "new", new_value, "0", "1")

    assert result == old_value
    # The rename warning must fire with the right category and message
    assert len(caught) > 0
    assert caught[0].category is DeprecationWarning
    assert "renamed" in str(caught[0].message).lower()
@pytest.mark.parametrize("idx", [np.arange(10, 90, 10), np.arange(10, 90, 15)])
@pytest.mark.parametrize("idx_min", [None, 5, 15])
@pytest.mark.parametrize("idx_max", [None, 85, 100])
@pytest.mark.parametrize("step", [None, 2])
@pytest.mark.parametrize("pad", [False, True])
def test_index_to_slice(idx, idx_min, idx_max, step, pad):
    # Slices must tile the (clipped) index set: each slice runs from one
    # index to the next, optionally padded out to idx_min/idx_max.
    slices = librosa.util.index_to_slice(
        idx, idx_min=idx_min, idx_max=idx_max, step=step, pad=pad
    )
    if pad:
        if idx_min is not None:
            # Padding prepends a slice starting exactly at idx_min
            assert slices[0].start == idx_min
            if idx.min() != idx_min:
                # Drop the synthetic pad slice before the interior checks
                slices = slices[1:]
        if idx_max is not None:
            # Padding appends a slice stopping exactly at idx_max
            assert slices[-1].stop == idx_max
            if idx.max() != idx_max:
                # Drop the synthetic pad slice before the interior checks
                slices = slices[:-1]
    # Clip the reference indices the same way index_to_slice does
    if idx_min is not None:
        idx = idx[idx >= idx_min]
    if idx_max is not None:
        idx = idx[idx <= idx_max]
    idx = np.unique(idx)
    # n indices produce n-1 interior slices
    assert len(slices) == len(idx) - 1
    for sl, start, stop in zip(slices, idx, idx[1:]):
        assert sl.start == start
        assert sl.stop == stop
        assert sl.step == step
@pytest.mark.parametrize("aggregate", [None, np.mean, np.sum])
@pytest.mark.parametrize(
    "ndim,axis", [(1, 0), (1, -1), (2, 0), (2, 1), (2, -1), (3, 0), (3, 2), (3, -1)]
)
def test_sync(aggregate, ndim, axis):
    # Sync a block of ones over two slices and check shapes and values.
    # Use builtin ``float``: the ``np.float`` alias was deprecated in
    # numpy 1.20 and removed in numpy 1.24, so ``dtype=np.float`` now fails.
    data = np.ones([6] * ndim, dtype=float)
    # Make some slices that don't fill the entire dimension
    slices = [slice(1, 3), slice(3, 4)]
    dsync = librosa.util.sync(data, slices, aggregate=aggregate, axis=axis)
    # Check the axis shapes: one output frame per slice
    assert dsync.shape[axis] == len(slices)
    s_test = list(dsync.shape)
    del s_test[axis]
    s_orig = list(data.shape)
    del s_orig[axis]
    # All other axes are untouched
    assert s_test == s_orig
    # Build the index as a tuple: indexing an ndarray with a *list* of
    # slices was deprecated in numpy 1.15 and is now an error.
    idx = [slice(None)] * ndim
    idx[axis] = 0
    if aggregate is np.sum:
        # The first slice covers two elements of ones, so it sums to 2
        assert np.allclose(dsync[tuple(idx)], 2)
    else:
        # mean (and the default aggregate) give 1
        assert np.allclose(dsync[tuple(idx)], 1)
    # The second slice covers one element: sum == mean == 1
    idx[axis] = 1
    assert np.allclose(dsync[tuple(idx)], 1)
@pytest.mark.parametrize("aggregate", [np.mean, np.max])
def test_sync_slices(aggregate):
    # Sync a ramp over four adjacent pair-slices and verify the aggregate.
    signal = np.arange(8, dtype=float)
    slices = [slice(0, 2), slice(2, 4), slice(4, 6), slice(6, 8)]
    synced = librosa.util.sync(signal, slices, aggregate=aggregate)
    if aggregate is np.mean:
        assert np.allclose(synced, [0.5, 2.5, 4.5, 6.5])
    elif aggregate is np.max:
        assert np.allclose(synced, [1, 3, 5, 7])
    else:
        # Unreachable with the parametrized aggregates
        assert False
@pytest.mark.parametrize("aggregate", [np.mean, np.max])
@pytest.mark.parametrize("atype", [list, np.asarray])
def test_sync_frames(aggregate, atype):
    # Frame boundaries (list or ndarray) behave like the equivalent slices.
    signal = np.arange(8, dtype=float)
    boundaries = atype([0, 2, 4, 6, 8])
    synced = librosa.util.sync(signal, boundaries, aggregate=aggregate)
    if aggregate is np.mean:
        assert np.allclose(synced, [0.5, 2.5, 4.5, 6.5])
    elif aggregate is np.max:
        assert np.allclose(synced, [1, 3, 5, 7])
    else:
        # Unreachable with the parametrized aggregates
        assert False
@pytest.mark.parametrize("atype", [list, np.asarray])
@pytest.mark.parametrize("pad", [False, True])
def test_sync_frames_pad(atype, pad):
    # pad=True extends interior boundaries out to the signal edges.
    signal = np.arange(8, dtype=float)
    boundaries = atype([2, 4, 6])
    synced = librosa.util.sync(signal, boundaries, pad=pad)
    expected = [0.5, 2.5, 4.5, 6.5] if pad else [2.5, 4.5]
    assert np.allclose(synced, expected)
@pytest.mark.parametrize("data", [np.mod(np.arange(135), 5)])
@pytest.mark.parametrize("idx", [["foo", "bar"], [None], [slice(None), None]])
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_sync_fail(data, idx):
    # Index lists that are neither all-int nor all-slice must raise
    librosa.util.sync(data, idx)
@pytest.mark.parametrize("power", [1, 2, 50, 100, np.inf])
@pytest.mark.parametrize("split_zeros", [False, True])
def test_softmask(power, split_zeros):
    # Soft masks live in [0, 1]; all-zero rows are either split evenly
    # (finite power, split_zeros=True) or masked to zero.
    srand()
    X = np.abs(np.random.randn(10, 10))
    X_ref = np.abs(np.random.randn(10, 10))
    # Zero out some rows
    X[3, :] = 0
    X_ref[3, :] = 0
    mask = librosa.util.softmask(X, X_ref, power=power, split_zeros=split_zeros)
    assert np.all(mask >= 0) and np.all(mask <= 1)
    if split_zeros and np.isfinite(power):
        assert np.allclose(mask[3, :], 0.5)
    else:
        assert not np.any(mask[3, :]), mask[3]
def test_softmask_int():
    # Complementary masks over integer inputs must sum to exactly 1.
    source = 2 * np.ones((3, 3), dtype=np.int32)
    reference = np.vander(np.arange(3))
    mask_fwd = librosa.util.softmask(source, reference, power=1)
    mask_rev = librosa.util.softmask(reference, source, power=1)
    assert np.allclose(mask_fwd + mask_rev, 1)
@pytest.mark.parametrize(
    "x,x_ref,power,split_zeros",
    [
        (-np.ones(3), np.ones(3), 1, False),  # negative input
        (np.ones(3), -np.ones(3), 1, False),  # negative reference
        (np.ones(3), np.ones(4), 1, False),  # shape mismatch
        (np.ones(3), np.ones(3), 0, False),  # zero power
        (np.ones(3), np.ones(3), -1, False),  # negative power
    ],
)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_softmask_fail(x, x_ref, power, split_zeros):
    librosa.util.softmask(x, x_ref, power=power, split_zeros=split_zeros)
@pytest.mark.parametrize(
    "x,value",
    [
        # Integral and real inputs map to the tiny value of the matching
        # (or default float32) real type; complex inputs map to the tiny
        # value of the matching complex type.
        (1, np.finfo(np.float32).tiny),
        (np.ones(3, dtype=int), np.finfo(np.float32).tiny),
        (np.ones(3, dtype=np.float32), np.finfo(np.float32).tiny),
        (1.0, np.finfo(np.float64).tiny),
        (np.ones(3, dtype=np.float64), np.finfo(np.float64).tiny),
        (1j, np.finfo(np.complex128).tiny),
        (np.ones(3, dtype=np.complex64), np.finfo(np.complex64).tiny),
        (np.ones(3, dtype=np.complex128), np.finfo(np.complex128).tiny),
    ],
)
def test_tiny(x, value):
    assert value == librosa.util.tiny(x)
def test_util_fill_off_diagonal_8_8():
    # Case 1: Square matrix (N=M).  Entries within the 0.25-width band
    # around the main diagonal survive; everything else is zeroed in place.
    matrix = np.ones((8, 8))
    librosa.util.fill_off_diagonal(matrix, 0.25)
    expected = np.array(
        [
            [1, 1, 0, 0, 0, 0, 0, 0],
            [1, 1, 1, 0, 0, 0, 0, 0],
            [0, 1, 1, 1, 0, 0, 0, 0],
            [0, 0, 1, 1, 1, 0, 0, 0],
            [0, 0, 0, 1, 1, 1, 0, 0],
            [0, 0, 0, 0, 1, 1, 1, 0],
            [0, 0, 0, 0, 0, 1, 1, 1],
            [0, 0, 0, 0, 0, 0, 1, 1],
        ]
    )
    assert np.array_equal(matrix, expected)
    # The square case is symmetric
    assert np.array_equal(matrix, expected.T)
def test_util_fill_off_diagonal_8_12():
    # Case 2a: rectangular (N != M) — the band follows the skew diagonal.
    matrix = np.ones((8, 12))
    librosa.util.fill_off_diagonal(matrix, 0.25)
    expected = np.array(
        [
            [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
            [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
            [0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
            [0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
            [0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
            [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
            [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
            [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
        ]
    )
    assert np.array_equal(matrix, expected)
    # Case 2b: the transposed shape produces the transposed band
    matrix = np.ones((8, 12)).T
    librosa.util.fill_off_diagonal(matrix, 0.25)
    assert np.array_equal(matrix, expected.T)
@pytest.mark.parametrize("dtype_A", [np.float32, np.float64])
@pytest.mark.parametrize("dtype_B", [np.float32, np.float64])
def test_nnls_vector(dtype_A, dtype_B):
    # Recover a known non-negative latent vector from an exact product.
    srand()
    # Make a random basis
    basis = np.random.randn(5, 7).astype(dtype_A)
    # Make a random latent vector (squared => non-negative)
    latent = np.random.randn(basis.shape[1]) ** 2
    target = basis.dot(latent).astype(dtype_B)
    recovered = librosa.util.nnls(basis, target)
    # The solution must be non-negative with small reconstruction RMSE
    assert np.all(recovered >= 0)
    assert np.sqrt(np.mean((target - basis.dot(recovered)) ** 2)) <= 1e-6
@pytest.mark.parametrize("dtype_A", [np.float32, np.float64])
@pytest.mark.parametrize("dtype_B", [np.float32, np.float64])
@pytest.mark.parametrize("x_size", [3, 30])
def test_nnls_matrix(dtype_A, dtype_B, x_size):
    # Recover a known non-negative latent matrix from an exact product.
    srand()
    # Make a random basis
    basis = np.random.randn(5, 7).astype(dtype_A)
    # Make a random latent matrix; when x_size is 3, the target is smaller
    # than the basis
    latent = np.random.randn(basis.shape[1], x_size) ** 2
    target = basis.dot(latent).astype(dtype_B)
    recovered = librosa.util.nnls(basis, target)
    # The solution must be non-negative with small reconstruction RMSE
    assert np.all(recovered >= 0)
    assert np.sqrt(np.mean((target - basis.dot(recovered)) ** 2)) <= 1e-5
@pytest.mark.parametrize("dtype_A", [np.float32, np.float64])
@pytest.mark.parametrize("dtype_B", [np.float32, np.float64])
@pytest.mark.parametrize("x_size", [16, 64, 256])
def test_nnls_multiblock(dtype_A, dtype_B, x_size):
    # Exercise the multi-block code path with a wide basis.
    srand()
    # Make a random (wide) basis
    basis = np.random.randn(7, 1025).astype(dtype_A)
    # Make a random non-negative latent matrix
    latent = np.random.randn(basis.shape[1], x_size) ** 2
    target = basis.dot(latent).astype(dtype_B)
    recovered = librosa.util.nnls(basis, target)
    # The solution must be non-negative with small reconstruction RMSE
    assert np.all(recovered >= 0)
    assert np.sqrt(np.mean((target - basis.dot(recovered)) ** 2)) <= 1e-4
@pytest.fixture
def psig():
    """Single-row ramp signal [[0, 1, 2, 3, 4]] for gradient tests.

    Its cyclic gradient along axis=1 (or -1) is [-1.5, 1, 1, 1, -1.5];
    along axis=0 it is all zeros.
    """
    return np.arange(0, 5, dtype=float)[np.newaxis]
@pytest.mark.parametrize("edge_order", [1, 2])
@pytest.mark.parametrize("axis", [0, 1, -1])
def test_cyclic_gradient(psig, edge_order, axis):
    # Shape and dtype are preserved, and the known gradient of the ramp
    # fixture is reproduced along every axis.
    result = librosa.util.cyclic_gradient(psig, edge_order=edge_order, axis=axis)
    assert result.shape == psig.shape
    assert result.dtype == psig.dtype
    if axis == 0:
        # The fixture is constant along axis 0
        assert np.allclose(result, 0)
    else:
        assert np.allclose(result, [-1.5, 1, 1, 1, -1.5])
def test_shear_dense():
    # Shear the identity by +/-1 along each axis and check the expected
    # permuted/collapsed results, in the same order as before.
    E = np.eye(3)
    cases = [
        (1, 0, [[1, 0, 0], [0, 0, 1], [0, 1, 0]]),
        (1, 1, [[1, 0, 0], [0, 0, 1], [0, 1, 0]]),
        (-1, 1, [[1, 1, 1], [0, 0, 0], [0, 0, 0]]),
        (-1, 0, [[1, 0, 0], [1, 0, 0], [1, 0, 0]]),
    ]
    for factor, axis, expected in cases:
        sheared = librosa.util.shear(E, factor=factor, axis=axis)
        assert np.allclose(sheared, np.asarray(expected))
@pytest.mark.parametrize("fmt", ["csc", "csr", "lil", "dok"])
def test_shear_sparse(fmt):
    # Sparse shears must preserve the storage format and agree with the
    # dense results, in the same case order as before.
    E = scipy.sparse.identity(3, format=fmt)
    cases = [
        (1, 0, [[1, 0, 0], [0, 0, 1], [0, 1, 0]]),
        (1, 1, [[1, 0, 0], [0, 0, 1], [0, 1, 0]]),
        (-1, 1, [[1, 1, 1], [0, 0, 0], [0, 0, 0]]),
        (-1, 0, [[1, 0, 0], [1, 0, 0], [1, 0, 0]]),
    ]
    for factor, axis, expected in cases:
        sheared = librosa.util.shear(E, factor=factor, axis=axis)
        assert sheared.format == fmt
        assert np.allclose(sheared.toarray(), np.asarray(expected))
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_shear_badfactor():
    # A non-integer (None) shear factor must raise ParameterError
    librosa.util.shear(np.eye(3), factor=None)
def test_stack_contig():
    # Stacking along axis=0 must yield a Fortran-contiguous result.
    ones = np.ones(3)
    stacked = librosa.util.stack([ones, -np.ones(3)], axis=0)
    assert stacked.flags["F_CONTIGUOUS"]
    assert np.allclose(stacked, [[1, 1, 1], [-1, -1, -1]])
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_stack_fail_shape():
    # Mismatched shapes must raise ParameterError
    x1 = np.ones(3)
    x2 = np.ones(2)
    librosa.util.stack([x1, x2])
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_stack_fail_empty():
    # An empty input list must raise ParameterError
    librosa.util.stack([])
@pytest.mark.parametrize("axis", [0, 1, -1])
@pytest.mark.parametrize("x", [np.random.randn(5, 10, 20)])
def test_stack_consistent(x, axis):
    # librosa.util.stack must agree with numpy.stack; non-zero axes
    # produce C-contiguous output.
    ours = librosa.util.stack([x, x], axis=axis)
    theirs = np.stack([x, x], axis=axis)
    assert np.allclose(ours, theirs)
    if axis != 0:
        assert ours.flags["C_CONTIGUOUS"]
@pytest.mark.parametrize("key", ["trumpet", "brahms", "nutcracker", "choice"])
@pytest.mark.parametrize("hq", [False, True])
def test_example(key, hq):
    # Each registered example key resolves to an existing local file
    # (downloaded on demand)
    fn = librosa.example(key, hq=hq)
    assert os.path.exists(fn)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_example_fail():
    # Unknown example keys must raise ParameterError
    librosa.example("no such track")
@pytest.mark.parametrize("key", ["trumpet", "brahms", "nutcracker", "choice", "fishin"])
def test_example_info(key):
    # Smoke test: printing example metadata must not raise
    librosa.util.example_info(key)
def test_list_examples():
    # Smoke test: listing the example index must not raise
    librosa.util.list_examples()
@pytest.mark.parametrize(
    "dtype,target",
    [
        (np.float32, np.complex64),
        (np.float64, np.complex128),
        (np.int32, np.complex64),
        (np.complex128, np.complex128),
    ],
)
def test_dtype_r2c(dtype, target):
    # Real dtypes map to a complex analogue of matching precision.
    inferred = librosa.util.dtype_r2c(dtype)
    # better to do a bidirectional subtype test than strict equality here
    assert np.issubdtype(inferred, target)
    assert np.issubdtype(target, inferred)
@pytest.mark.parametrize(
    "dtype,target",
    [
        (np.float32, np.float32),
        (np.complex64, np.float32),
        (np.int32, np.float32),
        (np.complex128, np.float64),
    ],
)
def test_dtype_c2r(dtype, target):
    # Complex dtypes map to a real analogue of matching precision.
    inferred = librosa.util.dtype_c2r(dtype)
    # better to do a bidirectional subtype test than strict equality here
    assert np.issubdtype(inferred, target)
    assert np.issubdtype(target, inferred)
| bmcfee/librosa | tests/test_util.py | Python | isc | 37,829 | [
"Brian"
] | 27699310eff7cb5936f5217a64b63557df397a916abd10dac7e28b031f3a8cef |
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2016-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Python implementation of the tiled Q-transform scan.
This is a re-implementation of the original Q-transform scan from the Omega
pipeline, all credits for the original algorithm go to its
authors.
"""
import warnings
from math import (log, ceil, pi, isinf, exp)
import numpy
from numpy import fft as npfft
from ..utils import round_to_power
from ..segments import Segment
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
__credits__ = 'Scott Coughlin <scott.coughlin@ligo.org>, ' \
'Alex Urban <alexander.urban@ligo.org>'
__all__ = ['QTiling', 'QPlane', 'QTile', 'QGram', 'q_scan']
# q-transform defaults
DEFAULT_FRANGE = (0, float('inf'))
DEFAULT_MISMATCH = 0.2
DEFAULT_QRANGE = (4, 64)
# -- object class definitions -------------------------------------------------
class QObject(object):
    """Base class for Q-transform objects

    Holds the duration/sampling/mismatch attributes shared by every
    other Q-transform object; exists only to provide these basics.
    """

    # pylint: disable=too-few-public-methods
    def __init__(self, duration, sampling, mismatch=DEFAULT_MISMATCH):
        # Coerce everything to float so downstream arithmetic is uniform
        self.duration = float(duration)
        self.sampling = float(sampling)
        self.mismatch = float(mismatch)

    @property
    def deltam(self):
        """Fractional mismatch between neighbouring tiles

        :type: `float`
        """
        ratio = self.mismatch / 3.
        return 2 * ratio ** (1/2.)
class QBase(QObject):
    """Base class for Q-transform objects with fixed Q

    Adds a ``qprime`` property (Q / sqrt(11)) on top of `QObject`.
    """

    def __init__(self, q, duration, sampling, mismatch=DEFAULT_MISMATCH):
        super().__init__(duration, sampling, mismatch=mismatch)
        self.q = float(q)

    @property
    def qprime(self):
        """Normalized Q `(q/sqrt(11))`
        """
        root_eleven = 11 ** (1/2.)
        return self.q / root_eleven
class QTiling(QObject):
    """Iterable constructor of `QPlane` objects

    For a given Q-range, each of the resulting `QPlane` objects can
    be iterated over.

    Parameters
    ----------
    duration : `float`
        the duration of the data to be Q-transformed

    qrange : `tuple` of `float`
        `(low, high)` pair of Q extrema

    frange : `tuple` of `float`
        `(low, high)` pair of frequency extrema

    sampling : `float`
        sampling rate (in Hertz) of data to be Q-transformed

    mismatch : `float`
        maximum fractional mismatch between neighbouring tiles
    """
    def __init__(self, duration, sampling,
                 qrange=DEFAULT_QRANGE,
                 frange=DEFAULT_FRANGE,
                 mismatch=DEFAULT_MISMATCH):
        super().__init__(duration, sampling, mismatch=mismatch)
        self.qrange = (float(qrange[0]), float(qrange[1]))
        # frange is a list (not tuple) because it may be modified below
        self.frange = [float(frange[0]), float(frange[1])]
        qlist = list(self._iter_qs())
        if self.frange[0] == 0:  # set non-zero lower frequency
            self.frange[0] = 50 * max(qlist) / (2 * pi * self.duration)
        # highest frequency for which the widest-bandwidth row still fits
        # below Nyquist
        maxf = self.sampling / 2 / (1 + 11**(1/2.) / min(qlist))
        if isinf(self.frange[1]):
            self.frange[1] = maxf
        elif self.frange[1] > maxf:  # truncate upper frequency to maximum
            warnings.warn('upper frequency of %.2f is too high for the given '
                          'Q range, resetting to %.2f'
                          % (self.frange[1], maxf))
            self.frange[1] = maxf

    @property
    def qs(self):  # pylint: disable=invalid-name
        """Array of Q values for this `QTiling`

        :type: `numpy.ndarray`
        """
        return numpy.array(list(self._iter_qs()))

    @property
    def whitening_duration(self):
        """The recommended data duration required for whitening
        """
        # the widest requirement over all planes wins
        return max(t.whitening_duration for t in self)

    def _iter_qs(self):
        """Iterate over the Q values
        """
        # work out how many Qs we need
        cumum = log(self.qrange[1] / self.qrange[0]) / 2**(1/2.)
        nplanes = int(max(ceil(cumum / self.deltam), 1))
        dq = cumum / nplanes  # pylint: disable=invalid-name
        # Q values are geometrically spaced across the range
        for i in range(nplanes):
            yield self.qrange[0] * exp(2**(1/2.) * dq * (i + .5))

    def __iter__(self):
        """Iterate over this `QTiling`

        Yields a `QPlane` at each Q value
        """
        for q in self._iter_qs():
            yield QPlane(q, self.frange, self.duration, self.sampling,
                         mismatch=self.mismatch)

    def transform(self, fseries, **kwargs):
        """Compute the time-frequency plane at fixed Q with the most
        significant tile

        Parameters
        ----------
        fseries : `~gwpy.timeseries.FrequencySeries`
            the complex FFT of a time-series data set

        **kwargs
            other keyword arguments to pass to `QPlane.transform`

        Returns
        -------
        out : `QGram`
            signal energies over the time-frequency plane containing the most
            significant tile

        N : `int`
            estimated number of statistically independent tiles

        See also
        --------
        QPlane.transform
            compute the Q-transform over a single time-frequency plane
        """
        if not numpy.isfinite(fseries).all():
            raise ValueError('Input signal contains non-numerical values')
        weight = 1 + numpy.log10(self.qrange[1]/self.qrange[0]) / numpy.sqrt(2)
        nind, nplanes, peak, result = (0, 0, 0, None)
        # identify the plane with the loudest tile
        for plane in self:
            nplanes += 1
            # accumulate the per-row tile counts for the independence
            # estimate
            nind += sum([1 + row.ntiles * row.deltam for row in plane])
            result = plane.transform(fseries, **kwargs)
            if result.peak['energy'] > peak:
                out = result
                peak = out.peak['energy']
        return (out, nind * weight / nplanes)
class QPlane(QBase):
    """Iterable representation of a Q-transform plane

    For a given Q, an array of frequencies can be iterated over, yielding
    a `QTile` each time.

    Parameters
    ----------
    q : `float`
        the Q-value for this plane

    frange : `tuple` of `float`
        `(low, high)` range of frequencies for this plane

    duration : `float`
        the duration of the data to be Q-transformed

    sampling : `float`
        sampling rate (in Hertz) of data to be Q-transformed

    mismatch : `float`
        maximum fractional mismatch between neighbouring tiles
    """
    def __init__(self, q, frange, duration, sampling,
                 mismatch=DEFAULT_MISMATCH):
        super().__init__(q, duration, sampling, mismatch=mismatch)
        # frange is a list (not tuple) because it may be modified below
        self.frange = [float(frange[0]), float(frange[1])]
        if self.frange[0] == 0:  # set non-zero lower frequency
            self.frange[0] = 50 * self.q / (2 * pi * self.duration)
        if isinf(self.frange[1]):  # set non-infinite upper frequency
            self.frange[1] = self.sampling / 2 / (1 + 1/self.qprime)

    def __iter__(self):
        """Iterate over this `QPlane`

        Yields a `QTile` at each frequency
        """
        # for each frequency, yield a QTile
        for freq in self._iter_frequencies():
            yield QTile(self.q, freq, self.duration, self.sampling,
                        mismatch=self.mismatch)

    def _iter_frequencies(self):
        """Iterate over the frequencies of this `QPlane`
        """
        # work out how many frequencies we need
        minf, maxf = self.frange
        fcum_mismatch = log(maxf / minf) * (2 + self.q**2)**(1/2.) / 2.
        nfreq = int(max(1, ceil(fcum_mismatch / self.deltam)))
        fstep = fcum_mismatch / nfreq
        # quantize frequencies to the resolution set by the duration
        fstepmin = 1 / self.duration
        # for each frequency, yield a QTile
        last = None
        for i in range(nfreq):
            # geometric spacing, snapped down to a multiple of fstepmin
            this = (
                minf * exp(2 / (2 + self.q**2)**(1/2.) * (i + .5) * fstep) //
                fstepmin * fstepmin
            )
            if this != last:  # yield only unique elements
                yield this
                last = this

    @property
    def frequencies(self):
        """Array of central frequencies for this `QPlane`

        :type: `numpy.ndarray`
        """
        return numpy.array(list(self._iter_frequencies()))

    @property
    def farray(self):
        """Array of frequencies for the lower-edge of each frequency bin

        :type: `numpy.ndarray`
        """
        bandwidths = 2 * pi ** (1/2.) * self.frequencies / self.q
        return self.frequencies - bandwidths / 2.

    @property
    def whitening_duration(self):
        """The recommended data duration required for whitening
        """
        # round to the nearest power of 2 (which=None)
        return round_to_power(self.q / (2 * self.frange[0]),
                              base=2, which=None)

    def transform(self, fseries, norm=True, epoch=None, search=None):
        """Calculate the energy `TimeSeries` for the given `fseries`

        Parameters
        ----------
        fseries : `~gwpy.frequencyseries.FrequencySeries`
            the complex FFT of a time-series data set

        norm : `bool`, `str`, optional
            normalize the energy of the output by the median (if `True` or
            ``'median'``) or the ``'mean'``, if `False` the output
            is the complex `~numpy.fft.ifft` output of the Q-tranform

        epoch : `~gwpy.time.LIGOTimeGPS`, `float`, optional
            the epoch of these data, only used for metadata in the output
            `TimeSeries`, and not requires if the input `fseries` has the
            epoch populated.

        search : `~gwpy.segments.Segment`, optional
            search window of interest to determine the loudest Q-plane

        Returns
        -------
        results : `QGram`
            the complex energies of the Q-transform of the input `fseries`
            at each frequency

        See also
        --------
        QTile.transform
            for details on the transform over a row of `(Q, frequency)` tiles
        QGram
            an object with energies populated over time-frequency tiles
        """
        out = []
        for qtile in self:
            # get energy from transform
            out.append(qtile.transform(fseries, norm=norm, epoch=epoch))
        return QGram(self, out, search)
class QTile(QBase):
    """Representation of a tile with fixed Q and frequency
    """
    def __init__(self, q, frequency, duration, sampling,
                 mismatch=DEFAULT_MISMATCH):
        super().__init__(q, duration, sampling, mismatch=mismatch)
        # central frequency (Hz) of this row
        self.frequency = frequency

    @property
    def bandwidth(self):
        """The bandwidth for tiles in this row

        :type: `float`
        """
        return 2 * pi ** (1/2.) * self.frequency / self.q

    @property
    def ntiles(self):
        """The number of tiles in this row

        :type: `int`
        """
        tcum_mismatch = self.duration * 2 * pi * self.frequency / self.q
        # round up to a power of 2 so the IFFT length is efficient
        return round_to_power(tcum_mismatch / self.deltam,
                              base=2, which='upper')

    @property
    def windowsize(self):
        """The size of the frequency-domain window for this row

        :type: `int`
        """
        # always odd, so the window is symmetric about the centre
        return 2 * int(self.frequency / self.qprime * self.duration) + 1

    def _get_indices(self):
        # symmetric integer offsets [-half, ..., +half] around the
        # central frequency bin
        half = int((self.windowsize - 1) / 2)
        return numpy.arange(-half, half + 1)

    def get_window(self):
        """Generate the bi-square window for this row

        Returns
        -------
        window : `numpy.ndarray`
        """
        # real frequencies
        wfrequencies = self._get_indices() / self.duration
        # dimensionless frequencies
        xfrequencies = wfrequencies * self.qprime / self.frequency
        # normalize and generate bi-square window
        norm = self.ntiles / (self.duration * self.sampling) * (
            315 * self.qprime / (128 * self.frequency)) ** (1/2.)
        return (1 - xfrequencies ** 2) ** 2 * norm

    def get_data_indices(self):
        """Returns the index array of interesting frequencies for this row
        """
        return numpy.round(self._get_indices() + 1 +
                           self.frequency * self.duration).astype(int)

    @property
    def padding(self):
        """The `(left, right)` padding required for the IFFT

        :type: `tuple` of `int`
        """
        pad = self.ntiles - self.windowsize
        return (int((pad - 1)/2.), int((pad + 1)/2.))

    def transform(self, fseries, norm=True, epoch=None):
        """Calculate the energy `TimeSeries` for the given fseries

        Parameters
        ----------
        fseries : `~gwpy.frequencyseries.FrequencySeries`
            the complex FFT of a time-series data set

        norm : `bool`, `str`, optional
            normalize the energy of the output by the median (if `True` or
            ``'median'``) or the ``'mean'``, if `False` the output
            is the energy (power) of the Q-tranform

        epoch : `~gwpy.time.LIGOTimeGPS`, `float`, optional
            the epoch of these data, only used for metadata in the output
            `TimeSeries`, and not requires if the input `fseries` has the
            epoch populated.

        Returns
        -------
        energy : `~gwpy.timeseries.TimeSeries`
            a `TimeSeries` of the energy from the Q-transform of
            this tile against the data.
        """
        from ..timeseries import TimeSeries
        # apply the bi-square window around this row's central frequency
        windowed = fseries[self.get_data_indices()] * self.get_window()
        # pad data, move negative frequencies to the end, and IFFT
        padded = numpy.pad(windowed, self.padding, mode='constant')
        wenergy = npfft.ifftshift(padded)
        # return a `TimeSeries`
        if epoch is None:
            epoch = fseries.epoch
        tdenergy = npfft.ifft(wenergy)
        cenergy = TimeSeries(tdenergy, x0=epoch,
                             dx=self.duration/tdenergy.size, copy=False)
        # energy = |complex amplitude|^2
        energy = type(cenergy)(
            cenergy.value.real ** 2. + cenergy.value.imag ** 2.,
            x0=cenergy.x0, dx=cenergy.dx, copy=False)
        if norm:
            norm = norm.lower() if isinstance(norm, str) else norm
            if norm in (True, 'median'):
                narray = energy / energy.median()
            elif norm in ('mean',):
                narray = energy / energy.mean()
            else:
                raise ValueError("Invalid normalisation %r" % norm)
            # float32 keeps the memory footprint of large spectrograms down
            return narray.astype("float32", casting="same_kind", copy=False)
        return energy
class QGram(object):
    """Store tile energies over an irregularly gridded plane
    Parameters
    ----------
    plane : `QPlane`
        the time-frequency plane over which to populate
    energies : `list` of `TimeSeries`
        a list of signal energies for each row of tiles
    search : `~gwpy.segments.Segment`, optional
        search window of interest to determine the loudest tile
    """
    def __init__(self, plane, energies, search):
        self.plane = plane
        self.energies = energies
        # locate the loudest tile up front so `self.peak` is always populated
        self.peak = self._find_peak(search)
    def _find_peak(self, search):
        # scan each frequency row for its loudest tile, optionally
        # restricted to the `search` segment, and keep the global maximum
        peak = {'energy': 0, 'snr': None, 'time': None, 'frequency': None}
        for freq, energy in zip(self.plane.frequencies, self.energies):
            if search is not None:
                energy = energy.crop(*search)
            maxidx = energy.value.argmax()
            maxe = energy.value[maxidx]
            if maxe > peak['energy']:
                peak.update({
                    'energy': maxe,
                    # SNR for a tile with energy E is sqrt(2 E)
                    'snr': (2 * maxe) ** (1/2.),
                    'time': energy.t0.value + energy.dt.value * maxidx,
                    'frequency': freq,
                })
        return peak
    def interpolate(self, tres="<default>", fres="<default>", logf=False,
                    outseg=None):
        """Interpolate this `QGram` over a regularly-gridded spectrogram
        Parameters
        ----------
        tres : `float`, optional
            desired time resolution (seconds) of output `Spectrogram`,
            default is `abs(outseg) / 1000.`
        fres : `float`, `int`, `None`, optional
            desired frequency resolution (Hertz) of output `Spectrogram`,
            or, if ``logf=True``, the number of frequency samples;
            give `None` to skip this step and return the original resolution,
            default is 0.5 Hz or 500 frequency samples
        logf : `bool`, optional
            boolean switch to enable (`True`) or disable (`False`) use of
            log-sampled frequencies in the output `Spectrogram`
        outseg : `~gwpy.segments.Segment`, optional
            GPS `[start, stop)` segment for output `Spectrogram`,
            default is the full duration of the input
        Returns
        -------
        out : `~gwpy.spectrogram.Spectrogram`
            output `Spectrogram` of normalised Q energy
        See also
        --------
        scipy.interpolate
            this method uses `~scipy.interpolate.InterpolatedUnivariateSpline`
            to cast all frequency rows to a common time-axis, and then
            `~scipy.interpolate.interp2d` to apply the desired frequency
            resolution across the band
        Notes
        -----
        This method will return a `Spectrogram` of dtype ``float32`` if
        ``norm`` is given, and ``float64`` otherwise.
        To optimize plot rendering with `~matplotlib.axes.Axes.pcolormesh`,
        the output `~gwpy.spectrogram.Spectrogram` can be given a log-sampled
        frequency axis by passing `logf=True` at runtime. The `fres` argument
        is then the number of points on the frequency axis. Note, this is
        incompatible with `~matplotlib.axes.Axes.imshow`.
        It is also highly recommended to use the `outseg` keyword argument
        when only a small window around a given GPS time is of interest.
        """
        from scipy.interpolate import (interp2d, InterpolatedUnivariateSpline)
        from ..spectrogram import Spectrogram
        if outseg is None:
            outseg = self.energies[0].span
        frequencies = self.plane.frequencies
        dtype = self.energies[0].dtype
        # build regular Spectrogram from peak-Q data by interpolating each
        # (Q, frequency) `TimeSeries` to have the same time resolution
        if tres == "<default>":
            # "<default>" is a sentinel: pick 1000 samples across the segment
            tres = abs(Segment(outseg)) / 1000.
        xout = numpy.arange(*outseg, step=tres)
        nx = xout.size
        ny = frequencies.size
        out = Spectrogram(numpy.empty((nx, ny), dtype=dtype),
                          t0=outseg[0], dt=tres, frequencies=frequencies)
        # record Q in output
        out.q = self.plane.q
        # interpolate rows
        for i, row in enumerate(self.energies):
            xrow = numpy.arange(row.x0.value, (row.x0 + row.duration).value,
                                row.dx.value)
            interp = InterpolatedUnivariateSpline(xrow, row.value)
            out[:, i] = interp(xout).astype(dtype, casting="same_kind",
                                            copy=False)
        if fres is None:
            return out
        # interpolate the spectrogram to increase its frequency resolution
        # --- explicit resampling is used because interpolated images do not
        # support log scaling
        interp = interp2d(xout, frequencies, out.value.T, kind='cubic')
        if not logf:
            # linear frequency axis with `fres` (Hz) spacing
            if fres == "<default>":
                fres = .5
            outfreq = numpy.arange(
                self.plane.frange[0], self.plane.frange[1], fres,
                dtype=dtype)
        else:
            # log-sampled frequency axis with `fres` points
            if fres == "<default>":
                fres = 500
            outfreq = numpy.geomspace(
                self.plane.frange[0],
                self.plane.frange[1],
                num=int(fres),
            )
        new = type(out)(
            interp(xout, outfreq).T.astype(
                dtype, casting="same_kind", copy=False),
            t0=outseg[0], dt=tres, frequencies=outfreq,
        )
        new.q = self.plane.q
        return new
    def table(self, snrthresh=5.5):
        """Represent this `QPlane` as an `EventTable`
        Parameters
        ----------
        snrthresh : `float`, optional
            lower inclusive threshold on individual tile SNR to keep in the
            table, default: 5.5
        Returns
        -------
        out : `~gwpy.table.EventTable`
            a table of time-frequency tiles on this `QPlane`
        Notes
        -----
        Only tiles with signal energy greater than or equal to
        `snrthresh ** 2 / 2` will be stored in the output `EventTable`.
        """
        from ..table import EventTable
        # get plane properties
        freqs = self.plane.frequencies
        bws = 2 * (freqs - self.plane.farray)
        # collect table data as a recarray
        names = ('time', 'frequency', 'duration', 'bandwidth', 'energy')
        rec = numpy.recarray((0,), names=names, formats=['f8'] * len(names))
        for f, bw, row in zip(freqs, bws, self.energies):
            # SNR threshold translated to an energy threshold (E = snr^2 / 2)
            ind, = (row.value >= snrthresh ** 2 / 2.).nonzero()
            new = ind.size
            if new > 0:
                # grow the recarray in place and fill the new tail rows
                rec.resize((rec.size + new,), refcheck=False)
                rec['time'][-new:] = row.times.value[ind]
                rec['frequency'][-new:] = f
                rec['duration'][-new:] = row.dt.to('s').value
                rec['bandwidth'][-new:] = bw
                rec['energy'][-new:] = row.value[ind]
        # save to a table
        out = EventTable(rec, copy=False)
        out.meta['q'] = self.plane.q
        return out
# -- utilities ----------------------------------------------------------------
def q_scan(data, mismatch=DEFAULT_MISMATCH, qrange=DEFAULT_QRANGE,
           frange=DEFAULT_FRANGE, duration=None, sampling=None,
           **kwargs):
    """Transform data by scanning over a `QTiling`
    This utility is provided mainly to allow direct manipulation of the
    `QTiling.transform` output. Most users probably just want to use
    :meth:`~gwpy.timeseries.TimeSeries.q_transform`, which wraps around this.
    Parameters
    ----------
    data : `~gwpy.timeseries.TimeSeries` or `ndarray`
        the time- or frequency-domain input data
    mismatch : `float`, optional
        maximum allowed fractional mismatch between neighbouring tiles
    qrange : `tuple` of `float`, optional
        `(low, high)` range of Qs to scan
    frange : `tuple` of `float`, optional
        `(low, high)` range of frequencies to scan
    duration : `float`, optional
        duration (seconds) of input, required if `data` is not a `TimeSeries`
    sampling : `float`, optional
        sample rate (Hertz) of input, required if `data` is not a `TimeSeries`
    **kwargs
        other keyword arguments to be passed to :meth:`QTiling.transform`,
        including ``'epoch'`` and ``'search'``
    Returns
    -------
    qgram : `QGram`
        the raw output of :meth:`QTiling.transform`
    far : `float`
        expected false alarm rate (Hertz) of white Gaussian noise with the
        same peak energy and total duration as `qgram`
    """
    from gwpy.timeseries import TimeSeries
    # a TimeSeries input carries its own metadata; extract it and move the
    # data into the frequency domain
    if isinstance(data, TimeSeries):
        duration = abs(data.span)
        sampling = data.sample_rate.to('Hz').value
        kwargs['epoch'] = data.t0.value
        data = data.fft().value
    # run the raw Q-transform over the full tiling
    tiling = QTiling(duration, sampling, mismatch=mismatch,
                     qrange=qrange, frange=frange)
    qgram, ntiles = tiling.transform(data, **kwargs)
    # white-noise false-alarm rate for the observed peak energy
    far = 1.5 * ntiles * numpy.exp(-qgram.peak['energy']) / duration
    return (qgram, far)
| duncanmmacleod/gwpy | gwpy/signal/qtransform.py | Python | gpl-3.0 | 24,435 | [
"Gaussian"
] | b319ae99abf7a787e871dde9d4b310b2bb02ac9ef635975e4af08fb1b5a020a7 |
"""Active Tree Search
Author: Robert Lieck <science@robert-lieck.com>
"""
import os
import shutil
import random
import pydot
import sys
import logging
from logging import debug, info, warning, error
import numpy as np
from scipy.stats import multivariate_normal
from scipy.special import gamma, psi
from scipy.optimize import minimize
import theano
import theano.d3viz as d3v
import json
from math import floor, log10, sqrt
from IPython.display import display, Image, HTML
from environment import *
from helper_functions import *
# configure module logging: root at INFO, but silence the chatty per-class
# loggers unless explicitly re-enabled
logging.getLogger().setLevel(logging.INFO)
logging.getLogger(__name__+'.StateNode').setLevel(logging.WARNING)
logging.getLogger(__name__+'.ActiveSearchTree').setLevel(logging.WARNING)
# NOTE(review): a module-level logging call implicitly installs a handler on
# the root logger (the same side effect as logging.basicConfig()), which is
# why output only appears after this call -- consider replacing this line
# with an explicit logging.basicConfig(level=logging.INFO)
logging.warning('Why do I need to print this to get logging output???')
class StateNode(object):
    """A Node in the active search tree.

    Each node caches the mean and variance of its state and state-action
    values and recomputes them lazily through theano functions that are
    compiled once and shared by all instances (see
    `_compile_value_functions_`).
    """
    # flag that indicates whether the shared theano functions need to be
    # (re)compiled; checked by _compile_value_functions_()
    value_functions_up_to_date = False
    # precompiled value functions and their gradients
    # (all None until _compile_value_functions_() runs for the first time)
    _theano_mean_state_value_func_ = None
    _theano_mean_state_value_grad_ = None
    _theano_mean_state_action_value_func_ = None
    _theano_mean_state_action_value_grad_ = None
    _theano_state_value_variance_func_ = None
    _theano_state_value_variance_grad_ = None
    _theano_state_action_value_variance_func_ = None
    _theano_state_action_value_variance_grad_ = None
    # same for the active learning objective
    _theano_objective_func_ = None
    _theano_objective_grad_ = None
    @staticmethod
    def _compile_value_functions_():
        """
        This function defines the mean and variance of the state and state-action
        values as symbolic theano functions. It then compiles these functions and their
        (automatically computed) gradients into callable objects.

        Compilation happens at most once per process: the results are stored
        on the class and guarded by `StateNode.value_functions_up_to_date`.
        All gradients are taken with respect to the children's (mean / variance)
        state-action or state values, so they can be chained down the tree.
        """
        # don't recompile if up to date
        if StateNode.value_functions_up_to_date:
            return
        # print info
        logging.getLogger(__name__+'.StateNode').info("compile_value_functions")
        # define theano variables
        policy = theano.tensor.vector('policy')
        children_mean_state_action_values = theano.tensor.vector('mean_state_action_values')
        children_state_action_value_variances = theano.tensor.vector('state_action_value_variances')
        children_mean_state_values = theano.tensor.vector('mean_state_values')
        children_state_value_variances = theano.tensor.vector('state_value_variances')
        mean_expected_rewards = theano.tensor.vector('mean_expected_rewards')
        expected_reward_variances = theano.tensor.vector('expected_reward_variances')
        mean_transition_probs = theano.tensor.vector('mean_transition_probs')
        transition_prob_variances = theano.tensor.matrix('transition_prob_variances')
        transition_prob_variances_diag = theano.tensor.vector('transition_prob_variances_diag')
        discount = theano.tensor.scalar('discount')
        # for convenience
        r_plus_v = mean_expected_rewards + discount*children_mean_state_values
        # mean action value
        mean_state_action_value_vars = [mean_transition_probs, mean_expected_rewards, children_mean_state_values, discount]
        mean_state_action_value = mean_transition_probs.dot(r_plus_v)
        StateNode._theano_mean_state_action_value_func_ = theano.function(mean_state_action_value_vars, mean_state_action_value)
        StateNode._theano_mean_state_action_value_grad_ = theano.function(mean_state_action_value_vars, theano.grad(mean_state_action_value, [children_mean_state_values]))
        # variance of the action value
        state_action_value_variance_vars = [mean_transition_probs, transition_prob_variances, transition_prob_variances_diag, expected_reward_variances, children_state_value_variances, mean_expected_rewards, children_mean_state_values, discount]
        state_action_value_variance = (mean_transition_probs**2 + transition_prob_variances_diag).dot(expected_reward_variances + discount**2 * children_state_value_variances) + r_plus_v.dot(transition_prob_variances).dot(r_plus_v)
        StateNode._theano_state_action_value_variance_func_ = theano.function(state_action_value_variance_vars, state_action_value_variance)
        StateNode._theano_state_action_value_variance_grad_ = theano.function(state_action_value_variance_vars, theano.grad(state_action_value_variance, [children_mean_state_values, children_state_value_variances]))
        # mean state value
        mean_state_value_vars = [policy, children_mean_state_action_values]
        mean_state_value = policy.dot(children_mean_state_action_values)
        StateNode._theano_mean_state_value_func_ = theano.function(mean_state_value_vars, mean_state_value)
        StateNode._theano_mean_state_value_grad_ = theano.function(mean_state_value_vars, theano.grad(mean_state_value, [children_mean_state_action_values]))
        # state value variance
        state_value_variance_vars = [policy, children_state_action_value_variances]
        state_value_variance = (policy**2).dot(children_state_action_value_variances)
        StateNode._theano_state_value_variance_func_ = theano.function(state_value_variance_vars, state_value_variance)
        StateNode._theano_state_value_variance_grad_ = theano.function(state_value_variance_vars, theano.grad(state_value_variance, [children_state_action_value_variances]))
        # objective
        # (currently the active-learning objective is just the state value
        # variance, compiled separately so it can be changed independently)
        objective = state_value_variance
        StateNode._theano_objective_func_ = theano.function(state_value_variance_vars, objective)
        StateNode._theano_objective_grad_ = theano.function(state_value_variance_vars, theano.grad(objective, [children_state_action_value_variances]))
        # flag as up-to-date
        StateNode.value_functions_up_to_date = True
@staticmethod
def _mean_state_action_value_func_(mean_transition_probs, mean_expected_rewards, mean_state_values, discount):
StateNode._compile_value_functions_()
return StateNode._theano_mean_state_action_value_func_(mean_transition_probs, mean_expected_rewards, mean_state_values, discount)
@staticmethod
def _state_action_value_variance_func_(mean_transition_probs, transition_prob_variances, expected_reward_variances, state_value_variances, mean_expected_rewards, mean_state_values, discount):
StateNode._compile_value_functions_()
return StateNode._theano_state_action_value_variance_func_(mean_transition_probs, transition_prob_variances, np.diag(transition_prob_variances), expected_reward_variances, state_value_variances, mean_expected_rewards, mean_state_values, discount)
@staticmethod
def _mean_state_value_func_(policy, mean_state_action_values):
StateNode._compile_value_functions_()
return StateNode._theano_mean_state_value_func_(policy, mean_state_action_values)
@staticmethod
def _state_value_variance_func_(policy, state_action_value_variances):
StateNode._compile_value_functions_()
return StateNode._theano_state_value_variance_func_(policy, state_action_value_variances)
@staticmethod
def _objective_func_(policy, state_action_value_variances):
StateNode._compile_value_functions_()
return StateNode._theano_objective_func_(policy, state_action_value_variances)
@staticmethod
def _mean_state_action_value_grad_(mean_transition_probs, mean_expected_rewards, mean_state_values, discount):
StateNode._compile_value_functions_()
return StateNode._theano_mean_state_action_value_grad_(mean_transition_probs, mean_expected_rewards, mean_state_values, discount)
@staticmethod
def _state_action_value_variance_grad_(mean_transition_probs, transition_prob_variances, expected_reward_variances, state_value_variances, mean_expected_rewards, mean_state_values, discount):
StateNode._compile_value_functions_()
return StateNode._theano_state_action_value_variance_grad_(mean_transition_probs, transition_prob_variances, np.diag(transition_prob_variances), expected_reward_variances, state_value_variances, mean_expected_rewards, mean_state_values, discount)
@staticmethod
def _mean_state_value_grad_(policy, mean_state_action_values):
StateNode._compile_value_functions_()
return StateNode._theano_mean_state_value_grad_(policy, mean_state_action_values)
@staticmethod
def _state_value_variance_grad_(policy, state_action_value_variances):
StateNode._compile_value_functions_()
return StateNode._theano_state_value_variance_grad_(policy, state_action_value_variances)
@staticmethod
def _objective_grad_(policy, state_action_value_variances):
StateNode._compile_value_functions_()
return StateNode._theano_objective_grad_(policy, state_action_value_variances)
    def __init__(self,
                 state,
                 parent,
                 discount,
                 alpha,
                 reward_prior_counts,
                 max_state_value_variance,
                 use_unbiased_variance,
                 terminal_prob,
                 is_terminal_state,
                 tree_policy):
        """
        Initialize a node of the active search tree.

        :param state: state corresponding to this node
        :param parent: parent `StateNode` (or `None` for the root)
        :param discount: discount for calculating values
        :param alpha: concentration for Dirichlet process (for computing expected changes)
        :param reward_prior_counts: a list of (reward, weight) pairs
        :param max_state_value_variance: maximum variance the state value can
            take (used to initialize non-terminal nodes)
        :param use_unbiased_variance: whether to use an unbiased estimate of the variance
        :param terminal_prob: probability to reach a terminal state (with zero value variance)
        :param is_terminal_state: whether this node's state is terminal
        :param tree_policy: Should be one of 'mc', 'ucb1', 'soft-max', 'best-prob'.
        :return:
        """
        # compile value functions (no-op after the first call)
        StateNode._compile_value_functions_()
        self._discount = discount
        self._use_unbiased_variance = use_unbiased_variance
        self._reward_prior_counts = reward_prior_counts
        self._max_state_value_variance = max_state_value_variance
        self._alpha = alpha
        self._terminal_prob = terminal_prob
        self._is_terminal_state = is_terminal_state
        self._tree_policy = tree_policy
        # characteristics for this node
        self._state = state # state corresponding to this state node
        self._parent = parent # parent state-node
        self._children = {} # dict from (action, state)-pairs to the corresponding state-nodes
        # state / state-action value and gradient (dependent variables)
        self._mean_state_value = 0
        self._mean_state_value_grad = 0
        # terminal states have a known (zero-variance) value
        self._state_value_variance = 0 if is_terminal_state else max_state_value_variance
        self._state_value_variance_grad = 0
        self._mean_state_action_values = np.array([])
        self._mean_state_action_values_grad = np.array([])
        self._state_action_value_variances = np.array([])
        self._state_action_value_variances_grad = np.array([])
        # expected changes of the state / state-action value variance upon a new sample
        self._expected_state_value_variance_change = 0 # change when sampling a new action
        self._expected_state_action_value_variance_changes = np.array([]) # change when sampling the corresponding action
        # reward, transition probabilities, policy (independent variables)
        self._mean_expected_reward = 0
        self._expected_reward_variance = 0
        self._mean_transition_probs = {} # action-->array dict
        self._transition_prob_variances = {} # action-->matrix dict
        self._policy = np.array([])
        # variables that need to be kept in sync with corresponding variables of children
        self._children_mean_state_values = {} # action-->array dict
        self._children_state_value_variances = {} # action-->array dict
        self._children_mean_expected_rewards = {} # action-->array dict
        self._children_expected_reward_variances = {} # action-->array dict
        # transition counts, visit counts, reward statistics
        self._action_counts = {} # action-->counts dict
        self._transition_counts = {} # action-->(state-->counts) dict
        # NOTE(review): _reward_counts sums the weights x[1], but _reward_sum
        # and _squared_reward_sum ignore the weights -- for weighted prior
        # counts one might expect x[0]*x[1] (and x[0]**2*x[1]); confirm
        self._reward_counts = sum([x[1] for x in self._reward_prior_counts])
        self._reward_sum = sum([x[0] for x in self._reward_prior_counts])
        self._squared_reward_sum = sum([x[0]**2 for x in self._reward_prior_counts])
        # up-to-date flags
        # ONLY set via the set_*_up_to_date() functions to ensure correct propagation of signals
        # ALWAYS use the check_*_up_to_date() functions to check for the same reason
        self.__value_up_to_date__ = True
        self.__grad_up_to_date__ = True
        self.__expected_changes_up_to_date__ = True
        self.logger = logging.getLogger(__name__+'.StateNode')
        # explicitly change the flag from True to False to trigger signals
        self._set_value_up_to_date(False)
        self._set_grad_up_to_date(False)
        self._set_expected_changes_up_to_date(False)
def set_logger_level(self, level):
old_logger_level = self.logger.level
self.logger.setLevel(level)
return old_logger_level
def _set_value_up_to_date(self, value):
self.logger.debug('set_value_up_to_date({}) for {} {}'.format(value, self._state, id(self)))
# set flag
self.__value_up_to_date__ = value
# if false then grad isn't up to date either
if not value:
self._set_grad_up_to_date(value)
self._set_expected_changes_up_to_date(value)
# if set to false propagate to parents
if not value and self._parent is not None and self._parent._check_value_up_to_date():
self._parent._set_value_up_to_date(value)
def _set_grad_up_to_date(self, value):
self.logger.debug('set_grad_up_to_date({}) for {} {}'.format(value, self._state, id(self)))
self.__grad_up_to_date__ = value
# if set to false propagate to children
if not value:
for child in self._children.values():
if child._check_grad_up_to_date():
child._set_grad_up_to_date(value)
    def _set_expected_changes_up_to_date(self, value):
        """Set the expected-changes up-to-date flag (no propagation needed)."""
        self.logger.debug('set_expected_changes_up_to_date({}) for {} {}'.format(value, self._state, id(self)))
        # set flag
        self.__expected_changes_up_to_date__ = value
    def _check_value_up_to_date(self):
        """Return whether the cached value statistics are current."""
        self.logger.debug('check_value_up_to_date for {} {}'.format(self._state, id(self)))
        # don't need to check children because set_value_up_to_data() propagates to parents
        return self.__value_up_to_date__
    def _check_grad_up_to_date(self):
        """Return whether the cached gradients are current."""
        self.logger.debug('check_grad_up_to_date for {} {}'.format(self._state, id(self)))
        # don't need to check parent because set_grad_up_to_data() propagates to children
        return self.__grad_up_to_date__
    def _check_expected_changes_up_to_date(self):
        """Return whether the cached expected variance changes are current."""
        self.logger.debug('check_expected_changes_up_to_date for {} {}'.format(self._state, id(self)))
        return self.__expected_changes_up_to_date__
def _ensure_value_up_to_date(self):
self.logger.debug('update value for {} {}'.format(self._state, id(self)))
if not self._check_value_up_to_date():
self._compute_values()
self._set_value_up_to_date(True)
else:
self.logger.debug(' already up-to-date')
def _ensure_grad_up_to_date(self):
self.logger.debug('update grad for {} {}'.format(self._state, id(self)))
self._ensure_value_up_to_date()
if not self._check_grad_up_to_date():
if self._parent is not None:
self._parent._ensure_grad_up_to_date()
self._compute_grad()
self._set_grad_up_to_date(True)
else:
self.logger.debug(' already up-to-date')
def _ensure_expected_changes_up_to_date(self):
self.logger.debug('update expected_changes for {} {}'.format(self._state, id(self)))
self._ensure_value_up_to_date()
if not self._check_expected_changes_up_to_date():
self._compute_expected_changes()
self._set_expected_changes_up_to_date(True)
else:
self.logger.debug(' already up-to-date')
    def _best_action_probability(self, n_min=10, n_max=10000, eps=5e-2):
        """Estimate, for each action, the probability that it has the largest
        state-action value.

        Samples from the Gaussian posterior over action values (independent
        per action) and counts which action attains the maximum. Each draw is
        paired with a sample shifted onto the diagonal projection of the mean
        (presumably a variance-reduction / importance-weighting trick --
        confirm against the derivation) and weighted by the density ratio.

        :param n_min: minimum number of hits required per action
        :param n_max: hard cap on the total number of samples
        :param eps: target standard error of the per-action estimates
        :return: normalized array of per-action probabilities, aligned with
            `self._mean_state_action_values`
        """
        if len(self._mean_state_action_values)==0:
            return np.array([])
        elif len(self._mean_state_action_values)==1:
            # a single action is best with certainty
            return np.array([1.])
        dist = multivariate_normal(mean=self._mean_state_action_values,
                                   cov=np.diag(self._state_action_value_variances))
        # unit vector along the diagonal (all actions equal)
        unit_vector = np.ones_like(self._mean_state_action_values)
        unit_vector /= np.sqrt(unit_vector.dot(unit_vector))
        # projection of the mean onto the diagonal, and the shift towards it
        Q_perp = unit_vector * unit_vector.dot(self._mean_state_action_values)
        shift = Q_perp - self._mean_state_action_values
        action_sums = np.zeros_like(self._mean_state_action_values)
        action_square_sums = np.zeros_like(self._mean_state_action_values)
        action_counts = np.zeros_like(self._mean_state_action_values)
        estimates_std = np.ones_like(self._mean_state_action_values) + eps
        counts = 0
        while counts < n_max and (action_counts.min() < n_min or np.max(estimates_std) > eps):
            counts += 1
            sample = dist.rvs()
            shifted_sample = sample + shift
            prob = dist.pdf(sample)
            shifted_prob = dist.pdf(shifted_sample)
            if np.random.randint(2):
                # unshifted
                max_action = np.argmax(sample)
                weight = 2*prob/(prob + shifted_prob)
                action_sums[max_action] += weight
                action_square_sums[max_action] += weight**2
                action_counts[max_action] += 1
            else:
                # shifted
                max_action = np.argmax(shifted_sample)
                weight = 2*shifted_prob/(prob + shifted_prob)
                action_sums[max_action] += weight
                action_square_sums[max_action] += weight**2
                action_counts[max_action] += 1
            # NOTE(review): on the first pass counts-1 == 0, so this divides
            # by zero (numpy emits a runtime warning and yields inf/nan);
            # the loop still terminates via n_min, but confirm intended
            estimates_std = np.sqrt((action_square_sums/counts - (action_sums/counts)**2)/(counts-1))
            # print('counts:', counts)
            # print('probs:', action_sums / counts)
            # print('std:', estimates_std)
        if counts >= n_max:
            self.logger.warning('Reached max number of samples Monte-Carlo integration ({})'.format(n_max))
        # print(counts, estimates_std)
        action_probs = action_sums / counts
        action_probs /= action_probs.sum() # normalize
        return action_probs
def mean_state_value(self):
self._ensure_value_up_to_date()
return self._mean_state_value*1
def state_value_variance(self):
self._ensure_value_up_to_date()
return self._state_value_variance*1
def mean_state_action_value(self, action):
self._ensure_value_up_to_date()
for a, val in zip(self._action_counts.keys(), self._mean_state_action_values):
if a == action:
return val
return None
def state_action_value_variance(self, action):
self._ensure_value_up_to_date()
for a, val in zip(self._action_counts.keys(), self._state_action_value_variances):
if a == action:
return val
return None
def objective(self):
self._ensure_value_up_to_date()
return self._objective_func_(policy=self._policy, state_action_value_variances=self._state_action_value_variances)
def mean_state_value_grad(self):
self._ensure_grad_up_to_date()
return self._mean_state_value_grad
def state_value_variance_grad(self):
self._ensure_grad_up_to_date()
return self._state_value_variance_grad
def mean_state_action_value_grad(self, action):
self._ensure_grad_up_to_date()
for a, val in zip(self._action_counts.keys(), self._mean_state_action_values_grad):
if a == action:
return val
return None
def state_action_value_variance_grad(self, action):
self._ensure_grad_up_to_date()
for a, val in zip(self._action_counts.keys(), self._state_action_value_variances_grad):
if a == action:
return val
return None
def expected_state_action_value_variance_change(self, action):
self._ensure_expected_changes_up_to_date()
for a, val in zip(self._action_counts.keys(), self._expected_state_action_value_variance_changes):
if a == action:
return val
return None
def expected_state_value_variance_change(self):
self._ensure_expected_changes_up_to_date()
return self._expected_state_value_variance_change
def policy(self, action):
self._ensure_value_up_to_date()
for a, val in zip(self._action_counts.keys(), self._policy):
if a == action:
return val
return None
def mean_expected_reward(self):
self._ensure_value_up_to_date()
return self._mean_expected_reward
def expected_reward_variance(self):
self._ensure_value_up_to_date()
return self._expected_reward_variance
def mean_transition_prob(self, action, state):
self._ensure_value_up_to_date()
for existing_action, state_dict in self._transition_counts.items():
if action == existing_action:
for state_idx, existing_state in enumerate(state_dict):
if state == existing_state:
return self._mean_transition_probs[action][state_idx]
return None
def transition_prob_variance(self, action, state_1, state_2):
self._ensure_value_up_to_date()
for existing_action, state_dict in self._transition_counts.items():
if action == existing_action:
for state_1_idx, existing_state_1 in enumerate(state_dict):
if state_1 == existing_state_1:
for state_2_idx, existing_state_2 in enumerate(state_dict):
if state_2 == existing_state_2:
return self._transition_prob_variances[action][state_1_idx][state_2_idx]
return None
def propagate_to_root(self):
"""
Generator expression that yields all nodes on the path from this one to the root (this one and the root)
"""
current_node = self
while current_node is not None:
yield current_node
current_node = current_node._parent
def propagate_to_leaves(self):
"""
Generator expression that yields all nodes that are descendants of this node (including the node itself)
"""
nodes_to_be_processed = [self]
for current_node in nodes_to_be_processed:
yield current_node
for child in current_node._children.values():
nodes_to_be_processed.append(child)
def _compute_values(self):
# old_logger_level = self.set_logger_level(logging.WARNING)
self.logger.info("compute_value for {} {}".format(self._state, id(self)))
# get values from children
for action, state_dict in self._transition_counts.items():
self._children_mean_state_values[action] = np.zeros(len(state_dict))
self._children_state_value_variances[action] = np.zeros(len(state_dict))
self._children_mean_expected_rewards[action] = np.zeros(len(state_dict))
self._children_expected_reward_variances[action] = np.zeros(len(state_dict))
for state_idx, state in enumerate(state_dict.keys()):
self._children_mean_state_values[action][state_idx] = \
self._children[(action, state)].mean_state_value()
self._children_state_value_variances[action][state_idx] = \
self._children[(action, state)].state_value_variance()
self._children_mean_expected_rewards[action][state_idx] = \
self._children[(action, state)].mean_expected_reward()
self._children_expected_reward_variances[action][state_idx] = \
self._children[(action, state)].expected_reward_variance()
# transition probabilities
for action, state_dict in self._transition_counts.items():
alpha = np.fromiter(state_dict.values(), dtype=np.float, count=len(state_dict))
self._mean_transition_probs[action] = Dirichlet.mean(alpha)
self._transition_prob_variances[action] = Dirichlet.variance(alpha)
# state-action values
self._mean_state_action_values = np.zeros(len(self._transition_counts))
self._state_action_value_variances = np.zeros(len(self._transition_counts))
for action_idx, action in enumerate(self._transition_counts.keys()):
self.logger.debug(" action: {}".format(action))
self.logger.debug(" mean_transition_probs: {}".format(self._mean_transition_probs[action]))
self.logger.debug(" transition_prob_variances: {}".format(self._transition_prob_variances[action]))
self.logger.debug(" mean_expected_rewards: {}".format(self._children_mean_expected_rewards[action]))
self.logger.debug(" expected_reward_variances: {}".format(self._children_expected_reward_variances[action]))
self.logger.debug(" mean_state_values: {}".format(self._children_mean_state_values[action]))
self.logger.debug(" state_value_variances: {}".format(self._children_state_value_variances[action]))
self.logger.debug(" discount: {}".format(self._discount))
self._mean_state_action_values[action_idx] = StateNode._mean_state_action_value_func_(
mean_transition_probs=self._mean_transition_probs[action],
mean_expected_rewards=self._children_mean_expected_rewards[action],
mean_state_values=self._children_mean_state_values[action],
discount=self._discount)
self._state_action_value_variances[action_idx] = StateNode._state_action_value_variance_func_(
mean_transition_probs=self._mean_transition_probs[action],
transition_prob_variances=self._transition_prob_variances[action],
expected_reward_variances=self._children_expected_reward_variances[action],
state_value_variances=self._children_state_value_variances[action],
mean_expected_rewards=self._children_mean_expected_rewards[action],
mean_state_values=self._children_mean_state_values[action],
discount=self._discount
)
# policy
if self._tree_policy=='mc':
# (Monte-Carlo)
self._policy = np.zeros(len(self._transition_counts))
count_sum = sum(self._action_counts.values())
for action_idx, action_counts in enumerate(self._action_counts.values()):
prob = action_counts/count_sum
self._policy[action_idx] = prob
elif self._tree_policy[0]=='ucb1':
# (UCB1)
action_counts = np.empty_like(self._mean_state_action_values)
for action_idx, action in enumerate(self._action_counts.keys()):
action_counts[action_idx] = self._action_counts[action]
bounds = self._mean_state_action_values + 2 * self._tree_policy[1] * np.sqrt(2*np.log(sum(self._action_counts.values())) / action_counts)
self._policy = np.zeros_like(self._mean_state_action_values)
if len(self._policy) > 0:
max = bounds.max()
for action_idx in range(len(bounds)):
self._policy[action_idx] = 1 if bounds[action_idx]==max else 0
self._policy /= sum(self._policy)
elif self._tree_policy[0]=='soft-max':
# (Soft-Max upper bound)
self._policy = self._mean_state_action_values + self._tree_policy[1]*np.sqrt(self._state_action_value_variances)
self._policy /= self._tree_policy[1]*np.sum(np.sqrt(self._state_action_value_variances)) # temperature base on standard deviation variances
self._policy = np.exp(self._policy)
self._policy /= np.sum(self._policy)
elif self._tree_policy[0]=='best-prob':
self._policy = self._best_action_probability()
else:
raise UserWarning("Unknown tree-policy '{}'".format(self._tree_policy))
if len(self._policy)>0 and abs(np.sum(self._policy)-1) > 1e-10:
raise UserWarning('Policy does not sum to 1 (sum={})'.format(np.sum(self._policy)))
# state value
if self._transition_counts:
self.logger.debug(" policy: {}".format(self._policy))
self.logger.debug(" mean_state_action_values: {}".format(self._mean_state_action_values))
self.logger.debug(" state_action_value_variances: {}".format(self._state_action_value_variances))
self._mean_state_value = StateNode._mean_state_value_func_(
policy=self._policy,
mean_state_action_values=self._mean_state_action_values
)
self._state_value_variance = StateNode._state_value_variance_func_(
policy=self._policy,
state_action_value_variances=self._state_action_value_variances
)
self.logger.debug(" mean_state_value: {}".format(self._mean_state_value))
self.logger.debug(" state_value_variance: {}".format(self._state_value_variance))
# reward
self._mean_expected_reward, _ , self._expected_reward_variance = mean_var(
counts=self._reward_counts,
sum_of_values=self._reward_sum,
sum_of_squared_values=self._squared_reward_sum,
unbiased_variance_estimate=self._use_unbiased_variance
)
# reset logging level
# self.set_logger_level(old_logger_level)
    def _compute_grad(self):
        """Propagate objective gradients one tree level down.

        Computes the gradient of the root objective w.r.t. this node's
        state-action value moments and writes the resulting gradients of the
        children's state-value moments into the child nodes. Assumes this
        node's own ``_mean_state_value_grad`` / ``_state_value_variance_grad``
        were already set by its parent (the root initialises them here).
        """
        self.logger.info("compute_grad for {} {}".format(self._state, id(self)))
        # NOTE(review): 'parent' is assigned but never used below
        parent = self._parent
        # state value grad for root node
        if self._parent is None:
            # this is the root node: the objective is a function of the root's
            # action-value variances, so seed the gradient chain here
            self._mean_state_value_grad = 0
            self._state_value_variance_grad = 0
            self._mean_state_action_values_grad = np.zeros_like(self._mean_state_action_values)
            self._state_action_value_variances_grad = self._objective_grad_(policy=self._policy, state_action_value_variances=self._state_action_value_variances)[0]
        else:
            self._mean_state_action_values_grad = np.zeros_like(self._mean_state_action_values)
            self._state_action_value_variances_grad = np.zeros_like(self._state_action_value_variances)
        # compute grad of the action values and for children state value
        mean_state_value_grad = StateNode._mean_state_value_grad_(policy=self._policy, mean_state_action_values=self._mean_state_action_values)
        state_value_variance_grad = StateNode._state_value_variance_grad_(policy=self._policy, state_action_value_variances=self._state_action_value_variances)
        for action_idx, (action, state_dict) in enumerate(self._transition_counts.items()):
            # grad of action values (chain rule through this node's state value)
            if self._parent is not None:
                # this is NOT the root node (root node was handled above)
                self._mean_state_action_values_grad[action_idx] = self._mean_state_value_grad * mean_state_value_grad[0][action_idx]
                self._state_action_value_variances_grad[action_idx] = self._state_value_variance_grad * state_value_variance_grad[0][action_idx]
            # grad of children's state values
            mean_state_action_value_grad = StateNode._mean_state_action_value_grad_(mean_transition_probs=self._mean_transition_probs[action],
                                                                                    mean_expected_rewards=self._children_mean_expected_rewards[action],
                                                                                    mean_state_values=self._children_mean_state_values[action],
                                                                                    discount=self._discount)
            state_action_value_variance_grad = StateNode._state_action_value_variance_grad_(mean_transition_probs=self._mean_transition_probs[action],
                                                                                            transition_prob_variances=self._transition_prob_variances[action],
                                                                                            expected_reward_variances=self._children_expected_reward_variances[action],
                                                                                            state_value_variances=self._children_state_value_variances[action],
                                                                                            mean_expected_rewards=self._children_mean_expected_rewards[action],
                                                                                            mean_state_values=self._children_mean_state_values[action],
                                                                                            discount=self._discount)
            for state_idx, state in enumerate(state_dict.keys()):
                # child's mean-value gradient receives contributions both via the
                # mean action value and via the action-value variance
                self._children[(action, state)]._mean_state_value_grad = \
                    self._mean_state_action_values_grad[action_idx] * mean_state_action_value_grad[0][state_idx] + \
                    self._state_action_value_variances_grad[action_idx] * state_action_value_variance_grad[0][state_idx]
                self._children[(action, state)]._state_value_variance_grad = \
                    self._state_action_value_variances_grad[action_idx] * state_action_value_variance_grad[1][state_idx]
def _compute_expected_changes(self):
self.logger.info("compute_expected_change for {} {}".format(self._state, id(self)))
self._expected_state_action_value_variance_changes = -self._state_action_value_variances.copy()
# for all actions simulated the effect of drawing a sample
for action_idx, (action, state_dict) in enumerate(self._transition_counts.items()):
n = sum(state_dict.values())
# observation may be an already known state
for state_idx, ns in enumerate(state_dict.values()):
weight = ns/(self._alpha+n)
transition_counts = np.fromiter(state_dict.values(), dtype=np.float, count=len(state_dict))
transition_counts[state_idx] += 1
mean_transition_probs = Dirichlet.mean(transition_counts)
transition_prob_variances = Dirichlet.variance(transition_counts)
expected_reward_variances = self._children_expected_reward_variances[action].copy()
expected_reward_variances[state_idx] *= (ns+1)/(ns+2) * 1/((ns+1)**2) * (ns**2/(ns/(ns+1)) + ns/(ns+1))
state_action_value_variance = StateNode._state_action_value_variance_func_(
mean_transition_probs=mean_transition_probs,
transition_prob_variances=transition_prob_variances,
expected_reward_variances=expected_reward_variances,
state_value_variances=self._children_state_value_variances[action],
mean_expected_rewards=self._children_mean_expected_rewards[action],
mean_state_values=self._children_mean_state_values[action],
discount=self._discount
)
correction = 1/(ns+1) * mean_transition_probs[state_idx] * expected_reward_variances[state_idx]
self._expected_state_action_value_variance_changes[action_idx] += weight * (state_action_value_variance + correction)
# observation may also be a new state
ns = sum([x[1] for x in self._reward_prior_counts])
weight = self._alpha/(self._alpha+n)
transition_counts = np.empty(len(state_dict)+1)
state_value_variances = np.empty(len(state_dict)+1)
mean_state_values = np.empty(len(state_dict)+1)
mean_expected_rewards = np.empty(len(state_dict)+1)
expected_reward_variances = np.empty(len(state_dict)+1)
transition_counts[0:-1] = [x for x in state_dict.values()]
state_value_variances[0:-1] = self._children_state_value_variances[action]
mean_state_values[0:-1] = self._children_mean_state_values[action]
mean_expected_rewards[0:-1] = self._children_mean_expected_rewards[action]
expected_reward_variances[0:-1] = self._children_expected_reward_variances[action]
transition_counts[-1] = ns
state_value_variances[-1] = self._max_state_value_variance
mean_state_values[-1] = 0
mean_expected_rewards[-1], _ , expected_reward_variances[-1] = mean_var(
counts=ns,
sum_of_values=sum([x[0] for x in self._reward_prior_counts]),
sum_of_squared_values=sum([x[0]**2 for x in self._reward_prior_counts]),
unbiased_variance_estimate=self._use_unbiased_variance)
expected_reward_variances[-1] *= (ns+1)/(ns+2) * 1/((ns+1)**2) * (ns**2/(ns/(ns+1)) + ns/(ns+1))
mean_transition_probs = Dirichlet.mean(transition_counts)
transition_prob_variances = Dirichlet.variance(transition_counts)
state_action_value_variance = StateNode._state_action_value_variance_func_(
mean_transition_probs=mean_transition_probs,
transition_prob_variances=transition_prob_variances,
expected_reward_variances=expected_reward_variances,
state_value_variances=state_value_variances,
mean_expected_rewards=mean_expected_rewards,
mean_state_values=mean_state_values,
discount=self._discount
)
correction = 1/(ns+1) * mean_transition_probs[-1] * expected_reward_variances[-1]
self._expected_state_action_value_variance_changes[action_idx] += weight * (state_action_value_variance + correction)
# simulate the effect of sampling a new action by temporally adding a fake transition
old_mean_state_value = self.mean_state_value()
old_state_value_variance = self.state_value_variance()
self._transition_counts[None] = {None: 1}
self._action_counts[None] = 1
child_node = StateNode(state=None,
parent=self,
discount=self._discount,
alpha=self._alpha,
use_unbiased_variance=self._use_unbiased_variance,
reward_prior_counts=self._reward_prior_counts,
max_state_value_variance=self._max_state_value_variance,
terminal_prob=self._terminal_prob,
is_terminal_state=False,
tree_policy=self._tree_policy)
self._children[(None, None)] = child_node
new_state_value_variance = 0
# once with standard initialization
self._compute_values()
new_state_value_variance += (1-self._terminal_prob)*self._state_value_variance
# once assuming a terminal state is reached
child_node._state_value_variance = 0
self._compute_values()
new_state_value_variance += self._terminal_prob*self._state_value_variance
self._expected_state_value_variance_change = new_state_value_variance - old_state_value_variance
# remove fake transition
del self._transition_counts[None]
del self._action_counts[None]
del self._children[(None,None)]
self._compute_values()
if self._transition_counts:
# has children so state value variance should have been recomputed
if self._tree_policy[0]=='best-prob':
# only report major deviations but ignore small ones caused by Monte-Carlo integration
if np.abs(2*(self._mean_state_value-old_mean_state_value)/(self._mean_state_value+old_mean_state_value))>1e-2:
self.logger.warning('mean state value does not match: {} != {}'.format(self._mean_state_value, old_mean_state_value))
if np.abs(2*(self._state_value_variance-old_state_value_variance)/(self._state_value_variance+old_state_value_variance))>1e-2:
self.logger.warning('state value variance does not match: {} != {}'.format(self._state_value_variance, old_state_value_variance))
else:
# expect exact values
if self._mean_state_value != old_mean_state_value:
raise UserWarning('mean state value does not match: {} != {}'.format(self._mean_state_value, old_mean_state_value))
if self._state_value_variance != old_state_value_variance:
raise UserWarning('state value variance does not match: {} != {}'.format(self._state_value_variance, old_state_value_variance))
else:
# has not children --> need to reset state value and state value variance manually
self._mean_state_value = old_mean_state_value
self._state_value_variance = old_state_value_variance
    def add_transition(self, action, state, reward, is_terminal_state, counts=1):
        """Record an observed transition (this state, action) --> state.

        Updates the transition and action counts, creates a child node for the
        (action, state) pair if it does not exist yet, accumulates the reward
        statistics on the child, and flags the child (and hence this node) as
        outdated.

        Returns a tuple ``(child_node, is_new_child_node)``.

        NOTE(review): when ``counts > 1`` the reward sums are increased only
        once while ``_reward_counts`` grows by ``counts`` — presumably callers
        then pass an aggregated reward; confirm against call sites.
        """
        self.logger.info("add_transition: in {} action {} --> {}, r={} {}".format(self._state, action, state, reward, id(self)))
        # assign transition counts in this node (initialize first if necessary)
        if action not in self._transition_counts:
            assert(action not in self._action_counts)
            self._transition_counts[action] = {}
            self._action_counts[action] = 0
        if state not in self._transition_counts[action]:
            self._transition_counts[action][state] = 0
        self._transition_counts[action][state] += counts
        self._action_counts[action] += counts
        # add child node if necessary
        child_node = None if (action, state) not in self._children else self._children[(action, state)]
        is_new_child_node = child_node is None
        if is_new_child_node:
            self.logger.info(' new child')
            child_node = StateNode(state=state,
                                   parent=self,
                                   discount=self._discount,
                                   alpha=self._alpha,
                                   use_unbiased_variance=self._use_unbiased_variance,
                                   reward_prior_counts=self._reward_prior_counts,
                                   max_state_value_variance=self._max_state_value_variance,
                                   terminal_prob=self._terminal_prob,
                                   is_terminal_state=is_terminal_state,
                                   tree_policy=self._tree_policy)
            self._children[(action, state)] = child_node
        # assign visit counts and rewards for child node
        child_node._reward_counts += counts
        child_node._reward_sum += reward
        child_node._squared_reward_sum += reward ** 2
        # flag as outdated (this node is marked as outdated by child node)
        child_node._set_value_up_to_date(False)
        # return
        return child_node, is_new_child_node
class ActiveSearchTree(object):
"""Search Tree"""
def __init__(self, environment,
discount,
alpha,
use_unbiased_variance,
reward_prior_counts,
max_state_value_variance,
terminal_prob,
tree_policy,
tmp_dir=None,
delete_tmp_dir=True):
"""Construct Search Tree"""
self.environment = environment
self._discount = discount
if tmp_dir is None:
self.tmp_dir = './tmp_' + random_string(10)
os.makedirs(self.tmp_dir, exist_ok=False)
else:
self.tmp_dir = tmp_dir
os.makedirs(self.tmp_dir, exist_ok=True)
self.delete_tmp_dir = delete_tmp_dir
self.root_node = StateNode(state=environment.state,
parent=None,
discount=self._discount,
alpha=alpha,
use_unbiased_variance=use_unbiased_variance,
max_state_value_variance=max_state_value_variance,
reward_prior_counts=reward_prior_counts,
terminal_prob=terminal_prob,
is_terminal_state=environment.is_terminal_state(),
tree_policy=tree_policy)
self.logger = logging.getLogger(__name__+'.ActiveSearchTree')
def __del__(self):
if self.delete_tmp_dir:
shutil.rmtree(self.tmp_dir)
def random_sample(self):
# collect all nodes
all_nodes = []
nodes_to_process = [self.root_node]
depth = 0 # todo remove, unused
while nodes_to_process:
new_nodes_to_process = []
for node in nodes_to_process:
all_nodes.append(node)
for new_node in node.children.values():
new_nodes_to_process.append(new_node)
nodes_to_process = new_nodes_to_process
depth += 1
# choose random node and action and perform a transition
sample_node = random.choice(all_nodes)
self.environment.state = sample_node.state
action = random.choice(self.environment.possible_actions())
state, reward = self.environment.transition(action)
sample_node.add_transition(action, state, reward)
    def expand_full_tree(self, n_samples, max_samples=1000):
        """Expand the whole tree by sampling every action from every node.

        Each node is sampled ``n_samples`` times per possible action; newly
        discovered non-terminal states are queued for expansion as well. Stops
        early (with a warning) once more than ``max_samples`` environment
        transitions have been drawn.
        """
        samples = 0
        unsampled_nodes = set(self.all_nodes())
        while unsampled_nodes:
            node = unsampled_nodes.pop()
            for _ in range(n_samples):
                # reset the (stateful) environment to this node's state
                self.environment.state = node._state
                for action in self.environment.possible_actions():
                    # reset again: the previous transition moved the environment
                    self.environment.state = node._state
                    state, reward = self.environment.transition(action)
                    child, new = node.add_transition(action, state, reward, self.environment.is_terminal_state())
                    if new and not self.environment.is_terminal_state():
                        # newly discovered non-terminal state: expand it too
                        unsampled_nodes.add(child)
                    samples += 1
                    if samples > max_samples:
                        self.logger.warning('Reached maximum number of samples ({}) while expanding tree.'.format(max_samples))
                        return
@staticmethod
def impact(grad, change):
return abs(grad*change)
def active_sample(self):
max_node = None
max_action = None
# make sure all actions from the root node were sampled at least once
self.environment.state = self.root_node._state
possible_actions = set(self.environment.possible_actions())
taken_actions = set(self.root_node._transition_counts.keys())
if possible_actions - taken_actions:
max_node = self.root_node
max_action = random.choice(list(possible_actions - taken_actions))
else:
# else find node and action with maximum impact
max_impact = None
for node in self.all_nodes():
self.environment.state = node._state
if self.environment.is_terminal_state():
continue
for action in self.environment.possible_actions():
grad = node.state_action_value_variance_grad(action)
var_change = node.expected_state_action_value_variance_change(action)
if var_change is None:
assert(node != self.root_node)
# action was not tried before
grad = node.state_value_variance_grad()
var_change = node.expected_state_value_variance_change()
impact = self.impact(grad=grad, change=var_change)
if max_node is None or impact > max_impact:
# Note: This does tie-breaking as follows: If there are nodes with unsampled
# actions the LAST of these is chosen. Otherwise, for equal impact the FIRST
# is chosen
max_node = node
max_action = action
max_impact = impact
# perform transition
if max_node is None:
raise UserWarning('Could not find a node to sample from')
else:
self.environment.state = max_node._state
state, reward = self.environment.transition(max_action)
max_node.add_transition(max_action, state, reward, self.environment.is_terminal_state())
print('transition {} / {} --> {} / {}'.format(max_node._state, max_action, state, reward))
    def rollout(self, rollout_policy, value_estimation, exploration=2):
        """
        Make one rollout in MCTS: descend from the root, at each node choosing
        an untried action at random or otherwise the action with the maximum
        UCT upper bound, until a terminal condition is reached.

        :param rollout_policy: Policy used for the rollout (currently only 'uct' is supported).
        :param exploration: Exploration rate (Cp factor in UCT).
        :param value_estimation: How to initialize leaf nodes. One of
            ['one-look-ahead' | 'trial-rollout' | 'store-rollout'].
            'one-look-ahead' stops one step past the tree; 'trial-rollout'
            continues to a terminal state, collecting rewards outside the tree
            and writing the discounted return into the leaf's mean state value.
        :return: None
        """
        current_node = self.root_node
        # is_leaf_node is set to add_transition's "new child" flag below, i.e.
        # it becomes True once the walk leaves the already-expanded tree
        is_leaf_node = False
        self.logger.info('new rollout')
        self.environment.state = current_node._state
        return_list = []
        while True:
            # check terminal conditions
            if value_estimation=='one-look-ahead':
                if is_leaf_node or self.environment.is_terminal_state():
                    break
            elif value_estimation=='trial-rollout':
                if self.environment.is_terminal_state():
                    break
            elif value_estimation=='store-rollout':
                if self.environment.is_terminal_state():
                    break
            else:
                raise UserWarning("Unknown value estimation strategy '{}'".format(value_estimation))
            # set state (past the tree frontier the environment already carries the state)
            if not is_leaf_node:
                self.environment.state = current_node._state
            self.logger.debug('state: {}'.format(current_node._state))
            # choose new action or action with maximum upper bound
            possible_actions = set(self.environment.possible_actions())
            if is_leaf_node:
                taken_actions = set()
            else:
                taken_actions = set(current_node._transition_counts.keys())
            self.logger.debug('possible: {}'.format(possible_actions))
            self.logger.debug('taken: {}'.format(taken_actions))
            if possible_actions - taken_actions:
                # untried actions take precedence; pick one uniformly at random
                action = random.choice(list(possible_actions - taken_actions))
                self.logger.debug('choosing {} (was not take before)'.format(action))
            else:
                # all actions tried: UCT upper bound with random tie-breaking
                max_upper_bound = None
                action_list = []
                for candidate_action, candidate_action_counts in current_node._action_counts.items():
                    self.logger.debug('action={}, Q={}, ns={}, nsa={}'.format(candidate_action,
                                                                              current_node.mean_state_action_value(candidate_action),
                                                                              sum(current_node._action_counts.values()),
                                                                              candidate_action_counts))
                    if rollout_policy== 'uct':
                        bound = current_node.mean_state_action_value(candidate_action) + \
                                2 * exploration * \
                                np.sqrt(2*np.log(sum(current_node._action_counts.values())) / candidate_action_counts)
                    else:
                        raise UserWarning("Unknown rollout policy '{}'".format(rollout_policy))
                    if max_upper_bound is None or bound > max_upper_bound:
                        action_list = [candidate_action]
                        max_upper_bound = bound
                    elif bound == max_upper_bound:
                        action_list += [candidate_action]
                action = random.choice(action_list)
                self.logger.debug('choosing {}, with upper bound {}'.format(action, max_upper_bound))
            # perform transition
            state, reward = self.environment.transition(action)
            # update node
            if is_leaf_node and value_estimation=='trial-rollout':
                # beyond the tree frontier: just collect rewards for the return
                return_list += [reward]
            else:
                # NOTE(review): add_transition returns (child, is_new_child);
                # the second value is used here as the leaf indicator
                current_node, is_leaf_node = current_node.add_transition(action=action,
                                                                         state=state,
                                                                         reward=reward,
                                                                         is_terminal_state=self.environment.is_terminal_state())
        if value_estimation=='trial-rollout':
            return_value = 0
            discount_factor = 1
            # NOTE(review): the discount is applied before adding each reward,
            # so even the first collected reward is discounted once — confirm
            # this matches the intended return definition
            for r in return_list:
                discount_factor *= self._discount
                return_value += discount_factor * r
            current_node._mean_state_value = return_value
def all_nodes(self):
return self.root_node.propagate_to_leaves()
    def check_grad(self, epsilon=1e-10, threshold=1e-5):
        """Compare analytic gradients against forward finite differences.

        For every non-root node, perturbs the cached mean state value, state
        value variance, and each action's value moments by ``epsilon``, reads
        off the change of the root objective, and logs an error whenever the
        relative deviation from the analytic gradient exceeds ``threshold``.

        The ``__value_up_to_date__`` assignments deliberately re-arm the cache
        flag right after invalidation so that the perturbed value is used
        as-is instead of being recomputed.
        """
        self.logger.info('check_grad')
        # compute gradient analytically
        grad = {}
        for node in self.all_nodes():
            Q_grad = {}
            for action in node._transition_counts.keys():
                Q_grad[action] = {'mean': node.mean_state_action_value_grad(action),
                                  'var': node.state_action_value_variance_grad(action)}
            grad[node] = {'mean': node.mean_state_value_grad(),
                          'var': node.state_value_variance_grad(),
                          'Q': Q_grad}
        # get objective value
        objective = self.root_node.objective()
        # compute gradient via (forward) finite differences
        for node in self.all_nodes():
            # skip root node
            if node == self.root_node:
                self.logger.info('skipping root node {}'.format(node._state))
                continue
            # print state info only once
            state_info_printed = False
            # get old values
            old_mean_state_value = node.mean_state_value()
            old_state_value_variance = node.state_value_variance()
            # compute gradient for mean_state_value
            node._mean_state_value = old_mean_state_value + epsilon
            node._set_value_up_to_date(False)
            node.__value_up_to_date__ = True # prevent update
            mean_state_value_grad = (self.root_node.objective() - objective)/epsilon
            node._mean_state_value = old_mean_state_value # reset
            # compute gradient for state_value_variance
            node._state_value_variance = old_state_value_variance + epsilon
            node._set_value_up_to_date(False)
            node.__value_up_to_date__ = True # prevent update
            state_value_variance_grad = (self.root_node.objective() - objective)/epsilon
            node._state_value_variance = old_state_value_variance # reset
            # check threshold (deviations are made relative to the objective)
            mean_state_value_deviation = abs(grad[node]['mean']-mean_state_value_grad)/max(objective,1)
            state_value_variance_deviation = abs(grad[node]['var']-state_value_variance_grad)/max(objective,1)
            if mean_state_value_deviation > threshold or state_value_variance_deviation > threshold:
                if not state_info_printed:
                    self.logger.error('error in gradient')
                    self.logger.error(' state {}'.format(node._state))
                    self.logger.error(' objective {}'.format(objective))
                    state_info_printed = True
                self.logger.error(' mean state value grad (analytic/numeric) {} / {}'.format(grad[node]['mean'], mean_state_value_grad))
                self.logger.error(' state value variance grad (analytic/numeric) {} / {}'.format(grad[node]['var'], state_value_variance_grad))
            for action_idx, action in enumerate(node._transition_counts.keys()):
                # get old values
                old_mean_state_action_value = node.mean_state_action_value(action)
                old_state_action_value_variance = node.state_action_value_variance(action)
                # compute gradient for mean_state_action_value (the node's state
                # value moments are recomputed from the perturbed action values)
                node._mean_state_action_values[action_idx] = old_mean_state_action_value + epsilon
                node._set_value_up_to_date(False)
                node._mean_state_value = StateNode._mean_state_value_func_(policy=node._policy, mean_state_action_values=node._mean_state_action_values)
                node._state_value_variance = StateNode._state_value_variance_func_(policy=node._policy, state_action_value_variances=node._state_action_value_variances)
                node.__value_up_to_date__ = True # prevent update
                mean_state_action_value_grad = (self.root_node.objective() - objective)/epsilon
                node._mean_state_action_values[action_idx] = old_mean_state_action_value # reset
                # compute gradient for state_action_value_variance
                node._state_action_value_variances[action_idx] = old_state_action_value_variance + epsilon
                node._set_value_up_to_date(False)
                node._mean_state_value = StateNode._mean_state_value_func_(policy=node._policy, mean_state_action_values=node._mean_state_action_values)
                node._state_value_variance = StateNode._state_value_variance_func_(policy=node._policy, state_action_value_variances=node._state_action_value_variances)
                node.__value_up_to_date__ = True # prevent update
                state_action_value_variance_grad = (self.root_node.objective() - objective)/epsilon
                node._state_action_value_variances[action_idx] = old_state_action_value_variance # reset
                # check threshold
                mean_state_action_value_deviation = abs(grad[node]['Q'][action]['mean']-mean_state_action_value_grad)/max(objective,1)
                state_action_value_variance_deviation = abs(grad[node]['Q'][action]['var']-state_action_value_variance_grad)/max(objective,1)
                if mean_state_action_value_deviation > threshold or state_action_value_variance_deviation > threshold:
                    if not state_info_printed:
                        self.logger.error('error in gradient')
                        self.logger.error(' state {}'.format(node._state))
                        self.logger.error(' objective {}'.format(objective))
                        state_info_printed = True
                    self.logger.error(' [action: {}] mean state action value grad (analytic/numeric) {} / {}'.format(action, grad[node]['Q'][action]['mean'], mean_state_action_value_grad))
                    self.logger.error(' [action: {}] state value action variance grad (analytic/numeric) {} / {}'.format(action, grad[node]['Q'][action]['var'], state_action_value_variance_grad))
            if not state_info_printed:
                self.logger.info('state {} OK'.format(node._state))
            # flag node out-dated to force recomputation
            node._set_value_up_to_date(False)
    def to_file(self, file_name):
        """Serialize the tree as JSON (a dict with 'nodes' and 'edges' lists).

        Each node record carries its index, depth level, visit counts, and
        formatted value/gradient statistics; each edge record carries the
        transition counts, policy, transition probability, and action-value
        statistics of one (parent, action, child) relation.
        """
        self.logger.info('write graph to file')
        # function for formating numbers (3 significant digits)
        show = lambda number: round_significant(number, 3, 1e-10)
        # node indices
        node_indices = {node: idx for idx, node in enumerate(self.all_nodes())}
        # node levels (depth below the root, computed breadth-first)
        nodes_to_be_processed = [self.root_node]
        node_levels = {self.root_node: 0}
        for current_node in nodes_to_be_processed:
            for child in current_node._children.values():
                node_levels[child] = node_levels[current_node] + 1
                nodes_to_be_processed.append(child)
        # put together all node data
        nodes = []
        for node_idx, node in enumerate(self.all_nodes()):
            try:
                nodes.append({})
                nodes[node_idx]['index'] = node_indices[node]
                nodes[node_idx]['level'] = node_levels[node]
                nodes[node_idx]['label text'] = str(node._state)
                nodes[node_idx]['reward counts'] = node._reward_counts
                nodes[node_idx]['visit counts'] = 0
                if node._parent is not None:
                    # look up this node's visit counts in its parent's transition table
                    nodes[node_idx]['visit counts'] = None
                    for action, state_dict in node._parent._transition_counts.items():
                        for state, counts in state_dict.items():
                            if node._parent._children[(action, state)] == node:
                                nodes[node_idx]['visit counts'] = counts
                    if nodes[node_idx]['visit counts'] is None:
                        raise UserWarning('Node does not exist among children of its parent')
                nodes[node_idx]['expected reward'] = '{} +/- {} ({})'.format(show(node.mean_expected_reward()),
                                                                             show(sqrt(show(node.expected_reward_variance()))),
                                                                             show(node.expected_reward_variance()))
                nodes[node_idx]['state value'] = '{} +/- {} ({})'.format(show(node.mean_state_value()),
                                                                         show(sqrt(show(node.state_value_variance()))),
                                                                         show(node.state_value_variance()))
                nodes[node_idx]['state value grad'] = '{} / {}'.format(show(node.mean_state_value_grad()),
                                                                       show(node.state_value_variance_grad()))
                nodes[node_idx]['expected variance change'] = '{}'.format(show(node.expected_state_value_variance_change()))
                # a node with untried actions gets zero impact at the state level
                self.environment.state = node._state
                if len(self.environment.possible_actions()) == len(node._action_counts):
                    nodes[node_idx]['impact'] = 0
                else :
                    nodes[node_idx]['impact'] = show(self.impact(grad=node.state_value_variance_grad(),
                                                                 change=node.expected_state_value_variance_change()))
            except ValueError:
                # dump the offending raw values before re-raising for diagnosis
                print(node.mean_expected_reward(), node.expected_reward_variance(),
                      show(node.mean_expected_reward()), show(node.expected_reward_variance()))
                print(node.mean_state_value(), node.state_value_variance(),
                      show(node.mean_state_value()), show(node.state_value_variance()))
                raise
        # collect edge data
        edges = []
        for node in self.all_nodes():
            # build dict of action values and transition probabilities for this state node
            action_values = {}
            action_values_grad = {}
            expected_variance_changes = {}
            action_probs = {}
            for action, state_dict in node._transition_counts.items():
                action_values[action] = (node.mean_state_action_value(action),
                                         node.state_action_value_variance(action))
                action_values_grad[action] = (node.mean_state_action_value_grad(action),
                                              node.state_action_value_variance_grad(action))
                expected_variance_changes[action] = node.expected_state_action_value_variance_change(action)
                action_probs[action] = {}
                for state_idx, state in enumerate(state_dict):
                    action_probs[action][state] = (node.mean_transition_prob(action, state),
                                                   node.transition_prob_variance(action, state, state))
            # add edges for all children
            for (action, state), child in node._children.items():
                edges.append({'source': node_indices[node],
                              'target': node_indices[child],
                              'label text': str(action),
                              'transition counts': node._transition_counts[action][state],
                              'policy': show(node.policy(action)),
                              'probability': '{} +/- {} ({})'.format(show(action_probs[action][state][0]),
                                                                     show(sqrt(show(action_probs[action][state][1]))),
                                                                     show(action_probs[action][state][1])),
                              'action value': '{} +/- {} ({})'.format(show(action_values[action][0]),
                                                                      show(sqrt(show(action_values[action][1]))),
                                                                      show(action_values[action][1])),
                              'action value grad': '{} / {}'.format(show(action_values_grad[action][0]),
                                                                    show(action_values_grad[action][1])),
                              'expected variance change': '{}'.format(show(expected_variance_changes[action])),
                              'impact': show(self.impact(grad=node.state_action_value_variance_grad(action),
                                                         change=node.expected_state_action_value_variance_change(action)))})
        # write to file
        with open(file_name, 'w') as file:
            json.dump({"nodes": nodes, "edges": edges}, file)
def show(self):
    """Render the search tree with pydot and display it inline.

    One circle node is drawn per state (colored by expected reward /
    state value), one diamond node per (state, action) pair (colored by
    action value), with state->action and action->state edges labeled by
    counts, policy, and rewards.  Writes .dot and .png files to
    self.tmp_dir and displays the PNG (IPython display).
    """
    graph = pydot.Dot(graph_type='digraph')
    # get all state and state-action values and expected rewards
    mean_state_values = {}
    state_value_variances = {}
    mean_state_action_values = {}
    state_action_value_variances = {}
    expected_rewards = {}
    for node in self.all_nodes():
        # state value may be a symbolic variable or a constant value (for leaf nodes)
        mean_state_values[node] = node.mean_state_value()
        state_value_variances[node] = node.state_value_variance()
        # action values (always a symbolic variable)
        for action in node._action_counts.keys():
            mean_state_action_values[(node, action)] = node.mean_state_action_value(action)
            state_action_value_variances[(node, action)] = node.state_action_value_variance(action)
        # expected rewards (shared variables)
        expected_rewards[node] = node.mean_expected_reward()
    # compute magnitude (maximum absolute value or reward) to renormalize
    max_value = max(max(mean_state_values.values()), max(mean_state_action_values.values()), max(expected_rewards.values()))
    min_value = min(min(mean_state_values.values()), min(mean_state_action_values.values()), min(expected_rewards.values()))
    mag = max(abs(max_value), abs(min_value))
    if mag == 0:
        # prevent division by zero
        mag = 1
    # create all state-nodes and action-nodes
    for node in self.all_nodes():
        # hue 0.35 (green) for positive values, 1 (red) for negative;
        # saturation encodes magnitude relative to the global scale mag
        graph.add_node(pydot.Node('{}'.format(id(node)),
                                  label='{} ({:.2g}~{:.2g})'.format(node._state,
                                                                    float(mean_state_values[node]),
                                                                    float(np.sqrt(state_value_variances[node]))),
                                  shape='circle',
                                  style='filled',
                                  penwidth='5',
                                  color='"{} {} 0.95"'.format((0.35 if expected_rewards[node] > 0 else 1),
                                                              abs(expected_rewards[node]/mag)),
                                  fillcolor='"{} {} 0.95"'.format((0.35 if mean_state_values[node] > 0 else 1),
                                                                  abs(mean_state_values[node]/mag))
                                  ))
        for action in node._action_counts.keys():
            graph.add_node(pydot.Node('{}_{}'.format(id(node), action),
                                      label='{} ({:.2g}~{:.2g})'.format(action,
                                                                        float(mean_state_action_values[(node, action)]),
                                                                        float(np.sqrt(state_action_value_variances[(node, action)]))),
                                      shape='diamond',
                                      style='filled',
                                      penwidth='1',
                                      fillcolor='"{} {} 0.95"'.format((0.35 if mean_state_action_values[(node, action)] > 0 else 1),
                                                                      abs(mean_state_action_values[(node, action)]/mag))))
    # add all state->action and action->state edges
    # todo: add reward variance
    # todo: add transition prob variance (and covariance?)
    for node in self.all_nodes():
        connected_action_nodes = set()
        for (action, state), child in node._children.items():
            action_node_name = '{}_{}'.format(id(node), action)
            # create state->action edge (only once per distinct action node)
            if action_node_name not in connected_action_nodes:
                connected_action_nodes.add(action_node_name)
                graph.add_edge(pydot.Edge('{}'.format(id(node)),
                                          action_node_name,
                                          label='#{}|{:.2g}'.format(node._action_counts[action],
                                                                    float(node.policy(action)))))
            # create action->state edge
            graph.add_edge(pydot.Edge(action_node_name,
                                      '{}'.format(id(child)),
                                      label='{}/#{}→{:.2g}'.format(child._reward_sum,
                                                                   child._visit_counts,
                                                                   child._reward_sum/child._visit_counts)))
    # produce files
    rnd = random_string(10)
    dot_file = self.tmp_dir + '/plot_' + rnd + '.dot'
    graph.write_dot(dot_file)
    png_file = self.tmp_dir + '/plot_' + rnd + '.png'
    graph.write_png(png_file)
    display(Image(png_file))
    print('Files are available at {} and {}'.format(dot_file, png_file))
def show_graph(self, theano_graph, png=True, html=True):
    """Visualize a theano computation graph.

    Optionally renders a static PNG (via theano's pydotprint) and/or an
    interactive d3viz HTML page, both written to self.tmp_dir with a
    random file-name suffix, and displays them inline (IPython display).
    """
    base = self.tmp_dir + '/plot_' + random_string(10)
    if png:
        png_file = base + '.png'
        theano.printing.pydotprint(theano_graph, outfile=png_file, var_with_name_simple=True)
        display(Image(png_file))
    if html:
        html_file = base + '.html'
        d3v.d3viz(theano_graph, html_file)
        display(HTML('<a target="_blank" href="' + html_file + '">View Graph</a>'))
| roboloni/active-tree-search | python/old_implementation/active_tree_search.py | Python | gpl-2.0 | 72,934 | [
"VisIt"
] | b1b79238ca1578efedd1b2daec163c57b36f0496af5da56d79e1001bf74464aa |
import os
from collections import Counter
from pypif.obj.common import Value, Property, Scalar
def Value_if_true(func):
    '''Wrap *func* so the wrapped call yields an empty pypif Value.

    Returns:
        A callable that returns Value() when func(x) == True, else None
    '''
    def wrapper(x):
        return Value() if func(x) == True else None
    return wrapper
class InvalidIngesterException(ValueError):
    '''Raised when a parser cannot find the files it needs to ingest.'''
    pass
class DFTParser(object):
    '''Base class for all tools to parse a directory of output files from a DFT Calculation

    To use this class, provide the path to a directory to containing the output files from a DFT calculation. Once
    instantiated, call the methods provided by this class in order to retrieve the settings and results of the
    calculation.

    To get a list of names of the settings available for this a particular instance, call get_setting_functions(). These
    methods return a pypif Value object.

    To get a list of the names of results available via a particular instance, call get_result_functions(). These
    methods return a pypif Property object.

    Developer Notes
    ---------------
    Settings and properties should...
        return None if property or setting is not applicable, or if the test is a boolean and the value is false
        Raise exception if there is an error that would benefit from user intervention

    To add a new setting or value to the output, add a new entry to the dictionary returned by get_setting_functions()
    or get_result_functions(). The key for each entry is a 'human-friendly' name of the result and the value is the
    name of the function. This design was chosen because there is a single function for defining the human names of the
    results, which are what serve as the tags in the pif file. In this way, the same property will be ensured to
    have the same name in the pif.
    '''

    # Cached result of _is_converged(); None means "not yet computed"
    _converged = None
    ''' Whether this calculation has converged '''

    def __init__(self, files):
        '''Initialize a parser by defining the list of files that the parser can read from.

        Input:
            files - [str], list of files usable by this parser.

        Raises:
            InvalidIngesterException - If parser cannot find needed files
        '''
        self._files = files

    @classmethod
    def generate_from_directory(cls, directory):
        """Create a parser by defining which input files it will read from.

        Input:
            directory - str, directory to read from

        Returns:
            A parser instance initialized with all plain files in directory
        """
        # only plain files (subdirectories are skipped) are handed to the parser
        files = [os.path.join(directory, f) for f in os.listdir(directory)
                 if os.path.isfile(os.path.join(directory, f))]
        return cls(files)

    def get_setting_functions(self):
        '''Get a dictionary containing the names of methods
        that return settings of the calculation

        Returns:
            dict, where the key is the name of the setting,
            and the value is function name of this parser
        '''
        return {
            'XC Functional': 'get_xc_functional',
            'Relaxed': 'is_relaxed',
            'Cutoff Energy': 'get_cutoff_energy',
            'k-Points per Reciprocal Atom': 'get_KPPRA',
            'Spin-Orbit Coupling': 'uses_SOC',
            'DFT+U': 'get_U_settings',
            'vdW Interactions': 'get_vdW_settings',
            'Pseudopotentials': 'get_pp_name',
            'INCAR': 'get_incar',
            'POSCAR': 'get_poscar',
        }

    def get_result_functions(self):
        '''Get a dictionary describing the names of methods
        that return results of the calculation

        Returns:
            dict, where the key is the name of a property,
            and the value is the name of the function
        '''
        return {
            'Converged': 'is_converged',
            'Total Energy': 'get_total_energy',
            'Band Gap Energy': 'get_band_gap',
            'Pressure': 'get_pressure',
            'Density of States': 'get_dos',
            'Positions': 'get_positions',
            'Forces': 'get_forces',
            'Total force': 'get_total_force',
            'Density': 'get_density',
            'OUTCAR': 'get_outcar',
            'Total magnetization': 'get_total_magnetization',
            'Stresses': 'get_stresses',
            'Number of atoms': 'get_number_of_atoms',
            'Initial volume': 'get_initial_volume',
            'Final volume': 'get_final_volume'
        }

    def get_name(self):
        '''Get the name of this program'''
        raise NotImplementedError

    def get_version_number(self):
        '''Get the version number of code that
        created these output files

        Returns:
            string, Version number
        '''
        raise NotImplementedError

    def get_output_structure(self):
        '''Get the output structure, if available

        Returns:
            ase.Atoms - Output structure from this calculation
                or None if output file not found
        '''
        raise NotImplementedError

    def get_composition(self):
        '''Get composition of output structure

        Returns:
            String - Composition based on output structure
        '''
        strc = self.get_output_structure()
        counts = Counter(strc.get_chemical_symbols())
        # compact formula string: count suffix omitted for single atoms
        return ''.join(k if counts[k]==1 else '%s%d'%(k,counts[k]) \
                for k in sorted(counts))

    def get_density(self):
        """Compute the density from the output structure

        Returns:
            Property, density in g/cm^3
        """
        strc = self.get_output_structure()
        # 1.660539040 converts amu/Angstrom^3 to g/cm^3
        density = sum(strc.get_masses()) / strc.get_volume() * 1.660539040
        return Property(scalars=[Scalar(value=density)], units="g/(cm^3)")

    def get_positions(self):
        """Get the atomic positions from the output structure.

        Returns:
            Property, positions wrapped as vectors of Scalar values
        """
        strc = self.get_output_structure()
        raw = strc.positions.tolist()
        wrapped = [[Scalar(value=x) for x in y] for y in raw]
        return Property(vectors=wrapped)

    def get_cutoff_energy(self):
        '''Read the cutoff energy from the output

        Returns:
            Value, cutoff energy (scalar) and units
        '''
        raise NotImplementedError

    def uses_SOC(self):
        '''Parse the output file to tell if spin-orbit coupling was used

        Returns:
            Blank Value if true, `None` otherwise
        '''
        raise NotImplementedError

    def is_relaxed(self):
        '''Parse the output file to tell if the structure was relaxed

        Returns:
            Blank Value if true, `None` otherwise
        '''
        raise NotImplementedError

    def get_xc_functional(self):
        '''Parse the output file to tell which exchange-correlation functional was used

        Returns:
            Value - where "scalars" is the name of the functional
        '''
        raise NotImplementedError

    def is_converged(self):
        '''Whether the calculation has converged

        Returns: Property where "scalar" is a boolean indicating convergence
        '''
        # Check for cached result; _is_converged() runs at most once
        if self._converged is None:
            self._converged = self._is_converged()
        return Property(scalars=[Scalar(value=self._converged)])

    def get_pp_name(self):
        '''Read output to get the pseudopotentials names used for each elements

        Returns:
            Value where the key "scalars" is the list of pseudopotentials names
        '''
        raise NotImplementedError

    def get_KPPRA(self):
        '''Read output and calculate the number of k-points per reciprocal atom

        Returns:
            Value, number of k-points per reciprocal atom
        '''
        raise NotImplementedError

    def get_U_settings(self):
        '''Get the DFT+U settings, if used

        Returns: Value, which could contain several keys
            'Type' -> String, type of DFT+U employed
            'Values' -> dict of Element -> (L, U, J)

        Note: Returns None if DFT+U was not used
        '''
        raise NotImplementedError

    def get_vdW_settings(self):
        '''Get the vdW settings, if applicable

        Returns: Value where `scalars` is the name of the vdW method. None if vdW was not used'''
        raise NotImplementedError

    # Operations for retrieving results

    def _is_converged(self):
        '''Read output to see whether it is converged

        Hidden operation: self.is_converged() is the public
        interface, which may draw from a converged result

        Returns: boolean'''
        raise NotImplementedError

    def get_total_energy(self):
        '''Get the total energy of the last ionic step

        Returns: Property
        '''
        raise NotImplementedError

    def get_band_gap(self):
        '''Get the band gap energy

        Returns: Property'''
        raise NotImplementedError

    def get_pressure(self):
        '''Get the pressure acting on the system

        Returns: Property, where pressure is a scalar'''
        raise NotImplementedError

    def get_dos(self):
        '''Get the total density of states

        Returns: Property where DOS is a vector, and the energy at which the DOS was evaluated is a condition'''
        raise NotImplementedError

    def get_stresses(self):
        '''Get the stress tensor

        Returns: Property where stresses is a 2d matrix'''
        raise NotImplementedError

    def get_total_force(self):
        # Not applicable for all codes; subclasses may override
        return None

    def get_total_magnetization(self):
        # Not applicable for all codes; subclasses may override
        return None

    def get_number_of_atoms(self):
        """Get the number of atoms in the calculated structure.

        Returns: Property, where number of atoms is a scalar.
        """
        strc = self.get_output_structure()
        if not strc:
            return None
        return Property(scalars=[Scalar(value=len(strc))], units="/unit cell")

    def get_initial_volume(self):
        """Get the volume of the initial input structure.

        Returns: Property, where volume is a scalar.
        """
        raise NotImplementedError

    def get_final_volume(self):
        """Get the volume of the calculated structure at the end of the
        calculation. If the calculation did not involve structural relaxation,
        the final volume is identical to the initial volume.

        Returns: Property, where volume is a scalar.
        """
        raise NotImplementedError
| CitrineInformatics/pif-dft | dfttopif/parsers/base.py | Python | apache-2.0 | 10,541 | [
"ASE"
] | 31ce21826c42c2b4aabd82c8a7c8632304f1c0c97cda5e92ed200d351d4aedba |
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Perform a grand canonical simulation of a system in contact
with a salt reservoir while maintaining a constant chemical potential.
"""
epilog = """
Takes two command line arguments as input: 1) the reservoir salt
concentration in units of 1/sigma^3 and 2) the excess chemical
potential of the reservoir in units of kT.
The excess chemical potential of the reservoir needs to be determined
prior to running the grand canonical simulation using the script called
widom_insertion.py which simulates a part of the reservoir at the
prescribed salt concentration. Be aware that the reservoir excess
chemical potential depends on all interactions in the reservoir system.
"""
import numpy as np
import argparse
import espressomd
from espressomd import reaction_ensemble
from espressomd import electrostatics
required_features = ["P3M", "EXTERNAL_FORCES", "WCA"]
espressomd.assert_features(required_features)
parser = argparse.ArgumentParser(epilog=__doc__ + epilog)
parser.add_argument('cs_bulk', type=float,
help="bulk salt concentration [1/sigma^3]")
parser.add_argument('excess_chemical_potential', type=float,
help="excess chemical potential [kT] "
"(obtained from Widom's insertion method)")
args = parser.parse_args()
# System parameters
#############################################################
cs_bulk = args.cs_bulk
excess_chemical_potential_pair = args.excess_chemical_potential
box_l = 50.0
# Integration parameters
#############################################################
system = espressomd.System(box_l=[box_l, box_l, box_l])
system.set_random_state_PRNG()
#system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
np.random.seed(seed=system.seed)
system.time_step = 0.01
system.cell_system.skin = 0.4
temperature = 1.0
#############################################################
# Setup System #
#############################################################
# Particle setup
#############################################################
# type 0 = HA
# type 1 = A-
# type 2 = H+
for i in range(int(cs_bulk * box_l**3)):
system.part.add(pos=np.random.random(3) * system.box_l, type=1, q=-1)
system.part.add(pos=np.random.random(3) * system.box_l, type=2, q=1)
wca_eps = 1.0
wca_sig = 1.0
types = [0, 1, 2]
for type_1 in types:
for type_2 in types:
system.non_bonded_inter[type_1, type_2].wca.set_params(
epsilon=wca_eps, sigma=wca_sig)
RE = reaction_ensemble.ReactionEnsemble(
temperature=temperature, exclusion_radius=2.0, seed=3)
RE.add_reaction(
gamma=cs_bulk**2 * np.exp(excess_chemical_potential_pair / temperature),
reactant_types=[], reactant_coefficients=[], product_types=[1, 2],
product_coefficients=[1, 1], default_charges={1: -1, 2: +1})
print(RE.get_status())
system.setup_type_map([0, 1, 2])
RE.reaction(10000)
p3m = electrostatics.P3M(prefactor=2.0, accuracy=1e-3)
system.actors.add(p3m)
p3m_params = p3m.get_params()
for key, value in p3m_params.items():
print("{} = {}".format(key, value))
# Warmup
#############################################################
# warmup integration (steepest descent)
warm_steps = 20
warm_n_times = 20
min_dist = 0.9 * wca_sig
# minimize energy using min_dist as the convergence criterion
system.integrator.set_steepest_descent(f_max=0, gamma=1e-3,
max_displacement=0.01)
i = 0
while system.analysis.min_dist() < min_dist and i < warm_n_times:
print("minimization: {:+.2e}".format(system.analysis.energy()["total"]))
system.integrator.run(warm_steps)
i += 1
print("minimization: {:+.2e}".format(system.analysis.energy()["total"]))
print()
system.integrator.set_vv()
# activate thermostat
system.thermostat.set_langevin(kT=temperature, gamma=.5, seed=42)
# MC warmup
RE.reaction(1000)
n_int_cycles = 10000
n_int_steps = 600
num_As = []
deviation = None
for i in range(n_int_cycles):
RE.reaction(10)
system.integrator.run(steps=n_int_steps)
num_As.append(system.number_of_particles(type=1))
if i > 2 and i % 50 == 0:
print("HA", system.number_of_particles(type=0), "A-",
system.number_of_particles(type=1), "H+",
system.number_of_particles(type=2))
concentration_in_box = np.mean(num_As) / box_l**3
deviation = (concentration_in_box - cs_bulk) / cs_bulk * 100
print("average num A {:.1f} +/- {:.1f}, average concentration {:.8f}, "
"deviation to target concentration {:.2f}%".format(
np.mean(num_As),
np.sqrt(np.var(num_As, ddof=1) / len(num_As)),
concentration_in_box, deviation))
| psci2195/espresso-ffans | samples/grand_canonical.py | Python | gpl-3.0 | 5,455 | [
"ESPResSo"
] | 2bdded75c6c36f6979c17cecafdd974dd60b287755b515e8c4016d446ae521f6 |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 5 13:21:34 2016
@author: ibackus
"""
import numpy as np
import os
import pynbody
SimArray = pynbody.array.SimArray
import diskpy
import runTest
import config
def summarizeResults(result, testName):
    """
    Print a human-readable summary of test-comparison results.

    result should just be the direct output of compareTests for testName.
    For 'agora'/'agora-short', result is a (scores, walltimes) tuple; for
    'sedov'/'shocktube'/'collapse' it is a dict of error estimates.
    """
    if testName in ('agora', 'agora-short'):
        result, walls = result
        print 'Mean fractional errors, averaged family-wise over all keys:'
        print ' overall:', result['full']
        print ' dark-matter:', result['dm']
        print ' gas:', result['gas']
        print ' metals:', result['metals']
        print ' stars:', result['stars']
        print 'walltime per step (run 1):', walls[0]
        print 'walltime per step (run 2):', walls[1]
    elif testName in ('sedov', 'shocktube', 'collapse'):
        print 'Mean fractional errors:'
        print ' density:', result['rhoErr']
        print ' temperature:', result['tempErr']
        print ' velocity:', result['vErr']
        print ' position', result['xErr']
        print 'walltime per step (run 1):', result['walls'][0]
        print 'walltime per step (run 2):', result['walls'][1]
    else:
        raise ValueError, 'No method for printing testName {}'.format(testName)
def compareTests(directories, testName, runparamname='runparams.json'):
    """
    A convenience utility to compare results for two runs of a test.  This
    dispatches to one of the comparison functions.

    Parameters
    ----------
    directories : list or tuple-like
        The two directories containing simulations to compare
    testName : str
        The name of the test. can be:
            'shock'       - uses compareShock(...)
            'collapse'    - uses compareShock(...)
            'shocktube'   - uses compareShock(...)
            'sedov'       - uses compareSedov(...)
            'agora'       - uses compareAgora(...)
            'agora-short' - uses compareAgora(...)
            'dustydisk'   - uses compareDustyDisk(...)
    runparamname : str
        Name (relative to each directory) of the saved run-parameter file

    Returns
    -------
    results :
        output of the functions above

    Raises
    ------
    ValueError
        If testName is not one of the recognized test names
    """
    if testName in ('shock', 'collapse'):
        return compareShock(directories, runparamname)
    elif testName == 'sedov':
        return compareSedov(directories, runparamname)
    elif testName == 'shocktube':
        return compareShock(directories, runparamname)
    elif testName in ('agora', 'agora-short'):
        return compareAgora(directories, runparamname)
    elif testName == 'dustydisk':
        return compareDustyDisk(directories, runparamname)
    else:
        # raise ValueError(...) is valid in both python 2 and 3, unlike the
        # python-2-only statement form "raise ValueError, msg" used before
        raise ValueError('No function defined to compare testName: {}'
                         .format(testName))
def compareDustyDisk(directories, runparamname='runparams.json'):
    """
    Compare the results between two dustydisk tests

    directories can be a list of directories or a list of simsnaps

    Returns a dict of 'scores'.  scores are the mean error for different
    quantities on a family-level basis, i.e. position and velocity for
    stars; position, velocity, temperature, density, and the dust arrays
    for gas.  Also returns the mean walltimes per step (or None if the
    log files could not be read).
    """
    fs, ICs, runPars, lognames = loadCompareDirs(directories, runparamname)
    # shorten the snapshots (keep only particles present in all of them)
    allSnaps = intersectSnaps(fs+ICs)
    fs = allSnaps[0:2]
    ICs = allSnaps[2:]
    scores = {}
    print '\nComparing full simulation'
    xErr = posErr(fs, ICs)
    vErr = velErr(fs, ICs)
    scores['full'] = np.mean((xErr, vErr))
    print '\nComparing gas'
    gas = intersectSnaps([f.g for f in fs + ICs])
    gas, gasICs = (gas[0:2], gas[2:])
    xErr = posErr(gas, gasICs)
    vErr = velErr(gas, gasICs)
    tempErr = err1D(gas, 'temp')
    rhoErr = err1D(gas, 'rho')
    scores['gas'] = np.nanmean((xErr, vErr, tempErr, rhoErr))
    # dust arrays to compare (only those present in both runs)
    otherkeys = [
        'dustFrac',
        'dustGrainSize',
        'dustFracDot',
    ]
    dustErrs = []
    # Get keys present in both simulations
    all_keys = list(set.intersection(*[set(g.all_keys()) for g in gas]))
    for key in otherkeys:
        if key in all_keys:
            dustErrs.append(err1D(gas, key))
        else:
            print 'array {} missing'.format(key)
    scores['dust'] = np.nanmean(dustErrs)
    print '\n Comparing stars'
    stars = intersectSnaps([f.s for f in fs + ICs])
    stars, starICs = (stars[0:2], stars[2:])
    xErr = posErr(stars, starICs)
    vErr = velErr(stars, starICs)
    scores['stars'] = np.nanmean((xErr, vErr))
    all_keys = list(set.intersection(*[set(g.all_keys()) for g in stars]))
    if 'dustFrac' in all_keys:
        scores['star_dustFrac'] = err1D(stars, 'dustFrac')
    print '\nWalltime:'
    try:
        walls = walltimes(lognames)
    except IndexError:
        # NOTE: the bare string below is a no-op, not a printed message
        "Could not load walltime"
        walls = None
    return scores, walls
def compareShock(directories, runparamname='runparams.json'):
    """
    Compare the results between two shocktube tests.

    Can also skip the loading if directories is already the pre-loaded
    tuple (fs, ICs, runPars, lognames) rather than a list of paths.

    Returns a dict of mean fractional errors for position, velocity,
    temperature, and density, plus the mean walltimes per step.
    """
    # strings mean "directory paths"; anything else is treated as pre-loaded
    if isinstance(directories[0], str):
        fs, ICs, runPars, lognames = loadCompareDirs(directories, runparamname)
    else:
        fs, ICs, runPars, lognames = directories
    xErr = posErr(fs, ICs)
    vErr = velErr(fs, ICs)
    gas = [f.g for f in fs]
    gasICs = [f.g for f in ICs]
    tempErr = err1D(gas, 'temp', gasICs)
    rhoErr = err1D(gas, 'rho')
    walls = walltimes(lognames)
    return {'xErr': xErr,
            'vErr': vErr,
            'tempErr': tempErr,
            'rhoErr': rhoErr,
            'walls': walls}
def compareSedov(directories, runparamname='runparams.json', vthresh=1e-9):
    """
    Compare the results between two sedov tests.  Very similar to
    compareShock, but particles that are barely moving (not yet reached by
    the blast wave) are filtered out first.

    A particle is kept if, in ANY of the final snapshots, its speed exceeds
    vthresh times that snapshot's maximum speed.
    """
    fs, ICs, runPars, lognames = loadCompareDirs(directories, runparamname)
    # build the keep-mask by OR-ing the per-snapshot speed criterion
    keep = False
    for snap in fs:
        speed = vectorMag3D(snap['vel'])
        keep = keep | (speed >= speed.max() * vthresh)
    fs = [snap[keep] for snap in fs]
    ICs = [snap[keep] for snap in ICs]
    return compareShock((fs, ICs, runPars, lognames), runparamname)
def compareAgora(directories, runparamname='runparams.json'):
    """
    Compare the results between two agora tests

    directories can be a list of directories or a list of simsnaps

    Returns a dict of 'scores'.  scores are the mean error for different
    quantities on a family-level basis, i.e. position and velocity for
    dark matter, gas is position, velocity, temperature, and density,
    and metals is for all metals arrays.  Also returns the mean walltimes
    per step for the two runs.
    """
    fs, ICs, runPars, lognames = loadCompareDirs(directories, runparamname)
    # shorten the snapshots (keep only particles present in all of them)
    allSnaps = intersectSnaps(fs+ICs)
    fs = allSnaps[0:2]
    ICs = allSnaps[2:]
    scores = {}
    print '\nComparing full simulation'
    xErr = posErr(fs, ICs)
    vErr = velErr(fs, ICs)
    scores['full'] = np.mean((xErr, vErr))
    print '\nComparing dark matter (this is pass trivially)'
    darkMatter = intersectSnaps([f.dm for f in fs + ICs])
    darkMatter, darkMatterICs = (darkMatter[0:2], darkMatter[2:])
    xErr = posErr(darkMatter, darkMatterICs)
    vErr = velErr(darkMatter, darkMatterICs)
    scores['dm'] = np.mean((xErr, vErr))
    print '\nComparing gas'
    gas = intersectSnaps([f.g for f in fs + ICs])
    gas, gasICs = (gas[0:2], gas[2:])
    xErr = posErr(gas, gasICs)
    vErr = velErr(gas, gasICs)
    tempErr = err1D(gas, 'temp', gasICs)
    rhoErr = err1D(gas, 'rho')
    scores['gas'] = np.nanmean((xErr, vErr, tempErr, rhoErr))
    # metals/chemistry arrays to compare (only those present in both runs)
    otherkeys = ['metals',
                 'HI',
                 'OxMassFracdot',
                 'HeI',
                 'FeMassFracdot',
                 'Metalsdot',
                 'ESNRate',
                 'FeMassFrac',
                 'HeII',
                 'OxMassFrac']
    metalsErrs = []
    # Get keys present in both simulations
    all_keys = list(set.intersection(*[set(g.all_keys()) for g in gas]))
    for key in otherkeys:
        if key in all_keys:
            metalsErrs.append(err1D(gas, key))
        else:
            print 'array {} missing'.format(key)
    scores['metals'] = np.nanmean(metalsErrs)
    print '\n Comparing stars'
    stars = intersectSnaps([f.s for f in fs + ICs])
    stars, starICs = (stars[0:2], stars[2:])
    xErr = posErr(stars, starICs)
    vErr = velErr(stars, starICs)
    scores['stars'] = np.nanmean((xErr, vErr))
    print '\nWalltime:'
    walls = walltimes(lognames)
    return scores, walls
# ---------------------------------------------------------------------
# Generic utilities
# ---------------------------------------------------------------------
def notNanInf(x):
    """
    Return the elements of x that are finite, i.e. a masked copy with all
    nan and inf entries removed
    """
    keep = ~(np.isnan(x) | np.isinf(x))
    return x[keep]
def nanInfMean(x):
    """
    Return mean of x, excluding nan and inf vals
    """
    finite_mask = (~np.isnan(x)) & (~np.isinf(x))
    return x[finite_mask].mean()
def simSize(f):
    """
    Estimates the 'size' of a simulation as the RMS spread (std) of the
    particle positions, combined over the three axes
    """
    spread = f['pos'].std(0)
    return np.sqrt((spread**2).sum())
def vectorMag3D(x):
    """
    Row-wise magnitude of an (N, 3) array of 3d vectors
    """
    return np.sqrt(np.sum(x * x, axis=1))
def walltimes(lognames, verbose=True):
    """
    Get mean walltimes (per step) for the ChaNGa log files in lognames.
    If verbose, also print a small aligned table of walltime vs log file.
    """
    outs = [diskpy.pychanga.walltime(log, verbose=False).mean() for log in lognames]
    if verbose:
        # left-justified column width for the walltime column
        colsize = 16
        words = ['walltime'.ljust(colsize), 'log file']
        print ''.join(words)
        for walltime, logname in zip(outs, lognames):
            print ''.join([str(walltime).ljust(colsize), logname])
    return outs
def readLines(fname):
    """
    Reads lines from file fname, ignoring blank lines and lines commented
    by #, and returns the remaining stripped lines as a list
    """
    with open(fname, 'r') as f:
        flist = []
        for line in f:
            line = line.strip()
            # skip blank lines explicitly: the previous implementation
            # indexed line[0] and raised IndexError on an empty line
            if line and not line.startswith('#'):
                flist.append(line)
    return flist
def getLogName(directory, paramname=None):
    """
    Return the path of the ChaNGa .log file for a run directory, derived
    from achOutName in the run's .param file
    """
    paramname = runTest.findParam(directory, paramname)
    settings = diskpy.utils.configparser(paramname, 'param')
    prefix = diskpy.pychanga.getpar('achOutName', settings)
    return os.path.join(directory, prefix + '.log')
def findSnapshots(directory, paramname=None):
    """
    Finds the output snapshot file names for the simulation in directory,
    using achOutName from the run's .param file
    """
    paramname = runTest.findParam(directory, paramname)
    settings = diskpy.utils.configparser(paramname, 'param')
    prefix = diskpy.pychanga.getpar('achOutName', settings)
    return diskpy.pychanga.get_fnames(prefix, directory)
def loadResults(directories, paramname=None):
    """
    Loads the final (last) output snapshot in each of 'directories'.
    A single paramname is broadcast over all directories.
    """
    if hasattr(paramname, '__iter__'):
        paramnames = paramname
    else:
        paramnames = [paramname] * len(directories)
    lastnames = []
    for directory, pname in zip(directories, paramnames):
        lastnames.append(findSnapshots(directory, pname)[-1])
    return [pynbody.load(name) for name in lastnames]
def loadICs(directories, paramname=None):
    """
    Load the initial conditions (ICs) for a directory or a list of
    directories.

    Parameters
    ----------
    directories : str or iterable of str
        Run directory (or directories)
    paramname : str, iterable, or None
        .param file name(s).  A single value (including None) is broadcast
        over all directories.

    Returns
    -------
    IC : pynbody snapshot, or a list of them when directories is iterable
    """
    if hasattr(directories, '__iter__'):
        if not hasattr(paramname, '__iter__'):
            paramnames = [paramname] * len(directories)
        else:
            paramnames = paramname
        # BUGFIX: previously zipped against paramname instead of paramnames,
        # which broke broadcasting a single paramname (or None) over
        # multiple directories
        return [loadICs(directory, pname)
                for directory, pname in zip(directories, paramnames)]
    directory = directories
    paramname = runTest.findParam(directory, paramname)
    param = diskpy.utils.configparser(paramname, 'param')
    ICname = param['achInFile']
    ICname = os.path.join(directory, ICname)
    IC = pynbody.load(ICname, paramfile=paramname)
    # Access the iord so it is loaded (or created) up-front
    getIord(IC)
    return IC
def loadRunPars(directories, runparams='runparams.json'):
    """
    Loads the saved run params from each directory
    """
    loaded = []
    for directory in directories:
        parName = os.path.join(directory, runparams)
        loaded.append(runTest.loadRunParam(parName))
    return loaded
def loadArgDicts(fnames):
    """
    Load the arg dicts (basically any dictionary saved via json) in fnames.
    fnames can be a str or a list of strings
    """
    if not hasattr(fnames, '__iter__'):
        return runTest.loadRunParam(fnames)
    return np.array([loadArgDicts(fname) for fname in fnames])
def getFromDictList(dicts, key, default=None):
    """
    Retrieve the value of key from every dict in a list, substituting a
    default value where the key is missing.  Returned as a numpy array.
    """
    values = [entry.get(key, default) for entry in dicts]
    return np.array(values)
def flattenDictList(dicts, default=None):
    """
    From a list of dictionaries, return a single dict where each key maps
    to a numpy array of that key's values across all dicts, using default
    where a dict lacks the key
    """
    allkeys = set()
    for d in dicts:
        allkeys.update(d.keys())
    flattened = {}
    for key in allkeys:
        flattened[key] = np.array([d.get(key, default) for d in dicts])
    return flattened
def loadCompareDirs(directories, runparamname='runparams.json'):
    """
    Loads final results, ICs, run parameters, and log file names for two
    comparison directories
    """
    runPars = loadRunPars(directories, runparamname)
    paramnames = [pars.get('paramname', None) for pars in runPars]
    fs = loadResults(directories, paramnames)
    lognames = []
    for directory, pname in zip(directories, paramnames):
        lognames.append(getLogName(directory, pname))
    ICs = loadICs(directories, paramnames)
    return fs, ICs, runPars, lognames
def getIord(f):
    """
    Tries to load particle iords if available; otherwise creates and
    stores a default ordering 0, 1, 2, ...
    """
    # EAFP: accessing f['iord'] may trigger a lazy load, so try it first
    try:
        iord = f['iord']
    except KeyError:
        f['iord'] = np.arange(len(f))
        iord = f['iord']
    return iord
def intersectSnaps(fs, verbose=False):
    """
    Returned index sub-snaps of only particles present in all snapshots in
    fs, each sorted by iord so the sub-snaps line up particle-by-particle

    Requires 'iord' key
    """
    # iords common to every snapshot
    common = getIord(fs[0])
    for f in fs[1:]:
        common = np.intersect1d(common, getIord(f), assume_unique=True)
    # per-snapshot iords NOT in the common set (to be removed)
    unique = [np.setdiff1d(getIord(f), common, assume_unique=True) for f in fs]
    # sort each snapshot by iord so the boolean masks correspond
    fsSorted = [f[getIord(f).argsort()] for f in fs]
    fsShort = []
    for i, fSorted in enumerate(fsSorted):
        mask = np.ones(len(fSorted), dtype=bool)
        remove = unique[i]
        if verbose:
            print "removing {0} from {1}".format(remove, i)
        for r in remove:
            # locate each extraneous particle by iord and mask it out
            ind = np.argwhere(getIord(fSorted) == r)[0][0]
            mask[ind] = False
        fsShort.append(fSorted[mask])
    return fsShort
# ---------------------------------------------------------------------
# Error/comparison utilities
# ---------------------------------------------------------------------
def dist(f1, f2):
    """
    Per-particle distance between particles in 2 sims (must be same length)
    """
    delta = f1['pos'] - f2['pos']
    return np.sqrt((delta**2).sum(1))
def fracDiff3D(x1, x2):
    """
    Fractional difference between 2 arrays of 3d vectors: |x2 - x1|
    normalized by the mean of the two magnitudes
    """
    def mag(v):
        # row-wise 3d vector magnitude
        return np.sqrt((v**2).sum(1))
    dx = mag(x2 - x1)
    return dx / (0.5 * (mag(x1) + mag(x2)))
def fracDiff1D(x1, x2):
    """
    Fractional difference between 2 1d vectors: |x2 - x1| normalized by
    the mean of the two absolute values
    """
    delta = abs(x2 - x1)
    scale = 0.5 * (abs(x1) + abs(x2))
    return delta / scale
def posErr(fs, ICs, verbose=True):
    """
    Estimate the relative error in position between snapshots fs[0] and fs[1]

    This is estimated by the mean of <the distance between particles in the two
    sims, normalized by the distance travelled by the particles>.
    NaN/inf entries (e.g. particles that did not move) are excluded from
    the mean.
    """
    f1, f2 = fs
    IC1, IC2 = ICs
    # distance between the two runs' final positions
    d12 = dist(f1, f2)
    # distance each run's particles travelled from their ICs
    d1 = dist(f1, IC1)
    d2 = dist(f2, IC2)
    dmean = 0.5 * (d1 + d2)
    errors = d12/dmean
    xErr = nanInfMean(errors)
    if verbose:
        print 'Relative position error:', xErr
        fracZero = zeroFraction(errors)
        print " fraction of identical positions", fracZero
    return xErr
def velErr(fs, ICs, verbose=True):
    """
    Estimates the relative velocity error between two simulations: the
    magnitude of the velocity difference between the runs, normalized by
    the mean velocity change relative to the ICs.  NaN/inf entries are
    excluded from the mean.
    """
    v12diff = vectorMag3D(fs[0]['vel'] - fs[1]['vel'])
    # per-run velocity change relative to the ICs (the normalization)
    vdiffs = [vectorMag3D(f['vel'] - IC['vel']) for f, IC in zip(fs, ICs)]
    vdiffmean = 0.5 * (vdiffs[0] + vdiffs[1])
    errors = v12diff/vdiffmean
    vErr = nanInfMean(errors)
    if verbose:
        print 'Relative velocity err:', vErr
        fracZero = zeroFraction(errors)
        print " fraction of identical velocities:", fracZero
    return vErr
def err1D(fs, key, ICs=None, verbose=True):
    """
    Estimate the mean error between fs[0][key] and fs[1][key] and ICs
    Error is normalized by the mean difference with ICs if supplied, else
    its normalized by the value of f[key]
    """
    f1, f2 = fs
    if ICs is not None:
        # NOTE(review): IC1/IC2 are unpacked but never used individually;
        # the ICs list is consumed directly by the comprehension below.
        IC1, IC2 = ICs
        diff12 = abs(f1[key] - f2[key])
        # Change of each sim's quantity relative to its initial conditions
        diffs = [abs(IC[key] - f[key]) for IC, f in zip(ICs, fs)]
        diffmean = 0.5 * (diffs[0] + diffs[1])
        errors = diff12/diffmean
        err = nanInfMean(errors)
    else:
        # No ICs: normalize by the values themselves
        errors = fracDiff1D(f1[key], f2[key])
        err = nanInfMean(errors)
    if verbose:
        print "Relative '{0}' error:".format(key), err
        fracZero = zeroFraction(errors)
        print " fraction of identical elements:", fracZero
    return err
def zeroFraction(x, neps=3):
    """
    For x being a difference between two floats, check to see if
    abs(x) < neps*eps where eps is the float precision
    """
    # Threshold is a small multiple of machine epsilon for x's dtype
    threshold = neps * np.finfo(x.dtype).eps
    # Drop NaN/inf entries before counting
    finite = notNanInf(x)
    nzero = (abs(finite) < threshold).sum()
    return nzero / float(finite.size)
| ibackus/compare-changa-builds | compchanga/compare.py | Python | mit | 18,591 | [
"BLAST"
] | 1b2f89db6c5441e6be3817500b012c6c3c0785fc83bec49d7d7c5df9635ffa00 |
#!/usr/bin/env python
# Poll one or more PyChemia databases and, for every candidate structure that
# has not been evaluated yet, prepare a VASP working directory and submit a
# PBS job for it.  Runs forever, re-checking once per hour.
import os
import logging
import argparse
import time
import pychemia
from pychemia.runner import get_jobs

if __name__ == '__main__':

    # Full debug logging from the pychemia library
    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger('pychemia')
    logger.addHandler(logging.NullHandler())
    logger.setLevel(logging.DEBUG)

    description = """Launch VASP for non-evaluated entries in a PyChemia Database"""

    parser = argparse.ArgumentParser(description=description)

    # --- MongoDB connection settings ---
    parser.add_argument('-t', '--host',
                        default='localhost', metavar='server', type=str,
                        help='Hostname or address (default: localhost)')
    parser.add_argument('-o', '--port',
                        default=27017, metavar='port', type=int,
                        help='MongoDB port (default: 27017)')
    parser.add_argument('-u', '--user',
                        default=None, metavar='username', type=str,
                        help='Username (default: None)')
    parser.add_argument('-p', '--passwd',
                        default=None, metavar='password', type=str,
                        help='Password (default: None)')
    parser.add_argument('-d', '--dbname',
                        default=None, metavar='dbname', type=str, nargs='+',
                        help='PyChemia Database name (default: None)')
    parser.add_argument('-b', '--binary',
                        default='vasp', metavar='path', type=str,
                        help='VASP binary (default: vasp)')
    parser.add_argument('-s', '--source_dir',
                        default=None, metavar='path', type=str, nargs='+',
                        help='Source Directory where KPOINTS, POSCAR, INCAR and POTCAR are (default: None)')
    parser.add_argument('-r', '--replicaset',
                        default=None, metavar='name', type=str,
                        help='ReplicaSet (default: None)')
    parser.add_argument('--ssl', action='store_true',
                        help='Use SSL to connect to MongoDB (default: No)')

    # --- PBS queueing settings ---
    parser.add_argument('--pbs_ppn',
                        default=1, metavar='N', type=int,
                        help='Number of MPI parallel processes (default: 1)')
    parser.add_argument('--pbs_mail',
                        default=None, metavar='user@mail.server', type=str,
                        help='Mail address for PBS (default: None)')
    parser.add_argument('--pbs_queue',
                        default=None, metavar='queue_name', type=str,
                        help='Queue for PBS (default: None)')
    parser.add_argument('--pbs_nhours',
                        default=24, metavar='N', type=int,
                        help='Number of hours for PBS (default: 24)')
    parser.add_argument('--pbs_user',
                        default=None, metavar='username', type=str,
                        help='Username for PBS (default: None)')

    args = parser.parse_args()
    if args.dbname is None:
        parser.print_help()
        exit(1)
    print(args)

    db_settings = {'host': args.host, 'port': args.port, 'ssl': args.ssl, 'replicaset': args.replicaset}
    # Credentials are optional, but a user without a password is rejected
    if args.user is not None:
        if args.passwd is None:
            raise ValueError('Password is mandatory if user is entered')
        db_settings['user'] = args.user
        db_settings['passwd'] = args.passwd

    print('pyChemia Evaluator using VASP')
    print('')
    print('dbname : %s' % args.dbname)
    print('source_dir: %s' % args.source_dir)
    print('')
    print('host : %s' % args.host)
    print('port : %d' % args.port)
    print('user : %s' % args.user)
    print('replicaset: %s' % args.replicaset)
    print('binary : %s' % str(args.binary))
    print('ssl : %s' % str(args.ssl))

    # Each database is paired with exactly one directory of VASP input files
    assert(len(args.dbname) == len(args.source_dir))

    # Poll indefinitely: each pass checks every database for unevaluated
    # candidates, harvests finished runs, and submits PBS jobs for the rest.
    while True:
        for idb in range(len(args.dbname)):
            print('DATABASE: %s' % args.dbname[idb])
            db_settings['name'] = args.dbname[idb]
            pcdb = pychemia.db.get_database(db_settings)
            popu = pychemia.population.NonCollinearMagMoms(pcdb, source_dir=args.source_dir[idb])
            print('Number of candidates evaluated: %d' % len(popu.actives_evaluated))
            print('Number of candidates not evaluated: %d' % len(popu.actives_no_evaluated))
            to_compute = popu.actives_no_evaluated

            print('Candidates to compute:')
            for ijob in to_compute:
                print(ijob)

            # Jobs currently queued/running for this user; compared by name so
            # a candidate is not submitted twice.
            jobs = get_jobs(args.pbs_user)
            jobnames = [jobs[x]['Job_Name'] for x in jobs]

            source_dir = args.source_dir[idb]
            for ijob in to_compute:
                if str(ijob) not in jobnames:
                    # Try to harvest results first; submit only when nothing
                    # could be collected from the working directory.
                    data_collected = popu.collect_data(ijob, workdir=source_dir + os.sep + str(ijob))
                    if not data_collected:
                        print('Preparing and submitting job: %s' % str(ijob))
                        popu.prepare_folder(ijob, workdir=source_dir + os.sep + str(ijob))
                        pbs = pychemia.runner.PBSRunner(source_dir + os.sep + str(ijob))
                        pbs.initialize(ppn=args.pbs_ppn, walltime=[args.pbs_nhours, 0, 0], mail=args.pbs_mail,
                                       queue=args.pbs_queue)
                        pbs.set_template(source_dir+os.sep+'template.pbs')
                        pbs.write_pbs()
                        pbs.submit()
                else:
                    print('Job %s is on queue or running' % str(ijob))
        print('I will be waiting for 60 minutes before checking new candidates')
        time.sleep(3600)
| MaterialsDiscovery/PyChemia | scripts/EvaluatorNonCollinear.py | Python | mit | 5,652 | [
"VASP"
] | ccd4a4b9a5a4183e0d06e087ba1cfd49b7fb3dacdb3e093610de61ecc7fabcdf |
#pylint: disable=missing-docstring
#################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
#################################################################
import os
import glob
import shutil
import subprocess
import numpy as np
import vtk
from Options import Option, Options
import AxisOptions
import FontOptions
import LegendOptions
def get_active_filenames(basename, pattern=None):
    """
    Return a list of tuples containing 'active' filenames and modified times.

    Inputs:
        basename[str]: The base filename (e.g., file_out.e)
        pattern[str]: (Optional) Additional files to consider via glob pattern (e.g., file_out.e-s*)
    """
    def mtime(path):
        """Modified time of *path*, or 0 when it does not exist."""
        return os.path.getmtime(path) if os.path.exists(path) else 0

    # Collect and sort all candidate filenames
    filenames = [basename]
    if pattern:
        filenames.extend(glob.glob(pattern))
    filenames.sort()

    # A file is 'active' when it was modified at or after the first file
    reference = mtime(filenames[0])
    output = []
    for fname in filenames:
        stamp = mtime(fname)
        if stamp >= reference:
            output.append((fname, stamp))
    return output
def copy_adaptive_exodus_test_files(testbase):
    """
    A helper for copying test Exodus files.
    """
    # The reference files live in ../tests/input relative to this module
    src_base = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'tests', 'input',
                                            'step10_micro_out.e'))
    sources = [src_base] + glob.glob(src_base + '-s*')

    copied = []
    for source in sources:
        # Destination keeps the adaptivity suffix (.e, .e-s002, ...)
        extension = os.path.splitext(source)[1]
        destination = os.path.join(os.getcwd(), testbase + extension)
        copied.append(destination)
        shutil.copy(source, destination)
    return sorted(copied)
def get_bounds_min_max(*all_bounds):
    """
    Returns min,max bounds arrays provided a list of bounds sets.
    """
    # Each bounds set is (xmin, xmax, ymin, ymax, zmin, zmax); even indices
    # hold the minimums, odd indices the maximums.
    xmin = [float('inf')] * 3
    xmax = [float('-inf')] * 3
    for bnds in all_bounds:
        for axis in range(3):
            xmin[axis] = min(xmin[axis], bnds[2 * axis])
            xmax[axis] = max(xmax[axis], bnds[2 * axis + 1])
    return xmin, xmax
def get_bounds(*sources):
    """
    Returns the bounding box for all supplied sources.
    """
    # Gather the VTK mapper bounds for every source, then reduce them
    all_bounds = [src.getVTKMapper().GetBounds() for src in sources]
    return get_bounds_min_max(*all_bounds)
def compute_distance(*sources):
    """
    Returns the distance across the bounding box for all supplied sources.
    """
    lower, upper = get_bounds(*sources)
    # Euclidean length of the bounding-box diagonal
    return np.linalg.norm(np.array(upper) - np.array(lower))
def get_min_max(*pairs):
    """
    Retuns the min/max from a set of min/max pairs.
    """
    lo = float('inf')
    hi = float('-inf')
    for pair_min, pair_max in pairs:
        if pair_min < lo:
            lo = pair_min
        if pair_max > hi:
            hi = pair_max
    return lo, hi
def print_camera(camera, prefix='camera', precision=4):
"""
Prints vtkCamera object to screen.
"""
if not isinstance(camera, vtk.vtkCamera):
print "You must supply a vtkCarmera object."
return
view_up = camera.GetViewUp()
position = camera.GetPosition()
focal = camera.GetFocalPoint()
def dump(precision, vec):
"""
Helper for dumping settings.
"""
p = str(precision)
frmt = ''.join(['{:', p, '.', p, 'f}'])
d = ''.join(['(', frmt, ', ', frmt, ', ', frmt, ')'])
return d.format(*vec)
return [prefix + '.SetViewUp' + dump(precision, view_up), prefix + '.SetPosition' + \
dump(precision, position), prefix + '.SetFocalPoint' + \
dump(precision, focal)]
def animate(pattern, output, delay=20, restart_delay=500, loop=True):
    """
    Runs ImageMagick convert to create an animated gif from a series of images.

    Inputs:
        pattern[str]: Glob pattern matching the input frames.
        output[str]: The output filename (e.g., 'out.gif').
        delay[int]: Delay between frames, in ticks.
        restart_delay[int]: Delay applied to the final frame before looping.
        loop[bool]: When True, the animation repeats indefinitely.

    Raises:
        IOError: If no files match the supplied pattern.
    """
    # Sort for a deterministic frame order; glob does not guarantee ordering.
    filenames = sorted(glob.glob(pattern))
    # BUG FIX: previously an empty match crashed with IndexError on delay[-1]
    if not filenames:
        raise IOError("No files match the pattern: {}".format(pattern))
    delay = [delay]*len(filenames)
    delay[-1] = restart_delay
    cmd = ['convert']
    for d, f in zip(delay, filenames):
        cmd += ['-delay', str(d), f]
    if loop:
        cmd += ['-loop', '0']
    cmd += [output]
    subprocess.call(cmd)
| backmari/moose | python/chigger/utils/__init__.py | Python | lgpl-2.1 | 4,971 | [
"MOOSE",
"VTK"
] | 14a6f51e40b91f57cb53fc37f5d2a443794666a5df3641276f2046002441b4ee |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import time
import sys
import numpy as np
from pyscf import lib
from pyscf import ao2mo
from pyscf.lib import logger
from pyscf.pbc.cc.kccsd_rhf import vector_to_nested, nested_to_vector
from pyscf.lib.parameters import LOOSE_ZERO_TOL, LARGE_DENOM
from pyscf.lib import linalg_helper
from pyscf.pbc.cc import eom_kccsd_ghf as eom_kgccsd
from pyscf.pbc.cc import kintermediates_rhf as imdk
from pyscf.pbc.cc.kccsd_rhf import _get_epq
from pyscf.pbc.cc.kccsd_t_rhf import _get_epqr
from pyscf.pbc.lib import kpts_helper
from pyscf.pbc.mp.kmp2 import (get_frozen_mask, get_nocc, get_nmo,
padded_mo_coeff, padding_k_idx)
einsum = lib.einsum
########################################
# EOM-IP-CCSD
########################################
def ipccsd_matvec(eom, vector, kshift, imds=None, diag=None):
    '''2ph operators are of the form s_{ij}^{ b}, i.e. 'jb' indices are coupled.

    Matrix-vector product of the similarity-transformed Hamiltonian with an
    EOM-IP trial vector at k-point index `kshift`.

    Args:
        eom: EOMIP object supplying amplitudes/vector conversions.
        vector: Flattened (r1, r2) trial vector.
        kshift: k-point index of the removed electron.
        imds: Optional precomputed intermediates (built on demand).
        diag: Optional precomputed diagonal for partition == 'full'.

    Returns:
        Flattened (Hr1, Hr2) product with frozen entries masked to zero.
    '''
    # Ref: Nooijen and Snijders, J. Chem. Phys. 102, 1681 (1995) Eqs.(8)-(9)
    if imds is None: imds = eom.make_imds()
    t1, t2 = imds.t1, imds.t2
    nkpts, nocc, nvir = imds.t1.shape
    kconserv = imds.kconserv

    vector = eom.mask_frozen(vector, kshift, const=0.0)
    r1, r2 = eom.vector_to_amplitudes(vector)

    # 1h-1h block
    Hr1 = -einsum('ki,k->i', imds.Loo[kshift], r1)
    # 1h-2h1p block
    for kl in range(nkpts):
        Hr1 += 2. * einsum('ld,ild->i', imds.Fov[kl], r2[kshift, kl])
        Hr1 += -einsum('ld,lid->i', imds.Fov[kl], r2[kl, kshift])
        for kk in range(nkpts):
            kd = kconserv[kk, kshift, kl]
            Hr1 += -2. * einsum('klid,kld->i', imds.Wooov[kk, kl, kshift], r2[kk, kl])
            Hr1 += einsum('lkid,kld->i', imds.Wooov[kl, kk, kshift], r2[kk, kl])

    Hr2 = np.zeros(r2.shape, dtype=np.result_type(imds.Wovoo.dtype, r1.dtype))
    # 2h1p-1h block
    for ki in range(nkpts):
        for kj in range(nkpts):
            kb = kconserv[ki, kshift, kj]
            Hr2[ki, kj] -= einsum('kbij,k->ijb', imds.Wovoo[kshift, kb, ki], r1)
    # 2h1p-2h1p block
    if eom.partition == 'mp':
        # MP partitioning keeps only the bare Fock diagonal contributions
        fock = imds.eris.fock
        foo = fock[:, :nocc, :nocc]
        fvv = fock[:, nocc:, nocc:]
        for ki in range(nkpts):
            for kj in range(nkpts):
                kb = kconserv[ki, kshift, kj]
                Hr2[ki, kj] += einsum('bd,ijd->ijb', fvv[kb], r2[ki, kj])
                Hr2[ki, kj] -= einsum('li,ljb->ijb', foo[ki], r2[ki, kj])
                Hr2[ki, kj] -= einsum('lj,ilb->ijb', foo[kj], r2[ki, kj])
    elif eom.partition == 'full':
        # BUG FIX: previously `if diag is not None: diag = eom.get_diag(...)`
        # (inverted condition) followed by a call to an undefined module-level
        # `vector_to_amplitudes(diag, nmo, nocc)` -- this branch could never
        # run.  Build the diagonal when it is missing and unpack it via the
        # EOM object's own converter.
        if diag is None:
            diag = eom.get_diag(imds=imds)
        diag_matrix2 = eom.vector_to_amplitudes(diag)[1]
        Hr2 += diag_matrix2 * r2
    else:
        for ki in range(nkpts):
            for kj in range(nkpts):
                kb = kconserv[ki, kshift, kj]
                Hr2[ki, kj] += einsum('bd,ijd->ijb', imds.Lvv[kb], r2[ki, kj])
                Hr2[ki, kj] -= einsum('li,ljb->ijb', imds.Loo[ki], r2[ki, kj])
                Hr2[ki, kj] -= einsum('lj,ilb->ijb', imds.Loo[kj], r2[ki, kj])
                for kl in range(nkpts):
                    kk = kconserv[ki, kl, kj]
                    Hr2[ki, kj] += einsum('klij,klb->ijb', imds.Woooo[kk, kl, ki], r2[kk, kl])
                    kd = kconserv[kl, kj, kb]
                    Hr2[ki, kj] += 2. * einsum('lbdj,ild->ijb', imds.Wovvo[kl, kb, kd], r2[ki, kl])
                    Hr2[ki, kj] += -einsum('lbdj,lid->ijb', imds.Wovvo[kl, kb, kd], r2[kl, ki])
                    Hr2[ki, kj] += -einsum('lbjd,ild->ijb', imds.Wovov[kl, kb, kj], r2[ki, kl])  # typo in Ref
                    kd = kconserv[kl, ki, kb]
                    Hr2[ki, kj] += -einsum('lbid,ljd->ijb', imds.Wovov[kl, kb, ki], r2[kl, kj])
        # 3-body contraction through the ground-state t2 amplitudes
        tmp = (2. * einsum('xyklcd,xykld->c', imds.Woovv[:, :, kshift], r2[:, :])
               - einsum('yxlkcd,xykld->c', imds.Woovv[:, :, kshift], r2[:, :]))
        Hr2[:, :] += -einsum('c,xyijcb->xyijb', tmp, t2[:, :, kshift])

    return eom.mask_frozen(eom.amplitudes_to_vector(Hr1, Hr2), kshift, const=0.0)
def lipccsd_matvec(eom, vector, kshift, imds=None, diag=None):
    '''2hp operators are of the form s_{kl}^{ d}, i.e. 'ld' indices are coupled.

    Left-hand matrix-vector product for EOM-IP-CCSD at k-point `kshift`.
    '''
    # Ref: Nooijen and Snijders, J. Chem. Phys. 102, 1681 (1995) Eqs.(8)-(9)
    # Left eigenproblem is only implemented without partitioning
    assert(eom.partition == None)
    if imds is None: imds = eom.make_imds()
    t1, t2 = imds.t1, imds.t2
    nkpts, nocc, nvir = imds.t1.shape
    kconserv = imds.kconserv

    vector = eom.mask_frozen(vector, kshift, const=0.0)
    r1, r2 = eom.vector_to_amplitudes(vector)

    # 1h-1h block
    Hr1 = -einsum('ki,i->k',imds.Loo[kshift],r1)
    # 2h1p contributions to the 1h block
    for ki, kb in itertools.product(range(nkpts), repeat=2):
        kj = kconserv[kshift,ki,kb]
        Hr1 -= einsum('kbij,ijb->k',imds.Wovoo[kshift,kb,ki],r2[ki,kj])

    Hr2 = np.zeros(r2.shape, dtype=np.result_type(imds.Wovoo.dtype, r1.dtype))
    # 1h contributions to the 2h1p block (spin-adapted W combinations)
    for kl, kk in itertools.product(range(nkpts), repeat=2):
        kd = kconserv[kk,kshift,kl]
        SWooov = (2. * imds.Wooov[kk,kl,kshift] -
                  imds.Wooov[kl,kk,kshift].transpose(1, 0, 2, 3))
        Hr2[kk,kl] -= einsum('klid,i->kld',SWooov,r1)
        # The (kk==kd)/(kl==kd) factors zero the term unless momentum allows it
        Hr2[kk,kshift] -= (kk==kd)*einsum('kd,l->kld',imds.Fov[kk],r1)
        Hr2[kshift,kl] += (kl==kd)*2.*einsum('ld,k->kld',imds.Fov[kl],r1)
    # 2h1p-2h1p block
    for kl, kk in itertools.product(range(nkpts), repeat=2):
        kd = kconserv[kk,kshift,kl]
        Hr2[kk,kl] -= einsum('ki,ild->kld',imds.Loo[kk],r2[kk,kl])
        Hr2[kk,kl] -= einsum('lj,kjd->kld',imds.Loo[kl],r2[kk,kl])
        Hr2[kk,kl] += einsum('bd,klb->kld',imds.Lvv[kd],r2[kk,kl])
        for kj in range(nkpts):
            kb = kconserv[kd, kl, kj]
            SWovvo = (2. * imds.Wovvo[kl,kb,kd] -
                      imds.Wovov[kl,kb,kj].transpose(0, 1, 3, 2))
            Hr2[kk,kl] += einsum('lbdj,kjb->kld',SWovvo,r2[kk,kj])
            kb = kconserv[kd, kk, kj]
            Hr2[kk,kl] -= einsum('kbdj,ljb->kld',imds.Wovvo[kk,kb,kd],r2[kl,kj])
            Hr2[kk,kl] -= einsum('kbjd,jlb->kld',imds.Wovov[kk,kb,kj],r2[kj,kl])
            ki = kconserv[kk,kj,kl]
            Hr2[kk,kl] += einsum('klji,jid->kld',imds.Woooo[kk,kl,kj],r2[kj,ki])
    # 3-body contraction through the ground-state t2 amplitudes
    tmp = np.zeros(nvir, dtype=np.result_type(imds.Wovoo.dtype, r1.dtype))
    for ki, kj in itertools.product(range(nkpts), repeat=2):
        kc = kshift
        tmp += einsum('ijcb,ijb->c',t2[ki, kj, kc],r2[ki, kj])
    for kl, kk in itertools.product(range(nkpts), repeat=2):
        kd = kconserv[kk,kshift,kl]
        SWoovv = (2. * imds.Woovv[kl, kk, kd] -
                  imds.Woovv[kk, kl, kd].transpose(1, 0, 2, 3))
        Hr2[kk, kl] -= einsum('lkdc,c->kld',SWoovv, tmp)

    return eom.mask_frozen(eom.amplitudes_to_vector(Hr1, Hr2), kshift, const=0.0)
def ipccsd_diag(eom, kshift, imds=None, diag=None):
    """Diagonal of the EOM-IP-CCSD effective Hamiltonian at k-point `kshift`."""
    # Ref: Nooijen and Snijders, J. Chem. Phys. 102, 1681 (1995) Eqs.(8)-(9)
    if imds is None: imds = eom.make_imds()
    t1, t2 = imds.t1, imds.t2
    nkpts, nocc, nvir = t1.shape
    kconserv = imds.kconserv

    # 1h-1h diagonal
    Hr1 = -np.diag(imds.Loo[kshift])

    # 2h1p-2h1p diagonal
    Hr2 = np.zeros((nkpts, nkpts, nocc, nocc, nvir), dtype=t1.dtype)
    if eom.partition == 'mp':
        # MP partitioning: diagonal from the bare Fock matrix only
        foo = eom.eris.fock[:, :nocc, :nocc]
        fvv = eom.eris.fock[:, nocc:, nocc:]
        for ki in range(nkpts):
            for kj in range(nkpts):
                kb = kconserv[ki, kshift, kj]
                Hr2[ki, kj] = fvv[kb].diagonal()
                Hr2[ki, kj] -= foo[ki].diagonal()[:, None, None]
                Hr2[ki, kj] -= foo[kj].diagonal()[:, None]
    else:
        idx = np.arange(nocc)
        for ki in range(nkpts):
            for kj in range(nkpts):
                kb = kconserv[ki, kshift, kj]
                Hr2[ki, kj] = imds.Lvv[kb].diagonal()
                Hr2[ki, kj] -= imds.Loo[ki].diagonal()[:, None, None]
                Hr2[ki, kj] -= imds.Loo[kj].diagonal()[:, None]
                # Woooo contributes only when momentum conservation permits
                if ki == kconserv[ki, kj, kj]:
                    Hr2[ki, kj] += np.einsum('ijij->ij', imds.Woooo[ki, kj, ki])[:, :, None]
                Hr2[ki, kj] -= np.einsum('jbjb->jb', imds.Wovov[kj, kb, kj])
                Wovvo = np.einsum('jbbj->jb', imds.Wovvo[kj, kb, kb])
                Hr2[ki, kj] += 2. * Wovvo
                if ki == kj:  # and i == j
                    # Remove the double-counted exchange part on the i == j diagonal
                    Hr2[ki, ki, idx, idx] -= Wovvo
                Hr2[ki, kj] -= np.einsum('ibib->ib', imds.Wovov[ki, kb, ki])[:, None, :]
                kd = kconserv[kj, kshift, ki]
                Hr2[ki, kj] -= 2. * np.einsum('ijcb,jibc->ijb', t2[ki, kj, kshift], imds.Woovv[kj, ki, kd])
                Hr2[ki, kj] += np.einsum('ijcb,ijbc->ijb', t2[ki, kj, kshift], imds.Woovv[ki, kj, kd])
    return eom.amplitudes_to_vector(Hr1, Hr2)
def ipccsd_star_contract(eom, ipccsd_evals, ipccsd_evecs, lipccsd_evecs, kshift, imds=None):
    '''For description of arguments, see `ipccsd_star_contract` in `kccsd_ghf.py`.

    Computes the perturbative IP-CCSD* correction for each supplied
    right/left eigenvector pair and returns the corrected energies.
    '''
    assert (eom.partition == None)
    # NOTE(review): cpu1/cpu0 and log are set up but not used in this function
    cpu1 = cpu0 = (time.clock(), time.time())
    log = logger.Logger(eom.stdout, eom.verbose)
    if imds is None:
        imds = eom.make_imds()
    t1, t2 = imds.t1, imds.t2
    eris = imds.eris
    fock = eris.fock
    nkpts, nocc, nvir = t1.shape
    nmo = nocc + nvir
    dtype = np.result_type(t1, t2)
    kconserv = eom.kconserv

    # NOTE(review): fov/foo/fvv are built but never referenced below
    fov = np.array([fock[ikpt, :nocc, nocc:] for ikpt in range(nkpts)])
    foo = np.array([fock[ikpt, :nocc, :nocc].diagonal() for ikpt in range(nkpts)])
    fvv = np.array([fock[ikpt, nocc:, nocc:].diagonal() for ikpt in range(nkpts)])

    mo_energy_occ = np.array([eris.mo_energy[ki][:nocc] for ki in range(nkpts)])
    mo_energy_vir = np.array([eris.mo_energy[ki][nocc:] for ki in range(nkpts)])
    mo_e_o = mo_energy_occ
    mo_e_v = mo_energy_vir

    def contract_l3p(l1,l2,kptvec):
        '''Create perturbed left 3p2h amplitude.

        Args:
            kptvec (`ndarray`):
                Array of k-vectors [ki,kj,kk,ka,kb]
        '''
        ki, kj, kk, ka, kb = kptvec
        out = np.zeros((nocc,)*3 + (nvir,)*2, dtype=dtype)
        if kk == kshift and kj == kconserv[ka,ki,kb]:
            out += 0.5*np.einsum('ijab,k->ijkab', eris.oovv[ki,kj,ka], l1)
        ke = kconserv[kb,ki,ka]
        out += lib.einsum('eiba,jke->ijkab', eris.vovv[ke,ki,kb], l2[kj,kk])
        km = kconserv[kshift,ki,ka]
        out += -lib.einsum('kjmb,ima->ijkab', eris.ooov[kk,kj,km], l2[ki,km])
        km = kconserv[ki,kb,kj]
        out += -lib.einsum('ijmb,mka->ijkab', eris.ooov[ki,kj,km], l2[km,kk])
        return out

    def contract_pl3p(l1,l2,kptvec):
        '''Create P(ia|jb) of perturbed left 3p2h amplitude.

        Args:
            kptvec (`ndarray`):
                Array of k-vectors [ki,kj,kk,ka,kb]
        '''
        kptvec = np.asarray(kptvec)
        out = contract_l3p(l1,l2,kptvec)
        out += contract_l3p(l1,l2,kptvec[[1,0,2,4,3]]).transpose(1,0,2,4,3)  # P(ia|jb)
        return out

    def contract_r3p(r1,r2,kptvec):
        '''Create perturbed right 3p2h amplitude.

        Args:
            kptvec (`ndarray`):
                Array of k-vectors [ki,kj,kk,ka,kb]
        '''
        ki, kj, kk, ka, kb = kptvec
        out = np.zeros((nocc,)*3 + (nvir,)*2, dtype=dtype)
        tmp = np.einsum('mbke,m->bke', eris.ovov[kshift,kb,kk], r1)
        out += -lib.einsum('bke,ijae->ijkab', tmp, t2[ki,kj,ka])
        ke = kconserv[kb,kshift,kj]
        tmp = np.einsum('bmje,m->bej', eris.voov[kb,kshift,kj], r1)
        out += -lib.einsum('bej,ikae->ijkab', tmp, t2[ki,kk,ka])
        km = kconserv[ka,ki,kb]
        tmp = np.einsum('mnjk,n->mjk', eris.oooo[km,kshift,kj], r1)
        out += lib.einsum('mjk,imab->ijkab', tmp, t2[ki,km,ka])
        ke = kconserv[kk,kshift,kj]
        out += lib.einsum('eiba,kje->ijkab', eris.vovv[ke,ki,kb].conj(), r2[kk,kj])
        km = kconserv[kk,kb,kj]
        out += -lib.einsum('kjmb,mia->ijkab', eris.ooov[kk,kj,km].conj(), r2[km,ki])
        km = kconserv[ki,kb,kj]
        out += -lib.einsum('ijmb,kma->ijkab', eris.ooov[ki,kj,km].conj(), r2[kk,km])
        return out

    def contract_pr3p(r1,r2,kptvec):
        '''Create P(ia|jb) of perturbed right 3p2h amplitude.

        Args:
            kptvec (`ndarray`):
                Array of k-vectors [ki,kj,kk,ka,kb]
        '''
        kptvec = np.asarray(kptvec)
        out = contract_r3p(r1,r2,kptvec)
        out += contract_r3p(r1,r2,kptvec[[1,0,2,4,3]]).transpose(1,0,2,4,3)  # P(ia|jb)
        return out

    ipccsd_evecs = np.array(ipccsd_evecs)
    lipccsd_evecs = np.array(lipccsd_evecs)
    e_star = []
    ipccsd_evecs, lipccsd_evecs = [np.atleast_2d(x) for x in [ipccsd_evecs, lipccsd_evecs]]
    ipccsd_evals = np.atleast_1d(ipccsd_evals)
    for ip_eval, ip_evec, ip_levec in zip(ipccsd_evals, ipccsd_evecs, lipccsd_evecs):
        # Enforcing <L|R> = 1
        l1, l2 = eom.vector_to_amplitudes(ip_levec, kshift)
        r1, r2 = eom.vector_to_amplitudes(ip_evec, kshift)
        ldotr = np.dot(l1, r1) + np.dot(l2.ravel(), r2.ravel())

        # Transposing the l2 operator
        l2T = np.zeros_like(l2)
        for ki in range(nkpts):
            for kj in range(nkpts):
                ka = kconserv[ki,kshift,kj]
                l2T[ki,kj] = l2[kj,ki].transpose(1,0,2)
        l2 = (l2 + 2.*l2T)/3.

        logger.info(eom, 'Left-right amplitude overlap : %14.8e + 1j %14.8e',
                    ldotr.real, ldotr.imag)
        if abs(ldotr) < 1e-7:
            logger.warn(eom, 'Small %s left-right amplitude overlap. Results '
                             'may be inaccurate.', ldotr)
        l1 /= ldotr
        l2 /= ldotr

        deltaE = 0.0 + 1j*0.0
        # NOTE(review): eij is computed but appears unused below -- the
        # commented continuation suggests an abandoned 6-index variant.
        eij = (mo_e_o[:, None, :, None, None] + mo_e_o[None, :, None, :, None])
        #mo_e_o[None, None, :, None, None, :])
        for ka, kb in itertools.product(range(nkpts), repeat=2):
            lijkab = np.zeros((nkpts,nkpts,nocc,nocc,nocc,nvir,nvir),dtype=dtype)
            Plijkab = np.zeros((nkpts,nkpts,nocc,nocc,nocc,nvir,nvir),dtype=dtype)
            rijkab = np.zeros((nkpts,nkpts,nocc,nocc,nocc,nvir,nvir),dtype=dtype)
            eijk = np.zeros((nkpts,nkpts,nocc,nocc,nocc),dtype=mo_e_o.dtype)

            # kk is fixed by momentum conservation for every (ki, kj)
            kklist = kpts_helper.get_kconserv3(eom._cc._scf.cell, eom._cc.kpts,
                                               [ka,kb,kshift,range(nkpts),range(nkpts)])

            for ki, kj in itertools.product(range(nkpts), repeat=2):
                kk = kklist[ki,kj]
                kptvec = [ki,kj,kk,ka,kb]
                lijkab[ki,kj] = contract_pl3p(l1,l2,kptvec)
                rijkab[ki,kj] = contract_pr3p(r1,r2,kptvec)

            for ki, kj in itertools.product(range(nkpts), repeat=2):
                kk = kklist[ki,kj]
                # Spin-adapted permutation combination of the left amplitudes
                Plijkab[ki,kj] = (4.*lijkab[ki,kj] +
                                  1.*lijkab[kj,kk].transpose(2,0,1,3,4) +
                                  1.*lijkab[kk,ki].transpose(1,2,0,3,4) -
                                  2.*lijkab[ki,kk].transpose(0,2,1,3,4) -
                                  2.*lijkab[kk,kj].transpose(2,1,0,3,4) -
                                  2.*lijkab[kj,ki].transpose(1,0,2,3,4))

                eijk[ki,kj] = _get_epqr([0,nocc,ki,mo_e_o,eom.nonzero_opadding],
                                        [0,nocc,kj,mo_e_o,eom.nonzero_opadding],
                                        [0,nocc,kk,mo_e_o,eom.nonzero_opadding])

            # Creating denominator
            eab = _get_epq([0,nvir,ka,mo_e_v,eom.nonzero_vpadding],
                           [0,nvir,kb,mo_e_v,eom.nonzero_vpadding],
                           fac=[-1.,-1.])
            # Creating denominator
            eijkab = (eijk[:, :, :, :, :, None, None] +
                      eab[None, None, None, None, None, :, :])
            denom = eijkab + ip_eval
            denom = 1. / denom

            deltaE += lib.einsum('xyijkab,xyijkab,xyijkab', Plijkab, rijkab, denom)

        deltaE *= 0.5
        deltaE = deltaE.real
        logger.info(eom, "ipccsd energy, star energy, delta energy = %16.12f, %16.12f, %16.12f",
                    ip_eval, ip_eval + deltaE, deltaE)
        e_star.append(ip_eval + deltaE)
    return e_star
class EOMIP(eom_kgccsd.EOMIP):
    """EOM-IP-CCSD driver for k-point RHF-based CCSD.

    Wires the module-level contraction routines into the generic k-point
    EOM machinery provided by `eom_kgccsd.EOMIP`.
    """
    matvec = ipccsd_matvec
    l_matvec = lipccsd_matvec
    get_diag = ipccsd_diag
    ccsd_star_contract = ipccsd_star_contract

    @property
    def nkpts(self):
        # Number of sampled k-points
        return len(self.kpts)

    @property
    def ip_vector_desc(self):
        """Description of the IP vector."""
        # r1 block shape: (nocc,); r2 block shape: (nkpts, nkpts, nocc, nocc, nvir)
        return [(self.nocc,), (self.nkpts, self.nkpts, self.nocc, self.nocc, self.nmo - self.nocc)]

    def ip_amplitudes_to_vector(self, t1, t2):
        """Ground state amplitudes to a vector."""
        return nested_to_vector((t1, t2))[0]

    def ip_vector_to_amplitudes(self, vec):
        """Ground state vector to amplitudes."""
        return vector_to_nested(vec, self.ip_vector_desc)

    def vector_to_amplitudes(self, vector, kshift=None):
        # `kshift` is accepted for interface compatibility; not needed here
        return self.ip_vector_to_amplitudes(vector)

    def amplitudes_to_vector(self, r1, r2, kshift=None, kconserv=None):
        # `kshift`/`kconserv` accepted for interface compatibility; not needed here
        return self.ip_amplitudes_to_vector(r1, r2)

    def vector_size(self):
        """Total length of the flattened (r1, r2) EOM-IP vector."""
        nocc = self.nocc
        nvir = self.nmo - nocc
        nkpts = self.nkpts
        return nocc + nkpts**2*nocc*nocc*nvir

    def make_imds(self, eris=None):
        # Build and cache the CCSD intermediates needed by the IP equations
        imds = _IMDS(self._cc, eris)
        imds.make_ip()
        return imds
class EOMIP_Ta(EOMIP):
    '''Class for EOM IPCCSD(T)*(a) method by Matthews and Stanton.'''
    def make_imds(self, eris=None):
        # Same as EOMIP.make_imds, but `make_t3p2_ip` presumably folds the
        # perturbative triples correction into the intermediates -- see its
        # definition in the intermediates module.
        imds = _IMDS(self._cc, eris=eris)
        imds.make_t3p2_ip(self._cc)
        return imds
########################################
# EOM-EA-CCSD
########################################
def eaccsd_matvec(eom, vector, kshift, imds=None, diag=None):
    """Matrix-vector product of the EOM-EA-CCSD effective Hamiltonian.

    Args:
        eom: EOMEA object supplying amplitudes/vector conversions.
        vector: Flattened (r1, r2) trial vector.
        kshift: k-point index of the attached electron.
        imds: Optional precomputed intermediates (built on demand).
        diag: Optional precomputed diagonal for partition == 'full'.

    Returns:
        Flattened (Hr1, Hr2) product with frozen entries masked to zero.
    """
    # Ref: Nooijen and Bartlett, J. Chem. Phys. 102, 3629 (1994) Eqs.(30)-(31)
    if imds is None: imds = eom.make_imds()
    t1, t2 = imds.t1, imds.t2
    nkpts, nocc, nvir = imds.t1.shape
    kconserv = imds.kconserv

    vector = eom.mask_frozen(vector, kshift, const=0.0)
    r1, r2 = eom.vector_to_amplitudes(vector)

    # Eq. (30)
    # 1p-1p block
    Hr1 = einsum('ac,c->a', imds.Lvv[kshift], r1)
    # 1p-2p1h block
    for kl in range(nkpts):
        Hr1 += 2. * einsum('ld,lad->a', imds.Fov[kl], r2[kl, kshift])
        Hr1 += -einsum('ld,lda->a', imds.Fov[kl], r2[kl, kl])
        for kc in range(nkpts):
            kd = kconserv[kshift, kc, kl]
            Hr1 += 2. * einsum('alcd,lcd->a', imds.Wvovv[kshift, kl, kc], r2[kl, kc])
            Hr1 += -einsum('aldc,lcd->a', imds.Wvovv[kshift, kl, kd], r2[kl, kc])

    # Eq. (31)
    # 2p1h-1p block
    Hr2 = np.zeros(r2.shape, dtype=np.result_type(imds.Wvvvo.dtype, r1.dtype))
    for kj in range(nkpts):
        for ka in range(nkpts):
            kb = kconserv[kshift,ka,kj]
            Hr2[kj,ka] += einsum('abcj,c->jab',imds.Wvvvo[ka,kb,kshift],r1)

    # 2p1h-2p1h block
    if eom.partition == 'mp':
        # MP partitioning keeps only the bare Fock diagonal contributions
        fock = eom.eris.fock
        foo = fock[:, :nocc, :nocc]
        fvv = fock[:, nocc:, nocc:]
        for kj in range(nkpts):
            for ka in range(nkpts):
                kb = kconserv[kshift, ka, kj]
                Hr2[kj, ka] -= einsum('lj,lab->jab', foo[kj], r2[kj, ka])
                Hr2[kj, ka] += einsum('ac,jcb->jab', fvv[ka], r2[kj, ka])
                Hr2[kj, ka] += einsum('bd,jad->jab', fvv[kb], r2[kj, ka])
    elif eom.partition == 'full':
        # BUG FIX: previously `if diag is not None: diag = eom.get_diag(...)`
        # (inverted condition) followed by a call to an undefined module-level
        # `vector_to_amplitudes(diag, nmo, nocc)` -- this branch could never
        # run.  Build the diagonal when it is missing and unpack it via the
        # EOM object's own converter.
        if diag is None:
            diag = eom.get_diag(imds=imds)
        diag_matrix2 = eom.vector_to_amplitudes(diag)[1]
        Hr2 += diag_matrix2 * r2
    else:
        for kj in range(nkpts):
            for ka in range(nkpts):
                kb = kconserv[kshift, ka, kj]
                Hr2[kj, ka] -= einsum('lj,lab->jab', imds.Loo[kj], r2[kj, ka])
                Hr2[kj, ka] += einsum('ac,jcb->jab', imds.Lvv[ka], r2[kj, ka])
                Hr2[kj, ka] += einsum('bd,jad->jab', imds.Lvv[kb], r2[kj, ka])
                for kd in range(nkpts):
                    kc = kconserv[ka, kd, kb]
                    Wvvvv = imds.get_Wvvvv(ka, kb, kc)
                    Hr2[kj, ka] += einsum('abcd,jcd->jab', Wvvvv, r2[kj, kc])
                    kl = kconserv[kd, kb, kj]
                    Hr2[kj, ka] += 2. * einsum('lbdj,lad->jab', imds.Wovvo[kl, kb, kd], r2[kl, ka])
                    # imds.Wvovo[kb,kl,kd,kj] <= imds.Wovov[kl,kb,kj,kd].transpose(1,0,3,2)
                    Hr2[kj, ka] += -einsum('bldj,lad->jab', imds.Wovov[kl, kb, kj].transpose(1, 0, 3, 2),
                                           r2[kl, ka])
                    # imds.Wvoov[kb,kl,kj,kd] <= imds.Wovvo[kl,kb,kd,kj].transpose(1,0,3,2)
                    Hr2[kj, ka] += -einsum('bljd,lda->jab', imds.Wovvo[kl, kb, kd].transpose(1, 0, 3, 2),
                                           r2[kl, kd])
                    kl = kconserv[kd, ka, kj]
                    # imds.Wvovo[ka,kl,kd,kj] <= imds.Wovov[kl,ka,kj,kd].transpose(1,0,3,2)
                    Hr2[kj, ka] += -einsum('aldj,ldb->jab', imds.Wovov[kl, ka, kj].transpose(1, 0, 3, 2),
                                           r2[kl, kd])
        # 3-body contraction through the ground-state t2 amplitudes
        tmp = (2. * einsum('xyklcd,xylcd->k', imds.Woovv[kshift, :, :], r2[:, :])
               - einsum('xylkcd,xylcd->k', imds.Woovv[:, kshift, :], r2[:, :]))
        Hr2[:, :] += -einsum('k,xykjab->xyjab', tmp, t2[kshift, :, :])

    return eom.mask_frozen(eom.amplitudes_to_vector(Hr1, Hr2, kshift), kshift, const=0.0)
def leaccsd_matvec(eom, vector, kshift, imds=None, diag=None):
    '''2hp operators are of the form s_{ l}^{cd}, i.e. 'ld' indices are coupled.

    Left-hand matrix-vector product for EOM-EA-CCSD at k-point `kshift`.
    '''
    # Ref: Nooijen and Snijders, J. Chem. Phys. 102, 1681 (1995) Eqs.(8)-(9)
    # Left eigenproblem is only implemented without partitioning
    assert(eom.partition == None)
    if imds is None: imds = eom.make_imds()
    t1, t2 = imds.t1, imds.t2
    nkpts, nocc, nvir = imds.t1.shape
    kconserv = imds.kconserv

    vector = eom.mask_frozen(vector, kshift, const=0.0)
    r1, r2 = eom.vector_to_amplitudes(vector)

    # 1p-1p block
    Hr1 = np.einsum('ac,a->c', imds.Lvv[kshift], r1)
    # 1p-2p1h block
    for kj, ka in itertools.product(range(nkpts), repeat=2):
        kb = kconserv[kj, ka, kshift]
        Hr1 += np.einsum('abcj,jab->c', imds.Wvvvo[ka, kb, kshift], r2[kj, ka])

    # 2p1h-1p block
    # NOTE(review): Hr2 is allocated complex128 unconditionally here, unlike
    # the right-hand matvec which uses np.result_type -- verify intent.
    Hr2 = np.zeros((nkpts, nkpts, nocc, nvir, nvir), dtype=np.complex128)
    for kl, kc in itertools.product(range(nkpts), repeat=2):
        kd = kconserv[kl, kc, kshift]
        # The (kl==kd)/(kl==kc) factors zero the term unless momentum allows it
        Hr2[kl, kc] += 2. * (kl==kd) * np.einsum('c,ld->lcd', r1, imds.Fov[kd])
        Hr2[kl, kc] += - (kl==kc) * np.einsum('d,lc->lcd', r1, imds.Fov[kl])
        SWvovv = (2. * imds.Wvovv[kshift, kl, kc] -
                  imds.Wvovv[kshift, kl, kd].transpose(0, 1, 3, 2))
        Hr2[kl, kc] += np.einsum('a,alcd->lcd', r1, SWvovv)

    # 2p1h-2p1h block
    for kl, kc in itertools.product(range(nkpts), repeat=2):
        kd = kconserv[kl, kc, kshift]
        Hr2[kl, kc] += lib.einsum('lad,ac->lcd', r2[kl, kc], imds.Lvv[kc])
        Hr2[kl, kc] += lib.einsum('lcb,bd->lcd', r2[kl, kc], imds.Lvv[kd])
        Hr2[kl, kc] += -lib.einsum('jcd,lj->lcd', r2[kl, kc], imds.Loo[kl])
        for kb in range(nkpts):
            kj = kconserv[kl, kd, kb]
            SWovvo = (2. * imds.Wovvo[kl, kb, kd] -
                      imds.Wovov[kl, kb, kj].transpose(0, 1, 3, 2))
            Hr2[kl, kc] += lib.einsum('jcb,lbdj->lcd', r2[kj, kc], SWovvo)
            kj = kconserv[kl, kc, kb]
            Hr2[kl, kc] += -lib.einsum('lbjc,jbd->lcd', imds.Wovov[kl, kb, kj], r2[kj, kb])
            Hr2[kl, kc] += -lib.einsum('lbcj,jdb->lcd', imds.Wovvo[kl, kb, kc], r2[kj, kd])
            ka = kconserv[kc, kb, kd]
            Wvvvv = imds.get_Wvvvv(ka, kb, kc)
            Hr2[kl, kc] += lib.einsum('lab,abcd->lcd', r2[kl, ka], Wvvvv)

    # 3-body contraction through the ground-state t2 amplitudes
    tmp = np.zeros((nocc),dtype=t1.dtype)
    for ki, kc in itertools.product(range(nkpts), repeat=2):
        kb = kconserv[ki, kc, kshift]
        tmp += np.einsum('ijcb,ibc->j', imds.t2[ki, kshift, kc], r2[ki, kb])
    for kl, kc in itertools.product(range(nkpts), repeat=2):
        kd = kconserv[kl, kc, kshift]
        SWoovv = (2. * imds.Woovv[kl, kshift, kd] -
                  imds.Woovv[kl, kshift, kc].transpose(0, 1, 3, 2))
        Hr2[kl,kc] += -np.einsum('ljdc,j->lcd', SWoovv, tmp)

    return eom.mask_frozen(eom.amplitudes_to_vector(Hr1, Hr2), kshift, const=0.0)
def eaccsd_diag(eom, kshift, imds=None, diag=None):
    """Diagonal of the EOM-EA-CCSD effective Hamiltonian at k-point `kshift`."""
    # Ref: Nooijen and Bartlett, J. Chem. Phys. 102, 3629 (1994) Eqs.(30)-(31)
    if imds is None: imds = eom.make_imds()
    t1, t2 = imds.t1, imds.t2
    nkpts, nocc, nvir = t1.shape
    kconserv = imds.kconserv

    # 1p-1p diagonal
    Hr1 = np.diag(imds.Lvv[kshift])

    # 2p1h-2p1h diagonal
    Hr2 = np.zeros((nkpts, nkpts, nocc, nvir, nvir), dtype=t2.dtype)
    if eom.partition == 'mp':
        # MP partitioning: diagonal from the bare Fock matrix only
        foo = imds.eris.fock[:, :nocc, :nocc]
        fvv = imds.eris.fock[:, nocc:, nocc:]
        for kj in range(nkpts):
            for ka in range(nkpts):
                kb = kconserv[kshift, ka, kj]
                Hr2[kj, ka] -= foo[kj].diagonal()[:, None, None]
                Hr2[kj, ka] += fvv[ka].diagonal()[None, :, None]
                Hr2[kj, ka] += fvv[kb].diagonal()
    else:
        # NOTE(review): idx is assigned but never used in this branch
        idx = np.eye(nvir, dtype=bool)
        for kj in range(nkpts):
            for ka in range(nkpts):
                kb = kconserv[kshift, ka, kj]
                Hr2[kj, ka] -= imds.Loo[kj].diagonal()[:, None, None]
                Hr2[kj, ka] += imds.Lvv[ka].diagonal()[None, :, None]
                Hr2[kj, ka] += imds.Lvv[kb].diagonal()
                Wvvvv = imds.get_Wvvvv(ka, kb, ka)
                Hr2[kj, ka] += np.einsum('abab->ab', Wvvvv)
                Hr2[kj, ka] -= np.einsum('jbjb->jb', imds.Wovov[kj, kb, kj])[:, None, :]
                Wovvo = np.einsum('jbbj->jb', imds.Wovvo[kj, kb, kb])
                Hr2[kj, ka] += 2. * Wovvo[:, None, :]
                if ka == kb:
                    # Remove the double-counted exchange part on the a == b diagonal
                    for a in range(nvir):
                        Hr2[kj, ka, :, a, a] -= Wovvo[:, a]
                Hr2[kj, ka] -= np.einsum('jaja->ja', imds.Wovov[kj, ka, kj])[:, :, None]
                Hr2[kj, ka] -= 2 * np.einsum('ijab,ijab->jab', t2[kshift, kj, ka], imds.Woovv[kshift, kj, ka])
                Hr2[kj, ka] += np.einsum('ijab,ijba->jab', t2[kshift, kj, ka], imds.Woovv[kshift, kj, kb])

    return eom.amplitudes_to_vector(Hr1, Hr2, kshift)
def eaccsd_star_contract(eom, eaccsd_evals, eaccsd_evecs, leaccsd_evecs, kshift, imds=None):
    '''Compute the perturbative EA-CCSD* energy correction for each root.

    For description of arguments, see `eaccsd_star_contract` in `kccsd_ghf.py`.

    Returns:
        list: star-corrected excitation energies, one per input root.
    '''
    assert (eom.partition == None)
    # NOTE(review): time.clock() was removed in Python 3.8; this line needs
    # time.process_time() (or logger.process_clock()) on modern Python — confirm.
    cpu1 = cpu0 = (time.clock(), time.time())
    log = logger.Logger(eom.stdout, eom.verbose)
    if imds is None:
        imds = eom.make_imds()
    t1, t2 = imds.t1, imds.t2
    eris = imds.eris
    fock = eris.fock
    nkpts, nocc, nvir = t1.shape
    nmo = nocc + nvir
    dtype = np.result_type(t1, t2)
    kconserv = eom.kconserv
    # Per-k-point Fock blocks and diagonal orbital energies
    fov = np.array([fock[ikpt, :nocc, nocc:] for ikpt in range(nkpts)])
    foo = np.array([fock[ikpt, :nocc, :nocc].diagonal() for ikpt in range(nkpts)])
    fvv = np.array([fock[ikpt, nocc:, nocc:].diagonal() for ikpt in range(nkpts)])
    mo_energy_occ = np.array([eris.mo_energy[ki][:nocc] for ki in range(nkpts)])
    mo_energy_vir = np.array([eris.mo_energy[ki][nocc:] for ki in range(nkpts)])
    mo_e_o = mo_energy_occ
    mo_e_v = mo_energy_vir
    def contract_l3p(l1,l2,kptvec):
        '''Create perturbed left 3h2p amplitude.

        Args:
            kptvec (`ndarray`):
                Array of k-vectors [ki,kj,ka,kb,kc]
        '''
        ki, kj, ka, kb, kc = kptvec
        out = np.zeros((nocc,)*2 + (nvir,)*3, dtype=dtype)
        # l1 term contributes only on the momentum-conserving block
        if kc == kshift and kb == kconserv[ki,ka,kj]:
            out -= 0.5*lib.einsum('ijab,c->ijabc', eris.oovv[ki,kj,ka], l1)
        km = kconserv[ki,ka,kj]
        out += lib.einsum('jima,mbc->ijabc', eris.ooov[kj,ki,km], l2[km,kb])
        ke = kconserv[kshift,ka,ki]
        out -= lib.einsum('ejcb,iae->ijabc', eris.vovv[ke,kj,kc], l2[ki,ka])
        ke = kconserv[kshift,kc,ki]
        out -= lib.einsum('ejab,iec->ijabc', eris.vovv[ke,kj,ka], l2[ki,ke])
        return out
    def contract_pl3p(l1,l2,kptvec):
        '''Create P(ia|jb) of perturbed left 3h2p amplitude.

        Args:
            kptvec (`ndarray`):
                Array of k-vectors [ki,kj,ka,kb,kc]
        '''
        kptvec = np.asarray(kptvec)
        out = contract_l3p(l1,l2,kptvec)
        out += contract_l3p(l1,l2,kptvec[[1,0,3,2,4]]).transpose(1,0,3,2,4)  # P(ia|jb)
        return out
    def contract_r3p(r1,r2,kptvec):
        '''Create perturbed right 3h2p amplitude.

        Args:
            kptvec (`ndarray`):
                Array of k-vectors [ki,kj,ka,kb,kc]
        '''
        ki, kj, ka, kb, kc = kptvec
        out = np.zeros((nocc,)*2 + (nvir,)*3, dtype=dtype)
        ke = kconserv[ki,ka,kj]
        tmp = lib.einsum('bcef,f->bce', eris.vvvv[kb,kc,ke], r1)
        out -= lib.einsum('bce,ijae->ijabc', tmp, t2[ki,kj,ka])
        km = kconserv[kshift,kc,kj]
        tmp = einsum('mcje,e->mcj',eris.ovov[km,kc,kj],r1)
        out += einsum('mcj,imab->ijabc',tmp,t2[ki,km,ka])
        km = kconserv[kc,ki,ka]
        tmp = einsum('bmje,e->mbj',eris.voov[kb,km,kj],r1)
        out += einsum('mbj,imac->ijabc',tmp,t2[ki,km,ka])
        km = kconserv[ki,ka,kj]
        out += einsum('jima,mcb->ijabc',eris.ooov[kj,ki,km].conj(),r2[km,kc])
        ke = kconserv[kshift,ka,ki]
        out += -einsum('ejcb,iea->ijabc',eris.vovv[ke,kj,kc].conj(),r2[ki,ke])
        ke = kconserv[kshift,kc,kj]
        out += -einsum('eiba,jce->ijabc',eris.vovv[ke,ki,kb].conj(),r2[kj,kc])
        return out
    def contract_pr3p(r1,r2,kptvec):
        '''Create P(ia|jb) of perturbed right 3h2p amplitude.

        Args:
            kptvec (`ndarray`):
                Array of k-vectors [ki,kj,ka,kb,kc]
        '''
        kptvec = np.asarray(kptvec)
        out = contract_r3p(r1,r2,kptvec)
        out += contract_r3p(r1,r2,kptvec[[1,0,3,2,4]]).transpose(1,0,3,2,4)  # P(ia|jb)
        return out
    eaccsd_evecs = np.array(eaccsd_evecs)
    leaccsd_evecs = np.array(leaccsd_evecs)
    e_star = []
    eaccsd_evecs, leaccsd_evecs = [np.atleast_2d(x) for x in [eaccsd_evecs, leaccsd_evecs]]
    eaccsd_evals = np.atleast_1d(eaccsd_evals)
    for ea_eval, ea_evec, ea_levec in zip(eaccsd_evals, eaccsd_evecs, leaccsd_evecs):
        # Enforcing <L|R> = 1
        l1, l2 = eom.vector_to_amplitudes(ea_levec, kshift)
        r1, r2 = eom.vector_to_amplitudes(ea_evec, kshift)
        ldotr = np.dot(l1, r1) + np.dot(l2.ravel(), r2.ravel())
        # Transposing the l2 operator (spin-adapted symmetrization)
        l2T = np.zeros_like(l2)
        for kj, ka in itertools.product(range(nkpts), repeat=2):
            kb = kconserv[ka,kj,kshift]
            l2T[kj,kb] = l2[kj,ka].transpose(0,2,1)
        l2 = (l2 + 2.*l2T)/3.
        logger.info(eom, 'Left-right amplitude overlap : %14.8e + 1j %14.8e',
                    ldotr.real, ldotr.imag)
        if abs(ldotr) < 1e-7:
            logger.warn(eom, 'Small %s left-right amplitude overlap. Results '
                        'may be inaccurate.', ldotr)
        # Normalize the left vector so that <L|R> = 1
        l1 /= ldotr
        l2 /= ldotr
        deltaE = 0.0 + 1j*0.0
        for ki, kj in itertools.product(range(nkpts), repeat=2):
            lijabc = np.zeros((nkpts,nkpts,nocc,nocc,nvir,nvir,nvir),dtype=dtype)
            Plijabc = np.zeros((nkpts,nkpts,nocc,nocc,nvir,nvir,nvir),dtype=dtype)
            rijabc = np.zeros((nkpts,nkpts,nocc,nocc,nvir,nvir,nvir),dtype=dtype)
            eabc = np.zeros((nkpts,nkpts,nvir,nvir,nvir),dtype=dtype)
            kclist = kpts_helper.get_kconserv3(eom._cc._scf.cell, eom._cc.kpts,
                                               [ki,kj,kshift,range(nkpts),range(nkpts)])
            for ka, kb in itertools.product(range(nkpts), repeat=2):
                kc = kclist[ka,kb]
                kptvec = [ki,kj,ka,kb,kc]
                lijabc[ka,kb] = contract_pl3p(l1,l2,kptvec)
                rijabc[ka,kb] = contract_pr3p(r1,r2,kptvec)
            for ka, kb in itertools.product(range(nkpts), repeat=2):
                kc = kclist[ka,kb]
                # P() permutation of the abc virtual indices of lijabc
                Plijabc[ka,kb] = (4.*lijabc[ka,kb] +
                                  1.*lijabc[kb,kc].transpose(0,1,4,2,3) +
                                  1.*lijabc[kc,ka].transpose(0,1,3,4,2) -
                                  2.*lijabc[ka,kc].transpose(0,1,2,4,3) -
                                  2.*lijabc[kc,kb].transpose(0,1,4,3,2) -
                                  2.*lijabc[kb,ka].transpose(0,1,3,2,4))
                eabc[ka,kb] = _get_epqr([0,nvir,ka,mo_e_v,eom.nonzero_vpadding],
                                        [0,nvir,kb,mo_e_v,eom.nonzero_vpadding],
                                        [0,nvir,kc,mo_e_v,eom.nonzero_vpadding],
                                        fac=[-1.,-1.,-1.])
            # Creating denominator
            eij = _get_epq([0,nocc,ki,mo_e_o,eom.nonzero_opadding],
                           [0,nocc,kj,mo_e_o,eom.nonzero_opadding])
            eijabc = (eij[None, None, :, :, None, None, None] +
                      eabc[:, :, None, None, :, :, :])
            denom = eijabc + ea_eval
            denom = 1. / denom
            deltaE += lib.einsum('xyijabc,xyijabc,xyijabc', Plijabc, rijabc, denom)
        deltaE *= 0.5
        deltaE = deltaE.real
        logger.info(eom, "eaccsd energy, star energy, delta energy = %16.12f, %16.12f, %16.12f",
                    ea_eval, ea_eval + deltaE, deltaE)
        e_star.append(ea_eval + deltaE)
    return e_star
class EOMEA(eom_kgccsd.EOMEA):
    """k-point EOM-EA-CCSD for spatial orbitals (RHF reference)."""
    matvec = eaccsd_matvec
    l_matvec = leaccsd_matvec
    get_diag = eaccsd_diag
    ccsd_star_contract = eaccsd_star_contract

    @property
    def nkpts(self):
        """Number of k-points."""
        return len(self.kpts)

    @property
    def ea_vector_desc(self):
        """Description of the EA vector."""
        n_occ = self.nocc
        n_vir = self.nmo - n_occ
        n_kpts = self.nkpts
        return [(n_vir,), (n_kpts, n_kpts, n_occ, n_vir, n_vir)]

    def ea_amplitudes_to_vector(self, t1, t2, kshift=None, kconserv=None):
        """Ground state amplitudes to a vector."""
        flat = nested_to_vector((t1, t2))
        return flat[0]

    def ea_vector_to_amplitudes(self, vec):
        """Ground state vector to amplitudes."""
        return vector_to_nested(vec, self.ea_vector_desc)

    def vector_to_amplitudes(self, vector, kshift=None):
        return self.ea_vector_to_amplitudes(vector)

    def amplitudes_to_vector(self, r1, r2, kshift=None, kconserv=None):
        return self.ea_amplitudes_to_vector(r1, r2)

    def vector_size(self):
        # r1 holds nvir amplitudes; r2 holds nkpts^2 * nocc * nvir^2.
        n_occ = self.nocc
        n_vir = self.nmo - n_occ
        n_kpts = self.nkpts
        return n_vir + n_kpts * n_kpts * n_occ * n_vir * n_vir

    def make_imds(self, eris=None):
        intermediates = _IMDS(self._cc, eris)
        intermediates.make_ea()
        return intermediates
class EOMEA_Ta(EOMEA):
    '''Class for EOM EACCSD(T)*(a) method by Matthews and Stanton.'''

    def make_imds(self, eris=None):
        # Include the T3[2] correction in the EA intermediates.
        intermediates = _IMDS(self._cc, eris=eris)
        intermediates.make_t3p2_ea(self._cc)
        return intermediates
########################################
# EOM-EE-CCSD
########################################
def eeccsd(eom, nroots=1, koopmans=False, guess=None, left=False,
           eris=None, imds=None, partition=None, kptlist=None,
           dtype=None):
    '''See `kernel_ee()` for a description of arguments.'''
    # Generic (singlet + triplet) EE-CCSD driver is not available for the
    # RHF k-point code; use EOMEESinglet / eomee_ccsd_singlet() instead.
    raise NotImplementedError
def eomee_ccsd_singlet(eom, nroots=1, koopmans=False, guess=None, left=False,
                       eris=None, imds=None, diag=None, partition=None,
                       kptlist=None, dtype=None):
    '''See `eom_kgccsd.kernel()` for a description of arguments.'''
    # Delegate the actual Davidson iterations to the generic EE kernel and
    # cache the results on the `eom` object.
    conv, energies, vectors = eom_kgccsd.kernel_ee(
        eom, nroots, koopmans, guess, left, eris=eris, imds=imds,
        diag=diag, partition=partition, kptlist=kptlist, dtype=dtype)
    eom.converged = conv
    eom.e = energies
    eom.v = vectors
    return eom.e, eom.v
def vector_to_amplitudes_singlet(vector, nkpts, nmo, nocc, kconserv):
    '''Transform 1-dimensional array to 3- and 7-dimensional arrays, r1 and r2.

    For example:
        vector: a 1-d array with all r1 elements, and r2 elements whose indices
            satisfy (i k_i a k_a) >= (j k_j b k_b)
        return: [r1, r2], where
            r1 = r_{i k_i}^{a k_a} is a 3-d array whose elements can be
                accessed via r1[k_i, i, a].
            r2 = r_{i k_i, j k_j}^{a k_a, b k_b} is a 7-d array whose elements
                can be accessed via r2[k_i, k_j, k_a, i, j, a, b]
    '''
    # time.clock() was removed in Python 3.8; prefer process_time() when it
    # exists (Python >= 3.3) and fall back to clock() on old interpreters.
    _cpu_time = getattr(time, 'process_time', None) or time.clock
    cput0 = (_cpu_time(), time.time())
    log = logger.Logger(sys.stdout, logger.DEBUG)
    nvir = nmo - nocc
    nov = nocc*nvir
    # r1 occupies the first nkpts*nov elements of the packed vector.
    r1 = vector[:nkpts*nov].copy().reshape(nkpts, nocc, nvir)
    r2 = np.zeros((nkpts**2, nkpts, nov, nov), dtype=vector.dtype)
    idx, idy = np.tril_indices(nov)
    nov2_tril = nov * (nov + 1) // 2
    nov2 = nov * nov
    r2_tril = vector[nkpts*nov:].copy()
    offset = 0
    for ki, ka, kj in kpts_helper.loop_kkk(nkpts):
        kb = kconserv[ki, ka, kj]
        kika = ki * nkpts + ka
        kjkb = kj * nkpts + kb
        if kika == kjkb:
            # Diagonal k-block: only the lower triangle of (ia, jb) is stored;
            # unpack it symmetrically into both (ia, jb) and (jb, ia).
            tmp = r2_tril[offset:offset+nov2_tril]
            r2[kika, kj, idx, idy] = tmp
            r2[kjkb, ki, idy, idx] = tmp
            offset += nov2_tril
        elif kika > kjkb:
            # Off-diagonal k-blocks are stored once for the kika > kjkb
            # representative and mirrored by transposition.
            tmp = r2_tril[offset:offset+nov2].reshape(nov, nov)
            r2[kika, kj] = tmp
            r2[kjkb, ki] = tmp.transpose()
            offset += nov2
    # r2 indices (old): (k_i, k_a), (k_J), (i, a), (J, B)
    # r2 indices (new): k_i, k_J, k_a, i, J, a, B
    r2 = r2.reshape(nkpts, nkpts, nkpts, nocc, nvir, nocc, nvir).transpose(0,2,1,3,5,4,6)
    log.timer("vector_to_amplitudes_singlet", *cput0)
    return [r1, r2]
def amplitudes_to_vector_singlet(r1, r2, kconserv):
    '''Transform 3- and 7-dimensional arrays, r1 and r2, to a 1-dimensional
    array with unique indices.

    For example:
        r1: t_{i k_i}^{a k_a}
        r2: t_{i k_i, j k_j}^{a k_a, b k_b}
        return: a vector with all r1 elements, and r2 elements whose indices
            satisfy (i k_i a k_a) >= (j k_j b k_b)
    '''
    # time.clock() was removed in Python 3.8; prefer process_time() when it
    # exists (Python >= 3.3) and fall back to clock() on old interpreters.
    _cpu_time = getattr(time, 'process_time', None) or time.clock
    cput0 = (_cpu_time(), time.time())
    log = logger.Logger(sys.stdout, logger.DEBUG)
    # r1 indices: k_i, i, a
    nkpts, nocc, nvir = r1.shape
    nov = nocc * nvir
    # r2 indices (old): k_i, k_J, k_a, i, J, a, B
    # r2 indices (new): (k_i, k_a), (k_J), (i, a), (J, B)
    r2 = r2.transpose(0,2,1,3,5,4,6).reshape(nkpts**2, nkpts, nov, nov)
    idx, idy = np.tril_indices(nov)
    nov2_tril = nov * (nov + 1) // 2
    nov2 = nov * nov
    vector = np.empty(r2.size, dtype=r2.dtype)
    offset = 0
    for ki, ka, kj in kpts_helper.loop_kkk(nkpts):
        kb = kconserv[ki, ka, kj]
        kika = ki * nkpts + ka
        kjkb = kj * nkpts + kb
        r2ovov = r2[kika, kj]
        if kika == kjkb:
            # Diagonal k-block: keep only the lower triangle of (ia, jb).
            vector[offset:offset+nov2_tril] = r2ovov[idx, idy]
            offset += nov2_tril
        elif kika > kjkb:
            # Off-diagonal blocks are stored once (kika > kjkb representative).
            vector[offset:offset+nov2] = r2ovov.ravel()
            offset += nov2
    vector = np.hstack((r1.ravel(), vector[:offset]))
    log.timer("amplitudes_to_vector_singlet", *cput0)
    return vector
def join_indices(indices, struct):
    '''Returns a joined (row-major flattened) index for an array of indices.

    Args:
        indices (np.ndarray): an array of indices
        struct (np.ndarray): an array of index ranges (extent of each dimension)

    Returns:
        int: the flattened composite index.

    Raises:
        TypeError: if either argument is not a numpy.ndarray.
        ValueError: if the shapes mismatch or any index is out of range.

    Example:
        indices = np.array((3, 4, 5))
        struct = np.array((10, 10, 10))
        join_indices(indices, struct): 345
    '''
    if not isinstance(indices, np.ndarray) or not isinstance(struct, np.ndarray):
        raise TypeError("Arguments %s and %s should both be numpy.ndarray" %
                        (repr(indices), repr(struct)))
    if indices.size != struct.size:
        raise ValueError("Structure shape mismatch: expected dimension = %d, found %d" %
                         (struct.size, indices.size))
    # Bug fix: a single out-of-range index makes the whole set invalid, so the
    # check must use any(); the original all() only raised when *every* index
    # was out of range, silently accepting partially invalid input.
    if (indices >= struct).any():
        raise ValueError("Indices are out of range")
    result = 0
    for dim in range(struct.size):
        # Row-major (C-order) stride of `dim` is the product of the extents of
        # all following dimensions (np.prod of an empty slice is 1).
        result += indices[dim] * np.prod(struct[dim+1:])
    return result
def eeccsd_matvec(eom, vector, kshift, imds=None, diag=None):
    # Generic (singlet + triplet) EE matvec is not implemented; see
    # eeccsd_matvec_singlet() for the singlet-only version.
    raise NotImplementedError
def eeccsd_matvec_singlet(eom, vector, kshift, imds=None, diag=None):
    """Spin-restricted, k-point EOM-EE-CCSD equations for singlet excitation only.

    Applies the similarity-transformed Hamiltonian Hbar to a packed singlet
    (r1, r2) trial vector and returns the packed result.

    This implementation can be checked against the spin-orbital version in
    `eom_kccsd_ghf.eeccsd_matvec()`.
    """
    # NOTE(review): time.clock() was removed in Python 3.8; needs
    # time.process_time() (or logger.process_clock()) on modern Python.
    cput0 = (time.clock(), time.time())
    log = logger.Logger(eom.stdout, eom.verbose)
    if imds is None: imds = eom.make_imds()
    nocc = eom.nocc
    nmo = eom.nmo
    nvir = nmo - nocc
    nkpts = eom.nkpts
    kconserv = imds.kconserv
    kconserv_r1 = eom.get_kconserv_ee_r1(kshift)
    kconserv_r2 = eom.get_kconserv_ee_r2(kshift)
    r1, r2 = vector_to_amplitudes_singlet(vector, nkpts, nmo, nocc, kconserv_r2)
    # Build antisymmetrized tensors that will be used later
    # antisymmetrized r2   : rbar_ijab = 2 r_ijab - r_ijba
    # antisymmetrized woOoV: wbar_nmie = 2 W_nmie - W_nmei
    # antisymmetrized wvOvV: wbar_amfe = 2 W_amfe - W_amef
    # antisymmetrized woVvO: wbar_mbej = 2 W_mbej - W_mbje
    r2bar = np.zeros_like(r2)
    woOoV_bar = np.zeros_like(imds.woOoV)
    wvOvV_bar = np.zeros_like(imds.wvOvV)
    woVvO_bar = np.zeros_like(imds.woVvO)
    for ki, kj, ka in kpts_helper.loop_kkk(nkpts):
        # rbar_ijab = 2 r_ijab - r_ijba
        # ki - ka + kj - kb = kshift
        kb = kconserv_r2[ki, ka, kj]
        r2bar[ki, kj, ka] = 2. * r2[ki, kj, ka] - r2[ki, kj, kb].transpose(0,1,3,2)
        # wbar_nmie = 2 W_nmie - W_nmei = 2 W_nmie - W_mnie
        # ki->kn, kj->km, ka->ki
        wkn = ki
        wkm = kj
        wki = ka
        # kn + km - ki - ke = G
        # (wke is not referenced below; kept to document the k-conservation)
        wke = kconserv[wkn, wki, wkm]
        woOoV_bar[wkn, wkm, wki] = 2. * imds.woOoV[wkn, wkm, wki] - imds.woOoV[wkm, wkn, wki].transpose(1,0,2,3)
        # wbar_amfe = 2 W_amfe - W_amef
        # ki->ka, kj->km, ka->kf, kb->ke
        wka = ki
        wkm = kj
        wkf = ka
        # ka + km - kf - ke = G
        wke = kconserv[wka, wkf, wkm]
        wvOvV_bar[wka, wkm, wkf] = 2. * imds.wvOvV[wka, wkm, wkf] - imds.wvOvV[wka, wkm, wke].transpose(0,1,3,2)
        # wbar_mbej = 2 W_mbej - W_mbje
        # ki->km, kj->kb, ka->ke
        wkm = ki
        wkb = kj
        wke = ka
        # km + kb - ke - kj = G
        wkj = kconserv[wkm, wke, wkb]
        woVvO_bar[wkm, wkb, wke] = 2. * imds.woVvO[wkm, wkb, wke] - imds.woVoV[wkm, wkb, wkj].transpose(0,1,3,2)
    # ---- Singles block: Hr1 = <ia| Hbar |r1,r2> ----
    Hr1 = np.zeros_like(r1)
    for ki in range(nkpts):
        # ki - ka = kshift
        ka = kconserv_r1[ki]
        # r_ia <- - F_mi r_ma
        # km = ki
        Hr1[ki] -= einsum('mi,ma->ia', imds.Foo[ki], r1[ki])
        # r_ia <- F_ac r_ic
        Hr1[ki] += einsum('ac,ic->ia', imds.Fvv[ka], r1[ki])
        for km in range(nkpts):
            # r_ia <- (2 W_amie - W_maie) r_me
            # km - ke = kshift
            ke = kconserv_r1[km]
            Hr1[ki] += 2. * einsum('maei,me->ia', imds.woVvO[km, ka, ke], r1[km])
            Hr1[ki] -= einsum('maie,me->ia', imds.woVoV[km, ka, ki], r1[km])
            # r_ia <- F_me (2 r_imae - r_miae)
            Hr1[ki] += 2. * einsum('me,imae->ia', imds.Fov[km], r2[ki, km, ka])
            Hr1[ki] -= einsum('me,miae->ia', imds.Fov[km], r2[km, ki, ka])
            for ke in range(nkpts):
                # r_ia <- (2 W_amef - W_amfe) r_imef
                Hr1[ki] += 2. * einsum('amef,imef->ia', imds.wvOvV[ka, km, ke], r2[ki, km, ke])
                # ka + km - ke - kf = G
                kf = kconserv[ka, ke, km]
                Hr1[ki] -= einsum('amfe,imef->ia', imds.wvOvV[ka, km, kf], r2[ki, km, ke])
                # r_ia <- -W_mnie (2 r_mnae - r_nmae)
                # Rename dummy index ke -> kn
                kn = ke
                Hr1[ki] -= 2. * np.einsum('mnie,mnae->ia', imds.woOoV[km, kn, ki], r2[km, kn, ka])
                Hr1[ki] += np.einsum('mnie,nmae->ia', imds.woOoV[km, kn, ki], r2[kn, km, ka])
    # ---- Doubles block: Hr2 = <ijab| Hbar |r1,r2> ----
    Hr2 = np.zeros_like(r2)
    for ki, kj, ka in kpts_helper.loop_kkk(nkpts):
        # ki + kj - ka - kb = kshift
        kb = kconserv_r2[ki, ka, kj]
        # r_ijab <= - F_mj r_imab
        # km = kj
        Hr2[ki, kj, ka] -= einsum('mj,imab->ijab', imds.Foo[kj], r2[ki, kj, ka])
        # r_ijab <= - F_mi r_jmba
        # km = ki
        Hr2[ki, kj, ka] -= einsum('mi,jmba->ijab', imds.Foo[ki], r2[kj, ki, kb])
        # r_ijab <= F_be r_ijae
        Hr2[ki, kj, ka] += einsum('be,ijae->ijab', imds.Fvv[kb], r2[ki, kj, ka])
        # r_ijab <= F_ae r_jibe
        Hr2[ki, kj, ka] += einsum('ae,jibe->ijab', imds.Fvv[ka], r2[kj, ki, kb])
        # r_ijab <= W_abej r_ie
        # ki - ke = kshift
        ke = kconserv_r1[ki]
        Hr2[ki, kj, ka] += einsum('abej,ie->ijab', imds.wvVvO[ka, kb, ke], r1[ki])
        # r_ijab <= W_baei r_je
        # kj - ke = kshift
        ke = kconserv_r1[kj]
        Hr2[ki, kj, ka] += einsum('baei,je->ijab', imds.wvVvO[kb, ka, ke], r1[kj])
        # r_ijab <= - W_mbij r_ma
        # km + kb - ki - kj = G
        # => ki - kb + kj - km = G
        km = kconserv[ki, kb, kj]
        Hr2[ki, kj, ka] -= einsum('mbij,ma->ijab', imds.woVoO[km, kb, ki], r1[km])
        # r_ijab <= - W_maji r_mb
        # km + ka - kj - ki = G
        # => ki -ka + kj - km = G
        km = kconserv[ki, ka, kj]
        Hr2[ki, kj, ka] -= einsum('maji,mb->ijab', imds.woVoO[km, ka, kj], r1[km])
        tmp = np.zeros((nocc, nocc, vir, nvir), dtype=r2.dtype) if False else np.zeros((nocc, nocc, nvir, nvir), dtype=r2.dtype)
        for km in range(nkpts):
            # r_ijab <= (2 W_mbej - W_mbje) r_imae - W_mbej r_imea
            # km + kb - ke - kj = G
            ke = kconserv[km, kj, kb]
            tmp += einsum('mbej,imae->ijab', woVvO_bar[km, kb, ke], r2[ki, km, ka])
            tmp -= einsum('mbej,imea->ijab', imds.woVvO[km, kb, ke], r2[ki, km, ke])
            # r_ijab <= - W_maje r_imeb
            # km + ka - kj - ke = G
            ke = kconserv[km, kj, ka]
            tmp -= einsum('maje,imeb->ijab', imds.woVoV[km, ka, kj], r2[ki, km, ke])
        Hr2[ki, kj, ka] += tmp
        # The following two lines can be obtained by simply transposing tmp:
        # r_ijab <= (2 W_maei - W_maie) r_jmbe - W_maei r_jmeb
        # r_ijab <= - W_mbie r_jmea
        Hr2[kj, ki, kb] += tmp.transpose(1,0,3,2)
        tmp = None
        for km in range(nkpts):
            # r_ijab <= W_abef r_ijef
            # Rename dummy index km -> ke
            ke = km
            Hr2[ki, kj, ka] += einsum('abef,ijef->ijab', imds.wvVvV[ka, kb, ke], r2[ki, kj, ke])
            # r_ijab <= W_mnij r_mnab
            # km + kn - ki - kj = G
            # => ki - km + kj - kn = G
            kn = kconserv[ki, km, kj]
            Hr2[ki, kj, ka] += einsum('mnij,mnab->ijab', imds.woOoO[km, kn, ki], r2[km, kn, ka])
    #
    # r_ijab <= - W_mnef t_imab (2 r_jnef - r_jnfe)
    # r_ijab <= - W_mnef t_jmba (2 r_inef - r_infe)
    # r_ijab <= - W_mnef t_ijae (2 r_mnbf - r_mnfb)
    # r_ijab <= - W_mnef t_jibe (2 r_mnaf - r_mnfa)
    #
    # r_ijab <= - (2 W_nmie - W_nmei) t_jnba r_me
    # r_ijab <= - (2 W_nmje - W_nmej) t_inab r_me
    # r_ijab <= + (2 W_amfe - W_amef) t_jibf r_me
    # r_ijab <= + (2 W_bmfe - W_bmef) t_ijaf r_me
    #
    # First, build intermediates M = W.r
    #
    wr2_oo = np.zeros((nkpts, nocc, nocc), dtype=r2.dtype)
    wr2_vv = np.zeros((nkpts, nvir, nvir), dtype=r2.dtype)
    wr1_oo = np.zeros_like(wr2_oo)
    wr1_vv = np.zeros_like(wr2_vv)
    for kj in range(nkpts):
        # Wr2_jm = W_mnef (2 r_jnef - r_jnfe) = W_mnef rbar_jnef
        # km + kn - ke - kf = G
        # kj + kn - ke - kf = kshift
        # => kj - km = kshift
        km = kconserv_r1[kj]
        # x: kn, y: ke
        wr2_oo[kj] += einsum('xymnef,xyjnef->jm', imds.woOvV[km], r2bar[kj])
        # Wr2_eb = W_mnef (2 r_mnbf - r_mnfb) = W_mnef rbar_mnbf
        ke = kj
        # km + kn - ke - kf = G
        # km + kn - kb - kf = kshift
        # => ke - kb = kshift
        kb = kconserv_r1[ke]
        # x: km, y: kn
        wr2_vv[ke] += einsum('xymnef,xymnbf->eb', imds.woOvV[:, :, ke], r2bar[:, :, kb])
        # Wr1_in = (2 W_nmie - W_nmei) r_me = wbar_nmie r_me
        ki = kj
        # kn + km - ki - ke = G
        # km - ke = kshift
        # => ki - kn = kshift
        kn = kconserv_r1[ki]
        # x: km
        wr1_oo[ki] += einsum('xnmie,xme->in', woOoV_bar[kn, :, ki], r1)
        # Wr1_fa = (2 W_amfe - W_amef) r_me = wbar_amfe r_me
        kf = kj
        # ka + km - kf - ke = G
        # km - ke = kshift
        # => kf - ka = kshift
        ka = kconserv_r1[kf]
        # x: km
        wr1_vv[kf] += einsum('xamfe,xme->fa', wvOvV_bar[ka, :, kf], r1)
    #
    # Second, compute the whole contraction
    #
    for ki, kj, ka in kpts_helper.loop_kkk(nkpts):
        # ki + kj - ka - kb = kshift
        kb = kconserv_r2[ki, ka, kj]
        # r_ijab <= - Wr2_jm t_imab
        # kj - km = kshift
        km = kconserv_r1[kj]
        Hr2[ki, kj, ka] -= einsum('jm,imab->ijab', wr2_oo[kj], imds.t2[ki, km, ka])
        # r_ijab <= - Wr2_im t_jmba
        # ki - km = kshift
        km = kconserv_r1[ki]
        Hr2[ki, kj, ka] -= einsum('im,jmba->ijab', wr2_oo[ki], imds.t2[kj, km, kb])
        # r_ijab <= - Wr2_eb t_ijae
        # ki + kj - ka - ke = G
        ke = kconserv[ki, ka, kj]
        Hr2[ki, kj, ka] -= einsum('eb,ijae->ijab', wr2_vv[ke], imds.t2[ki, kj, ka])
        # r_ijab <= - Wr2_ea t_jibe
        # kj + ki - kb - ke = G
        ke = kconserv[kj, kb, ki]
        Hr2[ki, kj, ka] -= einsum('ea,jibe->ijab', wr2_vv[ke], imds.t2[kj, ki, kb])
        # r_ijab <= - Wr1_in t_jnba
        # ki - kn = kshift
        kn = kconserv_r1[ki]
        Hr2[ki, kj, ka] -= einsum('in,jnba->ijab', wr1_oo[ki], imds.t2[kj, kn, kb])
        # r_ijab <= - Wr1_jn t_inab
        # kj - kn = kshift
        kn = kconserv_r1[kj]
        Hr2[ki, kj, ka] -= einsum('jn,inab->ijab', wr1_oo[kj], imds.t2[ki, kn, ka])
        # r_ijab <= Wr1_fa t_jibf
        # kj + ki - kb - kf = G
        kf = kconserv[kj, kb, ki]
        Hr2[ki, kj, ka] += einsum('fa,jibf->ijab', wr1_vv[kf], imds.t2[kj, ki, kb])
        # r_ijab <= Wr1_fb t_ijaf
        # ki + kj - ka - kf = G
        kf = kconserv[ki, ka, kj]
        Hr2[ki, kj, ka] += einsum('fb,ijaf->ijab', wr1_vv[kf], imds.t2[ki, kj, ka])
    vector = amplitudes_to_vector_singlet(Hr1, Hr2, kconserv_r2)
    log.timer("matvec EOMEE Singlet", *cput0)
    return vector
def eeccsd_diag(eom, kshift=0, imds=None):
    '''Diagonal elements of similarity-transformed Hamiltonian

    Returns the packed (r1, r2) diagonal used as the Davidson preconditioner.
    '''
    if imds is None: imds = eom.make_imds()
    t1, t2 = imds.t1, imds.t2
    nkpts, nocc, nvir = t1.shape
    kconserv = eom.kconserv
    kconserv_r1 = eom.get_kconserv_ee_r1(kshift)
    kconserv_r2 = eom.get_kconserv_ee_r2(kshift)
    # Singles diagonal: orbital-energy differences plus 2e W diagonals
    Hr1 = np.zeros((nkpts, nocc, nvir), dtype=t1.dtype)
    for ki in range(nkpts):
        ka = kconserv_r1[ki]
        Hr1[ki] -= imds.Foo[ki].diagonal()[:,None]
        Hr1[ki] += imds.Fvv[ka].diagonal()[None,:]
        Hr1[ki] += np.einsum('iaai->ia', imds.woVvO[ki, ka, ka])
        Hr1[ki] -= np.einsum('iaia->ia', imds.woVoV[ki, ka, ki])
    # Doubles diagonal
    Hr2 = np.zeros((nkpts, nkpts, nkpts, nocc, nocc, nvir, nvir), dtype=t1.dtype)
    # TODO Allow partition='mp'
    if eom.partition == "mp":
        raise NotImplementedError
    else:
        for ki, kj, ka in kpts_helper.loop_kkk(nkpts):
            kb = kconserv_r2[ki, ka, kj]
            # Fock (one-body) contributions
            Hr2[ki, kj, ka] -= imds.Foo[ki].diagonal()[:, None, None, None]
            Hr2[ki, kj, ka] -= imds.Foo[kj].diagonal()[None, :, None, None]
            Hr2[ki, kj, ka] += imds.Fvv[ka].diagonal()[None, None, :, None]
            Hr2[ki, kj, ka] += imds.Fvv[kb].diagonal()[None, None, None, :]
            # Diagonal parts of the ring (ovvo/ovov) intermediates
            Hr2[ki, kj, ka] += np.einsum('jbbj->jb', imds.woVvO[kj, kb, kb])[None, :, None, :]
            Hr2[ki, kj, ka] -= np.einsum('jbjb->jb', imds.woVoV[kj, kb, kj])[None, :, None, :]
            Hr2[ki, kj, ka] -= np.einsum('jaja->ja', imds.woVoV[kj, ka, kj])[None, :, :, None]
            Hr2[ki, kj, ka] -= np.einsum('ibib->ib', imds.woVoV[ki, kb, ki])[:, None, None, :]
            Hr2[ki, kj, ka] += np.einsum('iaai->ia', imds.woVvO[ki, ka, ka])[:, None, :, None]
            Hr2[ki, kj, ka] -= np.einsum('iaia->ia', imds.woVoV[ki, ka, ki])[:, None, :, None]
            # Ladder (vvvv/oooo) diagonals
            Hr2[ki, kj, ka] += np.einsum('abab->ab', imds.wvVvV[ka, kb, ka])[None, None, :, :]
            Hr2[ki, kj, ka] += np.einsum('ijij->ij', imds.woOoO[ki, kj, ki])[:, :, None, None]
            # Quadratic t2.W terms
            # ki - ka + km - kb = G
            # => ka - ki + kb - km = G
            km = kconserv[ka, ki, kb]
            Hr2[ki, kj, ka] -= np.einsum('imab,imab->iab', imds.woOvV[ki, km, ka], imds.t2[ki, km, ka])[:, None, :, :]
            # km - ka + kj - kb = G
            # => ka - kj + kb - km = G
            km = kconserv[ka, kj, kb]
            Hr2[ki, kj, ka] -= np.einsum('mjab,mjab->jab', imds.woOvV[km, kj, ka], imds.t2[km, kj, ka])[None, :, :, :]
            # ki - ka + kj - ke = G
            Hr2[ki, kj, ka] -= np.einsum('ijae,ijae->ija', imds.woOvV[ki, kj, ka], imds.t2[ki, kj, ka])[:, :, :, None]
            # ki - ke + kj - kb = G
            ke = kconserv[ki, kb, kj]
            Hr2[ki, kj, ka] -= np.einsum('ijeb,ijeb->ijb', imds.woOvV[ki, kj, ke], imds.t2[ki, kj, ke])[:, :, None, :]
    vector = amplitudes_to_vector_singlet(Hr1, Hr2, kconserv_r2)
    return vector
def eeccsd_matvec_singlet_Hr1(eom, vector, kshift, imds=None):
    '''A mini version of eeccsd_matvec_singlet(): applies only the r1-r1
    block of Hbar to a packed r1 vector and returns the packed result.'''
    if imds is None:
        imds = eom.make_imds()
    nkpts = eom.nkpts
    nocc = eom.nocc
    nvir = eom.nmo - nocc
    r1_size = nkpts * nocc * nvir
    kconserv_r1 = eom.get_kconserv_ee_r1(kshift)
    if len(vector) != r1_size:
        raise ValueError("vector length mismatch: expected {0}, "
                         "found {1}".format(r1_size, len(vector)))
    r1 = vector.reshape(nkpts, nocc, nvir)
    Hr1 = np.zeros_like(r1)
    for k_i in range(nkpts):
        # Momentum conservation for r1: k_i - k_a = kshift
        k_a = kconserv_r1[k_i]
        # One-body (Fock-like) pieces: r_ia <- F_ac r_ic - F_mi r_ma (km = ki)
        Hr1[k_i] += einsum('ac,ic->ia', imds.Fvv[k_a], r1[k_i])
        Hr1[k_i] -= einsum('mi,ma->ia', imds.Foo[k_i], r1[k_i])
        # Two-body piece: r_ia <- (2 W_amie - W_maie) r_me with km - ke = kshift
        for k_m in range(nkpts):
            k_e = kconserv_r1[k_m]
            Hr1[k_i] += 2. * einsum('maei,me->ia', imds.woVvO[k_m, k_a, k_e], r1[k_m])
            Hr1[k_i] -= einsum('maie,me->ia', imds.woVoV[k_m, k_a, k_i], r1[k_m])
    return Hr1.ravel()
def eeccsd_cis_approx_slow(eom, kshift, nroots=1, imds=None, **kwargs):
    '''Build initial R vector through diagonalization of <r1|Hbar|r1>

    This method evaluates the matrix elements of Hbar in r1 space in the following way:
        - 1st col of Hbar = matvec(r1_col1) where r1_col1 = [1, 0, 0, 0, ...]
        - 2nd col of Hbar = matvec(r1_col2) where r1_col2 = [0, 1, 0, 0, ...]
        - and so on
    Note that such evaluation has N^3 cost, but error free (because matvec() has been proven correct).

    Returns:
        (eigval, eigvec): the `nroots` lowest eigenvalues and the
        corresponding eigenvector columns of the r1-space Hbar.
    '''
    # time.clock() was removed in Python 3.8; prefer process_time() when it
    # exists (Python >= 3.3) and fall back to clock() on old interpreters.
    _cpu_time = getattr(time, 'process_time', None) or time.clock
    cput0 = (_cpu_time(), time.time())
    log = logger.Logger(eom.stdout, eom.verbose)
    if imds is None: imds = eom.make_imds()
    nkpts, nocc, nvir = imds.t1.shape
    dtype = imds.t1.dtype
    r1_size = nkpts * nocc * nvir
    # Build Hbar column-by-column by applying the r1-only matvec to unit vectors.
    H1 = np.zeros([r1_size, r1_size], dtype=dtype)
    for col in range(r1_size):
        vec = np.zeros(r1_size, dtype=dtype)
        vec[col] = 1.0
        H1[:, col] = eeccsd_matvec_singlet_Hr1(eom, vec, kshift, imds=imds)
    # Hbar is non-Hermitian, hence the general eigensolver; keep the nroots
    # lowest eigenvalues (argsort on complex values orders by real part first).
    eigval, eigvec = np.linalg.eig(H1)
    idx = eigval.argsort()[:nroots]
    eigval = eigval[idx]
    eigvec = eigvec[:, idx]
    log.timer("EOMEE CIS approx", *cput0)
    return eigval, eigvec
def get_init_guess_cis(eom, kshift, nroots=1, imds=None, **kwargs):
    '''Build initial R vector through diagonalization of <r1|Hbar|r1>

    Check eeccsd_cis_approx_slow() for details.
    '''
    if imds is None:
        imds = eom.make_imds()
    nkpts, nocc, nvir = imds.t1.shape
    dtype = imds.t1.dtype
    r1_size = nkpts * nocc * nvir
    vector_size = eom.vector_size(kshift)
    eigval, eigvec = eeccsd_cis_approx_slow(eom, kshift, nroots, imds)
    # Embed each r1-space eigenvector into a zero-padded full (r1, r2) vector.
    guess = []
    for root in range(nroots):
        full_vec = np.zeros(int(vector_size), dtype=dtype)
        full_vec[:r1_size] = eigvec[:, root]
        guess.append(full_vec)
    return guess
def cis_easy(eom, nroots=1, kptlist=None, imds=None, **kwargs):
    '''An easy implementation of k-point CIS based on EOMCC infrastructure.

    CIS is recovered by diagonalizing the r1-block of the *bare* Hamiltonian,
    obtained by rebuilding the EOM intermediates with zeroed t1/t2.

    NOTE(review): `kptlist` has no default handling — passing kptlist=None
    raises TypeError at len(); callers must supply a list of k-shifts.
    '''
    print("\n******** <function 'pyscf.pbc.cc.eom_kccsd_rhf.cis_easy'> ********")
    if imds is None:
        cc = eom._cc
        t1_old, t2_old = cc.t1.copy(), cc.t2.copy()
        # Zero t1, t2
        cc.t1 = np.zeros_like(t1_old)
        cc.t2 = np.zeros_like(t2_old)
        # Remake intermediates using zero t1, t2 => get bare Hamiltonian back
        imds = eom.make_imds()
        # Recover t1, t2 so that the following calculations based on `eom` are
        # not affected.
        # (The intermediate None assignment looks redundant; presumably it
        # forces a cache reset via attribute setters — TODO confirm.)
        cc.t1, cc.t2 = None, None
        cc.t1, cc.t2 = t1_old, t2_old
    evals = [None]*len(kptlist)
    evecs = [None]*len(kptlist)
    for k, kshift in enumerate(kptlist):
        print("\nkshift =", kshift)
        eigval, eigvec = eeccsd_cis_approx_slow(eom, kshift, nroots, imds)
        evals[k] = eigval
        evecs[k] = eigvec
        for i in range(nroots):
            print('CIS root {:d} E = {:.16g}'.format(i, eigval[i].real))
    return evals, evecs
class EOMEE(eom_kgccsd.EOMEE):
    """Base class for k-point EOM-EE-CCSD on an RHF reference."""
    kernel = eeccsd
    eeccsd = eeccsd
    matvec = eeccsd_matvec
    get_diag = eeccsd_diag

    @property
    def nkpts(self):
        """Number of k-points."""
        return len(self.kpts)

    def vector_size(self, kshift=0):
        # Each spin case (singlet/triplet/spin-flip) defines its own packing.
        raise NotImplementedError

    def make_imds(self, eris=None):
        intermediates = _IMDS(self._cc, eris)
        intermediates.make_ee()
        return intermediates
class EOMEESinglet(EOMEE):
    """k-point EOM-EE-CCSD restricted to singlet excitations."""
    kernel = eomee_ccsd_singlet
    eomee_ccsd_singlet = eomee_ccsd_singlet
    matvec = eeccsd_matvec_singlet
    get_init_guess = get_init_guess_cis
    cis = cis_easy

    def vector_size(self, kshift=0):
        '''Size of the linear excitation operator R vector based on spatial
        orbital basis.

        r1 : r_{i k_i}^{a k_a}
        r2 : r_{i k_i, J k_J}^{a k_a, B k_B}

        Only r1aa, r2abab spin blocks are considered.
        '''
        nocc = self.nocc
        nvir = self.nmo - nocc
        nov = nocc * nvir
        nkpts = self.nkpts
        kconserv = self.get_kconserv_ee_r2(kshift)
        size = nkpts * nov  # r1 block
        for ki, kj, ka in kpts_helper.loop_kkk(nkpts):
            kb = kconserv[ki, ka, kj]
            row = ki * nkpts + ka
            col = kj * nkpts + kb
            if row == col:
                # diagonal k-block stores only the lower triangle of (ia, jb)
                size += nov * (nov + 1) // 2
            elif row > col:
                # off-diagonal blocks are stored once
                size += nov * nov
        return size

    def gen_matvec(self, kshift, imds=None, left=False, **kwargs):
        if imds is None:
            imds = self.make_imds()
        diag = self.get_diag(kshift, imds)
        if left:
            # TODO allow left vectors to be computed
            raise NotImplementedError

        def matvec(xs):
            return [self.matvec(x, kshift, imds, diag) for x in xs]
        return matvec, diag

    def vector_to_amplitudes(self, vector, kshift=None, nkpts=None, nmo=None, nocc=None, kconserv=None):
        if nmo is None:
            nmo = self.nmo
        if nocc is None:
            nocc = self.nocc
        if nkpts is None:
            nkpts = self.nkpts
        if kconserv is None:
            kconserv = self.get_kconserv_ee_r2(kshift)
        return vector_to_amplitudes_singlet(vector, nkpts, nmo, nocc, kconserv)

    def amplitudes_to_vector(self, r1, r2, kshift=None, kconserv=None):
        if kconserv is None:
            kconserv = self.get_kconserv_ee_r2(kshift)
        return amplitudes_to_vector_singlet(r1, r2, kconserv)
class EOMEETriplet(EOMEE):
    # Placeholder: triplet EE-CCSD is not implemented for the k-point RHF code.
    def vector_size(self, kshift=0):
        return None
class EOMEESpinFlip(EOMEE):
    # Placeholder: spin-flip EE-CCSD is not implemented for the k-point RHF code.
    def vector_size(self, kshift=0):
        return None
imd = imdk  # shorthand alias for the k-point intermediates module used by _IMDS below
class _IMDS:
    '''Container building and holding the EOM-CCSD intermediates (F and W
    tensors) for IP/EA/EE calculations at k-points.'''
    # Identical to molecular rccsd_slow
    def __init__(self, cc, eris=None):
        self.verbose = cc.verbose
        self.stdout = cc.stdout
        self.t1 = cc.t1
        self.t2 = cc.t2
        if eris is None:
            eris = cc.ao2mo()
        self.eris = eris
        self.kconserv = cc.khelper.kconserv
        self.made_ip_imds = False
        self.made_ea_imds = False
        self._made_shared_2e = False
        # TODO: check whether to hold all stuff in memory
        # If the ERIs are disk-backed (feri1 present), stage the W tensors in
        # a temporary HDF5 file as well; otherwise keep them in memory.
        if getattr(self.eris, "feri1", None):
            self._fimd = lib.H5TmpFile()
        else:
            self._fimd = None
    def _make_shared_1e(self):
        # One-body intermediates (Loo, Lvv, Fov) shared by IP/EA/EE.
        # NOTE(review): time.clock() was removed in Python 3.8; the timing
        # tuples in this class need time.process_time() on modern Python.
        cput0 = (time.clock(), time.time())
        log = logger.Logger(self.stdout, self.verbose)
        t1, t2, eris = self.t1, self.t2, self.eris
        kconserv = self.kconserv
        self.Loo = imd.Loo(t1, t2, eris, kconserv)
        self.Lvv = imd.Lvv(t1, t2, eris, kconserv)
        self.Fov = imd.cc_Fov(t1, t2, eris, kconserv)
        log.timer('EOM-CCSD shared one-electron intermediates', *cput0)
    def _make_shared_2e(self):
        # Two-body intermediates with two virtual indices, shared by IP/EA/EE.
        cput0 = (time.clock(), time.time())
        log = logger.Logger(self.stdout, self.verbose)
        t1, t2, eris = self.t1, self.t2, self.eris
        kconserv = self.kconserv
        if self._fimd is not None:
            nkpts, nocc, nvir = t1.shape
            ovov_dest = self._fimd.create_dataset('ovov', (nkpts, nkpts, nkpts, nocc, nvir, nocc, nvir), t1.dtype.char)
            ovvo_dest = self._fimd.create_dataset('ovvo', (nkpts, nkpts, nkpts, nocc, nvir, nvir, nocc), t1.dtype.char)
        else:
            ovov_dest = ovvo_dest = None
        # 2 virtuals
        self.Wovov = imd.Wovov(t1, t2, eris, kconserv, ovov_dest)
        self.Wovvo = imd.Wovvo(t1, t2, eris, kconserv, ovvo_dest)
        self.Woovv = eris.oovv
        log.timer('EOM-CCSD shared two-electron intermediates', *cput0)
    def make_ip(self, ip_partition=None):
        # Build the intermediates needed by IP-EOM-CCSD (0 or 1 virtual index).
        self._make_shared_1e()
        if self._made_shared_2e is False and ip_partition != 'mp':
            self._make_shared_2e()
            self._made_shared_2e = True
        cput0 = (time.clock(), time.time())
        log = logger.Logger(self.stdout, self.verbose)
        t1, t2, eris = self.t1, self.t2, self.eris
        kconserv = self.kconserv
        if self._fimd is not None:
            nkpts, nocc, nvir = t1.shape
            oooo_dest = self._fimd.create_dataset('oooo', (nkpts, nkpts, nkpts, nocc, nocc, nocc, nocc), t1.dtype.char)
            ooov_dest = self._fimd.create_dataset('ooov', (nkpts, nkpts, nkpts, nocc, nocc, nocc, nvir), t1.dtype.char)
            ovoo_dest = self._fimd.create_dataset('ovoo', (nkpts, nkpts, nkpts, nocc, nvir, nocc, nocc), t1.dtype.char)
        else:
            oooo_dest = ooov_dest = ovoo_dest = None
        # 0 or 1 virtuals
        if ip_partition != 'mp':
            self.Woooo = imd.Woooo(t1, t2, eris, kconserv, oooo_dest)
        self.Wooov = imd.Wooov(t1, t2, eris, kconserv, ooov_dest)
        self.Wovoo = imd.Wovoo(t1, t2, eris, kconserv, ovoo_dest)
        self.made_ip_imds = True
        log.timer('EOM-CCSD IP intermediates', *cput0)
    def make_t3p2_ip(self, cc):
        # IP intermediates including the perturbative T3[2] correction
        # (EOM-CCSD(T)a of Matthews and Stanton).
        cput0 = (time.clock(), time.time())
        t1, t2, eris = cc.t1, cc.t2, self.eris
        delta_E_tot, pt1, pt2, Wovoo, Wvvvo = \
            imd.get_t3p2_imds(cc, t1, t2, eris)
        self.t1 = pt1
        self.t2 = pt2
        self._made_shared_2e = False  # Force update
        self.make_ip()  # Make after t1/t2 updated
        self.Wovoo = self.Wovoo + Wovoo
        self.made_ip_imds = True
        logger.timer_debug1(self, 'EOM-CCSD(T)a IP intermediates', *cput0)
        return self
    def make_ea(self, ea_partition=None):
        # Build the intermediates needed by EA-EOM-CCSD (3 or 4 virtual indices).
        self._make_shared_1e()
        if self._made_shared_2e is False and ea_partition != 'mp':
            self._make_shared_2e()
            self._made_shared_2e = True
        cput0 = (time.clock(), time.time())
        log = logger.Logger(self.stdout, self.verbose)
        t1, t2, eris = self.t1, self.t2, self.eris
        kconserv = self.kconserv
        if self._fimd is not None:
            nkpts, nocc, nvir = t1.shape
            vovv_dest = self._fimd.create_dataset('vovv', (nkpts, nkpts, nkpts, nvir, nocc, nvir, nvir), t1.dtype.char)
            vvvo_dest = self._fimd.create_dataset('vvvo', (nkpts, nkpts, nkpts, nvir, nvir, nvir, nocc), t1.dtype.char)
            # vvvv_dest is only created (and later only referenced) when
            # eris.vvvv is available, so it is never used uninitialized.
            if eris.vvvv is not None:
                vvvv_dest = self._fimd.create_dataset('vvvv', (nkpts, nkpts, nkpts, nvir, nvir, nvir, nvir), t1.dtype.char)
        else:
            vovv_dest = vvvo_dest = vvvv_dest = None
        # 3 or 4 virtuals
        self.Wvovv = imd.Wvovv(t1, t2, eris, kconserv, vovv_dest)
        if ea_partition == 'mp' and np.all(t1 == 0):
            self.Wvvvo = imd.Wvvvo(t1, t2, eris, kconserv, vvvo_dest)
        else:
            if eris.vvvv is None:
                self.Wvvvv = None
            else:
                self.Wvvvv = imd.Wvvvv(t1, t2, eris, kconserv, vvvv_dest)
            self.Wvvvo = imd.Wvvvo(t1, t2, eris, kconserv, self.Wvvvv, vvvo_dest)
        self.made_ea_imds = True
        log.timer('EOM-CCSD EA intermediates', *cput0)
    def make_t3p2_ea(self, cc):
        # EA intermediates including the perturbative T3[2] correction
        # (EOM-CCSD(T)a of Matthews and Stanton).
        cput0 = (time.clock(), time.time())
        t1, t2, eris = cc.t1, cc.t2, self.eris
        delta_E_tot, pt1, pt2, Wovoo, Wvvvo = \
            imd.get_t3p2_imds(cc, t1, t2, eris)
        self.t1 = pt1
        self.t2 = pt2
        self._made_shared_2e = False  # Force update
        self.make_ea()  # Make after t1/t2 updated
        self.Wvvvo = self.Wvvvo + Wvvvo
        self.made_ea_imds = True
        logger.timer_debug1(self, 'EOM-CCSD(T)a EA intermediates', *cput0)
        return self
    def make_t3p2_ip_ea(self, cc):
        # Both IP and EA intermediates with the T3[2] correction; cheaper than
        # calling make_t3p2_ip() and make_t3p2_ea() separately because
        # get_t3p2_imds() is evaluated only once.
        cput0 = (time.clock(), time.time())
        t1, t2, eris = cc.t1, cc.t2, self.eris
        delta_E_tot, pt1, pt2, Wovoo, Wvvvo = \
            imd.get_t3p2_imds(cc, t1, t2, eris)
        self.t1 = pt1
        self.t2 = pt2
        self._made_shared_2e = False  # Force update
        self.make_ip()  # Make after t1/t2 updated
        self.make_ea()  # Make after t1/t2 updated
        self.Wovoo = self.Wovoo + Wovoo
        self.Wvvvo = self.Wvvvo + Wvvvo
        self.made_ip_imds = True
        self.made_ea_imds = True
        logger.timer_debug1(self, 'EOM-CCSD(T)a IP/EA intermediates', *cput0)
        return self
    def make_ee(self, ee_partition=None):
        # Build the EE intermediates, reusing IP/EA tensors when available.
        self._make_shared_1e()
        if self._made_shared_2e is False:
            self._make_shared_2e()
            self._made_shared_2e = True
        cput0 = (time.clock(), time.time())
        log = logger.Logger(self.stdout, self.verbose)
        t1, t2, eris = self.t1, self.t2, self.eris
        kconserv = self.kconserv
        # Rename imds to match the notations in pyscf.cc.eom_rccsd
        self.Foo = self.Loo
        self.Fvv = self.Lvv
        self.woOvV = self.Woovv
        self.woVvO = self.Wovvo
        self.woVoV = self.Wovov
        if not self.made_ip_imds:
            # 0 or 1 virtuals
            self.woOoO = imd.Woooo(t1, t2, eris, kconserv)
            self.woOoV = imd.Wooov(t1, t2, eris, kconserv)
            self.woVoO = imd.Wovoo(t1, t2, eris, kconserv)
        else:
            self.woOoO = self.Woooo
            self.woOoV = self.Wooov
            self.woVoO = self.Wovoo
        if not self.made_ea_imds:
            # 3 or 4 virtuals
            self.wvOvV = imd.Wvovv(t1, t2, eris, kconserv)
            self.wvVvV = imd.Wvvvv(t1, t2, eris, kconserv)
            self.wvVvO = imd.Wvvvo(t1, t2, eris, kconserv, self.wvVvV)
        else:
            self.wvOvV = self.Wvovv
            self.wvVvV = self.Wvvvv
            self.wvVvO = self.Wvvvo
        self.made_ee_imds = True
        log.timer('EOM-CCSD EE intermediates', *cput0)
    def get_Wvvvv(self, ka, kb, kc):
        # Return one (ka, kb, kc) block of Wvvvv; computed on the fly when the
        # full tensor was not stored (eris.vvvv is None).
        if not self.made_ea_imds:
            self.make_ea()
        if self.Wvvvv is None:
            return imd.get_Wvvvv(self.t1, self.t2, self.eris, self.kconserv,
                                 ka, kb, kc)
        else:
            return self.Wvvvv[ka,kb,kc]
| gkc1000/pyscf | pyscf/pbc/cc/eom_kccsd_rhf.py | Python | apache-2.0 | 68,814 | [
"PySCF"
] | fcda5ecb555ca095266df76de522f00cccd6492ea5b02803f8983ad9292aa53c |
#########
# firstTry.py
# This program is part of the online PS-Drone-API-tutorial on www.playsheep.de/drone.
# It shows how to do basic movements with a Parrot AR.Drone 2.0 using the PS-Drone-API.
# Dependencies: a POSIX OS, PS-Drone-API 2.0 beta or higher.
# (w) J. Philipp de Graaff, www.playsheep.de, 2014
##########
# LICENCE:
# Artistic License 2.0 as seen on http://opensource.org/licenses/artistic-license-2.0 (retrieved December 2014)
# Visit www.playsheep.de/drone or see the PS-Drone-API-documentation for an abstract from the Artistic License 2.0.
###########
import time
import api.ps_drone as ps_drone     # Imports the PS-Drone-API

drone = ps_drone.Drone()            # Initializes the PS-Drone-API
drone.startup()                     # Connects to the drone and starts subprocesses

drone.takeoff()                     # Drone starts
time.sleep(7.5)                     # Gives the drone time to start

drone.moveForward()                 # Drone flies forward...
time.sleep(2)                       # ... for two seconds
drone.stop()                        # Drone stops...
time.sleep(2)                       # ... needs, like a car, time to stop

drone.moveBackward(0.25)            # Drone flies backward with a quarter speed...
time.sleep(1.5)                     # ... for one and a half seconds
drone.stop()                        # Drone stops
time.sleep(2)

drone.setSpeed(1.0)                 # Sets default moving speed to 1.0 (=100%)
# Fixed: the original used the Python-2-only "print x" statement form;
# the call form below works on both Python 2 and Python 3.
print(drone.setSpeed())             # Shows the default moving speed

drone.turnLeft()                    # Drone moves full speed to the left...
time.sleep(2)                       # ... for two seconds
drone.stop()                        # Drone stops
time.sleep(2)

drone.land()                        # Drone lands
| reixd/ps-drone | tutorials/firstTry.py | Python | artistic-2.0 | 1,714 | [
"VisIt"
] | 2dd21009f069d151dc5bc81cb8655a6ffca4eb3d6f16f1e59222a62497fb9b84 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2018 David Arroyo Menéndez
# Author: David Arroyo Menéndez <davidam@gnu.org>
# Maintainer: David Arroyo Menéndez <davidam@gnu.org>
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GNU Emacs; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA,
# Dimensions in a netCDF file.
from netCDF4 import Dataset

# Open in append mode so groups/dimensions are added to the existing file.
rootgrp = Dataset("test.nc", "a")
try:
    fcstgrp = rootgrp.createGroup("forecasts")
    analgrp = rootgrp.createGroup("analyses")
    print(rootgrp.groups)

    # A size of None makes the dimension unlimited (it can grow).
    level = rootgrp.createDimension("level", None)
    time = rootgrp.createDimension("time", None)
    lat = rootgrp.createDimension("lat", 73)
    lon = rootgrp.createDimension("lon", 144)

    print(rootgrp.dimensions)
    print(len(lon))
    print(lon.isunlimited())
    print(time.isunlimited())

    for dimobj in rootgrp.dimensions.values():
        print(dimobj)
finally:
    # Fixed: the original never closed the dataset, leaking the handle and
    # risking unflushed changes; close() flushes everything to disk.
    rootgrp.close()
| davidam/python-examples | netcdf/netcdf-example4.py | Python | gpl-3.0 | 1,434 | [
"NetCDF"
] | f467ae2ffa7c91c3b9dfb684946162647e220e45bf8232b7e0cf20c588b3b811 |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from zoo.orca.automl.xgboost.XGBoost import XGBoostModelBuilder
from zoo.orca.automl.auto_estimator import AutoEstimator
class AutoXGBClassifier(AutoEstimator):
    def __init__(self,
                 logs_dir="/tmp/auto_xgb_classifier_logs",
                 cpus_per_trial=1,
                 name=None,
                 **xgb_configs
                 ):
        """
        Automated xgboost classifier

        :param logs_dir: Local directory for logs and results; defaults to
            "/tmp/auto_xgb_classifier_logs".
        :param cpus_per_trial: Int. Number of cpus per trial (default 1). This value
            is also used as n_jobs, the number of parallel threads xgboost runs with,
            so don't pass n_jobs in xgb_configs.
        :param name: Name of the auto xgboost classifier.
        :param xgb_configs: Any other scikit-learn-style xgboost parameters; see
            https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn
            for the available names.
        """
        builder = XGBoostModelBuilder(model_type='classifier',
                                      cpus_per_trial=cpus_per_trial,
                                      **xgb_configs)
        if cpus_per_trial:
            trial_resources = {"cpu": cpus_per_trial}
        else:
            trial_resources = None
        super().__init__(model_builder=builder,
                         logs_dir=logs_dir,
                         resources_per_trial=trial_resources,
                         name=name)
class AutoXGBRegressor(AutoEstimator):
    def __init__(self,
                 logs_dir="~/auto_xgb_regressor_logs",
                 cpus_per_trial=1,
                 name=None,
                 **xgb_configs
                 ):
        """
        Automated xgboost regressor

        :param logs_dir: Local directory to save logs and results. It defaults to
            "~/auto_xgb_regressor_logs"
        :param cpus_per_trial: Int. Number of cpus for each trial. The value will also be assigned
            to n_jobs, which is the number of parallel threads used to run xgboost.
        :param name: Name of the auto xgboost regressor.
        :param xgb_configs: Other scikit learn xgboost parameters. You may refer to
           https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn
           for the parameter names to specify. Note that we will directly use cpus_per_trial value
           for n_jobs in xgboost and you shouldn't specify n_jobs again.
        """
        xgb_model_builder = XGBoostModelBuilder(model_type='regressor',
                                                cpus_per_trial=cpus_per_trial,
                                                **xgb_configs)
        resources_per_trial = {"cpu": cpus_per_trial} if cpus_per_trial else None
        super().__init__(model_builder=xgb_model_builder,
                         logs_dir=logs_dir,
                         resources_per_trial=resources_per_trial,
                         name=name)
| intel-analytics/analytics-zoo | pyzoo/zoo/orca/automl/xgboost/auto_xgb.py | Python | apache-2.0 | 3,793 | [
"ORCA"
] | 9b649ba6fb2cfc5ebfa1b55630bd4ed51fc928a82df4ebfbe6767e3f037b9cec |
import sys
sys.path.insert(0, '../../')
import numpy as np
from jax.experimental import optimizers
import matplotlib.pyplot as plt
import time
import pandas as pd
from sde_gp import SDEGP
import approximate_inference as approx_inf
import priors
import likelihoods
from utils import softplus_list, plot
import pickle
plot_intermediate = False  # if True, plot the posterior after every gradient step
print('loading coal data ...')
cvind = np.loadtxt('cvind.csv').astype(int)  # precomputed cross-validation fold indices
# 10-fold cross-validation
nt = np.floor(cvind.shape[0]/10).astype(int)  # points per fold (remainder dropped)
cvind = np.reshape(cvind[:10*nt], (10, nt))
D = np.loadtxt('binned.csv')  # binned data: column 0 = bin centre, column 1 = count
x = D[:, 0:1]  # inputs (bin centres)
y = D[:, 1:]  # observations (event counts per bin)
N = D.shape[0]  # number of bins
np.random.seed(123)  # fix the seed for reproducibility
# meanval = np.log(len(disaster_timings)/num_time_bins)  # TODO: incorporate mean
if len(sys.argv) > 1:
    # batch run: inference method index and CV fold come from the command line
    method = int(sys.argv[1])
    fold = int(sys.argv[2])
    save_result = True
    plot_final = False
else:
    # interactive run: fixed method/fold, plot instead of saving
    method = 0
    fold = 0
    save_result = False
    plot_final = True
print('method number', method)
print('batch number', fold)
# Get training and test indices
ind_test = cvind[fold, :]  # held-out fold
ind_train = np.setdiff1d(cvind, ind_test)  # remaining nine folds
x_train = x[ind_train, ...]  # 90/10 train/test split
x_test = x[ind_test, ...]
y_train = y[ind_train, ...]
y_test = y[ind_test, ...]
var_f = 1.0  # GP variance
len_f = 1.0  # GP lengthscale
prior = priors.Matern52(variance=var_f, lengthscale=len_f)
lik = likelihoods.Poisson()  # Poisson likelihood for count observations
# Map each method index to a lazily-constructed approximate-inference scheme.
# This replaces the original 17-branch if/elif chain; an unknown index now
# fails fast with a KeyError here instead of a NameError further down.
method_factories = {
    0: lambda: approx_inf.EEP(power=1),
    1: lambda: approx_inf.EEP(power=0.5),
    2: lambda: approx_inf.EKS(),
    3: lambda: approx_inf.UEP(power=1),
    4: lambda: approx_inf.UEP(power=0.5),
    5: lambda: approx_inf.UKS(),
    6: lambda: approx_inf.GHEP(power=1),
    7: lambda: approx_inf.GHEP(power=0.5),
    8: lambda: approx_inf.GHKS(),
    9: lambda: approx_inf.EP(power=1, intmethod='UT'),
    10: lambda: approx_inf.EP(power=0.5, intmethod='UT'),
    11: lambda: approx_inf.EP(power=0.01, intmethod='UT'),
    12: lambda: approx_inf.EP(power=1, intmethod='GH'),
    13: lambda: approx_inf.EP(power=0.5, intmethod='GH'),
    14: lambda: approx_inf.EP(power=0.01, intmethod='GH'),
    15: lambda: approx_inf.VI(intmethod='UT'),
    16: lambda: approx_inf.VI(intmethod='GH'),
}
inf_method = method_factories[method]()
# State-space GP model with the selected approximate-inference backend.
model = SDEGP(prior=prior, likelihood=lik, t=x_train, y=y_train, approx_inf=inf_method)
opt_init, opt_update, get_params = optimizers.adam(step_size=2.5e-1)
# parameters should be a 2-element list [param_prior, param_likelihood]
opt_state = opt_init([model.prior.hyp, model.likelihood.hyp])
def gradient_step(i, state, mod):
    """Perform one Adam update of the model hyperparameters.

    i: iteration counter (also passed to the optimiser schedule).
    state: current optimiser state.
    mod: SDEGP model; its hyperparameters are overwritten in place before
        the marginal-likelihood gradient is evaluated.
    Returns the updated optimiser state.  Relies on module-level
    opt_update/get_params and the plot_intermediate flag.
    """
    params = get_params(state)
    mod.prior.hyp = params[0]
    mod.likelihood.hyp = params[1]
    # grad(Filter) + Smoother:
    neg_log_marg_lik, gradients = mod.run()
    # neg_log_marg_lik, gradients = mod.run_two_stage()
    prior_params = softplus_list(params[0])  # constrained (positive) values, for printing
    print('iter %2d: var_f=%1.2f len_f=%1.2f, nlml=%2.2f' %
          (i, prior_params[0], prior_params[1], neg_log_marg_lik))
    if plot_intermediate:
        plot(mod, i)
    return opt_update(i, gradients, state)
print('optimising the hyperparameters ...')
t0 = time.time()
for j in range(250):  # fixed number of Adam iterations
    opt_state = gradient_step(j, opt_state, model)
t1 = time.time()
print('optimisation time: %2.2f secs' % (t1-t0))
# Dense grid extending slightly beyond the test range, used for plotting.
x_plot = np.linspace(np.min(x_test)-5, np.max(x_test)+5, 200)
# calculate posterior predictive distribution via filtering and smoothing at train & test locations:
print('calculating the posterior predictive distribution ...')
t0 = time.time()
nlpd = model.negative_log_predictive_density(t=x_test, y=y_test)
posterior_mean, posterior_var = model.predict(t=x_plot)
t1 = time.time()
print('prediction time: %2.2f secs' % (t1-t0))
print('NLPD: %1.2f' % nlpd)
if save_result:
    # Persist the NLPD for this (method, fold) pair for later aggregation.
    with open("output/" + str(method) + "_" + str(fold) + "_nlpd.txt", "wb") as fp:
        pickle.dump(nlpd, fp)
# with open("output/" + str(method) + "_" + str(fold) + "_nlpd.txt", "rb") as fp:
#     nlpd_show = pickle.load(fp)
# print(nlpd_show)
if plot_final:
    disaster_timings = pd.read_csv('../../../data/coal.txt', header=None).values[:, 0]
    link_fn = model.likelihood.link_fn
    scale = N / (max(x) - min(x))  # convert per-bin rate to events per unit time
    # Posterior mean of the intensity (with log-normal mean correction) and interval.
    # NOTE(review): 1.645 corresponds to a 90% interval, but the plot label
    # below says "95% confidence" — confirm which was intended.
    post_mean_lgcp = link_fn(posterior_mean + posterior_var / 2) * scale
    lb_lgcp = link_fn(posterior_mean - np.sqrt(posterior_var) * 1.645) * scale
    ub_lgcp = link_fn(posterior_mean + np.sqrt(posterior_var) * 1.645) * scale
    print('plotting ...')
    plt.figure(1, figsize=(12, 5))
    plt.clf()
    plt.plot(disaster_timings, 0*disaster_timings, 'k+', label='observations', clip_on=False)
    plt.plot(x_plot, post_mean_lgcp, 'g', label='posterior mean')
    plt.fill_between(x_plot, lb_lgcp, ub_lgcp, color='g', alpha=0.05, label='95% confidence')
    plt.xlim(x_plot[0], x_plot[-1])
    plt.ylim(0.0)
    plt.legend()
    plt.title('log-Gaussian Cox process via Kalman smoothing (coal mining disasters)')
    plt.xlabel('year')
    plt.ylabel('accident intensity')
    plt.show()
"Gaussian"
] | 43e1c103a73e5275ab01f499c60ff6f4ee7470905ec3bec2810a323d0d80e940 |
from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
import os
import re
import socket
import sys
import time
import math
from ..compat import (
compat_cookiejar,
compat_cookies,
compat_etree_fromstring,
compat_getpass,
compat_http_client,
compat_os_name,
compat_str,
compat_urllib_error,
compat_urllib_parse_urlencode,
compat_urllib_request,
compat_urlparse,
)
from ..downloader.f4m import remove_encrypted_media
from ..utils import (
NO_DEFAULT,
age_restricted,
bug_reports_message,
clean_html,
compiled_regex_type,
determine_ext,
error_to_compat_str,
ExtractorError,
fix_xml_ampersands,
float_or_none,
int_or_none,
parse_iso8601,
RegexNotFoundError,
sanitize_filename,
sanitized_Request,
unescapeHTML,
unified_strdate,
unified_timestamp,
url_basename,
xpath_element,
xpath_text,
xpath_with_ns,
determine_protocol,
parse_duration,
mimetype2ext,
update_Request,
update_url_query,
parse_m3u8_attributes,
extract_attributes,
parse_codecs,
)
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
information possibly downloading the video to the file system, among
other possible outcomes.
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url Mandatory. The URL of the video file
* ext Will be calculated from URL if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
Calculated from the format_id, width, height.
and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "rtmpe",
"m3u8", "m3u8_native" or "http_dash_segments".
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
* language Language code, e.g. "de" or "en-US".
* language_preference Is this in the language mentioned in
the URL?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* stretched_ratio If given and not 1, indicates that the
video's pixels are not square.
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
The following fields are optional:
alt_title: A secondary title of the video.
display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "id" (optional, string) - Thumbnail format ID
* "url"
* "preference" (optional, int) - quality of the image
* "width" (optional, int)
* "height" (optional, int)
* "resolution" (optional, string "{width}x{height"},
deprecated)
* "filesize" (optional, int)
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
license: License name the video is licensed under.
creator: The creator of the video.
release_date: The date (YYYYMMDD) when the video was released.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
uploader_url: Full URL to a personal webpage of the video uploader.
location: Physical location where the video was filmed.
subtitles: The available subtitles as a dictionary in the format
{language: subformats}. "subformats" is a list sorted from
lower to higher preference, each element is a dictionary
with the "ext" entry and one of:
* "data": The subtitles file contents
* "url": A URL pointing to the subtitles file
"ext" will be calculated from URL if missing
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer or float.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
repost_count: Number of reposts of the video
average_rating: Average rating give by users, the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
properties (all but one of text or html optional):
* "author" - human-readable name of the comment author
* "author_id" - user ID of the comment author
* "id" - Comment ID
* "html" - Comment as HTML
* "text" - Plain text of the comment
* "timestamp" - UNIX timestamp of comment
* "parent" - ID of the comment this one is replying to.
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
webpage_url: The URL to the video webpage, if given to youtube-dl it
should allow to get the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
start_time: Time in seconds where the reproduction should start, as
specified in the URL.
end_time: Time in seconds where the reproduction should end, as
specified in the URL.
The following fields should only be used when the video belongs to some logical
chapter or section:
chapter: Name or title of the chapter the video belongs to.
chapter_number: Number of the chapter the video belongs to, as an integer.
chapter_id: Id of the chapter the video belongs to, as a unicode string.
The following fields should only be used when the video is an episode of some
series or programme:
series: Title of the series or programme the video episode belongs to.
season: Title of the season the video episode belongs to.
season_number: Number of the season the video episode belongs to, as an integer.
season_id: Id of the season the video episode belongs to, as a unicode string.
episode: Title of the video episode. Unlike mandatory video title field,
this field should denote the exact title of the video episode
without any kind of decoration.
episode_number: Number of the video episode within a season, as an integer.
episode_id: Id of the video episode, as a unicode string.
The following fields should only be used when the media is a track or a part of
a music album:
track: Title of the track.
track_number: Number of the track within an album or a disc, as an integer.
track_id: Id of the track (useful in case of custom indexing, e.g. 6.iii),
as a unicode string.
artist: Artist(s) of the track.
genre: Genre(s) of the track.
album: Title of the album the track belongs to.
album_type: Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
album_artist: List of all artists appeared on the album (e.g.
"Ash Borer / Fell Voices" or "Various Artists", useful for splits
and compilations).
disc_number: Number of the disc or other physical medium the track belongs to,
as an integer.
release_year: Year (YYYY) when the album was released.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "title", "description" and "id" attributes
with the same semantics as videos (see above).
_type "multi_video" indicates that there are multiple videos that
form a single show, for examples multiple acts of an opera or TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
"""
    # Class-level defaults; instances overwrite _ready/_downloader in __init__.
    _ready = False  # True once _real_initialize() has completed
    _downloader = None  # the YoutubeDL instance used for all network I/O
    _WORKING = True  # subclasses set this to False to mark a broken extractor

    def __init__(self, downloader=None):
        """Constructor. Receives an optional downloader."""
        # Authentication etc. is deferred to initialize(); see extract().
        self._ready = False
        self.set_downloader(downloader)
@classmethod
def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
# This does not use has/getattr intentionally - we want to know whether
# we have cached the regexp for *this* class, whereas getattr would also
# match the superclass
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
@classmethod
def _match_id(cls, url):
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
m = cls._VALID_URL_RE.match(url)
assert m
return m.group('id')
    @classmethod
    def working(cls):
        """Getter method for _WORKING."""
        return cls._WORKING

    def initialize(self):
        """Initializes an instance (authentication, etc).

        Idempotent: the real work runs at most once per instance.
        """
        if not self._ready:
            self._real_initialize()
            self._ready = True

    def extract(self, url):
        """Extracts URL information and returns it in list of dicts.

        Wraps _real_extract(), translating low-level failures into
        ExtractorError so callers handle a single exception type.
        """
        try:
            self.initialize()
            return self._real_extract(url)
        except ExtractorError:
            # Already the canonical error type; propagate unchanged.
            raise
        except compat_http_client.IncompleteRead as e:
            raise ExtractorError('A network error has occurred.', cause=e, expected=True)
        except (KeyError, StopIteration) as e:
            # Typical symptoms of a site layout change inside an extractor.
            raise ExtractorError('An extractor error has occurred.', cause=e)
    def set_downloader(self, downloader):
        """Sets the downloader for this IE."""
        self._downloader = downloader

    def _real_initialize(self):
        """Real initialization process. Redefine in subclasses."""
        pass

    def _real_extract(self, url):
        """Real extraction process. Redefine in subclasses."""
        pass

    @classmethod
    def ie_key(cls):
        """A string for getting the InfoExtractor with get_info_extractor"""
        # Class name minus the trailing "IE", e.g. YoutubeIE -> "Youtube".
        return compat_str(cls.__name__[:-2])

    @property
    def IE_NAME(self):
        # Same naming convention as ie_key(), exposed on instances.
        return compat_str(type(self).__name__[:-2])
    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
        """ Returns the response handle

        note=False suppresses the status message; video_id=None prints the
        note without a prefix.  On network errors: errnote=False returns
        False silently; fatal=True raises ExtractorError; otherwise a
        warning is reported and False is returned.
        """
        if note is None:
            self.report_download_webpage(video_id)
        elif note is not False:
            if video_id is None:
                self.to_screen('%s' % (note,))
            else:
                self.to_screen('%s: %s' % (video_id, note))
        # Fold data/headers/query into the request object (or URL string).
        if isinstance(url_or_request, compat_urllib_request.Request):
            url_or_request = update_Request(
                url_or_request, data=data, headers=headers, query=query)
        else:
            if query:
                url_or_request = update_url_query(url_or_request, query)
            if data is not None or headers:
                url_or_request = sanitized_Request(url_or_request, data, headers)
        try:
            return self._downloader.urlopen(url_or_request)
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            if errnote is False:
                return False
            if errnote is None:
                errnote = 'Unable to download webpage'
            errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
            if fatal:
                raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
            else:
                self._downloader.report_warning(errmsg)
                return False
    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}):
        """ Returns a tuple (page content as string, URL handle)

        Returns False instead when fatal is False and the request failed.
        """
        # Strip hashes from the URL (#1038)
        if isinstance(url_or_request, (compat_str, str)):
            url_or_request = url_or_request.partition('#')[0]
        urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query)
        if urlh is False:
            # _request_webpage only returns False on the non-fatal path.
            assert not fatal
            return False
        content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
        return (content, urlh)
@staticmethod
def _guess_encoding_from_content(content_type, webpage_bytes):
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
elif webpage_bytes.startswith(b'\xff\xfe'):
encoding = 'utf-16'
else:
encoding = 'utf-8'
return encoding
    def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
        """Read the response body from *urlh* and decode it to a string.

        Honors the --dump-pages and --write-pages debugging options, and
        raises an expected ExtractorError when the content is a known
        censorship/filtering block page.
        """
        content_type = urlh.headers.get('Content-Type', '')
        webpage_bytes = urlh.read()
        if prefix is not None:
            webpage_bytes = prefix + webpage_bytes
        if not encoding:
            encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
        if self._downloader.params.get('dump_intermediate_pages', False):
            try:
                url = url_or_request.get_full_url()
            except AttributeError:
                url = url_or_request
            self.to_screen('Dumping request to ' + url)
            # base64 keeps binary-ish payloads printable on the console.
            dump = base64.b64encode(webpage_bytes).decode('ascii')
            self._downloader.to_screen(dump)
        if self._downloader.params.get('write_pages', False):
            try:
                url = url_or_request.get_full_url()
            except AttributeError:
                url = url_or_request
            basen = '%s_%s' % (video_id, url)
            if len(basen) > 240:
                # Keep the filename bounded but still unique via an md5 suffix.
                h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
                basen = basen[:240 - len(h)] + h
            raw_filename = basen + '.dump'
            filename = sanitize_filename(raw_filename, restricted=True)
            self.to_screen('Saving request to ' + filename)
            # Working around MAX_PATH limitation on Windows (see
            # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
            if compat_os_name == 'nt':
                absfilepath = os.path.abspath(filename)
                if len(absfilepath) > 259:
                    filename = '\\\\?\\' + absfilepath
            with open(filename, 'wb') as outf:
                outf.write(webpage_bytes)
        try:
            content = webpage_bytes.decode(encoding, 'replace')
        except LookupError:
            # Unknown/garbled charset name: fall back to utf-8.
            content = webpage_bytes.decode('utf-8', 'replace')
        if ('<title>Access to this site is blocked</title>' in content and
                'Websense' in content[:512]):
            msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
            blocked_iframe = self._html_search_regex(
                r'<iframe src="([^"]+)"', content,
                'Websense information URL', default=None)
            if blocked_iframe:
                msg += ' Visit %s for more details' % blocked_iframe
            raise ExtractorError(msg, expected=True)
        if '<title>The URL you requested has been blocked</title>' in content[:512]:
            msg = (
                'Access to this webpage has been blocked by Indian censorship. '
                'Use a VPN or proxy server (with --proxy) to route around it.')
            block_msg = self._html_search_regex(
                r'</h1><p>(.*?)</p>',
                content, 'block message', default=None)
            if block_msg:
                msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
            raise ExtractorError(msg, expected=True)
        return content
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None, data=None, headers={}, query={}):
""" Returns the data of the page as a string """
success = False
try_count = 0
while success is False:
try:
res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding, data=data, headers=headers, query=query)
success = True
except compat_http_client.IncompleteRead as e:
try_count += 1
if try_count >= tries:
raise e
self._sleep(timeout, video_id)
if res is False:
return res
else:
content, _ = res
return content
def _download_xml(self, url_or_request, video_id,
note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True, encoding=None, data=None, headers={}, query={}):
"""Return the xml as an xml.etree.ElementTree.Element"""
xml_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding, data=data, headers=headers, query=query)
if xml_string is False:
return xml_string
if transform_source:
xml_string = transform_source(xml_string)
return compat_etree_fromstring(xml_string.encode('utf-8'))
def _download_json(self, url_or_request, video_id,
                   note='Downloading JSON metadata',
                   errnote='Unable to download JSON metadata',
                   transform_source=None,
                   fatal=True, encoding=None, data=None, headers={}, query={}):
    """Download a webpage and decode it as JSON (see _parse_json)."""
    page = self._download_webpage(
        url_or_request, video_id, note, errnote, fatal=fatal,
        encoding=encoding, data=data, headers=headers, query=query)
    # Non-fatal download failure yields None rather than raising.
    if page is False and not fatal:
        return None
    return self._parse_json(
        page, video_id, transform_source=transform_source, fatal=fatal)
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
    """Forward a warning to the downloader, tagged with the IE name and id."""
    prefix = '' if video_id is None else '%s: ' % video_id
    self._downloader.report_warning(
        '[%s] %s%s' % (self.IE_NAME, prefix, msg))
def to_screen(self, msg):
    """Print msg to screen, prefixing it with '[ie_name]'"""
    self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))

def report_extraction(self, id_or_name):
    """Report information extraction."""
    self.to_screen('%s: Extracting information' % id_or_name)

def report_download_webpage(self, video_id):
    """Report webpage download."""
    self.to_screen('%s: Downloading webpage' % video_id)

def report_age_confirmation(self):
    """Report attempt to confirm age."""
    self.to_screen('Confirming age')

def report_login(self):
    """Report attempt to log in."""
    self.to_screen('Logging in')
@staticmethod
def raise_login_required(msg='This video is only available for registered users'):
    """Abort extraction with an expected error telling the user to supply credentials."""
    raise ExtractorError(
        '%s. Use --username and --password or --netrc to provide account credentials.' % msg,
        expected=True)

@staticmethod
def raise_geo_restricted(msg='This video is not available from your location due to geo restriction'):
    """Abort extraction with an expected error suggesting a proxy workaround."""
    raise ExtractorError(
        '%s. You might want to use --proxy to workaround.' % msg,
        expected=True)
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
"""Returns a URL that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
if video_title is not None:
video_info['title'] = video_title
return video_info
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
"""Returns a playlist"""
video_info = {'_type': 'playlist',
'entries': entries}
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
if playlist_description:
video_info['description'] = playlist_description
return video_info
def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
    """
    Perform a regex search on the given string, using a single or a list of
    patterns returning the first matching group.
    In case of failure return a default value or raise a WARNING or a
    RegexNotFoundError, depending on fatal, specifying the field name.
    """
    if isinstance(pattern, (str, compat_str, compiled_regex_type)):
        mobj = re.search(pattern, string, flags)
    else:
        # List of patterns: the first one that matches wins.
        for p in pattern:
            mobj = re.search(p, string, flags)
            if mobj:
                break
    # Colorize the field name in messages when stderr is a capable terminal.
    if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
        _name = '\033[0;34m%s\033[0m' % name
    else:
        _name = name
    if mobj:
        if group is None:
            # return the first matching group
            return next(g for g in mobj.groups() if g is not None)
        else:
            return mobj.group(group)
    elif default is not NO_DEFAULT:
        return default
    elif fatal:
        raise RegexNotFoundError('Unable to extract %s' % _name)
    else:
        self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
        return None
def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
    """
    Like _search_regex, but strips HTML tags and unescapes entities.
    """
    res = self._search_regex(pattern, string, name, default, fatal, flags, group)
    # Falsy results (None, '') are passed through untouched.
    return clean_html(res).strip() if res else res
def _get_netrc_login_info(self, netrc_machine=None):
    """Look up (username, password) for *netrc_machine* in the user's .netrc.

    Only consulted when the 'usenetrc' option is enabled. Lookup and parse
    failures are reported as warnings and result in (None, None).
    """
    username = None
    password = None
    netrc_machine = netrc_machine or self._NETRC_MACHINE
    if self._downloader.params.get('usenetrc', False):
        try:
            info = netrc.netrc().authenticators(netrc_machine)
            if info is not None:
                username = info[0]
                password = info[2]
            else:
                raise netrc.NetrcParseError('No authenticators for %s' % netrc_machine)
        except (IOError, netrc.NetrcParseError) as err:
            # Best effort: a broken or missing .netrc must not abort extraction.
            self._downloader.report_warning('parsing .netrc: %s' % error_to_compat_str(err))
    return (username, password)
def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
    """
    Get the login info as (username, password)
    It will look in the netrc file using the _NETRC_MACHINE value
    If there's no info available, return (None, None)
    """
    if self._downloader is None:
        return (None, None)
    params = self._downloader.params
    # Explicit command-line credentials take precedence over .netrc data.
    if params.get(username_option) is not None:
        return (params[username_option], params[password_option])
    return self._get_netrc_login_info(netrc_machine)
def _get_tfa_info(self, note='two-factor verification code'):
    """
    Get the two-factor authentication info
    TODO - asking the user will be required for sms/phone verify
    currently just uses the command line option
    If there's no info available, return None
    """
    if self._downloader is None:
        return None
    twofactor = self._downloader.params.get('twofactor')
    if twofactor is not None:
        return twofactor
    # Fall back to interactive prompting.
    return compat_getpass('Type %s and press [Return]: ' % note)
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)'
% {'prop': re.escape(prop)})
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
@staticmethod
def _meta_regex(prop):
    # Case-insensitive, dotall, verbose regex matching a <meta> tag whose
    # itemprop/name/property/id/http-equiv attribute equals *prop*; the
    # content attribute value is captured in the 'content' named group.
    return r'''(?isx)<meta
                (?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
                [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
def _og_search_property(self, prop, html, name=None, **kargs):
    """Search *html* for one or more OpenGraph properties and return the
    unescaped content of the first match, or None."""
    props = list(prop) if isinstance(prop, (list, tuple)) else [prop]
    if name is None:
        name = 'OpenGraph %s' % props[0]
    og_regexes = []
    for p in props:
        og_regexes.extend(self._og_regexes(p))
    escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
    return None if escaped is None else unescapeHTML(escaped)
def _og_search_thumbnail(self, html, **kargs):
    # Non-fatal: a missing og:image simply yields None.
    return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)

def _og_search_description(self, html, **kargs):
    # Non-fatal lookup of og:description.
    return self._og_search_property('description', html, fatal=False, **kargs)

def _og_search_title(self, html, **kargs):
    # Fatal by default: callers usually require a title.
    return self._og_search_property('title', html, **kargs)

def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
    """Return the og:video URL, preferring og:video:secure_url when *secure*."""
    regexes = self._og_regexes('video') + self._og_regexes('video:url')
    if secure:
        regexes = self._og_regexes('video:secure_url') + regexes
    return self._html_search_regex(regexes, html, name, **kargs)

def _og_search_url(self, html, **kargs):
    # Fatal by default lookup of og:url.
    return self._og_search_property('url', html, **kargs)
def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
    """Return the content attribute of the first <meta> tag matching any of
    *name* (a string or list of candidate attribute values)."""
    names = list(name) if isinstance(name, (list, tuple)) else [name]
    if display_name is None:
        display_name = names[0]
    return self._html_search_regex(
        [self._meta_regex(n) for n in names],
        html, display_name, fatal=fatal, group='content', **kwargs)
def _dc_search_uploader(self, html):
    # Dublin Core creator meta tag doubles as the uploader name.
    return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
def _media_rating_search(self, html):
    """Map an ICRA-style 'rating' meta value to an age limit, or None."""
    # See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
    rating = self._html_search_meta('rating', html)
    if not rating:
        return None
    return {
        'safe for kids': 0,
        'general': 8,
        '14 years': 14,
        'mature': 17,
        'restricted': 19,
    }.get(rating.lower())
def _family_friendly_search(self, html):
    """Map schema.org isFamilyFriendly meta to an age limit (0 or 18), or None."""
    # See http://schema.org/VideoObject
    family_friendly = self._html_search_meta('isFamilyFriendly', html)
    if not family_friendly:
        return None
    return {
        '1': 0,
        'true': 0,
        '0': 18,
        'false': 18,
    }.get(family_friendly.lower())
def _twitter_search_player(self, html):
    # Twitter Card player URL (twitter:player meta tag).
    return self._html_search_meta('twitter:player', html,
                                  'twitter card player')
def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
    """Locate a <script type="application/ld+json"> block in *html* and
    parse it into info-dict fields via _json_ld.

    Accepts _search_regex-style `default`/`fatal` kwargs; supplying a
    `default` implies non-fatal JSON parsing.
    """
    json_ld = self._search_regex(
        r'(?s)<script[^>]+type=(["\'])application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>',
        html, 'JSON-LD', group='json_ld', **kwargs)
    default = kwargs.get('default', NO_DEFAULT)
    if not json_ld:
        return default if default is not NO_DEFAULT else {}
    # JSON-LD may be malformed and thus `fatal` should be respected.
    # At the same time `default` may be passed that assumes `fatal=False`
    # for _search_regex. Let's simulate the same behavior here as well.
    # Fix: compare the sentinel with `is`, not `==` — a caller-supplied
    # default with a custom __eq__ (e.g. an array-like) must not break or
    # subvert the sentinel check.
    fatal = kwargs.get('fatal', True) if default is NO_DEFAULT else False
    return self._json_ld(json_ld, video_id, fatal=fatal, expected_type=expected_type)
def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
    """Map a JSON-LD payload (raw string or decoded object) onto info-dict
    fields.

    Only the first entry with @context == 'http://schema.org' is used;
    recognized @type values are TVEpisode, Article and VideoObject. When
    *expected_type* is given and the entry's @type differs, an empty/partial
    dict is returned. None-valued fields are stripped from the result.
    """
    if isinstance(json_ld, compat_str):
        json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
    if not json_ld:
        return {}
    info = {}
    if not isinstance(json_ld, (list, tuple, dict)):
        return info
    if isinstance(json_ld, dict):
        # Normalize a single entry to a one-element list.
        json_ld = [json_ld]
    for e in json_ld:
        if e.get('@context') == 'http://schema.org':
            item_type = e.get('@type')
            if expected_type is not None and expected_type != item_type:
                return info
            if item_type == 'TVEpisode':
                info.update({
                    'episode': unescapeHTML(e.get('name')),
                    'episode_number': int_or_none(e.get('episodeNumber')),
                    'description': unescapeHTML(e.get('description')),
                })
                part_of_season = e.get('partOfSeason')
                if isinstance(part_of_season, dict) and part_of_season.get('@type') == 'TVSeason':
                    info['season_number'] = int_or_none(part_of_season.get('seasonNumber'))
                part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
                if isinstance(part_of_series, dict) and part_of_series.get('@type') == 'TVSeries':
                    info['series'] = unescapeHTML(part_of_series.get('name'))
            elif item_type == 'Article':
                info.update({
                    'timestamp': parse_iso8601(e.get('datePublished')),
                    'title': unescapeHTML(e.get('headline')),
                    'description': unescapeHTML(e.get('articleBody')),
                })
            elif item_type == 'VideoObject':
                info.update({
                    'url': e.get('contentUrl'),
                    'title': unescapeHTML(e.get('name')),
                    'description': unescapeHTML(e.get('description')),
                    'thumbnail': e.get('thumbnailUrl'),
                    'duration': parse_duration(e.get('duration')),
                    'timestamp': unified_timestamp(e.get('uploadDate')),
                    'filesize': float_or_none(e.get('contentSize')),
                    'tbr': int_or_none(e.get('bitrate')),
                    'width': int_or_none(e.get('width')),
                    'height': int_or_none(e.get('height')),
                })
            # Only the first schema.org entry is considered.
            break
    # Drop None fields so they do not clobber values from other sources.
    return dict((k, v) for k, v in info.items() if v is not None)
@staticmethod
def _hidden_inputs(html):
html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
hidden_inputs = {}
for input in re.findall(r'(?i)<input([^>]+)>', html):
if not re.search(r'type=(["\'])(?:hidden|submit)\1', input):
continue
name = re.search(r'(?:name|id)=(["\'])(?P<value>.+?)\1', input)
if not name:
continue
value = re.search(r'value=(["\'])(?P<value>.*?)\1', input)
if not value:
continue
hidden_inputs[name.group('value')] = value.group('value')
return hidden_inputs
def _form_hidden_inputs(self, form_id, html):
    """Extract hidden input fields from the <form> with the given id."""
    form = self._search_regex(
        r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
        html, '%s form' % form_id, group='form')
    return self._hidden_inputs(form)
def _sort_formats(self, formats, field_preference=None):
    """Sort *formats* in place from worst to best quality.

    With *field_preference* (a list/tuple of field names) the sort key is
    built from those fields only; otherwise a heuristic key combining
    preference, codec presence, container, protocol, bitrates and
    resolution is used. Raises ExtractorError when *formats* is empty.
    """
    if not formats:
        raise ExtractorError('No video formats found')
    for f in formats:
        # Automatically determine tbr when missing based on abr and vbr (improves
        # formats sorting in some cases)
        if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None:
            f['tbr'] = f['abr'] + f['vbr']

    def _formats_key(f):
        # TODO remove the following workaround
        from ..utils import determine_ext
        if not f.get('ext') and 'url' in f:
            f['ext'] = determine_ext(f['url'])
        if isinstance(field_preference, (list, tuple)):
            # Missing values sort first: '' for format_id, -1 otherwise.
            return tuple(
                f.get(field)
                if f.get(field) is not None
                else ('' if field == 'format_id' else -1)
                for field in field_preference)
        preference = f.get('preference')
        if preference is None:
            preference = 0
            if f.get('ext') in ['f4f', 'f4m']:  # Not yet supported
                preference -= 0.5
        protocol = f.get('protocol') or determine_protocol(f)
        proto_preference = 0 if protocol in ['http', 'https'] else (-0.5 if protocol == 'rtsp' else -0.1)
        if f.get('vcodec') == 'none':  # audio only
            preference -= 50
            if self._downloader.params.get('prefer_free_formats'):
                ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
            else:
                ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
            ext_preference = 0
            try:
                audio_ext_preference = ORDER.index(f['ext'])
            except ValueError:
                audio_ext_preference = -1
        else:
            if f.get('acodec') == 'none':  # video only
                preference -= 40
            if self._downloader.params.get('prefer_free_formats'):
                ORDER = ['flv', 'mp4', 'webm']
            else:
                ORDER = ['webm', 'flv', 'mp4']
            try:
                ext_preference = ORDER.index(f['ext'])
            except ValueError:
                ext_preference = -1
            audio_ext_preference = 0
        # Tuple comparison: earlier fields dominate later ones.
        return (
            preference,
            f.get('language_preference') if f.get('language_preference') is not None else -1,
            f.get('quality') if f.get('quality') is not None else -1,
            f.get('tbr') if f.get('tbr') is not None else -1,
            f.get('filesize') if f.get('filesize') is not None else -1,
            f.get('vbr') if f.get('vbr') is not None else -1,
            f.get('height') if f.get('height') is not None else -1,
            f.get('width') if f.get('width') is not None else -1,
            proto_preference,
            ext_preference,
            f.get('abr') if f.get('abr') is not None else -1,
            audio_ext_preference,
            f.get('fps') if f.get('fps') is not None else -1,
            f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
            f.get('source_preference') if f.get('source_preference') is not None else -1,
            f.get('format_id') if f.get('format_id') is not None else '',
        )
    formats.sort(key=_formats_key)
def _check_formats(self, formats, video_id):
    """Probe each format URL and drop unreachable ones (mutates in place)."""
    if not formats:
        return
    formats[:] = [
        f for f in formats
        if self._is_valid_url(
            f['url'], video_id,
            item='%s video format' % f.get('format_id') if f.get('format_id') else 'video')
    ]
@staticmethod
def _remove_duplicate_formats(formats):
format_urls = set()
unique_formats = []
for f in formats:
if f['url'] not in format_urls:
format_urls.add(f['url'])
unique_formats.append(f)
formats[:] = unique_formats
def _is_valid_url(self, url, video_id, item='video'):
    """Probe *url* with a request and return True when it is reachable.

    URLError-caused failures return False (and are logged); any other
    extraction error is re-raised.
    """
    url = self._proto_relative_url(url, scheme='http:')
    # For now assume non HTTP(S) URLs always valid
    if not (url.startswith('http://') or url.startswith('https://')):
        return True
    try:
        self._request_webpage(url, video_id, 'Checking %s URL' % item)
        return True
    except ExtractorError as e:
        if isinstance(e.cause, compat_urllib_error.URLError):
            self.to_screen(
                '%s: %s URL is invalid, skipping' % (video_id, item))
            return False
        raise
def http_scheme(self):
    """ Either "http:" or "https:", depending on the user's preferences """
    if self._downloader.params.get('prefer_insecure', False):
        return 'http:'
    return 'https:'
def _proto_relative_url(self, url, scheme=None):
if url is None:
return url
if url.startswith('//'):
if scheme is None:
scheme = self.http_scheme()
return scheme + url
else:
return url
def _sleep(self, timeout, video_id, msg_template=None):
    """Announce and perform a wait of *timeout* seconds."""
    if msg_template is None:
        msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
    self.to_screen(msg_template % {'video_id': video_id, 'timeout': timeout})
    time.sleep(timeout)
def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
                         transform_source=lambda s: fix_xml_ampersands(s).strip(),
                         fatal=True, m3u8_id=None):
    """Download an Adobe HDS (f4m) manifest and parse it into a formats
    list; returns [] on non-fatal download failure."""
    manifest = self._download_xml(
        manifest_url, video_id, 'Downloading f4m manifest',
        'Unable to download f4m manifest',
        # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
        # (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
        transform_source=transform_source,
        fatal=fatal)
    if manifest is False:
        return []
    return self._parse_f4m_formats(
        manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id,
        transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)
def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None,
                       transform_source=lambda s: fix_xml_ampersands(s).strip(),
                       fatal=True, m3u8_id=None):
    """Parse an f4m manifest Element into a formats list.

    Recurses into nested f4m/m3u8 sub-manifests referenced by set-level
    manifests; DRM-protected and player-verification-protected media yield
    no formats.
    """
    # currently youtube-dl cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
    akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
    if akamai_pv is not None and ';' in akamai_pv.text:
        playerVerificationChallenge = akamai_pv.text.split(';')[0]
        if playerVerificationChallenge.strip() != '':
            return []
    formats = []
    manifest_version = '1.0'
    media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
    if not media_nodes:
        # Fall back to the 2.0 namespace when no 1.0 media nodes exist.
        manifest_version = '2.0'
        media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
    # Remove unsupported DRM protected media from final formats
    # rendition (see https://github.com/rg3/youtube-dl/issues/8573).
    media_nodes = remove_encrypted_media(media_nodes)
    if not media_nodes:
        return formats
    base_url = xpath_text(
        manifest, ['{http://ns.adobe.com/f4m/1.0}baseURL', '{http://ns.adobe.com/f4m/2.0}baseURL'],
        'base URL', default=None)
    if base_url:
        base_url = base_url.strip()
    bootstrap_info = xpath_element(
        manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
        'bootstrap info', default=None)
    for i, media_el in enumerate(media_nodes):
        tbr = int_or_none(media_el.attrib.get('bitrate'))
        width = int_or_none(media_el.attrib.get('width'))
        height = int_or_none(media_el.attrib.get('height'))
        format_id = '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)]))
        # If <bootstrapInfo> is present, the specified f4m is a
        # stream-level manifest, and only set-level manifests may refer to
        # external resources. See section 11.4 and section 4 of F4M spec
        if bootstrap_info is None:
            media_url = None
            # @href is introduced in 2.0, see section 11.6 of F4M spec
            if manifest_version == '2.0':
                media_url = media_el.attrib.get('href')
            if media_url is None:
                media_url = media_el.attrib.get('url')
            if not media_url:
                continue
            manifest_url = (
                media_url if media_url.startswith('http://') or media_url.startswith('https://')
                else ((base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
            # If media_url is itself a f4m manifest do the recursive extraction
            # since bitrates in parent manifest (this one) and media_url manifest
            # may differ leading to inability to resolve the format by requested
            # bitrate in f4m downloader
            ext = determine_ext(manifest_url)
            if ext == 'f4m':
                f4m_formats = self._extract_f4m_formats(
                    manifest_url, video_id, preference=preference, f4m_id=f4m_id,
                    transform_source=transform_source, fatal=fatal)
                # Sometimes stream-level manifest contains single media entry that
                # does not contain any quality metadata (e.g. http://matchtv.ru/#live-player).
                # At the same time parent's media entry in set-level manifest may
                # contain it. We will copy it from parent in such cases.
                if len(f4m_formats) == 1:
                    f = f4m_formats[0]
                    f.update({
                        'tbr': f.get('tbr') or tbr,
                        'width': f.get('width') or width,
                        'height': f.get('height') or height,
                        'format_id': f.get('format_id') if not tbr else format_id,
                    })
                formats.extend(f4m_formats)
                continue
            elif ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    manifest_url, video_id, 'mp4', preference=preference,
                    m3u8_id=m3u8_id, fatal=fatal))
                continue
        formats.append({
            'format_id': format_id,
            'url': manifest_url,
            'ext': 'flv' if bootstrap_info is not None else None,
            'tbr': tbr,
            'width': width,
            'height': height,
            'preference': preference,
        })
    return formats
def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, m3u8_id=None):
return {
'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': preference - 100 if preference else -100,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
                          entry_protocol='m3u8', preference=None,
                          m3u8_id=None, note=None, errnote=None,
                          fatal=True, live=False):
    """Download an HLS playlist and return a list of format dicts.

    Media playlists are returned as a single format; master playlists are
    parsed into one format per EXT-X-STREAM-INF / EXT-X-MEDIA rendition,
    plus a leading meta entry (see _m3u8_meta_format).
    """
    res = self._download_webpage_handle(
        m3u8_url, video_id,
        note=note or 'Downloading m3u8 information',
        errnote=errnote or 'Failed to download m3u8 information',
        fatal=fatal)
    if res is False:
        return []
    m3u8_doc, urlh = res
    # Use the final (post-redirect) URL as the base for relative entries.
    m3u8_url = urlh.geturl()
    formats = [self._m3u8_meta_format(m3u8_url, ext, preference, m3u8_id)]
    format_url = lambda u: (
        u
        if re.match(r'^https?://', u)
        else compat_urlparse.urljoin(m3u8_url, u))
    # We should try extracting formats only from master playlists [1], i.e.
    # playlists that describe available qualities. On the other hand media
    # playlists [2] should be returned as is since they contain just the media
    # without qualities renditions.
    # Fortunately, master playlist can be easily distinguished from media
    # playlist based on particular tags availability. As of [1, 2] master
    # playlist tags MUST NOT appear in a media playist and vice versa.
    # As of [3] #EXT-X-TARGETDURATION tag is REQUIRED for every media playlist
    # and MUST NOT appear in master playlist thus we can clearly detect media
    # playlist with this criterion.
    # 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.4
    # 2. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3
    # 3. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.1
    if '#EXT-X-TARGETDURATION' in m3u8_doc:  # media playlist, return as is
        return [{
            'url': m3u8_url,
            'format_id': m3u8_id,
            'ext': ext,
            'protocol': entry_protocol,
            'preference': preference,
        }]
    # last_info/last_media carry attributes from the most recent
    # EXT-X-STREAM-INF / EXT-X-MEDIA tag to the following URI line.
    last_info = {}
    last_media = {}
    for line in m3u8_doc.splitlines():
        if line.startswith('#EXT-X-STREAM-INF:'):
            last_info = parse_m3u8_attributes(line)
        elif line.startswith('#EXT-X-MEDIA:'):
            media = parse_m3u8_attributes(line)
            media_type = media.get('TYPE')
            if media_type in ('VIDEO', 'AUDIO'):
                media_url = media.get('URI')
                if media_url:
                    format_id = []
                    for v in (media.get('GROUP-ID'), media.get('NAME')):
                        if v:
                            format_id.append(v)
                    formats.append({
                        'format_id': '-'.join(format_id),
                        'url': format_url(media_url),
                        'language': media.get('LANGUAGE'),
                        'vcodec': 'none' if media_type == 'AUDIO' else None,
                        'ext': ext,
                        'protocol': entry_protocol,
                        'preference': preference,
                    })
                else:
                    # When there is no URI in EXT-X-MEDIA let this tag's
                    # data be used by regular URI lines below
                    last_media = media
        elif line.startswith('#') or not line.strip():
            continue
        else:
            tbr = int_or_none(last_info.get('AVERAGE-BANDWIDTH') or last_info.get('BANDWIDTH'), scale=1000)
            format_id = []
            if m3u8_id:
                format_id.append(m3u8_id)
            # Despite specification does not mention NAME attribute for
            # EXT-X-STREAM-INF it still sometimes may be present
            stream_name = last_info.get('NAME') or last_media.get('NAME')
            # Bandwidth of live streams may differ over time thus making
            # format_id unpredictable. So it's better to keep provided
            # format_id intact.
            if not live:
                format_id.append(stream_name if stream_name else '%d' % (tbr if tbr else len(formats)))
            f = {
                'format_id': '-'.join(format_id),
                'url': format_url(line.strip()),
                'tbr': tbr,
                'ext': ext,
                'fps': float_or_none(last_info.get('FRAME-RATE')),
                'protocol': entry_protocol,
                'preference': preference,
            }
            resolution = last_info.get('RESOLUTION')
            if resolution:
                width_str, height_str = resolution.split('x')
                f['width'] = int(width_str)
                f['height'] = int(height_str)
            # Unified Streaming Platform
            mobj = re.search(
                r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
            if mobj:
                abr, vbr = mobj.groups()
                abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
                f.update({
                    'vbr': vbr,
                    'abr': abr,
                })
            f.update(parse_codecs(last_info.get('CODECS')))
            formats.append(f)
            # Reset carried attributes once consumed by a URI line.
            last_info = {}
            last_media = {}
    return formats
@staticmethod
def _xpath_ns(path, namespace=None):
if not namespace:
return path
out = []
for c in path.split('/'):
if not c or c == '.':
out.append(c)
else:
out.append('{%s}%s' % (namespace, c))
return '/'.join(out)
def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
    """Download a SMIL document and return its parsed formats list."""
    smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)
    if smil is False:
        # _download_smil only returns False on non-fatal failures.
        assert not fatal
        return []
    namespace = self._parse_smil_namespace(smil)
    return self._parse_smil_formats(
        smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)

def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
    """Download a SMIL document and return a full info dict (see _parse_smil)."""
    smil = self._download_smil(smil_url, video_id, fatal=fatal)
    if smil is False:
        return {}
    return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)

def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
    """Fetch a SMIL document as an XML Element (False on non-fatal failure)."""
    return self._download_xml(
        smil_url, video_id, 'Downloading SMIL file',
        'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)
def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
    """Build a complete info dict (formats, subtitles, metadata, thumbnails)
    from a parsed SMIL Element.

    NOTE(review): the video_id parameter is overwritten below with the
    basename of smil_url — the incoming value is effectively only used for
    logging inside the format parsers.
    """
    namespace = self._parse_smil_namespace(smil)
    formats = self._parse_smil_formats(
        smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
    subtitles = self._parse_smil_subtitles(smil, namespace=namespace)
    video_id = os.path.splitext(url_basename(smil_url))[0]
    title = None
    description = None
    upload_date = None
    # Harvest metadata from <head><meta name=... content=...> entries.
    for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
        name = meta.attrib.get('name')
        content = meta.attrib.get('content')
        if not name or not content:
            continue
        if not title and name == 'title':
            title = content
        elif not description and name in ('description', 'abstract'):
            description = content
        elif not upload_date and name == 'date':
            upload_date = unified_strdate(content)
    thumbnails = [{
        'id': image.get('type'),
        'url': image.get('src'),
        'width': int_or_none(image.get('width')),
        'height': int_or_none(image.get('height')),
    } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]
    return {
        'id': video_id,
        'title': title or video_id,
        'description': description,
        'upload_date': upload_date,
        'thumbnails': thumbnails,
        'formats': formats,
        'subtitles': subtitles,
    }
def _parse_smil_namespace(self, smil):
    # Extract the XML namespace from the root tag '{ns}smil', or None.
    return self._search_regex(
        r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
    """Collect formats from SMIL <video>/<audio> nodes.

    Handles RTMP streams, nested m3u8/f4m manifests, and plain HTTP(S)
    sources; duplicate src values are processed once.
    """
    # Resolve the base URL from <head><meta base=...>, falling back to smil_url.
    base = smil_url
    for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
        b = meta.get('base') or meta.get('httpBase')
        if b:
            base = b
            break
    formats = []
    rtmp_count = 0
    http_count = 0
    m3u8_count = 0
    srcs = []
    media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
    for medium in media:
        src = medium.get('src')
        if not src or src in srcs:
            continue
        srcs.append(src)
        bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
        filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
        width = int_or_none(medium.get('width'))
        height = int_or_none(medium.get('height'))
        proto = medium.get('proto')
        ext = medium.get('ext')
        src_ext = determine_ext(src)
        streamer = medium.get('streamer') or base
        if proto == 'rtmp' or streamer.startswith('rtmp'):
            rtmp_count += 1
            formats.append({
                'url': streamer,
                'play_path': src,
                'ext': 'flv',
                'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
                'tbr': bitrate,
                'filesize': filesize,
                'width': width,
                'height': height,
            })
            if transform_rtmp_url:
                # Allow callers to rewrite streamer/play_path after the fact.
                streamer, src = transform_rtmp_url(streamer, src)
                formats[-1].update({
                    'url': streamer,
                    'play_path': src,
                })
            continue
        src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
        src_url = src_url.strip()
        if proto == 'm3u8' or src_ext == 'm3u8':
            m3u8_formats = self._extract_m3u8_formats(
                src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
            # A single-entry HLS result inherits metadata from this medium.
            if len(m3u8_formats) == 1:
                m3u8_count += 1
                m3u8_formats[0].update({
                    'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
                    'tbr': bitrate,
                    'width': width,
                    'height': height,
                })
            formats.extend(m3u8_formats)
            continue
        if src_ext == 'f4m':
            f4m_url = src_url
            if not f4m_params:
                f4m_params = {
                    'hdcore': '3.2.0',
                    'plugin': 'flowplayer-3.2.0.1',
                }
            f4m_url += '&' if '?' in f4m_url else '?'
            f4m_url += compat_urllib_parse_urlencode(f4m_params)
            formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
            continue
        if src_url.startswith('http') and self._is_valid_url(src, video_id):
            http_count += 1
            formats.append({
                'url': src_url,
                'ext': ext or src_ext or 'flv',
                'format_id': 'http-%d' % (bitrate or http_count),
                'tbr': bitrate,
                'filesize': filesize,
                'width': width,
                'height': height,
            })
            continue
    return formats
def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
    """Collect subtitle tracks from SMIL <textstream> nodes, grouped by
    language (falling back to *subtitles_lang*)."""
    seen_srcs = []
    subtitles = {}
    for textstream in smil.findall(self._xpath_ns('.//textstream', namespace)):
        src = textstream.get('src')
        if not src or src in seen_srcs:
            continue
        seen_srcs.append(src)
        ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
        lang = (textstream.get('systemLanguage')
                or textstream.get('systemLanguageName')
                or textstream.get('lang')
                or subtitles_lang)
        subtitles.setdefault(lang, []).append({
            'url': src,
            'ext': ext,
        })
    return subtitles
def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True):
    """Download an XSPF playlist and return its parsed track entries.

    Returns [] when the download fails and *fatal* is False.
    """
    xspf = self._download_xml(
        # Fix: user-facing note said 'xpsf' instead of 'xspf'.
        playlist_url, playlist_id, 'Downloading xspf playlist',
        'Unable to download xspf manifest', fatal=fatal)
    if xspf is False:
        return []
    return self._parse_xspf(xspf, playlist_id)
def _parse_xspf(self, playlist, playlist_id):
    """Parse an XSPF playlist Element into a list of entry info dicts.

    NOTE(review): every entry gets 'id': playlist_id — entries are not
    given distinct ids; confirm against callers before changing.
    """
    NS_MAP = {
        'xspf': 'http://xspf.org/ns/0/',
        's1': 'http://static.streamone.nl/player/ns/0',
    }
    entries = []
    for track in playlist.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
        title = xpath_text(
            track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
        description = xpath_text(
            track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
        thumbnail = xpath_text(
            track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
        duration = float_or_none(
            xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)
        formats = [{
            'url': location.text,
            'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
            'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
            'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
        } for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))]
        self._sort_formats(formats)
        entries.append({
            'id': playlist_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
        })
    return entries
def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict={}):
    """Download a DASH MPD manifest and parse it into a formats list.

    NOTE(review): formats_dict={} is a shared mutable default — safe only
    as long as callees never mutate it; verify before refactoring.
    """
    res = self._download_webpage_handle(
        mpd_url, video_id,
        note=note or 'Downloading MPD manifest',
        errnote=errnote or 'Failed to download MPD manifest',
        fatal=fatal)
    if res is False:
        return []
    mpd, urlh = res
    # Base URL is derived from the final (post-redirect) manifest location.
    mpd_base_url = re.match(r'https?://.+/', urlh.geturl()).group()
    return self._parse_mpd_formats(
        compat_etree_fromstring(mpd.encode('utf-8')), mpd_id, mpd_base_url, formats_dict=formats_dict)
def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}):
"""
Parse formats from MPD manifest.
References:
1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
"""
if mpd_doc.get('type') == 'dynamic':
return []
namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)
def _add_ns(path):
return self._xpath_ns(path, namespace)
def is_drm_protected(element):
return element.find(_add_ns('ContentProtection')) is not None
def extract_multisegment_info(element, ms_parent_info):
ms_info = ms_parent_info.copy()
segment_list = element.find(_add_ns('SegmentList'))
if segment_list is not None:
segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
if segment_urls_e:
ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
initialization = segment_list.find(_add_ns('Initialization'))
if initialization is not None:
ms_info['initialization_url'] = initialization.attrib['sourceURL']
else:
segment_template = element.find(_add_ns('SegmentTemplate'))
if segment_template is not None:
start_number = segment_template.get('startNumber')
if start_number:
ms_info['start_number'] = int(start_number)
segment_timeline = segment_template.find(_add_ns('SegmentTimeline'))
if segment_timeline is not None:
s_e = segment_timeline.findall(_add_ns('S'))
if s_e:
ms_info['total_number'] = 0
ms_info['s'] = []
for s in s_e:
r = int(s.get('r', 0))
ms_info['total_number'] += 1 + r
ms_info['s'].append({
't': int(s.get('t', 0)),
# @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
'd': int(s.attrib['d']),
'r': r,
})
else:
timescale = segment_template.get('timescale')
if timescale:
ms_info['timescale'] = int(timescale)
segment_duration = segment_template.get('duration')
if segment_duration:
ms_info['segment_duration'] = int(segment_duration)
media_template = segment_template.get('media')
if media_template:
ms_info['media_template'] = media_template
initialization = segment_template.get('initialization')
if initialization:
ms_info['initialization_url'] = initialization
else:
initialization = segment_template.find(_add_ns('Initialization'))
if initialization is not None:
ms_info['initialization_url'] = initialization.attrib['sourceURL']
return ms_info
mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
formats = []
for period in mpd_doc.findall(_add_ns('Period')):
period_duration = parse_duration(period.get('duration')) or mpd_duration
period_ms_info = extract_multisegment_info(period, {
'start_number': 1,
'timescale': 1,
})
for adaptation_set in period.findall(_add_ns('AdaptationSet')):
if is_drm_protected(adaptation_set):
continue
adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
for representation in adaptation_set.findall(_add_ns('Representation')):
if is_drm_protected(representation):
continue
representation_attrib = adaptation_set.attrib.copy()
representation_attrib.update(representation.attrib)
# According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
mime_type = representation_attrib['mimeType']
content_type = mime_type.split('/')[0]
if content_type == 'text':
# TODO implement WebVTT downloading
pass
elif content_type == 'video' or content_type == 'audio':
base_url = ''
for element in (representation, adaptation_set, period, mpd_doc):
base_url_e = element.find(_add_ns('BaseURL'))
if base_url_e is not None:
base_url = base_url_e.text + base_url
if re.match(r'^https?://', base_url):
break
if mpd_base_url and not re.match(r'^https?://', base_url):
if not mpd_base_url.endswith('/') and not base_url.startswith('/'):
mpd_base_url += '/'
base_url = mpd_base_url + base_url
representation_id = representation_attrib.get('id')
lang = representation_attrib.get('lang')
url_el = representation.find(_add_ns('BaseURL'))
filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
f = {
'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
'url': base_url,
'ext': mimetype2ext(mime_type),
'width': int_or_none(representation_attrib.get('width')),
'height': int_or_none(representation_attrib.get('height')),
'tbr': int_or_none(representation_attrib.get('bandwidth'), 1000),
'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
'fps': int_or_none(representation_attrib.get('frameRate')),
'vcodec': 'none' if content_type == 'audio' else representation_attrib.get('codecs'),
'acodec': 'none' if content_type == 'video' else representation_attrib.get('codecs'),
'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
'format_note': 'DASH %s' % content_type,
'filesize': filesize,
}
representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
if 'segment_urls' not in representation_ms_info and 'media_template' in representation_ms_info:
if 'total_number' not in representation_ms_info and 'segment_duration':
segment_duration = float(representation_ms_info['segment_duration']) / float(representation_ms_info['timescale'])
representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
media_template = representation_ms_info['media_template']
media_template = media_template.replace('$RepresentationID$', representation_id)
media_template = re.sub(r'\$(Number|Bandwidth|Time)\$', r'%(\1)d', media_template)
media_template = re.sub(r'\$(Number|Bandwidth|Time)%([^$]+)\$', r'%(\1)\2', media_template)
media_template.replace('$$', '$')
# As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
# can't be used at the same time
if '%(Number' in media_template:
representation_ms_info['segment_urls'] = [
media_template % {
'Number': segment_number,
'Bandwidth': representation_attrib.get('bandwidth'),
}
for segment_number in range(
representation_ms_info['start_number'],
representation_ms_info['total_number'] + representation_ms_info['start_number'])]
else:
representation_ms_info['segment_urls'] = []
segment_time = 0
def add_segment_url():
representation_ms_info['segment_urls'].append(
media_template % {
'Time': segment_time,
'Bandwidth': representation_attrib.get('bandwidth'),
}
)
for num, s in enumerate(representation_ms_info['s']):
segment_time = s.get('t') or segment_time
add_segment_url()
for r in range(s.get('r', 0)):
segment_time += s['d']
add_segment_url()
segment_time += s['d']
if 'segment_urls' in representation_ms_info:
f.update({
'segment_urls': representation_ms_info['segment_urls'],
'protocol': 'http_dash_segments',
})
if 'initialization_url' in representation_ms_info:
initialization_url = representation_ms_info['initialization_url'].replace('$RepresentationID$', representation_id)
f.update({
'initialization_url': initialization_url,
})
if not f.get('url'):
f['url'] = initialization_url
try:
existing_format = next(
fo for fo in formats
if fo['format_id'] == representation_id)
except StopIteration:
full_info = formats_dict.get(representation_id, {}).copy()
full_info.update(f)
formats.append(full_info)
else:
existing_format.update(f)
else:
self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
return formats
def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8'):
    """Extract media entries from HTML5 <video>/<audio> tags in a webpage.

    Returns a list of dicts, one per media tag that yielded at least one
    format, each carrying 'formats', 'subtitles' and 'thumbnail' keys.
    """
    def absolute_url(video_url):
        # Resolve relative src attributes against the page URL.
        return compat_urlparse.urljoin(base_url, video_url)

    def parse_content_type(content_type):
        # Derive 'ext' (plus codec fields) from a MIME type string such
        # as 'video/mp4; codecs="avc1.42E01E, mp4a.40.2"'.
        if not content_type:
            return {}
        ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
        if ctr:
            mimetype, codecs = ctr.groups()
            f = parse_codecs(codecs)
            f['ext'] = mimetype2ext(mimetype)
            return f
        return {}

    def _media_formats(src, cur_media_type):
        # Returns (is_plain_url, formats): an HLS manifest expands into
        # several formats, while a plain file URL yields exactly one.
        full_url = absolute_url(src)
        if determine_ext(full_url) == 'm3u8':
            is_plain_url = False
            formats = self._extract_m3u8_formats(
                full_url, video_id, ext='mp4',
                entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id)
        else:
            is_plain_url = True
            formats = [{
                'url': full_url,
                # A bare <audio src=...> carries no video stream.
                'vcodec': 'none' if cur_media_type == 'audio' else None,
            }]
        return is_plain_url, formats

    entries = []
    for media_tag, media_type, media_content in re.findall(r'(?s)(<(?P<tag>video|audio)[^>]*>)(.*?)</(?P=tag)>', webpage):
        media_info = {
            'formats': [],
            'subtitles': {},
        }
        media_attributes = extract_attributes(media_tag)
        src = media_attributes.get('src')
        if src:
            _, formats = _media_formats(src, media_type)
            media_info['formats'].extend(formats)
        media_info['thumbnail'] = media_attributes.get('poster')
        if media_content:
            # <source> children may carry additional format URLs.
            for source_tag in re.findall(r'<source[^>]+>', media_content):
                source_attributes = extract_attributes(source_tag)
                src = source_attributes.get('src')
                if not src:
                    continue
                is_plain_url, formats = _media_formats(src, media_type)
                if is_plain_url:
                    # Merge MIME-type derived info; attributes from the
                    # URL itself take precedence over the type= hint.
                    f = parse_content_type(source_attributes.get('type'))
                    f.update(formats[0])
                    media_info['formats'].append(f)
                else:
                    media_info['formats'].extend(formats)
            # <track> children provide subtitles, keyed by language.
            for track_tag in re.findall(r'<track[^>]+>', media_content):
                track_attributes = extract_attributes(track_tag)
                kind = track_attributes.get('kind')
                if not kind or kind == 'subtitles':
                    src = track_attributes.get('src')
                    if not src:
                        continue
                    lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
                    media_info['subtitles'].setdefault(lang, []).append({
                        'url': absolute_url(src),
                    })
        if media_info['formats']:
            entries.append(media_info)
    return entries
def _extract_akamai_formats(self, manifest_url, video_id):
    """Extract both HDS and HLS formats from an Akamai streaming URL."""
    # The same stream is served as HDS under /z/.../manifest.f4m and as
    # HLS under /i/.../master.m3u8; derive each URL from the given one.
    hds_url = re.sub(r'(https?://.+?)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
    hls_url = re.sub(r'(https?://.+?)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8')
    hds_formats = self._extract_f4m_formats(
        update_url_query(hds_url, {'hdcore': '3.7.0'}),
        video_id, f4m_id='hds', fatal=False)
    hls_formats = self._extract_m3u8_formats(
        hls_url, video_id, 'mp4', 'm3u8_native',
        m3u8_id='hls', fatal=False)
    return hds_formats + hls_formats
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime('%Y-%m-%d %H:%M')
return name + ' ' + now_str
def _int(self, v, name, fatal=False, **kwargs):
    """Parse v as an int via int_or_none.

    On failure, raises ExtractorError when fatal is True, otherwise
    emits a downloader warning and returns None. Extra kwargs are
    forwarded to int_or_none.
    """
    res = int_or_none(v, **kwargs)
    # BUG FIX: removed a leftover debug print(getattr(v, ...)) that
    # polluted stdout whenever get_attr was passed.
    if res is None:
        msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
        if fatal:
            raise ExtractorError(msg)
        else:
            self._downloader.report_warning(msg)
    return res
def _float(self, v, name, fatal=False, **kwargs):
    """Parse v as a float via float_or_none; raise or warn on failure."""
    res = float_or_none(v, **kwargs)
    if res is not None:
        return res
    msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
    if fatal:
        raise ExtractorError(msg)
    self._downloader.report_warning(msg)
    return res
def _set_cookie(self, domain, name, value, expire_time=None):
    """Install a cookie into the downloader's cookie jar."""
    # Positional Cookie() signature: version, name, value, port,
    # port_specified, domain, ... path '/', secure=True.
    self._downloader.cookiejar.set_cookie(compat_cookiejar.Cookie(
        0, name, value, None, None, domain, None,
        None, '/', True, False, expire_time, '', None, None, None))
def _get_cookies(self, url):
    """ Return a compat_cookies.SimpleCookie with the cookies for the url """
    # Build a dummy request so the jar can compute the Cookie header.
    request = sanitized_Request(url)
    self._downloader.cookiejar.add_cookie_header(request)
    return compat_cookies.SimpleCookie(request.get_header('Cookie'))
def get_testcases(self, include_onlymatching=False):
    """Yield this extractor's test cases, tagging each with the
    extractor name (class name minus the 'IE' suffix)."""
    single = getattr(self, '_TEST', None)
    if single:
        assert not hasattr(self, '_TESTS'), \
            '%s has _TEST and _TESTS' % type(self).__name__
        tests = [single]
    else:
        tests = getattr(self, '_TESTS', [])
    for tc in tests:
        if tc.get('only_matching', False) and not include_onlymatching:
            continue
        tc['name'] = type(self).__name__[:-len('IE')]
        yield tc
def is_suitable(self, age_limit):
    """ Test whether the extractor is generally suitable for the given
    age limit (i.e. pornographic sites are not, all others usually are) """
    saw_restricted = False
    for testcase in self.get_testcases(include_onlymatching=False):
        # For playlists, judge by the first entry.
        playlist = testcase.get('playlist', [])
        if playlist:
            testcase = playlist[0]
        if not age_restricted(
                testcase.get('info_dict', {}).get('age_limit'), age_limit):
            # One unrestricted test case is enough to be suitable.
            return True
        saw_restricted = True
    # Suitable when there were no test cases at all.
    return not saw_restricted
def extract_subtitles(self, *args, **kwargs):
    """Return subtitles when the user asked for (or listed) them, else {}."""
    params = self._downloader.params
    wanted = params.get('writesubtitles', False) or params.get('listsubtitles')
    return self._get_subtitles(*args, **kwargs) if wanted else {}
def _get_subtitles(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
""" Merge subtitle items for one language. Items with duplicated URLs
will be dropped. """
list1_urls = set([item['url'] for item in subtitle_list1])
ret = list(subtitle_list1)
ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
return ret
@classmethod
def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
    """ Merge two subtitle dictionaries, language by language. """
    merged = dict(subtitle_dict1)
    for lang, items in subtitle_dict2.items():
        merged[lang] = cls._merge_subtitle_items(
            subtitle_dict1.get(lang, []), items)
    return merged
def extract_automatic_captions(self, *args, **kwargs):
    """Return automatic captions when requested (or listed), else {}."""
    params = self._downloader.params
    if params.get('writeautomaticsub', False) or params.get('listsubtitles'):
        return self._get_automatic_captions(*args, **kwargs)
    return {}
def _get_automatic_captions(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def mark_watched(self, *args, **kwargs):
    """Mark the video watched when enabled and credentials are available."""
    if not self._downloader.params.get('mark_watched', False):
        return
    # Marking watched requires either a login or a cookie file.
    logged_in = self._get_login_info()[0] is not None
    has_cookies = self._downloader.params.get('cookiefile') is not None
    if logged_in or has_cookies:
        self._mark_watched(*args, **kwargs)
def _mark_watched(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def geo_verification_headers(self):
    """Headers routing geo-verification requests through a proxy, if set."""
    proxy = self._downloader.params.get('geo_verification_proxy')
    return {'Ytdl-request-proxy': proxy} if proxy else {}
class SearchInfoExtractor(InfoExtractor):
    """
    Base class for paged search queries extractors.
    They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
    Instances should define _SEARCH_KEY and _MAX_RESULTS.
    """

    @classmethod
    def _make_valid_url(cls):
        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY

    @classmethod
    def suitable(cls, url):
        return re.match(cls._make_valid_url(), url) is not None

    def _real_extract(self, query):
        mobj = re.match(self._make_valid_url(), query)
        if mobj is None:
            raise ExtractorError('Invalid search query "%s"' % query)
        prefix = mobj.group('prefix')
        query = mobj.group('query')
        # Translate the prefix into the number of results to fetch.
        if prefix == '':
            n = 1
        elif prefix == 'all':
            n = self._MAX_RESULTS
        else:
            n = int(prefix)
            if n <= 0:
                raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
            if n > self._MAX_RESULTS:
                self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
                n = self._MAX_RESULTS
        return self._get_n_results(query, n)

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        raise NotImplementedError('This method must be implemented by subclasses')

    @property
    def SEARCH_KEY(self):
        return self._SEARCH_KEY
| fast90/youtube-dl | youtube_dl/extractor/common.py | Python | unlicense | 90,213 | [
"VisIt"
] | 9baaafe4cd3eddf468cfcfcf590e4550d5a591113c678ff10f78a1b47386aee5 |
# encoding: utf-8
"""
Enable pygtk to be used interactively by setting PyOS_InputHook.
Authors: Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import gtk, gobject # @UnresolvedImport
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def _main_quit(*args, **kwargs):
    """Stop the gtk main loop.

    Returning False removes the gobject watch so it fires only once.
    """
    gtk.main_quit()
    return False
def create_inputhook_gtk(stdin_file):
    """Build a PyOS_InputHook callable that runs the gtk main loop
    until data arrives on stdin_file."""
    def hook():
        # Quit the gtk loop (via _main_quit) as soon as stdin has input.
        gobject.io_add_watch(stdin_file, gobject.IO_IN, _main_quit)
        gtk.main()
        return 0
    return hook
| SlicerRt/SlicerDebuggingTools | PyDevRemoteDebug/ptvsd-4.1.3/ptvsd/_vendored/pydevd/pydev_ipython/inputhookgtk.py | Python | bsd-3-clause | 1,143 | [
"Brian"
] | 0065bed22d64db8a4b524367bb7369ef37035868591d31951d33d226a5e23e6d |
######################################################################
#
# File: bdata.py
#
# Copyright 2013 Brian Beach, All Rights Reserved.
#
######################################################################
"""
This module deals with the data structures I like to use in Python for
holding data. I haven't looked at SciPy much yet, so theirs might be
better.
The goal of this module is to make it easy to do simple things with
these data structures, like display graphs of them.
STRUCTURE: Homogeneous list of dicts.
This structure is a list of dicts, where each dict contains exactly
the same keys. You can think of it as a table, where each key is a
column, and each of the dicts is a row.
STRUCTURE: Columns
"""
import itertools
import math
import unittest
from collections import Counter
def log2(x):
    """Return the base-2 logarithm of x."""
    ln_x = math.log(x)
    ln_2 = math.log(2)
    return ln_x / ln_2
def in_range(x, low, high):
    """Return True when low <= x <= high (inclusive on both ends)."""
    return low <= x <= high
def round_up_to_nice(x, tolerance=None):
    """
    Given a number, round it up to a 'nice' number that is 'close'.

    In this sense, a nice number is one with fewer significant digits,
    and close is within +/- tolerance (default: 10% of x).
    """
    if x == 0:
        return 0
    if tolerance is None:
        tolerance = abs(x / 10.0)
    if tolerance == 0:
        return x
    # Start with the coarsest power-of-ten step at the tolerance scale,
    # then refine by alternating factors of 2 and 5 until close enough.
    step = math.pow(10, math.ceil(math.log10(tolerance)))
    for shrink in itertools.cycle([2, 5]):
        candidate = math.ceil(x / step) * step
        assert x <= candidate
        if candidate - x <= tolerance:
            return candidate
        step /= shrink
def round_down_to_nice(x, tolerance=None):
    """
    Given a number, round it down to a 'nice' number that is 'close'.

    In this sense, a nice number is one with fewer significant digits,
    and close is within 10% (or the given tolerance).
    """
    if x == 0:
        return 0
    if tolerance is None:
        tolerance = abs(x / 10.0)
    if tolerance == 0:
        return x
    # Start with the coarsest power-of-ten step at the tolerance scale,
    # then refine by alternating factors of 2 and 5 until close enough.
    step = math.pow(10, math.ceil(math.log10(tolerance)))
    for shrink in itertools.cycle([2, 5]):
        candidate = math.floor(x / step) * step
        assert candidate <= x
        if x - candidate <= tolerance:
            return candidate
        step /= shrink
def nearest(x, values):
    """
    Returns the value that is closest to x. If there is a tie, the
    first closest value wins.

    values may be any non-empty iterable of numbers.
    """
    # min() with a distance key replaces the manual scan; it also
    # generalizes from indexable sequences to arbitrary iterables.
    return min(values, key=lambda v: abs(v - x))
def round_to_nice(x, tolerance=None):
    """
    Given a number, returns a 'nice' number that is near it. The
    result could be less than or greater than x.
    """
    below = round_down_to_nice(x, tolerance)
    above = round_up_to_nice(x, tolerance)
    return nearest(x, [below, above])
class TestUtilities(unittest.TestCase):
def test_log2(self):
self.assertAlmostEqual(10, log2(1024))
def test_round_up_to_nice(self):
self.assertAlmostEqual(90, round_up_to_nice(85))
self.assertAlmostEqual(12, round_up_to_nice(11.8))
self.assertAlmostEqual(0.8, round_up_to_nice(0.799))
self.assertAlmostEqual(0.8, round_up_to_nice(0.8))
self.assertAlmostEqual(-1.0, round_up_to_nice(-1.1))
self.assertAlmostEqual(-0.75, round_up_to_nice(-0.799))
def test_round_down_to_nice(self):
self.assertAlmostEqual(0, round_down_to_nice(0))
self.assertAlmostEqual(80, round_down_to_nice(85))
self.assertAlmostEqual(11, round_down_to_nice(11.8))
self.assertAlmostEqual(0.8, round_down_to_nice(0.8))
self.assertAlmostEqual(0.8, round_down_to_nice(0.801))
def test_nearest(self):
self.assertEqual(1, nearest(1.3, [1, 2, 3]))
self.assertEqual(2, nearest(1.8, [1, 2, 3]))
self.assertEqual(2, nearest(2.4, [1, 2, 3]))
def round_to_nice(self):
self.assertAlmostEqual(3.1, round_to_nice(math.pi))
class AutoBins(object):
    """
    Given a set of values, figures out a reasonable number of bins for
    a histogram over those values.  An attempt is made to make the
    bucket boundaries be nice round numbers.

    You can initialize this class with either a list of values, like
    this:

        [0, 1, 5, 7, 1, 4, 2, 2, 2, 2, 2]

    Or, if you have already summarized your data by counting the
    values, you can initialize it with a list of value_and_count
    pairs.  This is equivalent to the one above:

        [(0, 1), (1, 2), (2, 5), (4, 1), (5, 1), (6,1)]
    """

    def __init__(self, values=None, values_and_counts=None, bin_count=None):
        # Check arguments: exactly one of values / values_and_counts.
        if (values is None) and (values_and_counts is None):
            raise ValueError('Either values or values an counts should be set')
        if (values is not None) and (values_and_counts is not None):
            raise ValueError('Only one of values or values_and_counts should be set')
        # Make sure that both values and values_and_counts are set
        # (this module targets Python 2: iteritems/xrange).
        if values_and_counts is None:
            values_and_counts = [(v, c) for (v, c) in Counter(values).iteritems()]
        values = set(v for (v,c) in values_and_counts)
        total_count = sum(c for (v,c) in values_and_counts)
        # With a single value, it's a degenerate case: one zero-width bin.
        if len(values) == 1:
            value = values_and_counts[0][0]
            self.lower_bound = value
            self.bin_size = 0
            self.bin_count = 1
            self.bin_boundaries = [value, value]
            self.logarithmic = False
            return
        # Figure out the number of bins.
        # This is Sturges's rule from:
        # http://onlinestatbook.com/2/graphing_distributions/histograms.html
        low = min(values)
        high = max(values)
        span = float(high - low)
        if bin_count is None:
            bin_count = 1 + round(log2(total_count))
        # Start with a nice number in the middle, and work out from there.
        middle = round_to_nice((low + high) / 2.0, span / bin_count)
        # Pick a nice bin size, big enough to reach from the middle out to
        # both ends.
        biggest_side = max(middle - low, high - middle)
        bin_size = round_up_to_nice((biggest_side * 2.0) / bin_count)
        # Find the lower bound.  This works for both even and odd numbers
        # of bins.
        lower_bound = middle - bin_size * (bin_count / 2.0)
        # Special case for all-positive values.  In this case, we
        # don't want a lower bound that's negative because it looks
        # weird.
        if lower_bound < 0 and 0 <= low:
            lower_bound = 0
        # Compute the bin count
        bin_count = int(math.ceil((high - lower_bound) / bin_size))
        assert lower_bound <= low
        assert high <= lower_bound + bin_size * bin_count
        # Compute the bin boundaries, because they're handy
        bin_boundaries = [
            lower_bound + i * bin_size
            for i in xrange(bin_count + 1)
        ]
        # Should we switch to logarithmic?  If more than half the
        # values are NOT in the first bin, then we're good.
        number_in_first_bucket = sum(
            c
            for (v,c) in values_and_counts
            if v < bin_boundaries[1]
        )
        # Logarithmic bins require strictly positive values (0 < low).
        self.logarithmic = (
            0 < low and
            (total_count / 2 < number_in_first_bucket)
        )
        if not self.logarithmic:
            self.lower_bound = lower_bound
            self.bin_size = bin_size
            self.bin_count = bin_count
            self.bin_boundaries = bin_boundaries
            return
        # Now we'll re-do the bucketization for the logarithmic case.
        # NOTE(review): self.bin_size is left unset on this path —
        # callers appear to rely only on bin_boundaries here; confirm.
        lower_bound = round_down_to_nice(low)
        upper_bound = high
        # If b is the bin count, then we want the b-th root of the
        # difference between the lower and upper bounds to be the
        # growth factor between buckets.
        b = bin_count
        total_growth = float(upper_bound) / float(lower_bound)
        bucket_exponent = round_up_to_nice(total_growth ** (1.0 / b))
        self.lower_bound = lower_bound
        self.bin_count = bin_count
        self.bin_boundaries = [
            round_up_to_nice(lower_bound * (bucket_exponent ** i))
            for i in xrange(bin_count + 1)
        ]

    def is_logarithmic(self):
        # True when bin boundaries grow geometrically, not linearly.
        return self.logarithmic

    def get_bin_count(self):
        return self.bin_count

    def get_bin_boundaries(self):
        """
        Returns one more number than the bin count.  The bins are the
        ranges between adjacent numbers in the list.
        """
        return self.bin_boundaries

    def get_bin_index_for_value(self, value):
        # Linear scan; values past the last boundary fall in the last bin.
        for i in xrange(self.bin_count):
            if value < self.bin_boundaries[i+1]:
                return i
        return self.bin_count - 1

    def __str__(self):
        return "<bins %s>" % (", ".join(str(b) for b in self.bin_boundaries))
class TestAutoBins(unittest.TestCase):
def test_single_value(self):
bins = AutoBins([1])
self.assertEqual([1, 1], bins.get_bin_boundaries())
def test_linear(self):
bins = AutoBins([1.2 + i/5.0 for i in range(16)])
self.assertAlmostEqual(0.75, bins.get_bin_boundaries()[0])
self.assertAlmostEqual(1.45, bins.get_bin_boundaries()[1])
self.assertAlmostEqual(5, bins.get_bin_count())
def test_manual_bin_count(self):
bins = AutoBins([1.2 + i/5.0 for i in range(16)], bin_count=4)
self.assertAlmostEqual(1.2, bins.get_bin_boundaries()[0])
self.assertAlmostEqual(2.1, bins.get_bin_boundaries()[1])
self.assertAlmostEqual(4, bins.get_bin_count())
def test_logarithmic(self):
values = [1.1 ** i for i in range(100)]
bins = AutoBins(values)
self.assertEqual(True, bins.is_logarithmic())
self.assertAlmostEqual(1, bins.get_bin_boundaries()[0])
self.assertAlmostEqual(4.0, bins.get_bin_boundaries()[1])
def test_regress_1(self):
values = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 110, 120, 130,
140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250,
260, 270, 280, 290, 300, 310, 320, 330, 340, 350, 360, 370,
380, 390, 400, 410, 420, 430, 440, 450, 460, 470, 480, 490,
500, 510, 520, 530, 540, 550, 560, 570, 580, 590, 600, 610,
620, 630, 640, 650, 660, 670, 680, 690, 700, 710, 720, 730,
740, 750, 760, 770, 780, 790, 800, 810, 820, 830, 840, 850,
860, 870, 880, 890, 900, 910, 920, 930, 940, 950, 960, 970,
980, 990, 1000, 1100, 1200, 1300, 1400, 1500, 1600, 1700,
1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500, 2600, 2700,
2800, 2900, 3000, 3100, 3200, 3300, 3400, 3500, 3600, 3700,
3800, 3900, 4000, 4100, 4200, 4300, 4400, 4500, 4600, 4700,
4800, 4900, 5000, 5100, 5200, 5300, 5400, 5500, 5600, 5700,
5800, 5900, 6000, 6100, 6200, 6300, 6400, 6600, 6700, 6800,
6900, 7000, 7100, 7200, 7300, 7400, 7500, 7600, 7700, 7800,
7900, 8000, 8100, 8200, 8300, 8400, 8500, 8600, 8700, 8800,
8900, 9000, 9100, 9300, 9400, 9500, 9600, 9700, 10000, 11000,
12000, 13000, 14000, 15000, 16000, 17000, 18000, 19000, 20000,
21000, 22000, 23000, 24000, 25000, 26000, 27000, 28000, 29000,
30000, 31000, 32000, 33000, 34000, 35000, 36000, 37000, 38000,
39000, 40000, 41000, 42000, 43000, 44000, 45000, 46000, 47000,
48000, 49000, 50000, 51000, 52000, 53000, 54000, 55000, 56000,
57000, 58000, 59000, 60000, 61000, 62000, 63000, 64000, 65000,
66000, 67000, 68000, 69000, 70000, 71000, 72000, 73000, 74000,
75000, 76000, 77000, 78000, 79000, 80000, 81000, 82000, 83000,
84000, 85000, 86000, 87000, 88000, 89000, 90000, 91000, 92000,
93000, 94000, 95000, 96000, 97000, 98000, 99000, 100000,
110000, 120000, 130000, 140000, 150000, 160000, 170000,
180000, 190000, 200000, 210000, 220000, 230000, 240000,
250000, 260000, 270000, 280000, 290000, 300000, 310000,
320000, 330000, 340000, 350000, 360000, 370000, 380000,
390000, 400000, 410000, 420000, 430000, 440000, 450000,
460000, 470000, 480000, 490000, 500000, 510000, 520000,
530000, 540000, 550000, 560000, 570000, 580000, 590000,
600000, 610000, 620000, 630000, 640000, 650000, 660000,
670000, 680000, 690000, 700000, 710000, 720000, 730000,
740000, 750000, 760000, 770000, 780000, 790000, 800000,
810000, 820000, 830000, 840000, 850000, 860000, 870000,
880000, 890000, 900000, 910000, 920000, 930000, 940000,
950000, 960000, 970000, 980000, 990000, 1000000, 1100000,
1200000, 1300000, 1400000, 1500000, 1600000, 1700000, 1800000,
1900000, 2000000, 2100000, 2200000, 2300000, 2400000, 2500000,
2600000, 2700000, 2800000, 2900000, 3000000, 3100000, 3200000,
3300000, 3400000, 3500000, 3600000, 3700000, 3800000, 3900000,
4000000, 4100000, 4200000, 4300000, 4400000, 4500000, 4600000,
4700000, 4800000, 4900000, 5000000, 5100000, 5200000, 5300000,
5400000, 5500000, 5600000, 5700000, 5800000, 5900000, 6000000,
6100000, 6200000, 6300000, 6400000, 6500000, 6600000, 6700000,
6800000, 6900000, 7000000, 7100000, 7200000, 7300000, 7400000,
7500000, 7600000, 7700000, 7800000, 7900000, 8000000, 8100000,
8200000, 8300000, 8400000, 8500000, 8600000, 8700000, 8800000,
8900000, 9000000, 9100000, 9200000, 9300000, 9400000, 9500000,
9600000, 9700000, 9800000, 9900000, 10000000, 11000000,
12000000, 13000000, 14000000, 15000000, 16000000, 17000000,
18000000, 19000000, 20000000, 21000000, 22000000, 23000000,
24000000, 25000000, 26000000, 27000000, 28000000, 29000000,
30000000, 31000000, 32000000, 33000000, 34000000, 35000000,
36000000, 37000000, 38000000, 39000000, 40000000, 41000000,
42000000, 43000000, 44000000, 45000000, 46000000, 47000000,
48000000, 49000000, 50000000, 51000000, 52000000, 53000000,
54000000, 55000000, 56000000, 57000000, 58000000, 59000000,
60000000, 61000000, 62000000, 63000000, 64000000, 65000000,
66000000, 67000000, 68000000, 69000000, 70000000, 71000000,
72000000, 73000000, 74000000, 75000000, 76000000, 77000000,
78000000, 79000000, 80000000, 81000000, 82000000, 83000000,
84000000, 85000000, 86000000, 87000000, 88000000, 89000000,
90000000, 91000000, 92000000, 93000000, 94000000, 95000000,
96000000, 97000000, 98000000, 99000000, 100000000, 110000000,
120000000, 130000000, 140000000, 150000000, 160000000,
170000000, 180000000, 190000000, 200000000, 210000000,
220000000, 230000000, 240000000, 250000000, 260000000,
270000000, 280000000, 290000000, 300000000, 310000000,
320000000, 330000000, 340000000, 350000000, 360000000,
370000000, 380000000, 390000000, 400000000, 410000000,
420000000, 430000000, 440000000, 450000000, 460000000,
470000000, 480000000, 490000000, 500000000, 510000000,
520000000, 530000000, 540000000, 550000000, 560000000,
570000000, 580000000, 590000000, 600000000, 610000000,
620000000, 630000000, 640000000, 650000000, 660000000,
670000000, 680000000, 690000000, 700000000, 710000000,
720000000, 730000000, 740000000, 750000000, 760000000,
770000000, 780000000, 790000000, 800000000, 810000000,
820000000, 830000000, 840000000, 850000000, 860000000,
870000000, 880000000, 890000000, 900000000, 920000000,
930000000, 940000000, 950000000, 960000000, 970000000,
980000000, 990000000, 1000000000, 1100000000, 1200000000,
1300000000, 1400000000, 1500000000, 1600000000, 1700000000,
1800000000, 1900000000, 2000000000, 2100000000, 2200000000,
2300000000, 2400000000, 2500000000, 2600000000, 2900000000,
3000000000, 4300000000, 180000000000, 6600000000000]
bins = AutoBins(values, bin_count=8)
self.assertLessEqual(0, bins.get_bin_boundaries()[0])
def test_regress_2(self):
# Regression test: value/count pairs are (value, count). With counts
# spanning values from 1 up to 370000, AutoBins is expected to pick
# logarithmic bins (asserted below via is_logarithmic()).
values_and_counts = [(1, 6221), (2, 1310), (3,
801), (4, 876), (5, 434), (6, 285), (7, 47), (8, 304), (9,
143), (10, 52), (11, 93), (12, 256), (13, 223), (14, 63), (15,
133), (16, 71), (17, 153), (18, 76), (19, 49), (20, 124), (21,
87), (22, 36), (23, 13), (24, 73), (25, 44), (26, 53), (27,
76), (28, 73), (29, 36), (30, 28), (31, 47), (32, 10), (33,
9), (34, 16), (35, 47), (36, 109), (37, 51), (38, 54), (39,
2), (40, 5), (41, 101), (42, 18), (43, 28), (44, 30), (45,
11), (46, 43), (47, 36), (48, 10), (49, 15), (50, 21), (51,
115), (52, 49), (53, 52), (54, 31), (55, 42), (56, 50), (57,
29), (58, 174), (59, 24), (60, 11), (61, 9), (62, 22), (63,
7), (64, 5), (65, 9), (66, 22), (67, 19), (68, 22), (69, 81),
(70, 11), (71, 14), (72, 10), (73, 9), (74, 33), (75, 8), (76,
26), (77, 9), (78, 13), (79, 11), (80, 7), (81, 15), (82, 23),
(83, 8), (84, 18), (85, 25), (86, 39), (87, 10), (88, 34),
(89, 7), (90, 10), (91, 11), (92, 17), (93, 18), (94, 7), (95,
10), (96, 13), (97, 34), (98, 19), (99, 15), (100, 122), (110,
132), (120, 264), (130, 241), (140, 189), (150, 183), (160,
162), (170, 61), (180, 74), (190, 75), (200, 81), (210, 86),
(220, 54), (230, 33), (240, 36), (250, 131), (260, 53), (270,
103), (280, 106), (290, 33), (300, 31), (310, 42), (320, 33),
(330, 29), (340, 19), (350, 9), (360, 5), (370, 6), (380, 11),
(390, 13), (400, 15), (410, 13), (420, 17), (430, 43), (440,
53), (450, 33), (460, 41), (470, 41), (480, 40), (490, 33),
(500, 34), (510, 35), (520, 39), (530, 54), (540, 79), (550,
74), (560, 22), (570, 15), (580, 12), (590, 15), (600, 22),
(610, 13), (620, 19), (630, 16), (640, 17), (650, 15), (660,
21), (670, 20), (680, 22), (690, 19), (700, 27), (710, 55),
(720, 48), (730, 19), (740, 16), (750, 12), (760, 12), (770,
11), (780, 11), (790, 14), (800, 15), (810, 15), (820, 14),
(830, 13), (840, 15), (850, 17), (860, 17), (870, 16), (880,
22), (890, 26), (900, 21), (910, 28), (920, 25), (930, 31),
(940, 34), (950, 31), (960, 27), (970, 27), (980, 11), (990,
11), (1000, 65), (1100, 151), (1200, 193), (1300, 175), (1400,
185), (1500, 133), (1600, 94), (1700, 121), (1800, 64), (1900,
45), (2000, 12), (2100, 11), (2200, 8), (2300, 10), (2400, 6),
(2500, 8), (2600, 5), (2700, 8), (2800, 7), (2900, 7), (3000,
6), (3100, 9), (3200, 9), (3300, 3), (3400, 10), (3500, 6),
(3600, 6), (3700, 9), (3800, 5), (3900, 7), (4000, 6), (4100,
9), (4200, 3), (4300, 8), (4400, 5), (4500, 8), (4600, 4),
(4700, 5), (4800, 6), (4900, 3), (5000, 5), (5100, 4), (5200,
2), (5300, 5), (5400, 2), (5500, 3), (5600, 6), (5700, 1),
(5800, 4), (5900, 4), (6000, 3), (6100, 4), (6200, 5), (6300,
1), (6400, 4), (6500, 4), (6600, 2), (6700, 4), (6800, 2),
(6900, 4), (7000, 3), (7100, 4), (7200, 4), (7300, 4), (7400,
3), (7500, 4), (7600, 2), (7700, 3), (7800, 5), (7900, 2),
(8000, 2), (8100, 3), (8200, 3), (8300, 3), (8400, 3), (8500,
4), (8600, 3), (8700, 1), (8800, 3), (8900, 3), (9000, 2),
(9100, 2), (9200, 3), (9300, 2), (9400, 3), (9500, 2), (9600,
2), (9700, 2), (9800, 3), (9900, 2), (10000, 11), (11000, 19),
(12000, 16), (13000, 14), (14000, 14), (15000, 11), (16000,
6), (17000, 6), (18000, 7), (19000, 7), (20000, 6), (21000,
3), (370000, 15)]
bins = AutoBins(values_and_counts=values_and_counts)
self.assertTrue(bins.is_logarithmic())
class Histogram(object):
    """Builds and renders an ASCII-art histogram of a list of values."""

    def __init__(self, name, values):
        self.name = name
        self.values = values
        self.bins = AutoBins(values)
        # Tally how many of the values land in each bin.
        total_bins = self.bins.get_bin_count()
        self.counts = [0] * total_bins
        for value in values:
            self.counts[self.bins.get_bin_index_for_value(value)] += 1

    def __str__(self):
        # Scale the bars down (if needed) for an 80-column display:
        # 5 columns on the left (spaces and axis) and 5 on the right for
        # good measure leave room for 70 stars.
        scale = 1.0
        tallest = max(self.counts)
        if tallest > 70:
            scale = round_up_to_nice(tallest / 70.0)
        total_bins = self.bins.get_bin_count()
        boundaries = self.bins.get_bin_boundaries()
        # Assemble the display string piece by piece.
        pieces = ['#\n', '# Histogram of %s:\n' % self.name, '#\n']
        if scale != 1.0:
            pieces.append('# One star = %s\n' % scale)
            pieces.append('#\n')
        pieces.append('\n')
        for i in range(total_bins):
            pieces.append(str(boundaries[i]))
            pieces.append('\n')
            pieces.append(' |')
            pieces.append('*' * int(round(self.counts[i] / scale)))
            pieces.append('\n')
        pieces.append(str(boundaries[-1]))
        pieces.append('\n')
        return ''.join(pieces)
class TestHistogram(unittest.TestCase):
    def test_regress_1(self):
        # Regression: the outlier value (35) used to end up outside the
        # range of all of the bins.  Constructing must not raise.
        Histogram('test', [28, 27, 27, 24, 27, 24, 28, 27, 26,
                           27, 28, 25, 25, 27, 24, 28, 27, 25,
                           24, 26, 26, 24, 26, 25, 27, 35, 26,
                           25, 27, 27, 28, 27, 28, 27, 26, 27,
                           24, 24, 25, 27, 27, 25, 24, 27, 25])
def is_number(x):
    """Return True when x is a float or an int (note: bools count as ints)."""
    return isinstance(x, (float, int))
def make_formatter(values):
    """
    Return a function that formats the values in the given list,
    picking a reasonable number of digits of accuracy.
    """
    # Scan the values to learn their ranges.
    # NOTE(review): basestring is Python 2 only; `math` is assumed to be
    # imported at module level.
    widest_string = 0
    biggest_magnitude = 0
    all_ints = True
    saw_number = False
    for v in values:
        if isinstance(v, basestring):
            widest_string = max(widest_string, len(v))
        elif is_number(v):
            saw_number = True
            biggest_magnitude = max(biggest_magnitude, abs(v))
            if v != int(v):
                all_ints = False
        else:
            widest_string = max(widest_string, len(str(v)))
    # Format string for string values: right-justified when mixed with
    # numbers, left-justified otherwise.
    if saw_number:
        string_format = '%%%ds' % widest_string
    else:
        string_format = '%%-%ds' % widest_string
    # Format string for numeric values.
    if biggest_magnitude < 1.0:
        left_of_decimal = 1
    else:
        left_of_decimal = int(2 + math.floor(math.log10(biggest_magnitude)))
    if all_ints:
        right_of_decimal = 0
    else:
        right_of_decimal = max(0, 5 - left_of_decimal)
    total_size = 2 + left_of_decimal + right_of_decimal
    number_format = '%%%d.%df' % (total_size, right_of_decimal)

    def formatter(v):
        if isinstance(v, basestring):
            return string_format % v
        elif is_number(v):
            return number_format % v
        else:
            return string_format % str(v)
    return formatter
class Table(object):
"""
Knows how to display a table of data.
The data is in the form of a list of dicts:
[ { 'a' : 4, 'b' : 8 },
{ 'a' : 5, 'b' : 9 } ]
"""
# Args:
#   data: list of dicts, one per row.
#   column_names: columns to display; defaults to the sorted keys of
#       the FIRST row.
#   sort_key / reverse: optional sort of the rows by one column.
#   default_value: used when a row is missing a column.
#   formatters: dict of column name -> format string or callable.
#   titles: dict of column name -> display title.
def __init__(self, data, column_names=None, sort_key=None, reverse=False,
default_value=None, formatters=None, titles=None):
if formatters is None:
formatters = {}
if column_names is None:
column_names = sorted(data[0].keys())
if sort_key is None:
self.data = data
else:
self.data = sorted(data, key=(lambda item: item[sort_key]), reverse=reverse)
self.column_names = column_names
self.default_value = default_value
self.formatters = [
self._make_formatter(col, formatters)
for col in column_names
]
if titles is None:
titles = {}
self.column_titles = [
titles.get(column_name, column_name)
for column_name in column_names
]
# NOTE(review): column widths are computed from the FIRST data row
# only, so later rows wider than the first get truncated by pad().
first_values = [data[0].get(col, self.default_value) for col in column_names]
first_row = [
formatter(v)
for (formatter, v) in zip(self.formatters, first_values)
]
self.column_widths = [
max(len(col), len(val))
for (col, val) in zip(self.column_titles, first_row)
]
# Returns a callable that formats one value of the given column.
# An explicit formatter may be a %-style format string or a callable;
# otherwise make_formatter() infers a format from the column's values.
# NOTE(review): basestring is Python 2 only.
def _make_formatter(self, column_name, explicit_formatters):
if column_name in explicit_formatters:
formatter = explicit_formatters[column_name]
if isinstance(formatter, basestring):
def format_string(v):
return formatter % v
return format_string
else:
return formatter
else:
values = [item.get(column_name, self.default_value)
for item in self.data]
return make_formatter(values)
# Render the table as an ASCII box drawing.
def __str__(self):
result = []
# Title row
total_width = 1 + sum(3 + w for w in self.column_widths)
result.append('|')
result.append('=' * (total_width - 2))
result.append('|')
result.append('\n')
result.append('| ')
for (col, w) in zip(self.column_titles, self.column_widths):
result.append(self.pad(col, w))
result.append(' | ')
result.append('\n')
result.append('|')
result.append('-' * (total_width - 2))
result.append('|')
result.append('\n')
# Data rows
for item in self.data:
result.append('| ')
for (col, formatter, w) in zip(self.column_names, self.formatters, self.column_widths):
result.append(self.pad(formatter(item.get(col, self.default_value)), w))
result.append(' | ')
result.append('\n')
result.append('|')
result.append('=' * (total_width - 2))
result.append('|')
result.append('\n')
return ''.join(result)
# Render as CSV.  Values are stripped but NOT quoted or escaped, so
# values containing commas will corrupt the output.
def csv(self):
result = []
result.append(','.join(self.column_titles))
for item in self.data:
result.append(','.join(
formatter(item.get(col, self.default_value)).strip()
for (col, formatter) in zip(self.column_names, self.formatters)
))
return '\n'.join(result) + '\n'
# Render as an HTML table.
# NOTE(review): the final tag appended is ' <tbody>' where '</tbody>'
# was presumably intended; TestTable.test_html codifies the current
# behavior, so it is left as-is.  Values are not HTML-escaped.
def html(self):
result = []
result.append('<table>')
result.append(' <tbody>')
result.append(' <tr>')
for col in self.column_titles:
result.append(' <th>%s</th>' % col)
result.append(' </tr>')
for item in self.data:
result.append(' <tr>')
for (col, formatter) in zip(self.column_names, self.formatters):
value = formatter(item.get(col, self.default_value)).strip()
result.append(' <td>%s</td>' % value)
result.append(' </tr>')
result.append(' <tbody>')
result.append('</table>')
return '\n'.join(result) + '\n'
# Right-justify s in the given width; truncate when it is too long.
def pad(self, s, width):
if len(s) < width:
return (' ' * (width - len(s))) + s
else:
return s[:width]
class TestTable(unittest.TestCase):
# A string formatter ('%02d') and a callable formatter should both be
# honored for their columns.
def test_formatter(self):
data = [ { 'a' : 1, 'b' : 2, 'c' : 3 } ]
table = Table(data, formatters={'b' : '%02d', 'c' : (lambda x : 'n')})
self.assertEqual(
'|===============|\n' +
'| a | b | c | \n' +
'|---------------|\n' +
'| 1 | 02 | n | \n' +
'|===============|\n',
str(table)
)
# Explicit titles replace the column names in the header row.
def test_titles(self):
data = [ { 'a' : 1, 'b' : 2, 'c' : 3 } ]
table = Table(data, titles={'a':'A', 'c':'foo'})
self.assertEqual(
'|====================|\n' +
'| A | b | foo | \n' +
'|--------------------|\n' +
'| 1 | 2 | 3 | \n' +
'|====================|\n',
str(table)
)
# Note: this expected output codifies Table.html()'s unbalanced
# trailing ' <tbody>' tag.
def test_html(self):
data = [ { 'a' : 1, 'b' : 2 }, { 'a' : 3, 'b' : 4 } ]
table = Table(data)
self.assertEqual(
'<table>\n' +
' <tbody>\n' +
' <tr>\n' +
' <th>a</th>\n' +
' <th>b</th>\n' +
' </tr>\n' +
' <tr>\n' +
' <td>1</td>\n' +
' <td>2</td>\n' +
' </tr>\n' +
' <tr>\n' +
' <td>3</td>\n' +
' <td>4</td>\n' +
' </tr>\n' +
' <tbody>\n' +
'</table>\n',
table.html()
)
class Facet(object):
    """Collects the values of one key across a list of dicts."""

    def __init__(self, list_of_dicts, key):
        self.name = key
        self.values = [record[key] for record in each_dict(list_of_dicts)]

    def histogram(self):
        # Unimplemented stub.  (Original comment: "How many buckets? This")
        pass
# Run the unit tests when this module is executed as a script.
if __name__ == '__main__':
unittest.main()
| bwbeach/bstat | bstat/data.py | Python | mit | 30,169 | [
"Brian"
] | 75b755aa0369c34b854e6a8b3d46ba1e71ce2867ddfd1642677c6ccce81e1427 |
# Regression test (Python 2): run Velocity Verlet dynamics on an Ag
# system through an OpenKIM calculator and compare against positions
# recorded in testVerlet.pickle.
from asap3 import *
from cPickle import *
from numpy import *
from asap3.testtools import ReportTest
from OpenKIM_modelname import openkimmodel
timeunit = 1.018047e-14 # Seconds
femtosecond = 1e-15 / timeunit # Marginally different from units.fs
print_version(1)
if OpenKIMsupported:
# The pickle holds flat "initial"/"final" position arrays and the cell.
data = load(file("testVerlet.pickle"))
init_pos = array(data["initial"])
init_pos.shape = (-1,3)
init_box = array(data["box"])
init_box.shape = (3,3)
atoms = Atoms(positions=init_pos, cell=init_box)
# All atoms are silver (Z = 47).
atoms.set_atomic_numbers(47*ones((len(atoms),)))
atoms.set_calculator(OpenKIMcalculator(openkimmodel))
dyn = VelocityVerlet(atoms, 2 * femtosecond)
dyn.attach(MDLogger(dyn, atoms, '-', peratom=True), interval=5)
# Check energy conservation every 20 steps against the first sample.
etot = None
for i in range(10):
dyn.run(20)
epot = atoms.get_potential_energy() / len(atoms)
ekin = atoms.get_kinetic_energy() / len(atoms)
if etot is None:
etot = epot + ekin
else:
ReportTest("Energy conservation:", epot + ekin, etot, 0.001)
# Final positions must match the recorded trajectory to 1e-9.
final_pos = array(data["final"])
diff = max(abs(atoms.get_positions().flat - final_pos))
print "Maximal deviation of positions:", diff
ReportTest("Maximal deviation of positions", diff, 0, 1e-9)
#diff = max(abs(atoms.get_stresses().flat - array(data["stress"])))
#print "Maximal deviation of stresses:", diff
#ReportTest("Maximal deviation of stresses", diff, 0, 1e-9)
ReportTest.Summary()
else:
print "OpenKIM support is not compiled into Asap."
| auag92/n2dm | Asap-3.8.4/Test/Verlet_OpenKIM.py | Python | mit | 1,597 | [
"OpenKIM"
] | 53814563d3c36438a2afdd3fbfb5ba6036897c3c5faf5402a53080fd630d4489 |
r"""
Composition Statistics (:mod:`skbio.stats.composition`)
=======================================================
.. currentmodule:: skbio.stats.composition
This module provides functions for compositional data analysis.
Many 'omics datasets are inherently compositional - meaning that they
are best interpreted as proportions or percentages rather than
absolute counts.
Formally, :math:`x` is a composition if :math:`\sum_{i=0}^D x_{i} = c`
and :math:`x_{i} > 0`, :math:`1 \leq i \leq D` and :math:`c` is a real
valued constant and there are :math:`D` components for each
composition. In this module :math:`c=1`. Compositional data can be
analyzed using Aitchison geometry. [1]_
However, in this framework, standard real Euclidean operations such as
addition and multiplication no longer apply. Only operations such as
perturbation and power can be used to manipulate this data.
This module allows two styles of manipulation of compositional data.
Compositional data can be analyzed using perturbation and power
operations, which can be useful for simulation studies. The
alternative strategy is to transform compositional data into the real
space. Right now, the centre log ratio transform (clr) and
the isometric log ratio transform (ilr) [2]_ can be used to accomplish
this. This transform can be useful for performing standard statistical
tools such as parametric hypothesis testing, regressions and more.
The major caveat of using this framework is dealing with zeros. In
the Aitchison geometry, only compositions with nonzero components can
be considered. The multiplicative replacement technique [3]_ can be
used to substitute these zeros with small pseudocounts without
introducing major distortions to the data.
Functions
---------
.. autosummary::
:toctree: generated/
closure
multiplicative_replacement
perturb
perturb_inv
power
inner
clr
clr_inv
ilr
ilr_inv
centralize
References
----------
.. [1] V. Pawlowsky-Glahn. "Lecture Notes on Compositional Data Analysis"
.. [2] J. J. Egozcue. "Isometric Logratio Transformations for
Compositional Data Analysis"
.. [3] J. A. Martin-Fernandez. "Dealing With Zeros and Missing Values in
Compositional Data Sets Using Nonparametric Imputation"
Examples
--------
>>> import numpy as np
Consider a very simple environment with only 3 species. The species
in the environment are equally distributed and their proportions are
equivalent:
>>> otus = np.array([1./3, 1./3., 1./3])
Suppose that an antibiotic kills off half of the population for the
first two species, but doesn't harm the third species. Then the
perturbation vector would be as follows
>>> antibiotic = np.array([1./2, 1./2, 1])
And the resulting perturbation would be
>>> perturb(otus, antibiotic)
array([ 0.25, 0.25, 0.5 ])
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import numpy as np
import scipy.stats as ss
from skbio.util._decorator import experimental
@experimental(as_of="0.4.0")
def closure(mat):
    """Normalize each composition (row) so that its components sum to 1.

    Parameters
    ----------
    mat : array_like
        A matrix of proportions; rows are compositions, columns are
        components.

    Returns
    -------
    array_like, np.float64
        The same data rescaled so every row sums to 1, squeezed back to
        the input's dimensionality.

    Raises
    ------
    ValueError
        If any proportion is negative, or the input has more than two
        dimensions.
    """
    arr = np.atleast_2d(mat)
    if (arr < 0).any():
        raise ValueError("Cannot have negative proportions")
    if arr.ndim > 2:
        raise ValueError("Input matrix can only have two dimensions or less")
    row_totals = arr.sum(axis=1, keepdims=True)
    return (arr / row_totals).squeeze()
@experimental(as_of="0.4.0")
def multiplicative_replacement(mat, delta=None):
    r"""Replace all zeros with small non-zero values.

    Uses the multiplicative replacement strategy: each zero becomes a
    small positive :math:`\delta`, and the nonzero entries are shrunk so
    every composition still adds up to 1.

    Parameters
    ----------
    mat : array_like
        A matrix of proportions; rows are compositions.
    delta : float, optional
        The replacement value.  Defaults to :math:`(1/N)^2` where
        :math:`N` is the number of components.

    Returns
    -------
    numpy.ndarray, np.float64
        A matrix of nonzero proportions whose rows each sum to 1.
    """
    props = closure(mat)
    zeros = props == 0
    if delta is None:
        delta = (1. / props.shape[-1]) ** 2
    # Scale the nonzero entries down so each row still sums to 1 once
    # its zeros have been replaced by delta.
    shrink = 1 - zeros.sum(axis=-1, keepdims=True) * delta
    return np.where(zeros, delta, shrink * props).squeeze()
@experimental(as_of="0.4.0")
def perturb(x, y):
    r"""Perform the Aitchison perturbation operation.

    Defined as :math:`x \oplus y = C[x_1 y_1, \ldots, x_D y_D]`, where
    :math:`C` is the closure operation.

    Parameters
    ----------
    x, y : array_like, float
        Matrices of proportions; rows are compositions.

    Returns
    -------
    numpy.ndarray, np.float64
        The component-wise product of the closed inputs, re-closed so
        each composition sums to 1.
    """
    return closure(closure(x) * closure(y))
@experimental(as_of="0.4.0")
def perturb_inv(x, y):
    r"""Perform the inverse Aitchison perturbation operation.

    Defined as :math:`x \ominus y = C[x_1 y_1^{-1}, \ldots,
    x_D y_D^{-1}]`, where :math:`C` is the closure operation.

    Parameters
    ----------
    x, y : array_like
        Matrices of proportions; rows are compositions.

    Returns
    -------
    numpy.ndarray, np.float64
        The component-wise quotient of the closed inputs, re-closed so
        each composition sums to 1.
    """
    return closure(closure(x) / closure(y))
@experimental(as_of="0.4.0")
def power(x, a):
    r"""Perform the Aitchison power operation.

    Defined as :math:`x \odot a = C[x_1^a, \ldots, x_D^a]`, where
    :math:`C` is the closure operation.

    Parameters
    ----------
    x : array_like, float
        A matrix of proportions; rows are compositions.
    a : float
        The scalar exponent.

    Returns
    -------
    numpy.ndarray, np.float64
        The element-wise power of the closed input, re-closed and
        squeezed.
    """
    return closure(closure(x) ** a).squeeze()
@experimental(as_of="0.4.0")
def inner(x, y):
    r"""Calculate the Aitchison inner product.

    Computed here as the Euclidean dot product of the clr coordinates
    of the two (closed) inputs.

    Parameters
    ----------
    x, y : array_like
        Matrices of proportions; rows are compositions.

    Returns
    -------
    numpy.ndarray
        The inner product result.
    """
    left = clr(closure(x))
    right = clr(closure(y))
    return left.dot(right.T)
@experimental(as_of="0.4.0")
def clr(mat):
    r"""Perform the centre log ratio (clr) transformation.

    Maps compositions from Aitchison geometry into real space:
    :math:`clr(x) = \ln[x_i / g_m(x)]`, with :math:`g_m(x)` the
    geometric mean of the composition.  Subtracting the row mean of the
    logs is equivalent to dividing by the geometric mean before taking
    logs.

    Parameters
    ----------
    mat : array_like, float
        A matrix of proportions; rows are compositions.

    Returns
    -------
    numpy.ndarray
        The clr-transformed matrix.
    """
    logs = np.log(closure(mat))
    centered = logs - logs.mean(axis=-1, keepdims=True)
    return centered.squeeze()
@experimental(as_of="0.4.0")
def clr_inv(mat):
    r"""Perform the inverse centre log ratio transformation.

    Maps real-space vectors back onto the simplex:
    :math:`clr^{-1}(x) = C[\exp(x_1, \ldots, x_D)]`.

    Parameters
    ----------
    mat : array_like, float
        A matrix of clr-transformed values; rows are transformed
        compositions.

    Returns
    -------
    numpy.ndarray
        Compositions whose rows each sum to 1.
    """
    exponentiated = np.exp(mat)
    return closure(exponentiated)
@experimental(as_of="0.4.0")
def ilr(mat, basis=None, check=True):
    r"""Perform the isometric log ratio (ilr) transformation.

    Maps compositions from the Aitchison simplex into
    :math:`\mathbb{R}^{D-1}` by taking Aitchison inner products with an
    orthonormal basis.  When no basis is given, the J. J. Egozcue basis
    derived from Gram-Schmidt orthogonalization is used.

    Parameters
    ----------
    mat : numpy.ndarray
        A matrix of proportions; rows are compositions.
    basis : numpy.ndarray, float, optional
        Orthonormal basis for the Aitchison simplex.
    check : bool, optional
        When True, verify that a user-supplied basis is orthonormal.
    """
    comps = closure(mat)
    if basis is None:
        basis = clr_inv(_gram_schmidt_basis(comps.shape[-1]))
    elif check:
        _check_orthogonality(basis)
    return inner(comps, basis)
@experimental(as_of="0.4.0")
def ilr_inv(mat, basis=None, check=True):
    r"""Perform the inverse isometric log ratio transform.

    Maps vectors from :math:`\mathbb{R}^{D-1}` back onto the simplex
    :math:`S^D`.  When no basis is given, the J. J. Egozcue basis
    derived from Gram-Schmidt orthogonalization is used.

    Parameters
    ----------
    mat : numpy.ndarray, float
        A matrix of ilr-transformed values; rows are transformed
        compositions.
    basis : numpy.ndarray, float, optional
        Orthonormal basis for the Aitchison simplex.
    check : bool, optional
        When True, verify that a user-supplied basis is orthonormal.
    """
    if basis is None:
        # The simplex has one more component than the ilr coordinates.
        basis = _gram_schmidt_basis(mat.shape[-1] + 1)
    elif check:
        _check_orthogonality(basis)
    return clr_inv(np.dot(mat, basis))
@experimental(as_of="0.4.0")
def centralize(mat):
    r"""Center the data around its geometric average.

    Parameters
    ----------
    mat : array_like, float
        A matrix of proportions; rows are compositions.

    Returns
    -------
    numpy.ndarray
        The centered composition matrix.
    """
    comps = closure(mat)
    geo_center = ss.gmean(comps, axis=0)
    return perturb_inv(comps, geo_center)
def _gram_schmidt_basis(n):
"""
Builds clr transformed basis derived from
gram schmidt orthogonalization
Parameters
----------
n : int
Dimension of the Aitchison simplex
"""
basis = np.zeros((n, n-1))
for j in range(n-1):
i = j + 1
e = np.array([(1/i)]*i + [-1] +
[0]*(n-i-1))*np.sqrt(i/(i+1))
basis[:, j] = e
return basis.T
def _check_orthogonality(basis):
    """Raise ValueError unless ``basis`` is orthonormal in the Aitchison
    simplex.

    Parameters
    ----------
    basis : numpy.ndarray
        Basis in the Aitchison simplex (one element per row).
    """
    gram = inner(basis, basis)
    expected = np.identity(len(basis))
    if not np.allclose(gram, expected, rtol=1e-4, atol=1e-6):
        raise ValueError("Aitchison basis is not orthonormal")
| corburn/scikit-bio | skbio/stats/composition.py | Python | bsd-3-clause | 17,500 | [
"scikit-bio"
] | cbdbd2b64c2fab3a433ec6b8860bb277db44ea01d3a607b931c19ca33b2f08ba |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements various transmuter classes.
Transmuters are essentially classes that generate TransformedStructures from
various data sources. They enable the high-throughput generation of new
structures and input files.
It also includes the helper function, batch_write_vasp_input to generate an
entire directory of vasp input files for running.
"""
__author__ = "Shyue Ping Ong, Will Richards"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 4, 2012"
import os
import re
from multiprocessing import Pool
from pymatgen.alchemy.materials import TransformedStructure
from pymatgen.io.vasp.sets import MPRelaxSet
class StandardTransmuter:
"""
An example of a Transmuter object, which performs a sequence of
transformations on many structures to generate TransformedStructures.
.. attribute: transformed_structures
List of all transformed structures.
"""
def __init__(self, transformed_structures, transformations=None,
             extend_collection=0, ncores=None):
    """
    Initializes a transmuter from an initial list of
    :class:`pymatgen.alchemy.materials.TransformedStructure`.

    Args:
        transformed_structures ([TransformedStructure]): Input transformed
            structures
        transformations ([Transformations]): New transformations to be
            applied to all structures.
        extend_collection (int): Whether to use more than one output
            structure from one-to-many transformations; an int gives the
            maximum branching for each transformation.
        ncores (int): Number of cores to use for applying transformations
            via multiprocessing.Pool. Default None means serial.
    """
    self.transformed_structures = transformed_structures
    self.ncores = ncores
    if transformations is None:
        return
    for transformation in transformations:
        self.append_transformation(transformation,
                                   extend_collection=extend_collection)
def __getitem__(self, index):
    # Index straight into the list of transformed structures.
    return self.transformed_structures[index]

def __getattr__(self, name):
    # Delegate unknown attributes by collecting the attribute from
    # every transformed structure.
    return [getattr(ts, name) for ts in self.transformed_structures]
def undo_last_change(self):
    """
    Undo the last transformation in every TransformedStructure.

    Raises:
        IndexError if already at the oldest change.
    """
    for ts in self.transformed_structures:
        ts.undo_last_change()

def redo_next_change(self):
    """
    Redo the last undone transformation in every TransformedStructure.

    Raises:
        IndexError if already at the latest change.
    """
    for ts in self.transformed_structures:
        ts.redo_next_change()
# Number of structures currently held by the transmuter.
def __len__(self):
return len(self.transformed_structures)
def append_transformation(self, transformation, extend_collection=False,
                          clear_redo=True):
    """
    Appends a transformation to all TransformedStructures.

    Args:
        transformation: Transformation to append
        extend_collection: Whether to use more than one output structure
            from one-to-many transformations. extend_collection can be a
            number, which determines the maximum branching for each
            transformation.
        clear_redo (bool): Whether to clear the redo list. By default,
            this is True, meaning any appends clears the history of
            undoing. However, when using append_transformation to do a
            redo, the redo list should not be cleared to allow multiple
            redos.
    """
    if self.ncores and transformation.use_multiprocessing:
        # Condense the arguments into single tuples so Pool.map can be
        # used.  The context manager ensures the worker pool is
        # terminated (fixes a leak: the pool was never closed/joined).
        args = ((x, transformation, extend_collection, clear_redo)
                for x in self.transformed_structures)
        with Pool(self.ncores) as p:
            new_tstructs = p.map(_apply_transformation, args, 1)
        self.transformed_structures = []
        for ts in new_tstructs:
            self.transformed_structures.extend(ts)
    else:
        # Serial path: apply in place and collect any extra structures
        # produced by one-to-many transformations.
        new_structures = []
        for x in self.transformed_structures:
            new = x.append_transformation(transformation,
                                          extend_collection,
                                          clear_redo=clear_redo)
            if new is not None:
                new_structures.extend(new)
        self.transformed_structures.extend(new_structures)
def extend_transformations(self, transformations):
    """
    Extends a sequence of transformations to the TransformedStructure.

    Args:
        transformations: Sequence of Transformations
    """
    for transformation in transformations:
        self.append_transformation(transformation)
def apply_filter(self, structure_filter):
    """Keep only the TransformedStructures whose final structure passes
    ``structure_filter``, recording the filter in each survivor's history.

    Args:
        structure_filter: StructureFilter to apply.
    """
    self.transformed_structures = [
        ts for ts in self.transformed_structures
        if structure_filter.test(ts.final_structure)
    ]
    for ts in self.transformed_structures:
        ts.append_filter(structure_filter)
def write_vasp_input(self, **kwargs):
    """
    Batch write vasp input for a sequence of transformed structures to
    output_dir, following the format output_dir/{formula}_{number}.

    Args:
        \\*\\*kwargs: All kwargs supported by batch_write_vasp_input
            (e.g. vasp_input_set, output_dir, create_directory).
    """
    # Delegates entirely to the module-level helper so the same code path
    # is used whether or not a transmuter instance is involved.
    batch_write_vasp_input(self.transformed_structures, **kwargs)
def set_parameter(self, key, value):
    """Store an extra parameter on every structure in the transmuter.

    The parameter ends up in each TransformedStructure's
    ``other_parameters`` and therefore in the as_dict() output.

    Args:
        key: The key for the parameter.
        value: The value for the parameter.
    """
    for tstruct in self.transformed_structures:
        tstruct.other_parameters[key] = value
def add_tags(self, tags):
    """
    Add tags for the structures generated by the transmuter.

    Args:
        tags: A sequence of tags. Note that this should be a sequence of
            strings, e.g., ["My awesome structures", "Project X"].
    """
    # Tags are simply a conventional entry under "tags" in each
    # structure's other_parameters; stored via set_parameter.
    self.set_parameter("tags", tags)
def __str__(self):
    # Human-readable summary: a fixed header followed by the string form
    # of each structure's final (post-transformation) state.
    output = ["Current structures", "------------"]
    for x in self.transformed_structures:
        output.append(str(x.final_structure))
    return "\n".join(output)
def append_transformed_structures(self, tstructs_or_transmuter):
    """
    Overloaded to accept either a list of transformed structures or a
    transmuter, in which case it appends the second transmuter's
    structures.

    Args:
        tstructs_or_transmuter: A list of transformed structures or a
            transmuter.

    Raises:
        TypeError: if a list element is not a TransformedStructure.
    """
    if isinstance(tstructs_or_transmuter, self.__class__):
        self.transformed_structures.extend(
            tstructs_or_transmuter.transformed_structures)
    else:
        # Validate with a real exception: the previous `assert` was
        # silently stripped when running under `python -O`.
        for ts in tstructs_or_transmuter:
            if not isinstance(ts, TransformedStructure):
                raise TypeError(
                    "Expected TransformedStructure instances, got %s"
                    % type(ts).__name__)
        self.transformed_structures.extend(tstructs_or_transmuter)
@staticmethod
def from_structures(structures, transformations=None, extend_collection=0):
    """
    Alternative constructor from structures rather than
    TransformedStructures.

    Args:
        structures: Sequence of structures
        transformations: New transformations to be applied to all
            structures
        extend_collection: Whether to use more than one output structure
            from one-to-many transformations. extend_collection can be a
            number, which determines the maximum branching for each
            transformation.

    Returns:
        StandardTransmuter
    """
    # Wrap each bare structure in a TransformedStructure with an empty
    # history, then delegate to the normal constructor.
    tstruct = [TransformedStructure(s, []) for s in structures]
    return StandardTransmuter(tstruct, transformations, extend_collection)
class CifTransmuter(StandardTransmuter):
    """Generates a Transmuter from a cif string, possibly containing
    multiple structures."""

    def __init__(self, cif_string, transformations=None, primitive=True,
                 extend_collection=False):
        """Generates a Transmuter from a cif string, possibly containing
        multiple structures.

        Args:
            cif_string: A string containing a cif or a series of cifs
            transformations: New transformations to be applied to all
                structures
            primitive: Whether to generate the primitive cell from the cif.
            extend_collection: Whether to use more than one output
                structure from one-to-many transformations.
                extend_collection can be a number, which determines the
                maximum branching for each transformation.
        """
        # Split the input into one list of lines per "data_" section; a
        # matching line both starts a new section and belongs to it.
        structure_data = []
        reading = False
        for line in cif_string.split("\n"):
            if re.match(r"^\s*data", line):
                structure_data.append([])
                reading = True
            if reading:
                structure_data[-1].append(line)
        transformed_structures = [
            TransformedStructure.from_cif_string("\n".join(chunk), [],
                                                 primitive)
            for chunk in structure_data
        ]
        super().__init__(transformed_structures, transformations,
                         extend_collection)

    @staticmethod
    def from_filenames(filenames, transformations=None, primitive=True,
                       extend_collection=False):
        """Generates a TransformedStructureCollection from cif files,
        possibly containing multiple structures.

        Args:
            filenames: List of strings of the cif files
            transformations: New transformations to be applied to all
                structures
            primitive: Same meaning as in __init__.
            extend_collection: Same meaning as in __init__.
        """
        contents = []
        for fname in filenames:
            with open(fname, "r") as handle:
                contents.append(handle.read())
        return CifTransmuter("\n".join(contents), transformations,
                             primitive=primitive,
                             extend_collection=extend_collection)
class PoscarTransmuter(StandardTransmuter):
    """Generates a transmuter from a sequence of POSCARs.

    Args:
        poscar_string: List of POSCAR strings
        transformations: New transformations to be applied to all
            structures.
        extend_collection: Whether to use more than one output structure
            from one-to-many transformations.
    """

    def __init__(self, poscar_string, transformations=None,
                 extend_collection=False):
        initial = TransformedStructure.from_poscar_string(poscar_string, [])
        super().__init__([initial], transformations,
                         extend_collection=extend_collection)

    @staticmethod
    def from_filenames(poscar_filenames, transformations=None,
                       extend_collection=False):
        """Convenience constructor: build a POSCAR transmuter from a list
        of POSCAR filenames.

        Args:
            poscar_filenames: List of POSCAR filenames
            transformations: New transformations to be applied to all
                structures.
            extend_collection: Same meaning as in __init__.
        """
        structures = []
        for fname in poscar_filenames:
            with open(fname, "r") as handle:
                structures.append(
                    TransformedStructure.from_poscar_string(handle.read(),
                                                            []))
        return StandardTransmuter(structures, transformations,
                                  extend_collection=extend_collection)
def batch_write_vasp_input(transformed_structures, vasp_input_set=MPRelaxSet,
                           output_dir=".", create_directory=True,
                           subfolder=None,
                           include_cif=False, **kwargs):
    """
    Batch write vasp input for a sequence of transformed structures to
    output_dir, following the format output_dir/{group}/{formula}_{number}.

    Args:
        transformed_structures: Sequence of TransformedStructures.
        vasp_input_set: pymatgen.io.vaspio_set.VaspInputSet to creates
            vasp input files from structures.
        output_dir: Directory to output files
        create_directory (bool): Create the directory if not present.
            Defaults to True.
        subfolder: Function to create subdirectory name from
            transformed_structure.
            e.g., lambda x: x.other_parameters["tags"][0] to use the first
            tag.
        include_cif (bool): Boolean indication whether to output a CIF as
            well. CIF files are generally better supported in visualization
            programs.
        \\*\\*kwargs: Passed through to each structure's write_vasp_input.
    """
    if include_cif:
        # Import once, before the loop (the original re-imported on every
        # iteration).  Kept lazy so pymatgen.io.cif is only required when
        # CIF output is actually requested.
        from pymatgen.io.cif import CifWriter
    for i, s in enumerate(transformed_structures):
        # Strip whitespace so the formula is filesystem-friendly.
        formula = re.sub(r"\s+", "", s.final_structure.formula)
        if subfolder is not None:
            subdir = subfolder(s)
            dirname = os.path.join(output_dir, subdir,
                                   "{}_{}".format(formula, i))
        else:
            dirname = os.path.join(output_dir, "{}_{}".format(formula, i))
        s.write_vasp_input(vasp_input_set, dirname,
                           create_directory=create_directory, **kwargs)
        if include_cif:
            writer = CifWriter(s.final_structure)
            writer.write_file(os.path.join(dirname, "{}.cif".format(formula)))
def _apply_transformation(inputs):
    """Helper for multiprocessing in apply_transformation.  Must live at
    module level (not inside the class) so it can be pickled by Pool.

    Args:
        inputs: 4-tuple of (transformed structure, transformation to
            apply, extend_collection flag, clear_redo flag).

    Returns:
        List of output structures: the (mutated) input structure first,
        followed by any new structures created by a one-to-many
        transformation.
    """
    tstruct, transformation, extend, clear = inputs
    extra = tstruct.append_transformation(transformation, extend,
                                          clear_redo=clear)
    results = [tstruct]
    if extra:
        results.extend(extra)
    return results
| tschaume/pymatgen | pymatgen/alchemy/transmuters.py | Python | mit | 15,628 | [
"VASP",
"pymatgen"
] | 250f71d9396ebcf92944deb9647c766e2cdeba486e20a0ab4fe246bd132219a7 |
# Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
from nose.plugins.skip import SkipTest
from rootpy.utils.silence import silence_sout
try:
    # Import ROOT with its stdout silenced (ROOT prints banners that
    # pollute test output) and pull in the RooFit classes used below.
    with silence_sout():
        import ROOT
        from ROOT import (RooFit, RooRealVar, RooGaussian, RooArgusBG,
                          RooAddPdf, RooArgList, RooArgSet, RooAbsData)
    # Quieten RooStats immediately on import.
    from rootpy.stats import mute_roostats; mute_roostats()
    from rootpy import asrootpy
except ImportError:
    # RooFit/RooStats are optional ROOT components; skip the whole module
    # rather than fail when ROOT was built without them.
    raise SkipTest("ROOT is not compiled with RooFit and RooStats enabled")
from rootpy.io import TemporaryFile
from nose.tools import assert_true
def test_plottable():
    """Exercise rootpy's RooPlot wrapper: fit a Gaussian+Argus model to a
    toy dataset, draw data and model on a frame, and check that every
    plotted object is reachable through the rootpy accessors.

    Follows the example at http://root.cern.ch/drupal/content/roofit
    """
    # --- Observable ---
    mes = RooRealVar("mes", "m_{ES} (GeV)", 5.20, 5.30)
    # --- Parameters ---
    sigmean = RooRealVar("sigmean", "B^{#pm} mass", 5.28, 5.20, 5.30)
    sigwidth = RooRealVar("sigwidth", "B^{#pm} width", 0.0027, 0.001, 1.)
    # --- Build Gaussian PDF ---
    signal = RooGaussian("signal", "signal PDF", mes, sigmean, sigwidth)
    # --- Build Argus background PDF ---
    argpar = RooRealVar("argpar", "argus shape parameter", -20.0, -100., -1.)
    background = RooArgusBG("background", "Argus PDF",
                            mes, RooFit.RooConst(5.291), argpar)
    # --- Construct signal+background PDF ---
    nsig = RooRealVar("nsig", "#signal events", 200, 0., 10000)
    nbkg = RooRealVar("nbkg", "#background events", 800, 0., 10000)
    model = RooAddPdf("model", "g+a",
                      RooArgList(signal, background),
                      RooArgList(nsig, nbkg))
    # --- Generate a toyMC sample from composite PDF ---
    data = model.generate(RooArgSet(mes), 2000)
    # --- Perform extended ML fit of composite PDF to toy data ---
    # The fit updates the parameters in place; the RooFitResult object is
    # not needed here (was previously bound to an unused local).
    model.fitTo(data, RooFit.Save(), RooFit.PrintLevel(-1))
    # --- Plot toy data and composite PDF overlaid ---
    mesframe = asrootpy(mes.frame())
    data.plotOn(mesframe)
    model.plotOn(mesframe)
    # Every object attached to the frame must be truthy and reachable via
    # rootpy's convenience accessors.
    for obj in mesframe.objects:
        assert_true(obj)
    for curve in mesframe.curves:
        assert_true(curve)
    for hist in mesframe.data_hists:
        assert_true(hist)
    assert_true(mesframe.plotvar)
    # The wrapped frame must also be writable to a ROOT file.
    with TemporaryFile():
        mesframe.Write()
| qbuat/rootpy | rootpy/stats/tests/test_plottable.py | Python | gpl-3.0 | 2,365 | [
"Gaussian"
] | 19207484a4a815723c0c2602e9ac7251046652285b39ca44f8fccc4f4071f800 |
#!/usr/bin/env python
"""\
basis_converter.py Convert a Jaguar basis record to a native python format.
This program is part of the PyQuante quantum chemistry suite.
PyQuante is copyright (c) 2002 Richard P. Muller. All Rights Reserved.
You may contact the author at rpm@wag.caltech.edu.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307
"""
import sys,string
# Map of element symbols to atomic numbers.  Only the capitalized forms
# are written out; the all-lowercase aliases (Jaguar files are not case
# sensitive here) are generated programmatically below instead of being
# duplicated by hand.
sym2no = {
    'X': 0, 'H': 1, 'He': 2,
    'Li': 3, 'Be': 4, 'B': 5, 'C': 6, 'N': 7,
    'O': 8, 'F': 9, 'Ne': 10,
    'Na': 11, 'Mg': 12, 'Al': 13, 'Si': 14,
    'P': 15, 'S': 16, 'Cl': 17, 'Ar': 18,
    'K': 19, 'Ca': 20, 'Sc': 21, 'Ti': 22, 'V': 23, 'Cr': 24, 'Mn': 25,
    'Fe': 26, 'Co': 27, 'Ni': 28, 'Cu': 29, 'Zn': 30,
    'Ga': 31, 'Ge': 32, 'As': 33, 'Se': 34, 'Br': 35, 'Kr': 36,
    'Rb': 37, 'Sr': 38, 'Y': 39, 'Zr': 40, 'Nb': 41, 'Mo': 42, 'Tc': 43,
    'Ru': 44, 'Rh': 45, 'Pd': 46, 'Ag': 47, 'Cd': 48, 'In': 49,
    'Sn': 50, 'Sb': 51, 'Te': 52, 'I': 53, 'Xe': 54,
    'Cs': 55, 'Ba': 56, 'La': 57, 'Ce': 58, 'Pr': 59, 'Nd': 60, 'Pm': 61,
    'Sm': 62,
    'Eu': 63, 'Gd': 64, 'Tb': 65, 'Dy': 66, 'Ho': 67, 'Er': 68, 'Tm': 69,
    'Yb': 70,
    'Lu': 71, 'Hf': 72, 'Ta': 73, 'W': 74, 'Re': 75, 'Os': 76, 'Ir': 77,
    'Pt': 78,
    'Au': 79, 'Hg': 80, 'Tl': 81, 'Pb': 82, 'Bi': 83, 'At': 85, 'Rn': 86,
    'U': 92,
}
# Accept lowercase symbols too (e.g. 'h', 'cl'), exactly as the original
# hand-written table did.
sym2no.update({sym.lower(): num for sym, num in list(sym2no.items())})


def main(filename="basis_631ss.dat"):
    """Convert a Jaguar/Gaussian-style basis-set file *filename* into a
    python module (same name, .py extension) containing a ``basis`` list
    indexed by atomic number.

    Input format, per atom:
        <symbol> ...
        <shell-type> <...> <nprim> [<nprim2>]
        <exponent> <coefficient> [<p-coefficient>]   (nprim lines)
        ****
    'SP' shells are split into separate S and P entries.
    """
    outfilename = filename.replace('.dat', '.py')
    bfs = []
    with open(filename) as infile:
        while True:
            line = infile.readline()
            if not line:
                break
            words = line.split()
            if not words:
                # Tolerate blank separator lines between atoms (the old
                # code raised IndexError on them).
                continue
            atno = sym2no[words[0]]
            # Grow the per-element list so bfs[atno] exists.
            if len(bfs) < atno + 1:
                bfs.extend([] for _ in range(atno + 1 - len(bfs)))
            while True:
                line = infile.readline()
                if not line:
                    break
                words = line.split()
                if len(words) < 1:
                    break
                if words[0] == '****':
                    break
                shell_type, nprim = words[0], int(words[2])
                # SP shells may carry a second primitive count.
                try:
                    nprim = nprim + int(words[3])
                except (IndexError, ValueError):
                    pass
                prims = []
                pprims = []
                for _ in range(nprim):
                    words = infile.readline().split()
                    expnt = float(words[0])
                    coef = float(words[1])
                    prims.append((expnt, coef))
                    if shell_type == 'SP':
                        # Third column holds the P coefficient.
                        pprims.append((expnt, float(words[2])))
                if shell_type == 'SP':
                    bfs[atno].append(('S', prims))
                    bfs[atno].append(('P', pprims))
                else:
                    bfs[atno].append((shell_type, prims))
    with open(outfilename, 'w') as out:
        out.write('basis = [\n')
        for bf in bfs:
            if bf:
                out.write(' [\n')
                for shell_type, prims in bf:
                    out.write(' (\'%s\',[\n' % shell_type)
                    for expnt, coef in prims:
                        out.write(' (%f, %f),\n' % (expnt, coef))
                    out.write(' ]),\n')
                out.write(' ],\n')
            else:
                out.write(' None,\n')
        out.write(' ]\n')


if __name__ == '__main__':
    if len(sys.argv) < 2:
        main()
    else:
        main(sys.argv[1])
| berquist/PyQuante | Data/basis_converter_jaguar.py | Python | bsd-3-clause | 4,708 | [
"Jaguar"
] | 9d3805773f759943026750d131becf1f1c8c7e9893311f496a1111f62ab2ec85 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# License
# -------
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Legal Notice
# ------------
# OPENFOAM is a trademark owned by OpenCFD Ltd
# (producer and distributor of the OpenFOAM software via www.openfoam.com).
# The trademark information must remain visible and unadulterated in this
# file and via the "spack info" and comply with the term set by
# http://openfoam.com/legal/trademark-policy.php
#
# This file is not part of OpenFOAM, nor does it constitute a component of an
# OpenFOAM distribution.
#
##############################################################################
#
# Notes
# - mpi handling: WM_MPLIB=USER and provide wmake rules for special purpose
# 'USER and 'USERMPI' mpi implementations.
# The choice of 'USER' vs 'USERMPI' may change in the future.
#
# Changes
# 2017-03-28 Mark Olesen <mark.olesen@esi-group.com>
# - avoid installing intermediate targets.
# - reworked to mirror the openfoam-com package.
# If changes are needed here, consider if they need applying there too.
#
# Known issues
# - Combining +parmgridgen with +float32 probably won't work.
#
##############################################################################
import glob
import re
import shutil
import os
from spack import *
from spack.pkg.builtin.openfoam_com import OpenfoamArch
from spack.pkg.builtin.openfoam_com import add_extra_files
from spack.pkg.builtin.openfoam_com import write_environ
from spack.pkg.builtin.openfoam_com import rewrite_environ_files
class FoamExtend(Package):
    """The Extend Project is a fork of the OpenFOAM opensource library
    for Computational Fluid Dynamics (CFD).

    This offering is not approved or endorsed by OpenCFD Ltd,
    producer and distributor of the OpenFOAM software via www.openfoam.com,
    and owner of the OPENFOAM trademark.
    """
    homepage = "http://www.extend-project.de/"

    version('4.0', git='http://git.code.sf.net/p/foam-extend/foam-extend-4.0')
    version('3.2', git='http://git.code.sf.net/p/foam-extend/foam-extend-3.2')
    version('3.1', git='http://git.code.sf.net/p/foam-extend/foam-extend-3.1')
    version('3.0', git='http://git.code.sf.net/p/foam-extend/foam-extend-3.0')

    # variant('int64', default=False,
    #         description='Compile with 64-bit label')
    variant('float32', default=False,
            description='Compile with 32-bit scalar (single-precision)')
    variant('paraview', default=False,
            description='Build paraview plugins (eg, paraFoam)')
    variant('scotch', default=True,
            description='With scotch for decomposition')
    variant('ptscotch', default=True,
            description='With ptscotch for decomposition')
    variant('metis', default=True,
            description='With metis for decomposition')
    variant('parmetis', default=True,
            description='With parmetis for decomposition')
    variant('parmgridgen', default=True,
            description='With parmgridgen support')
    variant('source', default=True,
            description='Install library/application sources and tutorials')

    provides('openfoam')
    depends_on('mpi')
    depends_on('python')
    depends_on('zlib')
    depends_on('flex', type='build')
    depends_on('cmake', type='build')

    # Plain (serial) scotch only when ptscotch is disabled; ptscotch
    # implies an MPI-enabled scotch.
    depends_on('scotch~metis', when='~ptscotch+scotch')
    depends_on('scotch~metis+mpi', when='+ptscotch')
    depends_on('metis@5:', when='+metis')
    depends_on('parmetis', when='+parmetis')
    # mgridgen is statically linked
    depends_on('parmgridgen', when='+parmgridgen', type='build')
    depends_on('paraview@:5.0.1', when='+paraview')

    # General patches: extra files copied into the source tree by patch().
    common = ['spack-Allwmake', 'README-spack']
    assets = []

    # Some user config settings, forwarded to OpenfoamArch.
    config = {
        'label-size': False,    # <- No int32/int64 support
        'mplib': 'USERMPI',     # USER | USERMPI
    }

    # The openfoam architecture, compiler information etc.
    # Lazily created by the foam_arch property.
    _foam_arch = None

    # Content for etc/prefs.{csh,sh}; filled in by configure().
    etc_prefs = {}

    # Content for etc/config.{csh,sh}/ files
    etc_config = {}

    phases = ['configure', 'build', 'install']
    build_script = './spack-Allwmake'  # <- Added by patch() method.

    #
    # - End of definitions / setup -
    #

    def setup_environment(self, spack_env, run_env):
        # Point the runtime environment at the installed project.
        # NOTE(review): the trailing comma on the next line makes the
        # expression a 1-tuple; harmless, but probably unintended.
        run_env.set('FOAM_INST_DIR', os.path.dirname(self.projectdir)),
        run_env.set('FOAM_PROJECT_DIR', self.projectdir)
        run_env.set('WM_PROJECT_DIR', self.projectdir)
        for d in ['wmake', self.archbin]:  # bin already added automatically
            run_env.prepend_path('PATH', join_path(self.projectdir, d))

    def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
        """Provide location of the OpenFOAM project.
        This is identical to the WM_PROJECT_DIR value, but we avoid that
        variable since it would mask the normal OpenFOAM cleanup of
        previous versions.
        """
        spack_env.set('FOAM_PROJECT_DIR', self.projectdir)

    @property
    def projectdir(self):
        """Absolute location of project directory: WM_PROJECT_DIR/"""
        return self.prefix  # <- install directly under prefix

    @property
    def foam_arch(self):
        # Lazily construct the OpenfoamArch helper from the spec plus the
        # class-level config options; cached on the instance.
        if not self._foam_arch:
            self._foam_arch = OpenfoamArch(self.spec, **self.config)
        return self._foam_arch

    @property
    def archbin(self):
        """Relative location of architecture-specific executables"""
        return join_path('applications', 'bin', self.foam_arch)

    @property
    def archlib(self):
        """Relative location of architecture-specific libraries"""
        return join_path('lib', self.foam_arch)

    def patch(self):
        """Adjust OpenFOAM build for spack.
        Where needed, apply filter as an alternative to normal patching."""
        add_extra_files(self, self.common, self.assets)

        # Adjust ParMGridGen - this is still a mess.  The library name
        # differs between the spack parmgridgen package and upstream.
        files = [
            'src/dbns/Make/options',
            'src/fvAgglomerationMethods/MGridGenGamgAgglomeration/Make/options'  # noqa: E501
        ]
        for f in files:
            filter_file(r'-lMGridGen', r'-lmgrid', f, backup=False)

        # Adjust for flex version check: the stock test only compares the
        # subminor version and mis-detects modern flex releases.
        files = [
            'src/thermophysicalModels/reactionThermo/chemistryReaders/chemkinReader/chemkinLexer.L',  # noqa: E501
            'src/surfMesh/surfaceFormats/stl/STLsurfaceFormatASCII.L',  # noqa: E501
            'src/meshTools/triSurface/triSurface/interfaces/STL/readSTLASCII.L',  # noqa: E501
            'applications/utilities/preProcessing/fluentDataToFoam/fluentDataToFoam.L',  # noqa: E501
            'applications/utilities/mesh/conversion/gambitToFoam/gambitToFoam.L',  # noqa: E501
            'applications/utilities/mesh/conversion/fluent3DMeshToFoam/fluent3DMeshToFoam.L',  # noqa: E501
            'applications/utilities/mesh/conversion/ansysToFoam/ansysToFoam.L',  # noqa: E501
            'applications/utilities/mesh/conversion/fluentMeshToFoam/fluentMeshToFoam.L',  # noqa: E501
            'applications/utilities/mesh/conversion/fluent3DMeshToElmer/fluent3DMeshToElmer.L'  # noqa: E501
        ]
        for f in files:
            filter_file(
                r'#if YY_FLEX_SUBMINOR_VERSION < 34',
                r'#if YY_FLEX_MAJOR_VERSION <= 2 && YY_FLEX_MINOR_VERSION <= 5 && YY_FLEX_SUBMINOR_VERSION < 34',  # noqa: E501
                f, backup=False)

    def configure(self, spec, prefix):
        """Make adjustments to the OpenFOAM configuration files in their various
        locations: etc/bashrc, etc/config.sh/FEATURE and customizations that
        don't properly fit get placed in the etc/prefs.sh file (similiarly for
        csh).
        """
        # Content for etc/prefs.{csh,sh}.  Numeric keys sort before the
        # named ones so 'compilerInstall' is emitted first.
        self.etc_prefs = {
            '000': {  # Sort first
                'compilerInstall': 'System',
            },
            '001': {},
            'cmake': {
                'CMAKE_DIR': spec['cmake'].prefix,
                'CMAKE_BIN_DIR': spec['cmake'].prefix.bin,
            },
            'python': {
                'PYTHON_DIR': spec['python'].home,
                'PYTHON_BIN_DIR': spec['python'].home.bin,
            },
            'flex': {
                'FLEX_SYSTEM': 1,
                'FLEX_DIR': spec['flex'].prefix,
            },
            'bison': {
                'BISON_SYSTEM': 1,
                # NOTE(review): BISON_DIR points at the *flex* prefix and
                # bison is not a declared dependency -- looks like a
                # copy/paste slip; confirm before relying on it.
                'BISON_DIR': spec['flex'].prefix,
            },
            'zlib': {
                'ZLIB_SYSTEM': 1,
                'ZLIB_DIR': spec['zlib'].prefix,
            },
        }
        # Adjust configuration via prefs - sort second
        self.etc_prefs['001'].update(self.foam_arch.foam_dict())

        if '+scotch' in spec or '+ptscotch' in spec:
            pkg = spec['scotch'].prefix
            self.etc_prefs['scotch'] = {
                'SCOTCH_SYSTEM': 1,
                'SCOTCH_DIR': pkg,
                'SCOTCH_BIN_DIR': pkg.bin,
                'SCOTCH_LIB_DIR': pkg.lib,
                'SCOTCH_INCLUDE_DIR': pkg.include,
            }

        if '+metis' in spec:
            pkg = spec['metis'].prefix
            self.etc_prefs['metis'] = {
                'METIS_SYSTEM': 1,
                'METIS_DIR': pkg,
                'METIS_BIN_DIR': pkg.bin,
                'METIS_LIB_DIR': pkg.lib,
                'METIS_INCLUDE_DIR': pkg.include,
            }

        if '+parmetis' in spec:
            pkg = spec['parmetis'].prefix
            # NOTE(review): key is spelled 'parametis' (sic) in the
            # original; kept as-is since downstream consumers may match it.
            self.etc_prefs['parametis'] = {
                'PARMETIS_SYSTEM': 1,
                'PARMETIS_DIR': pkg,
                'PARMETIS_BIN_DIR': pkg.bin,
                'PARMETIS_LIB_DIR': pkg.lib,
                'PARMETIS_INCLUDE_DIR': pkg.include,
            }

        if '+parmgridgen' in spec:
            pkg = spec['parmgridgen'].prefix
            self.etc_prefs['parmgridgen'] = {
                'PARMGRIDGEN_SYSTEM': 1,
                'PARMGRIDGEN_DIR': pkg,
                'PARMGRIDGEN_BIN_DIR': pkg.bin,
                'PARMGRIDGEN_LIB_DIR': pkg.lib,
                'PARMGRIDGEN_INCLUDE_DIR': pkg.include,
            }

        if '+paraview' in self.spec:
            self.etc_prefs['paraview'] = {
                'PARAVIEW_SYSTEM': 1,
                'PARAVIEW_DIR': spec['paraview'].prefix,
                'PARAVIEW_BIN_DIR': spec['paraview'].prefix.bin,
            }
            self.etc_prefs['qt'] = {
                'QT_SYSTEM': 1,
                'QT_DIR': spec['qt'].prefix,
                'QT_BIN_DIR': spec['qt'].prefix.bin,
            }

        # Write prefs files according to the configuration.
        # Only need prefs.sh for building, but install both for end-users
        write_environ(
            self.etc_prefs,
            posix=join_path('etc', 'prefs.sh'),
            cshell=join_path('etc', 'prefs.csh'))

    def build(self, spec, prefix):
        """Build using the OpenFOAM Allwmake script, with a wrapper to source
        its environment first.
        Only build if the compiler is known to be supported.
        """
        # has_rule raises for unsupported compilers; create_rules writes
        # the USER/USERMPI wmake rules for the spack-provided MPI.
        self.foam_arch.has_rule(self.stage.source_path)
        self.foam_arch.create_rules(self.stage.source_path, self)

        args = []
        if self.parallel:  # Build in parallel? - pass via the environment
            os.environ['WM_NCOMPPROCS'] = str(make_jobs)
        builder = Executable(self.build_script)
        builder(*args)

    def install(self, spec, prefix):
        """Install under the projectdir"""
        opts = str(self.foam_arch)

        # Fairly ugly since intermediate targets are scattered inside sources
        appdir = 'applications'
        projdir = os.path.basename(self.projectdir)
        mkdirp(self.projectdir, join_path(self.projectdir, appdir))

        # Filtering: bashrc, cshrc
        edits = {
            'WM_PROJECT_INST_DIR': os.path.dirname(self.projectdir),
            'WM_PROJECT_DIR': join_path('$WM_PROJECT_INST_DIR', projdir),
        }

        # All top-level files, except spack build info and possibly Allwmake
        if '+source' in spec:
            ignored = re.compile(r'^spack-.*')
        else:
            ignored = re.compile(r'^(Allclean|Allwmake|spack-).*')
        files = [
            f for f in glob.glob("*")
            if os.path.isfile(f) and not ignored.search(f)
        ]
        for f in files:
            install(f, self.projectdir)

        # Install directories. install applications/bin directly
        # Install 'etc' before 'bin' (for symlinks)
        for d in ['etc', 'bin', 'wmake', 'lib', join_path(appdir, 'bin')]:
            install_tree(
                d,
                join_path(self.projectdir, d),
                symlinks=True)

        if '+source' in spec:
            subitem = join_path(appdir, 'Allwmake')
            install(subitem, join_path(self.projectdir, subitem))

            ignored = [opts]  # Ignore intermediate targets
            for d in ['src', 'tutorials']:
                install_tree(
                    d,
                    join_path(self.projectdir, d),
                    ignore=shutil.ignore_patterns(*ignored),
                    symlinks=True)
            for d in ['solvers', 'utilities']:
                install_tree(
                    join_path(appdir, d),
                    join_path(self.projectdir, appdir, d),
                    ignore=shutil.ignore_patterns(*ignored),
                    symlinks=True)

        etc_dir = join_path(self.projectdir, 'etc')
        rewrite_environ_files(  # Adjust etc/bashrc and etc/cshrc
            edits,
            posix=join_path(etc_dir, 'bashrc'),
            cshell=join_path(etc_dir, 'cshrc'))
        self.install_links()

    def install_links(self):
        """Add symlinks into bin/, lib/ (eg, for other applications)"""
        # Make build log visible - it contains OpenFOAM-specific information
        with working_dir(self.projectdir):
            os.symlink(
                join_path('.spack', 'build.out'),
                join_path('log.' + str(self.foam_arch)))
# -----------------------------------------------------------------------------
| skosukhin/spack | var/spack/repos/builtin/packages/foam-extend/package.py | Python | lgpl-2.1 | 15,255 | [
"ParaView"
] | 51c43e51594113ba732642dca61c8a5d85052963e6568f9be1d60741c37281a0 |
import cgen as c
from devito.arch import AMDGPUX, NVIDIAX
from devito.ir import Call, ParallelIteration, FindSymbols
from devito.passes.iet.definitions import DeviceAwareDataManager
from devito.passes.iet.orchestration import Orchestrator
from devito.passes.iet.parpragma import PragmaDeviceAwareTransformer, PragmaLangBB
from devito.passes.iet.languages.C import CBB
from devito.passes.iet.languages.openmp import OmpRegion, OmpIteration
from devito.passes.iet.languages.utils import make_clause_reduction
from devito.passes.iet.misc import is_on_device
from devito.symbolics import DefFunction, Macro
from devito.tools import prod
__all__ = ['DeviceAccizer', 'DeviceAccDataManager', 'AccOrchestrator']
class DeviceAccIteration(ParallelIteration):
@classmethod
def _make_construct(cls, **kwargs):
return 'acc parallel loop'
@classmethod
def _make_clauses(cls, ncollapse=None, reduction=None, **kwargs):
clauses = []
clauses.append('collapse(%d)' % (ncollapse or 1))
if reduction:
clauses.append(make_clause_reduction(reduction))
symbols = FindSymbols().visit(kwargs['nodes'])
deviceptrs = [i.name for i in symbols if i.is_Array and i._mem_default]
presents = [i.name for i in symbols
if (i.is_AbstractFunction and
is_on_device(i, kwargs['gpu_fit']) and
i.name not in deviceptrs)]
# The NVC 20.7 and 20.9 compilers have a bug which triggers data movement for
# indirectly indexed arrays (e.g., a[b[i]]) unless a present clause is used
if presents:
clauses.append("present(%s)" % ",".join(presents))
if deviceptrs:
clauses.append("deviceptr(%s)" % ",".join(deviceptrs))
return clauses
@classmethod
def _process_kwargs(cls, **kwargs):
kwargs = super()._process_kwargs(**kwargs)
kwargs.pop('gpu_fit', None)
kwargs.pop('schedule', None)
kwargs.pop('parallel', False)
kwargs.pop('chunk_size', None)
kwargs.pop('nthreads', None)
return kwargs
class AccBB(PragmaLangBB):
mapper = {
# Misc
'name': 'OpenACC',
'header': 'openacc.h',
# Platform mapping
AMDGPUX: Macro('acc_device_radeon'),
NVIDIAX: Macro('acc_device_nvidia'),
# Runtime library
'init': lambda args:
Call('acc_init', args),
'num-devices': lambda args:
DefFunction('acc_get_num_devices', args),
'set-device': lambda args:
Call('acc_set_device_num', args),
# Pragmas
'atomic': c.Pragma('acc atomic update'),
'map-enter-to': lambda i, j:
c.Pragma('acc enter data copyin(%s%s)' % (i, j)),
'map-enter-to-wait': lambda i, j, k:
(c.Pragma('acc enter data copyin(%s%s) async(%s)' % (i, j, k)),
c.Pragma('acc wait(%s)' % k)),
'map-enter-alloc': lambda i, j:
c.Pragma('acc enter data create(%s%s)' % (i, j)),
'map-present': lambda i, j:
c.Pragma('acc data present(%s%s)' % (i, j)),
'map-update': lambda i, j:
c.Pragma('acc exit data copyout(%s%s)' % (i, j)),
'map-update-host': lambda i, j:
c.Pragma('acc update self(%s%s)' % (i, j)),
'map-update-wait-host': lambda i, j, k:
(c.Pragma('acc update self(%s%s) async(%s)' % (i, j, k)),
c.Pragma('acc wait(%s)' % k)),
'map-update-device': lambda i, j:
c.Pragma('acc update device(%s%s)' % (i, j)),
'map-update-wait-device': lambda i, j, k:
(c.Pragma('acc update device(%s%s) async(%s)' % (i, j, k)),
c.Pragma('acc wait(%s)' % k)),
'map-release': lambda i, j, k:
c.Pragma('acc exit data delete(%s%s)%s' % (i, j, k)),
'map-exit-delete': lambda i, j, k:
c.Pragma('acc exit data delete(%s%s)%s' % (i, j, k)),
'device-alloc': lambda i:
'acc_malloc(%s)' % i,
'device-free': lambda i:
'acc_free(%s)' % i
}
mapper.update(CBB.mapper)
Region = OmpRegion
HostIteration = OmpIteration # Host parallelism still goes via OpenMP
DeviceIteration = DeviceAccIteration
@classmethod
def _map_to_wait(cls, f, imask=None, queueid=None):
sections = cls._make_sections_from_imask(f, imask)
return cls.mapper['map-enter-to-wait'](f.name, sections, queueid)
@classmethod
def _map_present(cls, f, imask=None):
sections = cls._make_sections_from_imask(f, imask)
return cls.mapper['map-present'](f.name, sections)
@classmethod
def _map_delete(cls, f, imask=None, devicerm=None):
sections = cls._make_sections_from_imask(f, imask)
if devicerm is not None:
cond = ' if(%s)' % devicerm.name
else:
cond = ''
return cls.mapper['map-exit-delete'](f.name, sections, cond)
@classmethod
def _map_update_wait_host(cls, f, imask=None, queueid=None):
sections = cls._make_sections_from_imask(f, imask)
return cls.mapper['map-update-wait-host'](f.name, sections, queueid)
@classmethod
def _map_update_wait_device(cls, f, imask=None, queueid=None):
sections = cls._make_sections_from_imask(f, imask)
return cls.mapper['map-update-wait-device'](f.name, sections, queueid)
class DeviceAccizer(PragmaDeviceAwareTransformer):
lang = AccBB
# Note: there is no need to override `make_gpudirect` since acc_malloc is
# used to allocate the buffers passed to the various MPI calls, which will
# then receive device points
class DeviceAccDataManager(DeviceAwareDataManager):
lang = AccBB
def _alloc_array_on_high_bw_mem(self, site, obj, storage):
if obj._mem_mapped:
# posix_memalign + copy-to-device
super()._alloc_array_on_high_bw_mem(site, obj, storage)
else:
# acc_malloc -- the Array only resides on the device, ie, it never
# needs to be accessed on the host
assert obj._mem_default
size_trunkated = "".join("[%s]" % i for i in obj.symbolic_shape[1:])
decl = c.Value(obj._C_typedata, "(*%s)%s" % (obj.name, size_trunkated))
cast = "(%s (*)%s)" % (obj._C_typedata, size_trunkated)
size_full = "sizeof(%s[%s])" % (obj._C_typedata, prod(obj.symbolic_shape))
alloc = "%s %s" % (cast, self.lang['device-alloc'](size_full))
init = c.Initializer(decl, alloc)
free = c.Statement(self.lang['device-free'](obj.name))
storage.update(obj, site, allocs=init, frees=free)
class AccOrchestrator(Orchestrator):

    """Orchestrator specialized for the OpenACC language."""

    lang = AccBB
| opesci/devito | devito/passes/iet/languages/openacc.py | Python | mit | 6,786 | [
"VisIt"
] | dcc44395ebdd6a7d4131df4870747a9dd98373f6dcb6a50cd1bc24206c784af3 |
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Stamp_Uploaded_Files: A WebSubmit Function whose job is to stamp given
files that were uploaded during a submission.
"""
__revision__ = "$Id$"
from invenio.errorlib import register_exception
from invenio import websubmit_file_stamper
from invenio.websubmit_config import InvenioWebSubmitFunctionWarning, \
InvenioWebSubmitFunctionError, InvenioWebSubmitFileStamperError
import os.path, shutil, re
def Stamp_Uploaded_Files(parameters, curdir, form, user_info=None):
    """
    Stamp certain files that have been uploaded during a submission.

    @param parameters: (dictionary) - must contain:

       + latex_template: (string) - the name of the LaTeX template that
         should be used for the creation of the stamp.

       + latex_template_vars: (string) - a string-ified dictionary of
         variables to be replaced in the LaTeX template and the values
         (or names of files in curdir containing the values) with which to
         replace them.
         E.G.:
            { 'TITLE' : 'DEMOTHESIS_TITLE',
              'DATE' : 'DEMOTHESIS_DATE'
            }

       + files_to_be_stamped: (string) - The directories in which files
         should be stamped: This is a comma-separated list of directory
         names. E.g.:
            DEMOTHESIS_MAIN,DEMOTHESIS_ADDITIONAL

       + stamp: (string) - the type of stamp to be applied to the files.
         should be one of:
           + first (only the first page is stamped);
           + all (all pages are stamped);
           + coverpage (a separate cover-page is added to the file as a
             first page);

    If all goes according to plan, for each directory in which files are to
    be stamped, the original, unstamped files should be found in a
    directory 'files_before_stamping/DIRNAME', and the stamped versions
    should be found under 'files/DIRNAME'. E.g., for DEMOTHESIS_Main:
       - Unstamped: files_before_stamping/DEMOTHESIS_Main
       - Stamped:   files/DEMOTHESIS_Main
    """
    ## The file stamper needs to be called with a dictionary of options of
    ## the following format:
    ## { 'latex-template'     : "", ## TEMPLATE_NAME
    ##   'latex-template-var' : {}, ## TEMPLATE VARIABLES
    ##   'input-file'         : "", ## INPUT FILE
    ##   'output-file'        : "", ## OUTPUT FILE
    ##   'stamp'              : "", ## STAMP TYPE
    ##   'verbosity'          : 0,  ## VERBOSITY (we don't care about it)
    ## }
    file_stamper_options = { 'latex-template' : "",
                             'latex-template-var' : { },
                             'input-file' : "",
                             'output-file' : "",
                             'stamp' : "",
                             'verbosity' : 0,
                           }
    ## A dictionary of arguments to be passed to visit_for_stamping:
    visit_for_stamping_arguments = { 'curdir' : curdir,
                                     'file_stamper_options' : \
                                     file_stamper_options,
                                     'user_info' : user_info
                                   }
    ## Start by getting the parameter-values from WebSubmit:
    ## The name of the LaTeX template to be used for stamp creation:
    latex_template = "%s" % ((type(parameters['latex_template']) is str \
                              and parameters['latex_template']) or "")
    ## A string containing the variables/values that should be substituted
    ## in the final (working) LaTeX template:
    latex_template_vars_string = "%s" % \
                       ((type(parameters['latex_template_vars']) is str \
                         and parameters['latex_template_vars']) or "")
    ## The type of stamp to be applied to the file(s):
    stamp = "%s" % ((type(parameters['stamp']) is str and \
                     parameters['stamp'].lower()) or "")
    ## The directories in which files should be stamped:
    ## This is a comma-separated list of directory names. E.g.:
    ## DEMOTHESIS_MAIN,DEMOTHESIS_ADDITIONAL
    stamp_content_of = "%s" % ((type(parameters['files_to_be_stamped']) \
                                is str and parameters['files_to_be_stamped']) \
                               or "")
    ## Now split the list (of directories in which to stamp files) on commas:
    if stamp_content_of.strip() != "":
        stamping_locations = stamp_content_of.split(",")
    else:
        stamping_locations = []
    if len(stamping_locations) == 0:
        ## If there are no items to be stamped, don't continue:
        return ""
    ## Strip the LaTeX filename into the basename (All templates should be
    ## in the template repository):
    latex_template = os.path.basename(latex_template)
    ## Convert the string of latex template variables into a dictionary
    ## of search-term/replacement-term pairs:
    latex_template_vars = get_dictionary_from_string(latex_template_vars_string)
    ## For each of the latex variables, check in `CURDIR' for a file with that
    ## name. If found, use it's contents as the template-variable's value.
    ## If not, just use the raw value string already held by the template
    ## variable:
    latex_template_varnames = latex_template_vars.keys()
    for varname in latex_template_varnames:
        ## Get this variable's value:
        varvalue = latex_template_vars[varname].strip()
        if not ((varvalue.find("date(") == 0 and varvalue[-1] == ")") or \
                (varvalue.find("include(") == 0 and varvalue[-1] == ")")) \
                and varvalue != "":
            ## We don't want to interfere with date() or include() directives,
            ## so we only do this if the variable value didn't contain them:
            ##
            ## Is this variable value the name of a file in the current
            ## submission's working directory, from which a literal value for
            ## use in the template should be extracted? If yes, it will
            ## begin with "FILE:". If no, we leave the value exactly as it is.
            if varvalue.upper().find("FILE:") == 0:
                ## The value to be used is to be taken from a file. Clean the
                ## file name and if it's OK, extract that value from the file.
                ##
                seekvalue_fname = varvalue[5:].strip()
                seekvalue_fname = os.path.basename(seekvalue_fname).strip()
                if seekvalue_fname != "":
                    ## Attempt to extract the value from the file:
                    if os.access("%s/%s" % (curdir, seekvalue_fname), \
                                 os.R_OK|os.F_OK):
                        ## The file exists. Extract its value:
                        try:
                            ## Read the file's lines, making sure the handle
                            ## is closed again afterwards (the previous
                            ## one-liner leaked the open file handle):
                            fh_replval = open("%s/%s" \
                                              % (curdir, seekvalue_fname), "r")
                            try:
                                repl_file_val = fh_replval.readlines()
                            finally:
                                fh_replval.close()
                        except IOError:
                            ## The file was unreadable.
                            err_msg = "Error in Stamp_Uploaded_Files: The " \
                                      "function attempted to read a LaTex " \
                                      "template variable value from the " \
                                      "following file in the current " \
                                      "submission's working directory: " \
                                      "[%s]. However, an unexpected error " \
                                      "was encountered when doing so. " \
                                      "Please inform the administrator." \
                                      % seekvalue_fname
                            register_exception(req=user_info['req'])
                            raise InvenioWebSubmitFunctionError(err_msg)
                        else:
                            final_varval = ""
                            for line in repl_file_val:
                                final_varval += line
                            final_varval = final_varval.rstrip()
                            ## Replace the variable value with that which has
                            ## been read from the file:
                            latex_template_vars[varname] = final_varval
                    else:
                        ## The file didn't actually exist in the current
                        ## submission's working directory. Use an empty
                        ## value:
                        latex_template_vars[varname] = ""
                else:
                    ## The filename was not valid.
                    err_msg = "Error in Stamp_Uploaded_Files: The function " \
                              "was configured to read a LaTeX template " \
                              "variable from a file with the following " \
                              "instruction: [%s --> %s]. The filename, " \
                              "however, was not considered valid. Please " \
                              "report this to the administrator." \
                              % (varname, varvalue)
                    raise InvenioWebSubmitFunctionError(err_msg)
    ## Put the 'fixed' values into the file_stamper_options dictionary:
    file_stamper_options['latex-template'] = latex_template
    file_stamper_options['latex-template-var'] = latex_template_vars
    file_stamper_options['stamp'] = stamp
    for stampdir in stamping_locations:
        ## Create the full path to the stamp directory - it is considered
        ## to be under 'curdir' - the working directory for the current
        ## submission:
        path_to_stampdir = "%s/files/%s" % (curdir, stampdir.strip())
        ## Call os.path.walk, passing it the path to the directory to be
        ## walked, the visit_for_stamping function (which will call the
        ## file-stamper for each file within that directory), and the
        ## dictionary of options to be passed to the file-stamper:
        try:
            os.path.walk(path_to_stampdir, \
                         visit_for_stamping, \
                         visit_for_stamping_arguments)
        except InvenioWebSubmitFunctionWarning:
            ## Unable to stamp the files in stampdir. Register the exception
            ## and continue to try to stamp the files in the other stampdirs:
            ## FIXME - The original exception was registered in 'visit'.
            ## Perhaps we should just send the message contained in this
            ## warning to the admin?
            register_exception(req=user_info['req'])
            continue
        except InvenioWebSubmitFunctionError:
            ## Unexpected error in stamping. The admin should be contacted
            ## because it has resulted in an unstable situation with the
            ## files. They are no longer in a well-defined state - some may
            ## have been lost and manual intervention by the admin is needed.
            ## FIXME - should this be reported here, or since we propagate it
            ## up to websubmit_engine anyway, should we let it register it?
            register_exception(req=user_info['req'])
            ## Re-raise with a bare `raise' so the original traceback is
            ## preserved (`raise err' would have truncated it):
            raise
    return ""
def visit_for_stamping(visit_for_stamping_arguments, dirname, filenames):
"""Visitor function called by os.path.walk.
This function takes a directory and a list of files in that directory
and for each file, calls the websubmit_file_stamper on it.
When a file is stamped, the original is moved away into a directory
of unstamped files and the new, stamped version is moved into its
place.
@param visit_for_stamping_arguments: (dictionary) of arguments needed
by this function. Must contain 'curdir', 'user_info' and
'file_stamper_options' members.
@param dirname: (string) - the path to the directory in which the
files are to be stamped.
@param filenames: (list) - the names of each file in dirname. An
attempt will be made to stamp each of these files.
@Exceptions Raised:
+ InvenioWebSubmitFunctionWarning;
+ InvenioWebSubmitFunctionError;
"""
## Get the dictionary of options to pass to the stamper:
file_stamper_options = visit_for_stamping_arguments['file_stamper_options']
## Create a directory to store original files before stamping:
dirname_files_pre_stamping = dirname.replace("/files/", \
"/files_before_stamping/", 1)
if not os.path.exists(dirname_files_pre_stamping):
try:
os.makedirs(dirname_files_pre_stamping)
except OSError, err:
## Unable to make a directory in which to store the unstamped
## files.
## Register the exception:
exception_prefix = "Unable to stamp files in [%s]. Couldn't " \
"create directory in which to store the " \
"original, unstamped files." \
% dirname
register_exception(prefix=exception_prefix)
## Since we can't make a directory for the unstamped files,
## we can't continue to stamp them.
## Skip the stamping of the contents of this directory by raising
## a warning:
msg = "Warning: A problem occurred when stamping files in [%s]. " \
"Unable to create directory to store the original, " \
"unstamped files. Got this error: [%s]. This means the " \
"files in this directory were not stamped." \
% (dirname, str(err))
raise InvenioWebSubmitFunctionWarning(msg)
## Loop through each file in the directory and attempt to stamp it:
for file_to_stamp in filenames:
## Get the path to the file to be stamped and put it into the
## dictionary of options that will be passed to stamp_file:
path_to_subject_file = "%s/%s" % (dirname, file_to_stamp)
file_stamper_options['input-file'] = path_to_subject_file
## Just before attempting to stamp the file, log the dictionary of
## options (file_stamper_options) that will be passed to websubmit-
## file-stamper:
try:
fh_log = open("%s/websubmit_file_stamper-calls-options.log" \
% visit_for_stamping_arguments['curdir'], "a+")
fh_log.write("%s\n" % file_stamper_options)
fh_log.flush()
fh_log.close()
except IOError:
## Unable to log the file stamper options.
exception_prefix = "Unable to write websubmit_file_stamper " \
"options to log file " \
"%s/websubmit_file_stamper-calls-options.log" \
% visit_for_stamping_arguments['curdir']
register_exception(prefix=exception_prefix)
try:
## Try to stamp the file:
(stamped_file_path_only, stamped_file_name) = \
websubmit_file_stamper.stamp_file(file_stamper_options)
except InvenioWebSubmitFileStamperError:
## It wasn't possible to stamp this file.
## Register the exception along with an informational message:
exception_prefix = "A problem occurred when stamping [%s]. The " \
"stamping of this file was unsuccessful." \
% path_to_subject_file
register_exception(prefix=exception_prefix)
## Skip this file, moving on to the next:
continue
else:
## Stamping was successful.
path_to_stamped_file = "%s/%s" % (stamped_file_path_only, \
stamped_file_name)
## Move the unstamped file from the "files" directory into the
## "files_before_stamping" directory:
try:
shutil.move(path_to_subject_file, "%s/%s" \
% (dirname_files_pre_stamping, file_to_stamp))
except IOError:
## Couldn't move the original file away from the "files"
## directory. Log the problem and continue on to the next
## file:
exception_prefix = "A problem occurred when stamping [%s]. " \
"The file was sucessfully stamped, and " \
"can be found here: [%s]. Unfortunately " \
"though, it could not be copied back to " \
"the current submission's working " \
"directory because the unstamped version " \
"could not be moved out of the way (tried " \
"to move it from here [%s] to here: " \
"[%s/%s]). The stamping of this file was " \
"unsuccessful." \
% (path_to_subject_file, \
path_to_stamped_file, \
path_to_subject_file, \
dirname_files_pre_stamping, \
file_to_stamp)
register_exception(prefix=exception_prefix)
continue
else:
## The original file has been moved into the files before
## stamping directory. Now try to copy the stamped file into
## the files directory:
try:
shutil.copy(path_to_stamped_file, "%s/%s" \
% (dirname, file_to_stamp))
except IOError:
## Even though the original, unstamped file was moved away
## from the files directory, the stamped-version couldn't
## be moved into its place. Register the exception:
exception_prefix = "A problem occurred when stamping " \
"[%s]. The file was sucessfully " \
"stamped, and can be found here: " \
"[%s]. Unfortunately though, it " \
"could not be copied back to the " \
"current submission's working " \
"directory." % (path_to_subject_file, \
path_to_stamped_file)
register_exception(prefix=exception_prefix)
## Because it wasn't possible to move the stamped file
## into the files directory, attempt to move the original,
## unstamped file back into the files directory:
try:
shutil.move("%s/%s" % (dirname_files_pre_stamping, \
file_to_stamp), \
path_to_stamped_file)
except IOError, err:
## It wasn't possible even to move the original file
## back to the files directory. Register the
## exception and stop the stamping process - it isn't
## safe to continue:
exeption_prefix = "A problem occurred when stamping " \
"[%s]. The file was sucessfully " \
"stamped, and can be found here: " \
"[%s]. Unfortunately though, it " \
"could not be copied back to the " \
"current submission's working " \
"directory. Additionionally, the " \
"original, unstamped file " \
"could not be moved back to the " \
"files directory, from the files-" \
"before-stamping directory. It " \
"can now be found here: [%s/%s]. " \
"Stamping cannot continue and " \
"manual intervention is necessary " \
"because the file cannot be " \
"attached to the record." \
% (path_to_subject_file, \
path_to_stamped_file, \
dirname_files_pre_stamping, \
file_to_stamp)
register_exception(prefix=exeption_prefix)
## Raise an InvenioWebSubmitFunctionError, stopping
## further stamping, etc:
raise InvenioWebSubmitFunctionError(exception_prefix)
def get_dictionary_from_string(dict_string):
    """Given a string version of a "dictionary", split the string into a
       python dictionary.
       For example, given the following string:
        {'TITLE' : 'EX_TITLE', 'AUTHOR' : 'EX_AUTHOR', 'REPORTNUMBER' : 'EX_RN'}
       A dictionary in the following format will be returned:
        {
           'TITLE'        : 'EX_TITLE',
           'AUTHOR'       : 'EX_AUTHOR',
           'REPORTNUMBER' : 'EX_RN',
        }
       @param dict_string: (string) - the string version of the dictionary.
       @return: (dictionary) - the dictionary build from the string.
    """
    ## Drop surrounding whitespace and the outer braces:
    body = dict_string.strip(" {}")
    ## Split on commas that are NOT preceded by a backslash, so that an
    ## escaped comma (\,) inside a key or value survives the split.
    ## E.g. "'hello\, world' : '!', 'click' : 'here'" becomes
    ## ["'hello\, world' : '!'", " 'click' : 'here'"].
    raw_pairs = re.split(r'(?<!\\),', body)
    parsed = {}
    for raw_pair in raw_pairs:
        ## Each item should look like "Key" : "Value" (quotes optional);
        ## split on the FIRST colon only, so values may contain colons:
        pieces = raw_pair.split(":", 1)
        if len(pieces) != 2:
            ## Malformed item - discard it:
            continue
        ## Strip whitespace and any single/double quotes from both sides:
        key = pieces[0].strip(" '\"")
        if not key:
            ## An empty key is useless - discard the pair:
            continue
        parsed[key] = pieces[1].strip(" '\"")
    return parsed
| pombredanne/invenio-old | modules/websubmit/lib/functions/Stamp_Uploaded_Files.py | Python | gpl-2.0 | 24,729 | [
"VisIt"
] | 364276e20a18308449ba2fb1beb2b3bf70b216ffc8278a2ab28585bb845bc934 |
#!/usr/bin/python
import os, sys, getopt, string
from Bio.Seq import Seq
from Bio.Blast import NCBIXML
from Bio.Alphabet import IUPAC
#==============================================================================
def show_help():
    """Print a usage summary for this script to STDOUT and terminate the
    program with exit status 1."""
    print """%s parses BLASTX XML output to STDOUT
Options:
-f:\tBLASTX output in XML format
-n:\tnumber of best hits to be parsed (default: 1)
-e:\tmaximum e-value to accept hits (default: 1e-5)
What this program does:
It takes the best hit's start and endposition from BLAST, applies it to the sequence in your query (e.g. the CAP3-output),
and translates to the left resp. right from the start resp. end of your CAP3-output, until a Start-orStopcodon appears.
""" % sys.argv[0]
    sys.exit(1)
# =============================================================================
def handle_arguments():
    """Parse and validate the command-line arguments.

    Recognized options:
      -f  path to the BLASTX XML report (required; must exist)
      -n  number of best hits to parse (default: 1)
      -e  maximum e-value to accept a hit (default: 1e-5)

    On any problem a message is written to STDERR and show_help()
    terminates the program.

    @return: (dictionary) with 'blastfile', 'numhits' and 'evalue' keys.
    """
    if len(sys.argv) == 1:
        sys.stderr.write("no arguments provided.\n")
        show_help()
    try:  # check for the right arguments
        keys, values = getopt.getopt(sys.argv[1:], "hf:n:e:")
    except getopt.GetoptError:
        sys.stderr.write("invalid arguments provided.\n")
        show_help()
    args = {}
    args['numhits'] = 1
    args['evalue'] = 1e-5  # plain float literal; float('1e-5') was redundant
    for key, value in keys:
        if key == '-f': args['blastfile'] = value
        if key == '-n': args['numhits'] = int(value)
        if key == '-e': args['evalue'] = float(value)
    ## `in` instead of the deprecated dict.has_key() (removed in Python 3):
    if 'blastfile' not in args:
        sys.stderr.write("blastx XML file argument missing.\n")
        show_help()
    elif not os.path.exists(args.get('blastfile')) or not os.path.isfile(args.get('blastfile')):
        sys.stderr.write("blastx XML file does not exist.\n")
        show_help()
    return args
#==============================================================================
def main(args):
    """Parse the BLASTX XML report and print, for each query, up to
    args['numhits'] best hits as tab-separated rows on STDOUT.

    A hit is only reported while its e-value does not exceed
    args['evalue']; hits are consumed in report order (best first).

    @param args: (dictionary) - 'blastfile', 'numhits', 'evalue' members.
    """
    header = ['query', 'hit', 'frame', 'query_startpos', 'query_endpos',
              'subject_startpos', 'subject_endpos', 'evalue', 'score']
    sys.stdout.write('# ' + "\t".join(header) + "\n")
    XML = open(args.get('blastfile'))
    try:
        blast_records = NCBIXML.parse(XML)
        for record in blast_records:
            count = 0
            while count < args.get('numhits'):
                count += 1
                if not record.alignments:
                    ## Fewer alignments than requested hits: nothing left
                    ## for this query (previously raised an IndexError):
                    break
                hit = record.alignments.pop(0)
                hsp = hit.hsps[0]  # best-scoring HSP of this alignment
                if hsp.expect > args.get('evalue'):
                    break
                ## Query end = start - 1 + 3 nucleotides per aligned
                ## residue (gaps removed); subject coords are in residues:
                sys.stdout.write("\t".join(
                    [record.query, hit.title.split()[0],
                     str(hsp.frame[0]),
                     str(hsp.query_start),
                     str(hsp.query_start - 1 + len(hsp.query.replace('-', '')) * 3),
                     str(hsp.sbjct_start),
                     str(hsp.sbjct_start - 1 + len(hsp.sbjct)),
                     str(hsp.expect),
                     str(hsp.score)]) + "\n")
    finally:
        ## Always release the report file handle (previously leaked):
        XML.close()
# =============================================================================
## Only run when executed as a script, not when imported as a module:
if __name__ == "__main__":
    args = handle_arguments()
    main(args)
| lotharwissler/bioinformatics | python/blast/parse-blastout-xml.py | Python | mit | 2,994 | [
"BLAST"
] | 4bed7acb3280da3773f7605ed27eca1285287be86e656961e2e81f3f5afda2d4 |
from numpy import linspace
from ase.calculators.fleur import FLEUR
from ase.lattice import bulk
from ase.io.trajectory import Trajectory
# Bulk fcc nickel with a starting lattice constant of 3.52 A:
atoms = bulk('Ni', a=3.52)

# Attach a FLEUR calculator (PBE, plane-wave cutoff 3.6, 10x10x10 k-mesh):
calc = FLEUR(xc='PBE', kmax=3.6, kpts=(10, 10, 10), workdir='lat_const')
atoms.set_calculator(calc)

# Record each scaled geometry (plus its energy) in a trajectory file:
traj = Trajectory('Ni.traj', 'w', atoms)

# Scan seven uniform scalings of the reference cell between 95% and 105%,
# computing the total energy at each point:
reference_cell = atoms.get_cell()
for scale in linspace(0.95, 1.05, 7):
    atoms.set_cell(reference_cell * scale)
    atoms.get_potential_energy()
    traj.write()
| misdoro/python-ase | doc/ase/calculators/fcc_Ni_fleur.py | Python | gpl-2.0 | 470 | [
"ASE",
"FLEUR"
] | 30985c81318029b61cb99565247f07f35e956ff17efb638638b34dd0331fd44f |
##########################################################################
#
# Copyright 2008-2010 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
"""Common trace code generation."""
# Adjust path
import os.path
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import itertools
import specs.stdapi as stdapi
def getWrapperInterfaceName(interface):
    """Return the name of the generated wrapper class for `interface`."""
    return "Wrap%s" % interface.expr
debug = False
class ComplexValueSerializer(stdapi.OnceVisitor):
    '''Type visitors which generates serialization functions for
    complex types.

    Simple types are serialized inline.
    '''

    def __init__(self, serializer):
        stdapi.OnceVisitor.__init__(self)
        # Inline serializer used to emit the per-case code of context-less
        # polymorphic values (see visitPolymorphic).
        self.serializer = serializer

    def visitVoid(self, literal):
        # Nothing to pre-generate for simple/inline-serialized types.
        pass

    def visitLiteral(self, literal):
        pass

    def visitString(self, string):
        pass

    def visitConst(self, const):
        # Only the underlying type needs a serialization function.
        self.visit(const.type)

    def visitStruct(self, struct):
        # Emit the member-name table and the trace::StructSig describing
        # this struct, for the generated C++ tracing code to reference.
        print 'static const char * _struct%s_members[%u] = {' % (struct.tag, len(struct.members))
        for type, name, in struct.members:
            if name is None:
                print ' "",'
            else:
                print ' "%s",' % (name,)
        print '};'
        print 'static const trace::StructSig _struct%s_sig = {' % (struct.tag,)
        if struct.name is None:
            structName = '""'
        else:
            structName = '"%s"' % struct.name
        print ' %u, %s, %u, _struct%s_members' % (struct.id, structName, len(struct.members), struct.tag)
        print '};'
        print

    def visitArray(self, array):
        # An array only needs its element type's serialization support.
        self.visit(array.type)

    def visitAttribArray(self, array):
        pass

    def visitBlob(self, array):
        pass

    def visitEnum(self, enum):
        # Emit the name/value table and trace::EnumSig for this enum.
        print 'static const trace::EnumValue _enum%s_values[] = {' % (enum.tag)
        for value in enum.values:
            print ' {"%s", %s},' % (value, value)
        print '};'
        print
        print 'static const trace::EnumSig _enum%s_sig = {' % (enum.tag)
        print ' %u, %u, _enum%s_values' % (enum.id, len(enum.values), enum.tag)
        print '};'
        print

    def visitBitmask(self, bitmask):
        # Emit the flag table and trace::BitmaskSig for this bitmask.
        print 'static const trace::BitmaskFlag _bitmask%s_flags[] = {' % (bitmask.tag)
        for value in bitmask.values:
            print ' {"%s", %s},' % (value, value)
        print '};'
        print
        print 'static const trace::BitmaskSig _bitmask%s_sig = {' % (bitmask.tag)
        print ' %u, %u, _bitmask%s_flags' % (bitmask.id, len(bitmask.values), bitmask.tag)
        print '};'
        print

    def visitPointer(self, pointer):
        self.visit(pointer.type)

    def visitIntPointer(self, pointer):
        pass

    def visitObjPointer(self, pointer):
        self.visit(pointer.type)

    def visitLinearPointer(self, pointer):
        self.visit(pointer.type)

    def visitHandle(self, handle):
        self.visit(handle.type)

    def visitReference(self, reference):
        self.visit(reference.type)

    def visitAlias(self, alias):
        self.visit(alias.type)

    def visitOpaque(self, opaque):
        pass

    def visitInterface(self, interface):
        pass

    def visitPolymorphic(self, polymorphic):
        # Context-less polymorphic values get a standalone _write__ helper
        # switching on the selector; context-dependent ones are serialized
        # inline at each call site instead.
        if not polymorphic.contextLess:
            return
        print 'static void _write__%s(int selector, %s const & value) {' % (polymorphic.tag, polymorphic.expr)
        print ' switch (selector) {'
        for cases, type in polymorphic.iterSwitch():
            for case in cases:
                print ' %s:' % case
            self.serializer.visit(type, '(%s)(value)' % (type,))
            print ' break;'
        print ' }'
        print '}'
        print
class ValueSerializer(stdapi.Visitor, stdapi.ExpanderMixin):
'''Visitor which generates code to serialize any type.
Simple types are serialized inline here, whereas the serialization of
complex types is dispatched to the serialization functions generated by
ComplexValueSerializer visitor above.
'''
def visitLiteral(self, literal, instance):
print ' trace::localWriter.write%s(%s);' % (literal.kind, instance)
def visitString(self, string, instance):
if not string.wide:
cast = 'const char *'
suffix = 'String'
else:
cast = 'const wchar_t *'
suffix = 'WString'
if cast != string.expr:
# reinterpret_cast is necessary for GLubyte * <=> char *
instance = 'reinterpret_cast<%s>(%s)' % (cast, instance)
if string.length is not None:
length = ', %s' % self.expand(string.length)
else:
length = ''
print ' trace::localWriter.write%s(%s%s);' % (suffix, instance, length)
def visitConst(self, const, instance):
self.visit(const.type, instance)
def visitStruct(self, struct, instance):
print ' trace::localWriter.beginStruct(&_struct%s_sig);' % (struct.tag,)
for member in struct.members:
self.visitMember(member, instance)
print ' trace::localWriter.endStruct();'
def visitArray(self, array, instance):
length = '_c' + array.type.tag
index = '_i' + array.type.tag
array_length = self.expand(array.length)
print ' if (%s) {' % instance
print ' size_t %s = %s > 0 ? %s : 0;' % (length, array_length, array_length)
print ' trace::localWriter.beginArray(%s);' % length
print ' for (size_t %s = 0; %s < %s; ++%s) {' % (index, index, length, index)
print ' trace::localWriter.beginElement();'
self.visitElement(index, array.type, '(%s)[%s]' % (instance, index))
print ' trace::localWriter.endElement();'
print ' }'
print ' trace::localWriter.endArray();'
print ' } else {'
print ' trace::localWriter.writeNull();'
print ' }'
def visitAttribArray(self, array, instance):
# For each element, decide if it is a key or a value (which depends on the previous key).
# If it is a value, store it as the right type - usually int, some bitfield, or some enum.
# It is currently assumed that an unknown key means that it is followed by an int value.
# determine the array length which must be passed to writeArray() up front
count = '_c' + array.baseType.tag
print ' {'
print ' int %s;' % count
print ' for (%(c)s = 0; %(array)s && %(array)s[%(c)s] != %(terminator)s; %(c)s += 2) {' \
% {'c': count, 'array': instance, 'terminator': array.terminator}
if array.hasKeysWithoutValues:
print ' switch (int(%(array)s[%(c)s])) {' % {'array': instance, 'c': count}
for key, valueType in array.valueTypes:
if valueType is None:
print ' case %s:' % key
print ' %s--;' % count # the next value is a key again and checked if it's the terminator
print ' break;'
print ' }'
print ' }'
print ' %(c)s += %(array)s ? 1 : 0;' % {'c': count, 'array': instance}
print ' trace::localWriter.beginArray(%s);' % count
# for each key / key-value pair write the key and the value, if the key requires one
index = '_i' + array.baseType.tag
print ' for (int %(i)s = 0; %(i)s < %(count)s; %(i)s++) {' % {'i': index, 'count': count}
print ' trace::localWriter.beginElement();'
self.visit(array.baseType, "%(array)s[%(i)s]" % {'array': instance, 'i': index})
print ' trace::localWriter.endElement();'
print ' if (%(i)s + 1 >= %(count)s) {' % {'i': index, 'count': count}
print ' break;'
print ' }'
print ' switch (int(%(array)s[%(i)s++])) {' % {'array': instance, 'i': index}
# write generic value the usual way
for key, valueType in array.valueTypes:
if valueType is not None:
print ' case %s:' % key
print ' trace::localWriter.beginElement();'
self.visitElement(index, valueType, '(%(array)s)[%(i)s]' % {'array': instance, 'i': index})
print ' trace::localWriter.endElement();'
print ' break;'
# known key with no value, just decrease the index so we treat the next value as a key
if array.hasKeysWithoutValues:
for key, valueType in array.valueTypes:
if valueType is None:
print ' case %s:' % key
print ' %s--;' % index
print ' break;'
# unknown key, write an int value
print ' default:'
print ' trace::localWriter.beginElement();'
print ' os::log("apitrace: warning: %s: unknown key 0x%04X, interpreting value as int\\n", ' + \
'__FUNCTION__, int(%(array)s[%(i)s - 1]));' % {'array': instance, 'i': index}
print ' trace::localWriter.writeSInt(%(array)s[%(i)s]);' % {'array': instance, 'i': index}
print ' trace::localWriter.endElement();'
print ' break;'
print ' }'
print ' }'
print ' trace::localWriter.endArray();'
print ' }'
def visitBlob(self, blob, instance):
print ' trace::localWriter.writeBlob(%s, %s);' % (instance, self.expand(blob.size))
def visitEnum(self, enum, instance):
print ' trace::localWriter.writeEnum(&_enum%s_sig, %s);' % (enum.tag, instance)
def visitBitmask(self, bitmask, instance):
print ' trace::localWriter.writeBitmask(&_bitmask%s_sig, %s);' % (bitmask.tag, instance)
def visitPointer(self, pointer, instance):
print ' if (%s) {' % instance
print ' trace::localWriter.beginArray(1);'
print ' trace::localWriter.beginElement();'
self.visit(pointer.type, "*" + instance)
print ' trace::localWriter.endElement();'
print ' trace::localWriter.endArray();'
print ' } else {'
print ' trace::localWriter.writeNull();'
print ' }'
def visitIntPointer(self, pointer, instance):
print ' trace::localWriter.writePointer((uintptr_t)%s);' % instance
def visitObjPointer(self, pointer, instance):
print ' trace::localWriter.writePointer((uintptr_t)%s);' % instance
def visitLinearPointer(self, pointer, instance):
print ' trace::localWriter.writePointer((uintptr_t)%s);' % instance
def visitReference(self, reference, instance):
self.visit(reference.type, instance)
def visitHandle(self, handle, instance):
self.visit(handle.type, instance)
def visitAlias(self, alias, instance):
self.visit(alias.type, instance)
def visitOpaque(self, opaque, instance):
print ' trace::localWriter.writePointer((uintptr_t)%s);' % instance
def visitInterface(self, interface, instance):
assert False
def visitPolymorphic(self, polymorphic, instance):
    # Serialize a value whose concrete type depends on a switch expression.
    if polymorphic.contextLess:
        # Context-free polymorphic types have a standalone helper emitted
        # elsewhere; just call it.
        print '    _write__%s(%s, %s);' % (polymorphic.tag, polymorphic.switchExpr, instance)
    else:
        # Emit an inline C++ switch over the (expanded) discriminator.
        switchExpr = self.expand(polymorphic.switchExpr)
        print '    switch (%s) {' % switchExpr
        for cases, type in polymorphic.iterSwitch():
            for case in cases:
                print '    %s:' % case
            caseInstance = instance
            if type.expr is not None:
                # Cast the instance to the concrete case type before visiting.
                caseInstance = 'static_cast<%s>(%s)' % (type, caseInstance)
            self.visit(type, caseInstance)
            print '        break;'
        if polymorphic.defaultType is None:
            # No default case in the spec: warn at trace time and write null
            # so the trace stream stays well-formed.
            print r'    default:'
            print r'        os::log("apitrace: warning: %%s: unexpected polymorphic case %%i\n", __FUNCTION__, (int)%s);' % (switchExpr,)
            print r'        trace::localWriter.writeNull();'
            print r'        break;'
        print '    }'
class WrapDecider(stdapi.Traverser):
    '''Type visitor which will decide wheter this type will need wrapping or not.

    For complex types (arrays, structures), we need to know this before hand.
    '''

    def __init__(self):
        # Flipped to True as soon as an interface is found anywhere in the type.
        self.needsWrapping = False

    def visitLinearPointer(self, void):
        # Linear pointers never carry interfaces; nothing to descend into.
        pass

    def visitInterface(self, interface):
        # Interfaces are exactly what requires wrapping.
        self.needsWrapping = True
class ValueWrapper(stdapi.Traverser, stdapi.ExpanderMixin):
    '''Type visitor which will generate the code to wrap an instance.

    Wrapping is necessary mostly for interfaces, however interface pointers can
    appear anywhere inside complex types.
    '''

    def visitStruct(self, struct, instance):
        # Recurse into every member; only interface-bearing members emit code.
        for member in struct.members:
            self.visitMember(member, instance)

    def visitArray(self, array, instance):
        # Emit a C++ loop wrapping each element of a (possibly NULL) array.
        array_length = self.expand(array.length)
        print "    if (%s) {" % instance
        print "        for (size_t _i = 0, _s = %s; _i < _s; ++_i) {" % array_length
        self.visitElement('_i', array.type, instance + "[_i]")
        print "        }"
        print "    }"

    def visitPointer(self, pointer, instance):
        # Wrap the pointed-to value, guarding against NULL.
        print "    if (%s) {" % instance
        self.visit(pointer.type, "*" + instance)
        print "    }"

    def visitObjPointer(self, pointer, instance):
        # Only interface object pointers need actual wrapping; unwrap aliases
        # first, and fall back to plain pointer traversal otherwise.
        elem_type = pointer.type.mutable()
        if isinstance(elem_type, stdapi.Interface):
            self.visitInterfacePointer(elem_type, instance)
        elif isinstance(elem_type, stdapi.Alias) and isinstance(elem_type.type, stdapi.Interface):
            self.visitInterfacePointer(elem_type.type, instance)
        else:
            self.visitPointer(pointer, instance)

    def visitInterface(self, interface, instance):
        # Interfaces by value should never occur (always behind ObjPointer).
        raise NotImplementedError

    def visitInterfacePointer(self, interface, instance):
        # Replace the real interface pointer with its Wrap* proxy in place.
        print "    Wrap%s::_wrap(__FUNCTION__, &%s);" % (interface.name, instance)

    def visitPolymorphic(self, type, instance):
        # XXX: There might be polymorphic values that need wrapping in the future
        raise NotImplementedError
class ValueUnwrapper(ValueWrapper):
    '''Reverse of ValueWrapper.'''

    # True once we have emitted an alloca copy; deeper traversals can then
    # mutate the copy directly instead of allocating again.
    allocated = False

    def visitStruct(self, struct, instance):
        if not self.allocated:
            # Argument is constant. We need to create a non const
            # stack copy (alloca) and point the caller's argument at it
            # before unwrapping members in place.
            print '    {'
            print "        %s * _t = static_cast<%s *>(alloca(sizeof *_t));" % (struct, struct)
            print '        *_t = %s;' % (instance,)
            assert instance.startswith('*')
            print '        %s = _t;' % (instance[1:],)
            instance = '*_t'
            self.allocated = True
            try:
                return ValueWrapper.visitStruct(self, struct, instance)
            finally:
                # Always close the emitted C++ scope, even if traversal raises.
                print '    }'
        else:
            return ValueWrapper.visitStruct(self, struct, instance)

    def visitArray(self, array, instance):
        if self.allocated or isinstance(instance, stdapi.Interface):
            return ValueWrapper.visitArray(self, array, instance)
        # First allocation: copy the whole array onto the stack, unwrap the
        # copy's elements, then swing the caller's pointer to the copy.
        array_length = self.expand(array.length)
        elem_type = array.type.mutable()
        print "    if (%s && %s) {" % (instance, array_length)
        print "        %s * _t = static_cast<%s *>(alloca(%s * sizeof *_t));" % (elem_type, elem_type, array_length)
        print "        for (size_t _i = 0, _s = %s; _i < _s; ++_i) {" % array_length
        print "            _t[_i] = %s[_i];" % instance
        self.allocated = True
        self.visit(array.type, "_t[_i]")
        print "        }"
        print "        %s = _t;" % instance
        print "    }"

    def visitInterfacePointer(self, interface, instance):
        # Replace a Wrap* proxy pointer with the real interface pointer.
        print r'    Wrap%s::_unwrap(__FUNCTION__, &%s);' % (interface.name, instance)
def _getInterfaceHierarchy(allIfaces, baseIface, result):
    '''Collect into result every interface deriving from baseIface.

    Depth-first: for each direct child (in allIfaces order) its own
    descendants are appended before the child itself, so most-derived
    interfaces come first.
    '''
    direct_children = [iface for iface in allIfaces if iface.base is baseIface]
    for child in direct_children:
        _getInterfaceHierarchy(allIfaces, child, result)
        result.append(child)
def getInterfaceHierarchy(allIfaces, baseIface):
    '''Return all interfaces derived from baseIface, most-derived first.'''
    derived = []
    _getInterfaceHierarchy(allIfaces, baseIface, derived)
    return derived
class Tracer:
    '''Base class to orchestrate the code generation of API tracing.

    Emits, via print statements, the C++ source of a tracing wrapper for an
    API: serializer helpers, interface wrapper classes, and one wrapper per
    entry point.
    '''

    # 0-3 are reserved to memcpy, malloc, free, and realloc
    __id = 4

    def __init__(self):
        # Set by traceApi(); some helpers (e.g. implementWrapperInterface)
        # need access to the full API description.
        self.api = None

    def serializerFactory(self):
        '''Create a serializer.

        Can be overridden by derived classes to inject their own serializer.
        '''
        return ValueSerializer()

    def traceApi(self, api):
        '''Top-level driver: emit the complete tracing source for api.'''
        self.api = api

        self.header(api)

        # Includes
        for module in api.modules:
            for header in module.headers:
                print header
        print

        # Generate the serializer functions
        types = api.getAllTypes()
        visitor = ComplexValueSerializer(self.serializerFactory())
        map(visitor.visit, types)
        print

        # Interfaces wrappers
        self.traceInterfaces(api)

        # Function wrappers
        self.interface = None
        self.base = None
        for function in api.getAllFunctions():
            self.traceFunctionDecl(function)
        for function in api.getAllFunctions():
            self.traceFunctionImpl(function)
        print

        self.footer(api)

    def header(self, api):
        '''Emit the common file prologue (alloca portability, wrapper map).'''
        print '#ifdef _WIN32'
        print '#  include <malloc.h> // alloca'
        print '#  ifndef alloca'
        print '#    define alloca _alloca'
        print '#  endif'
        print '#else'
        print '#  include <alloca.h> // alloca'
        print '#endif'
        print
        print
        # Maps real COM object pointers to their Wrap* proxies.
        print 'static std::map<void *, void *> g_WrappedObjects;'

    def footer(self, api):
        # Hook for derived classes; nothing to emit by default.
        pass

    def traceFunctionDecl(self, function):
        # Per-function declarations

        if not function.internal:
            if function.args:
                print 'static const char * _%s_args[%u] = {%s};' % (function.name, len(function.args), ', '.join(['"%s"' % arg.name for arg in function.args]))
            else:
                print 'static const char ** _%s_args = NULL;' % (function.name,)
            print 'static const trace::FunctionSig _%s_sig = {%u, "%s", %u, _%s_args};' % (function.name, self.getFunctionSigId(), function.name, len(function.args), function.name)
            print

    def getFunctionSigId(self):
        '''Return the next unique function-signature id (shared counter).'''
        id = Tracer.__id
        Tracer.__id += 1
        return id

    def isFunctionPublic(self, function):
        # Derived tracers may hide some entry points; default is exported.
        return True

    def traceFunctionImpl(self, function):
        '''Emit the extern "C" wrapper for one API function.'''
        if self.isFunctionPublic(function):
            print 'extern "C" PUBLIC'
        else:
            print 'extern "C" PRIVATE'
        print function.prototype() + ' {'
        if function.type is not stdapi.Void:
            print '    %s _result;' % function.type

        # Input arguments may hold wrapped interface pointers; unwrap them
        # before calling the real implementation.
        for arg in function.args:
            if not arg.output:
                self.unwrapArg(function, arg)

        self.traceFunctionImplBody(function)

        # XXX: wrapping should go here, but before we can do that we'll need to protect g_WrappedObjects with its own mutex

        if function.type is not stdapi.Void:
            print '    return _result;'
        print '}'
        print

    def traceFunctionImplBody(self, function):
        '''Emit enter-record / invoke / leave-record for one function.'''
        if not function.internal:
            print '    unsigned _call = trace::localWriter.beginEnter(&_%s_sig);' % (function.name,)
            for arg in function.args:
                if not arg.output:
                    self.serializeArg(function, arg)
            print '    trace::localWriter.endEnter();'
        self.invokeFunction(function)
        if not function.internal:
            print '    trace::localWriter.beginLeave(_call);'
            # Output arguments are only meaningful when the call succeeded.
            print '    if (%s) {' % self.wasFunctionSuccessful(function)
            for arg in function.args:
                if arg.output:
                    self.serializeArg(function, arg)
                    self.wrapArg(function, arg)
            print '    }'
            if function.type is not stdapi.Void:
                self.serializeRet(function, "_result")
            if function.type is not stdapi.Void:
                self.wrapRet(function, "_result")
            print '    trace::localWriter.endLeave();'

    def invokeFunction(self, function):
        self.doInvokeFunction(function)

    def doInvokeFunction(self, function, prefix='_', suffix=''):
        # Same as invokeFunction() but called both when trace is enabled or disabled.
        if function.type is stdapi.Void:
            result = ''
        else:
            result = '_result = '
        dispatch = prefix + function.name + suffix
        print '    %s%s(%s);' % (result, dispatch, ', '.join([str(arg.name) for arg in function.args]))

    def wasFunctionSuccessful(self, function):
        '''Return a C++ expression testing whether the call succeeded.'''
        if function.type is stdapi.Void:
            return 'true'
        if str(function.type) == 'HRESULT':
            return 'SUCCEEDED(_result)'
        return 'true'

    def serializeArg(self, function, arg):
        '''Emit the code recording one argument value in the trace.'''
        print '    trace::localWriter.beginArg(%u);' % (arg.index,)
        self.serializeArgValue(function, arg)
        print '    trace::localWriter.endArg();'

    def serializeArgValue(self, function, arg):
        self.serializeValue(arg.type, arg.name)

    def wrapArg(self, function, arg):
        '''Wrap interface pointers returned through an output argument.'''
        assert not isinstance(arg.type, stdapi.ObjPointer)

        # QueryInterface-style calls: the output object's type is determined
        # by a REFIID input argument, so dispatch on the IID at runtime.
        from specs.winapi import REFIID
        riid = None
        for other_arg in function.args:
            if not other_arg.output and other_arg.type is REFIID:
                riid = other_arg

        if riid is not None \
           and riid.name != 'EmulatedInterface' \
           and isinstance(arg.type, stdapi.Pointer) \
           and isinstance(arg.type.type, stdapi.ObjPointer):
            self.wrapIid(function, riid, arg)
            return

        self.wrapValue(arg.type, arg.name)

    def unwrapArg(self, function, arg):
        self.unwrapValue(arg.type, arg.name)

    def serializeRet(self, function, instance):
        '''Emit the code recording the return value in the trace.'''
        print '    trace::localWriter.beginReturn();'
        self.serializeValue(function.type, instance)
        print '    trace::localWriter.endReturn();'

    def serializeValue(self, type, instance):
        serializer = self.serializerFactory()
        serializer.visit(type, instance)

    def wrapRet(self, function, instance):
        self.wrapValue(function.type, instance)

    def needsWrapping(self, type):
        '''True when type contains an interface pointer anywhere inside.'''
        visitor = WrapDecider()
        visitor.visit(type)
        return visitor.needsWrapping

    def wrapValue(self, type, instance):
        if self.needsWrapping(type):
            visitor = ValueWrapper()
            visitor.visit(type, instance)

    def unwrapValue(self, type, instance):
        if self.needsWrapping(type):
            visitor = ValueUnwrapper()
            visitor.visit(type, instance)

    def traceInterfaces(self, api):
        '''Emit all wrapper classes and interface-pointer helper functions.'''
        interfaces = api.getAllInterfaces()
        if not interfaces:
            return

        map(self.declareWrapperInterface, interfaces)

        # Helper functions to wrap/unwrap interface pointers
        print r'static inline bool'
        print r'hasChildInterface(REFIID riid, IUnknown *pUnknown) {'
        print r'    IUnknown *pObj = NULL;'
        print r'    HRESULT hr = pUnknown->QueryInterface(riid, (VOID **)&pObj);'
        print r'    if (FAILED(hr)) {'
        print r'        return false;'
        print r'    }'
        print r'    assert(pObj);'
        print r'    pObj->Release();'
        print r'    return pUnknown == pObj;'
        print r'}'
        print
        print r'static inline const void *'
        print r'getVtbl(const void *pvObj) {'
        print r'    return pvObj ? *(const void **)pvObj : NULL;'
        print r'}'
        print

        self.implementIidWrapper(api)

        map(self.implementWrapperInterface, interfaces)
        print

    def declareWrapperInterface(self, interface):
        '''Emit the C++ class declaration of one Wrap* proxy.'''
        wrapperInterfaceName = getWrapperInterfaceName(interface)
        print "class %s : public %s " % (wrapperInterfaceName, interface.name)
        print "{"
        print "private:"
        print "    %s(%s * pInstance);" % (wrapperInterfaceName, interface.name)
        print "    virtual ~%s();" % wrapperInterfaceName
        print "public:"
        print "    static %s* _create(const char *entryName, %s * pInstance);" % (wrapperInterfaceName, interface.name)
        print "    static void _wrap(const char *entryName, %s ** ppInstance);" % (interface.name,)
        print "    static void _unwrap(const char *entryName, %s ** pInstance);" % (interface.name,)
        print

        methods = list(interface.iterMethods())
        for method in methods:
            print "    " + method.prototype() + ";"
        print

        for type, name, value in self.enumWrapperInterfaceVariables(interface):
            print '    %s %s;' % (type, name)
        print

        print r'private:'
        print r'    void _dummy(unsigned i) const {'
        print r'        os::log("error: %%s: unexpected virtual method %%i of instance pWrapper=%%p pvObj=%%p pVtbl=%%p\n", "%s", i, this, m_pInstance, m_pVtbl);' % interface.name
        print r'        trace::localWriter.flush();'
        print r'        os::abort();'
        print r'    }'
        print

        # Pad the vtable with trapping stubs so calls through slots the spec
        # does not describe fail loudly instead of corrupting memory.
        for i in range(len(methods), 64):
            print r'    virtual void _dummy%i(void) const { _dummy(%i); }' % (i, i)
        print

        print "};"
        print

    def enumWrapperInterfaceVariables(self, interface):
        # (type, name, constructor-initializer) triples; m_dwMagic lets
        # _unwrap() recognize proxies by value.
        return [
            ("DWORD", "m_dwMagic", "0xd8365d6c"),
            ("%s *" % interface.name, "m_pInstance", "pInstance"),
            ("const void *", "m_pVtbl", "getVtbl(pInstance)"),
            ("UINT", "m_NumMethods", len(list(interface.iterBaseMethods()))),
        ]

    def implementWrapperInterface(self, iface):
        '''Emit the out-of-line definitions of one Wrap* proxy class.'''
        self.interface = iface

        wrapperInterfaceName = getWrapperInterfaceName(iface)

        # Private constructor
        print '%s::%s(%s * pInstance) {' % (wrapperInterfaceName, wrapperInterfaceName, iface.name)
        for type, name, value in self.enumWrapperInterfaceVariables(iface):
            if value is not None:
                print '    %s = %s;' % (name, value)
        print '}'
        print

        # Public constructor
        print '%s *%s::_create(const char *entryName, %s * pInstance) {' % (wrapperInterfaceName, wrapperInterfaceName, iface.name)
        print r'    Wrap%s *pWrapper = new Wrap%s(pInstance);' % (iface.name, iface.name)
        if debug:
            print r'    os::log("%%s: created %s pvObj=%%p pWrapper=%%p pVtbl=%%p\n", entryName, pInstance, pWrapper, pWrapper->m_pVtbl);' % iface.name
        print r'    g_WrappedObjects[pInstance] = pWrapper;'
        print r'    return pWrapper;'
        print '}'
        print

        # Destructor
        print '%s::~%s() {' % (wrapperInterfaceName, wrapperInterfaceName)
        if debug:
            print r'    os::log("%s::Release: deleted pvObj=%%p pWrapper=%%p pVtbl=%%p\n", m_pInstance, this, m_pVtbl);' % iface.name
        print r'    g_WrappedObjects.erase(m_pInstance);'
        print '}'
        print

        baseMethods = list(iface.iterBaseMethods())
        for base, method in baseMethods:
            self.base = base
            self.implementWrapperInterfaceMethod(iface, base, method)
        print

        # Wrap pointer
        ifaces = self.api.getAllInterfaces()
        print r'void'
        print r'%s::_wrap(const char *entryName, %s **ppObj) {' % (wrapperInterfaceName, iface.name)
        print r'    if (!ppObj) {'
        print r'        return;'
        print r'    }'
        print r'    %s *pObj = *ppObj;' % (iface.name,)
        print r'    if (!pObj) {'
        print r'        return;'
        print r'    }'
        print r'    assert(hasChildInterface(IID_%s, pObj));' % iface.name
        # Reuse an existing wrapper only when its vtable and method count
        # still match -- the object may have been re-created at this address.
        print r'    std::map<void *, void *>::const_iterator it = g_WrappedObjects.find(pObj);'
        print r'    if (it != g_WrappedObjects.end()) {'
        print r'        Wrap%s *pWrapper = (Wrap%s *)it->second;' % (iface.name, iface.name)
        print r'        assert(pWrapper);'
        print r'        assert(pWrapper->m_dwMagic == 0xd8365d6c);'
        print r'        assert(pWrapper->m_pInstance == pObj);'
        print r'        if (pWrapper->m_pVtbl == getVtbl(pObj) &&'
        print r'            pWrapper->m_NumMethods >= %s) {' % len(baseMethods)
        if debug:
            print r'            os::log("%s: fetched pvObj=%p pWrapper=%p pVtbl=%p\n", entryName, pObj, pWrapper, pWrapper->m_pVtbl);'
        print r'            *ppObj = pWrapper;'
        print r'            return;'
        print r'        }'
        print r'    }'
        # Otherwise create the most derived wrapper the object supports.
        else_ = ''
        for childIface in getInterfaceHierarchy(ifaces, iface):
            print r'    %sif (hasChildInterface(IID_%s, pObj)) {' % (else_, childIface.name)
            print r'        pObj = Wrap%s::_create(entryName, static_cast<%s *>(pObj));' % (childIface.name, childIface.name)
            print r'    }'
            else_ = 'else '
        print r'    %s{' % else_
        print r'        pObj = Wrap%s::_create(entryName, pObj);' % iface.name
        print r'    }'
        print r'    *ppObj = pObj;'
        print r'}'
        print

        # Unwrap pointer
        print r'void'
        print r'%s::_unwrap(const char *entryName, %s **ppObj) {' % (wrapperInterfaceName, iface.name)
        print r'    if (!ppObj || !*ppObj) {'
        print r'        return;'
        print r'    }'
        print r'    const %s *pWrapper = static_cast<const %s*>(*ppObj);' % (wrapperInterfaceName, getWrapperInterfaceName(iface))
        print r'    if (pWrapper && pWrapper->m_dwMagic == 0xd8365d6c) {'
        print r'        *ppObj = pWrapper->m_pInstance;'
        print r'    } else {'
        print r'        os::log("apitrace: warning: %%s: unexpected %%s pointer %%p\n", entryName, "%s", *ppObj);' % iface.name
        print r'    }'
        print r'}'
        print

    def implementWrapperInterfaceMethod(self, interface, base, method):
        '''Emit the definition of one proxy method.'''
        wrapperInterfaceName = getWrapperInterfaceName(interface)
        print method.prototype(wrapperInterfaceName + '::' + method.name) + ' {'

        # Debug aid, normally disabled.
        if False:
            print r'    os::log("%%s(%%p -> %%p)\n", "%s", this, m_pInstance);' % (wrapperInterfaceName + '::' + method.name)

        if method.type is not stdapi.Void:
            print '    %s _result;' % method.type

        # Dispatch on the base interface that actually declares the method.
        print '    %s *_this = static_cast<%s *>(m_pInstance);' % (base, base)

        for arg in method.args:
            if not arg.output:
                self.unwrapArg(method, arg)

        self.implementWrapperInterfaceMethodBody(interface, base, method)

        # XXX: wrapping should go here, but before we can do that we'll need to protect g_WrappedObjects with its own mutex

        if method.type is not stdapi.Void:
            print '    return _result;'
        print '}'
        print

    def implementWrapperInterfaceMethodBody(self, interface, base, method):
        '''Emit trace enter/invoke/leave for one proxy method body.'''
        assert not method.internal

        print '    static const char * _args[%u] = {%s};' % (len(method.args) + 1, ', '.join(['"this"'] + ['"%s"' % arg.name for arg in method.args]))
        print '    static const trace::FunctionSig _sig = {%u, "%s", %u, _args};' % (self.getFunctionSigId(), interface.name + '::' + method.name, len(method.args) + 1)

        print '    unsigned _call = trace::localWriter.beginEnter(&_sig);'
        # The implicit this pointer is recorded as argument 0.
        print '    trace::localWriter.beginArg(0);'
        print '    trace::localWriter.writePointer((uintptr_t)m_pInstance);'
        print '    trace::localWriter.endArg();'
        for arg in method.args:
            if not arg.output:
                self.serializeArg(method, arg)
        print '    trace::localWriter.endEnter();'

        self.invokeMethod(interface, base, method)

        print '    trace::localWriter.beginLeave(_call);'

        print '    if (%s) {' % self.wasFunctionSuccessful(method)
        for arg in method.args:
            if arg.output:
                self.serializeArg(method, arg)
                self.wrapArg(method, arg)
        print '    }'

        if method.type is not stdapi.Void:
            self.serializeRet(method, '_result')
        if method.type is not stdapi.Void:
            self.wrapRet(method, '_result')

        if method.name == 'Release':
            # When the refcount drops to zero the proxy must die with the
            # real object.
            assert method.type is not stdapi.Void
            print r'    if (!_result) {'
            print r'        delete this;'
            print r'    }'

        print '    trace::localWriter.endLeave();'

    def implementIidWrapper(self, api):
        '''Emit warnIID() and the IID-dispatching wrapIID() helper.'''
        ifaces = api.getAllInterfaces()

        print r'static void'
        print r'warnIID(const char *entryName, REFIID riid, void *pvObj, const char *reason) {'
        print r'    os::log("apitrace: warning: %s: %s IID %08lx-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x\n",'
        print r'            entryName, reason,'
        print r'            riid.Data1, riid.Data2, riid.Data3,'
        print r'            riid.Data4[0], riid.Data4[1], riid.Data4[2], riid.Data4[3], riid.Data4[4], riid.Data4[5], riid.Data4[6], riid.Data4[7]);'
        # Identify which module implements the unknown object, to help triage.
        print r'    void * pVtbl = *(void **)pvObj;'
        print r'    HMODULE hModule = 0;'
        print r'    BOOL bRet = GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |'
        print r'                                  GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,'
        print r'                                  (LPCTSTR)pVtbl,'
        print r'                                  &hModule);'
        print r'    assert(bRet);'
        print r'    if (bRet) {'
        print r'        char szModule[MAX_PATH];'
        print r'        DWORD dwRet = GetModuleFileNameA(hModule, szModule, sizeof szModule);'
        print r'        assert(dwRet);'
        print r'        if (dwRet) {'
        print r'            DWORD dwOffset = (UINT_PTR)pVtbl - (UINT_PTR)hModule;'
        print r'            os::log("apitrace: warning: pVtbl = %p (%s!+0x%0lx)\n", pVtbl, szModule, dwOffset);'
        print r'        }'
        print r'    }'
        print r'}'
        print
        print r'static void'
        print r'wrapIID(const char *entryName, REFIID riid, void * * ppvObj) {'
        print r'    if (!ppvObj || !*ppvObj) {'
        print r'        return;'
        print r'    }'
        else_ = ''
        for iface in ifaces:
            print r'    %sif (riid == IID_%s) {' % (else_, iface.name)
            print r'        Wrap%s::_wrap(entryName, (%s **) ppvObj);' % (iface.name, iface.name)
            print r'    }'
            else_ = 'else '
        print r'    %s{' % else_
        print r'        warnIID(entryName, riid, *ppvObj, "unknown");'
        print r'    }'
        print r'}'
        print

    def wrapIid(self, function, riid, out):
        '''Emit wrapping of an output object whose type is selected by an IID.'''
        # Cast output arg to `void **` if necessary
        out_name = out.name
        obj_type = out.type.type.type
        if not obj_type is stdapi.Void:
            assert isinstance(obj_type, stdapi.Interface)
            out_name = 'reinterpret_cast<void * *>(%s)' % out_name

        print r'    if (%s && *%s) {' % (out.name, out.name)
        functionName = function.name
        else_ = ''
        if self.interface is not None:
            functionName = self.interface.name + '::' + functionName
            # QueryInterface returning the same object: reuse this proxy
            # instead of creating a second wrapper for it.
            print r'        if (*%s == m_pInstance &&' % (out_name,)
            print r'            (%s)) {' % ' || '.join('%s == IID_%s' % (riid.name, iface.name) for iface in self.interface.iterBases())
            print r'            *%s = this;' % (out_name,)
            print r'        }'
            else_ = 'else '
        print r'        %s{' % else_
        print r'            wrapIID("%s", %s, %s);' % (functionName, riid.name, out_name)
        print r'        }'
        print r'    }'

    def invokeMethod(self, interface, base, method):
        '''Emit the call into the real (unwrapped) object.'''
        if method.type is stdapi.Void:
            result = ''
        else:
            result = '_result = '
        print '    %s_this->%s(%s);' % (result, method.name, ', '.join([str(arg.name) for arg in method.args]))

    def emit_memcpy(self, ptr, size):
        # Record a synthetic memcpy so the retracer restores memory contents.
        print '    trace::fakeMemcpy(%s, %s);' % (ptr, size)

    def fake_call(self, function, args):
        '''Emit a synthetic (fake) call record for function with given args.'''
        print '    unsigned _fake_call = trace::localWriter.beginEnter(&_%s_sig, true);' % (function.name,)
        for arg, instance in zip(function.args, args):
            assert not arg.output
            print '    trace::localWriter.beginArg(%u);' % (arg.index,)
            self.serializeValue(arg.type, instance)
            print '    trace::localWriter.endArg();'
        print '    trace::localWriter.endEnter();'
        print '    trace::localWriter.beginLeave(_fake_call);'
        print '    trace::localWriter.endLeave();'
| surround-io/apitrace | wrappers/trace.py | Python | mit | 38,536 | [
"VisIt"
] | 9b3acecef0c5060228db68865610bc0d1ad53d0ea29044c3824bb116dcd41bae |
# -*- coding: utf-8 -*-
"""
A real simple app for using webapp2 with auth and session.
It just covers the basics. Creating a user, login, logout
and a decorator for protecting certain handlers.
Routes are setup in routes.py and added in main.py
"""
# standard library imports
import logging
import json
# related third party imports
import webapp2
import httpagentparser
from webapp2_extras import security
from webapp2_extras.auth import InvalidAuthIdError, InvalidPasswordError
from webapp2_extras.i18n import gettext as _
from webapp2_extras.appengine.auth.models import Unique
from google.appengine.api import taskqueue
from google.appengine.api import users
from google.appengine.api.datastore_errors import BadValueError
from google.appengine.runtime import apiproxy_errors
from github import github
from linkedin import linkedin
# local application/library specific imports
import models
import forms as forms
from lib import utils, captcha, twitter
from lib.basehandler import BaseHandler
from lib.decorators import user_required
from lib.decorators import taskqueue_method
from lib import facebook
class LoginRequiredHandler(BaseHandler):
    """Bounce anonymous visitors to the Google login page, keeping the destination."""

    def get(self):
        # 'continue' must be present exactly once; tuple unpacking enforces that.
        (target_url,) = self.request.get('continue', allow_multiple=True)
        login_url = users.create_login_url(dest_url=target_url)
        self.redirect(login_url)
class RegisterBaseHandler(BaseHandler):
    """Shared base for handlers that render registration and login forms."""

    @webapp2.cached_property
    def form(self):
        """Build (once per request, cached) the registration form bound to this handler."""
        return forms.RegisterForm(self)
class SendEmailHandler(BaseHandler):
"""
Core Handler for sending Emails
Use with TaskQueue
"""
@taskqueue_method
def post(self):
from google.appengine.api import mail, app_identity
to = self.request.get("to")
subject = self.request.get("subject")
body = self.request.get("body")
sender = self.request.get("sender")
if sender != '' or not utils.is_email_valid(sender):
if utils.is_email_valid(self.app.config.get('contact_sender')):
sender = self.app.config.get('contact_sender')
else:
app_id = app_identity.get_application_id()
sender = "%s <no-reply@%s.appspotmail.com>" % (app_id, app_id)
if self.app.config['log_email']:
try:
logEmail = models.LogEmail(
sender=sender,
to=to,
subject=subject,
body=body,
when=utils.get_date_time("datetimeProperty")
)
logEmail.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Email Log in datastore")
try:
message = mail.EmailMessage()
message.sender = sender
message.to = to
message.subject = subject
message.html = body
message.send()
except Exception, e:
logging.error("Error sending email: %s" % e)
class LoginHandler(BaseHandler):
    """
    Handler for authentication
    """

    def get(self):
        """ Returns a simple HTML form for login """
        # NOTE(review): there is no `return` after this redirect, so the login
        # template is still rendered for logged-in users -- confirm intended.
        if self.user:
            self.redirect_to('home')
        params = {}
        return self.render_template('login.html', **params)

    def post(self):
        """
        Authenticate the user.

        username: Get the username from POST dict
        password: Get the password from POST dict
        """
        if not self.form.validate():
            return self.get()
        username = self.form.username.data.lower()
        continue_url = self.request.get('continue_url').encode('ascii', 'ignore')

        try:
            # The login field accepts either an email address or a username;
            # resolve both to a webapp2_extras auth_id.
            if utils.is_email_valid(username):
                user = models.User.get_by_email(username)
                if user:
                    auth_id = user.auth_ids[0]
                else:
                    raise InvalidAuthIdError
            else:
                auth_id = "own:%s" % username
                user = models.User.get_by_auth_id(auth_id)

            password = self.form.password.data.strip()
            remember_me = True if str(self.request.POST.get('remember_me')) == 'on' else False

            # Password to SHA512
            password = utils.hashing(password, self.app.config.get('salt'))

            # Try to login user with password
            # Raises InvalidAuthIdError if user is not found
            # Raises InvalidPasswordError if provided password
            # doesn't match with specified user
            self.auth.get_user_by_password(
                auth_id, password, remember=remember_me)

            # if user account is not activated, logout and redirect to home
            if (user.activated == False):
                # logout
                self.auth.unset_session()

                # redirect to home with error message
                resend_email_uri = self.uri_for('resend-account-activation', user_id=user.get_id(),
                                                token=models.User.create_resend_token(user.get_id()))
                message = _('Your account has not yet been activated. Please check your email to activate it or') + \
                          ' <a href="' + resend_email_uri + '">' + _('click here') + '</a> ' + _('to resend the email.')
                self.add_message(message, 'error')
                return self.redirect_to('home')

            # check twitter association in session
            # (a pre-login Twitter OAuth flow leaves association data in the
            # session; attach it to the account now that the user is known)
            twitter_helper = twitter.TwitterAuth(self)
            twitter_association_data = twitter_helper.get_association_data()
            if twitter_association_data is not None:
                if models.SocialUser.check_unique(user.key, 'twitter', str(twitter_association_data['id'])):
                    social_user = models.SocialUser(
                        user=user.key,
                        provider='twitter',
                        uid=str(twitter_association_data['id']),
                        extra_data=twitter_association_data
                    )
                    social_user.put()

            # check facebook association
            fb_data = None
            try:
                # session value may be absent or not valid JSON; both mean
                # "no pending association"
                fb_data = json.loads(self.session['facebook'])
            except:
                pass

            if fb_data is not None:
                if models.SocialUser.check_unique(user.key, 'facebook', str(fb_data['id'])):
                    social_user = models.SocialUser(
                        user=user.key,
                        provider='facebook',
                        uid=str(fb_data['id']),
                        extra_data=fb_data
                    )
                    social_user.put()

            # check linkedin association
            li_data = None
            try:
                li_data = json.loads(self.session['linkedin'])
            except:
                pass
            if li_data is not None:
                if models.SocialUser.check_unique(user.key, 'linkedin', str(li_data['id'])):
                    social_user = models.SocialUser(
                        user=user.key,
                        provider='linkedin',
                        uid=str(li_data['id']),
                        extra_data=li_data
                    )
                    social_user.put()
            # end linkedin

            # Best-effort visit logging; quota/validation errors must not
            # break the login flow.
            if self.app.config['log_visit']:
                try:
                    logVisit = models.LogVisit(
                        user=user.key,
                        uastring=self.request.user_agent,
                        ip=self.request.remote_addr,
                        timestamp=utils.get_date_time()
                    )
                    logVisit.put()
                except (apiproxy_errors.OverQuotaError, BadValueError):
                    logging.error("Error saving Visit Log in datastore")

            if continue_url:
                self.redirect(continue_url)
            else:
                self.redirect_to('home')
        except (InvalidAuthIdError, InvalidPasswordError), e:
            # Returns error message to self.response.write in
            # the BaseHandler.dispatcher
            message = _("Your username or password is incorrect. "
                        "Please try again (make sure your caps lock is off)")
            self.add_message(message, 'error')
            self.redirect_to('login', continue_url=continue_url) if continue_url else self.redirect_to('login')

    @webapp2.cached_property
    def form(self):
        # Login form bound to this request (cached per handler instance).
        return forms.LoginForm(self)
class SocialLoginHandler(BaseHandler):
    """
    Handler for Social authentication

    Starts the OAuth/OpenID dance for the provider named in the URL and
    redirects the browser to the provider's authorization page.
    """

    def get(self, provider_name):
        provider = self.provider_info[provider_name]

        if not self.app.config.get('enable_federated_login'):
            message = _('Federated login is disabled.')
            self.add_message(message, 'warning')
            return self.redirect_to('login')
        # Every provider calls back into CallbackSocialLoginHandler.
        callback_url = "%s/social_login/%s/complete" % (self.request.host_url, provider_name)

        if provider_name == "twitter":
            twitter_helper = twitter.TwitterAuth(self, redirect_uri=callback_url)
            self.redirect(twitter_helper.auth_url())
        elif provider_name == "facebook":
            # Clear any stale linkedin association left in the session so the
            # login flow does not pick it up by mistake.
            self.session['linkedin'] = None
            perms = ['email', 'publish_stream']
            self.redirect(facebook.auth_url(self.app.config.get('fb_api_key'), callback_url, perms))
        elif provider_name == 'linkedin':
            # Symmetric to the facebook branch: drop stale facebook data.
            self.session['facebook'] = None
            authentication = linkedin.LinkedInAuthentication(
                self.app.config.get('linkedin_api'),
                self.app.config.get('linkedin_secret'),
                callback_url,
                [linkedin.PERMISSIONS.BASIC_PROFILE, linkedin.PERMISSIONS.EMAIL_ADDRESS])
            self.redirect(authentication.authorization_url)
        elif provider_name == "github":
            scope = 'gist'
            github_helper = github.GithubAuth(self.app.config.get('github_server'),
                                              self.app.config.get('github_client_id'), \
                                              self.app.config.get('github_client_secret'),
                                              self.app.config.get('github_redirect_uri'), scope)
            self.redirect(github_helper.get_authorize_url())
        elif provider_name == "google":
            # Google uses the built-in App Engine users service.
            continue_url = self.request.get('continue_url')
            if continue_url:
                dest_url = self.uri_for('social-login-complete', provider_name=provider_name, continue_url=continue_url)
            else:
                dest_url = self.uri_for('social-login-complete', provider_name=provider_name)
            login_url = users.create_login_url(dest_url)
            self.redirect(login_url)
        elif provider_name in models.SocialUser.open_id_providers():
            # Generic OpenID providers go through federated identity login.
            continue_url = self.request.get('continue_url')
            if continue_url:
                dest_url = self.uri_for('social-login-complete', provider_name=provider_name, continue_url=continue_url)
            else:
                dest_url = self.uri_for('social-login-complete', provider_name=provider_name)
            try:
                login_url = users.create_login_url(federated_identity=provider['uri'], dest_url=dest_url)
                self.redirect(login_url)
            except users.NotAllowedError:
                self.add_message('You must enable Federated Login Before for this application.<br> '
                                 '<a href="http://appengine.google.com" target="_blank">Google App Engine Control Panel</a> -> '
                                 'Administration -> Application Settings -> Authentication Options', 'error')
                self.redirect_to('login')
        else:
            message = _('%s authentication is not yet implemented.' % provider.get('label'))
            self.add_message(message, 'warning')
            self.redirect_to('login')
class CallbackSocialLoginHandler(BaseHandler):
"""
Callback (Save Information) for Social Authentication
"""
def get(self, provider_name):
if not self.app.config.get('enable_federated_login'):
message = _('Federated login is disabled.')
self.add_message(message, 'warning')
return self.redirect_to('login')
continue_url = self.request.get('continue_url')
if provider_name == "twitter":
oauth_token = self.request.get('oauth_token')
oauth_verifier = self.request.get('oauth_verifier')
twitter_helper = twitter.TwitterAuth(self)
user_data = twitter_helper.auth_complete(oauth_token,
oauth_verifier)
logging.info('twitter user_data: ' + str(user_data))
if self.user:
# new association with twitter
user_info = models.User.get_by_id(long(self.user_id))
if models.SocialUser.check_unique(user_info.key, 'twitter', str(user_data['user_id'])):
social_user = models.SocialUser(
user=user_info.key,
provider='twitter',
uid=str(user_data['user_id']),
extra_data=user_data
)
social_user.put()
message = _('Twitter association added.')
self.add_message(message, 'success')
else:
message = _('This Twitter account is already in use.')
self.add_message(message, 'error')
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('edit-profile')
else:
# login with twitter
social_user = models.SocialUser.get_by_provider_and_uid('twitter',
str(user_data['user_id']))
if social_user:
# Social user exists. Need authenticate related site account
user = social_user.user.get()
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if self.app.config['log_visit']:
try:
logVisit = models.LogVisit(
user=user.key,
uastring=self.request.user_agent,
ip=self.request.remote_addr,
timestamp=utils.get_date_time()
)
logVisit.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Visit Log in datastore")
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('home')
else:
uid = str(user_data['user_id'])
email = str(user_data.get('email'))
self.create_account_from_social_provider(provider_name, uid, email, continue_url, user_data)
# github association
elif provider_name == "github":
# get our request code back from the social login handler above
code = self.request.get('code')
# create our github auth object
scope = 'gist'
github_helper = github.GithubAuth(self.app.config.get('github_server'),
self.app.config.get('github_client_id'), \
self.app.config.get('github_client_secret'),
self.app.config.get('github_redirect_uri'), scope)
# retrieve the access token using the code and auth object
access_token = github_helper.get_access_token(code)
user_data = github_helper.get_user_info(access_token)
logging.info('github user_data: ' + str(user_data))
if self.user:
# user is already logged in so we set a new association with twitter
user_info = models.User.get_by_id(long(self.user_id))
if models.SocialUser.check_unique(user_info.key, 'github', str(user_data['login'])):
social_user = models.SocialUser(
user=user_info.key,
provider='github',
uid=str(user_data['login']),
extra_data=user_data
)
social_user.put()
message = _('Github association added.')
self.add_message(message, 'success')
else:
message = _('This Github account is already in use.')
self.add_message(message, 'error')
self.redirect_to('edit-profile')
else:
# user is not logged in, but is trying to log in via github
social_user = models.SocialUser.get_by_provider_and_uid('github', str(user_data['login']))
if social_user:
# Social user exists. Need authenticate related site account
user = social_user.user.get()
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if self.app.config['log_visit']:
try:
logVisit = models.LogVisit(
user=user.key,
uastring=self.request.user_agent,
ip=self.request.remote_addr,
timestamp=utils.get_date_time()
)
logVisit.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Visit Log in datastore")
self.redirect_to('home')
else:
uid = str(user_data['id'])
email = str(user_data.get('email'))
self.create_account_from_social_provider(provider_name, uid, email, continue_url, user_data)
#end github
# facebook association
elif provider_name == "facebook":
code = self.request.get('code')
callback_url = "%s/social_login/%s/complete" % (self.request.host_url, provider_name)
token = facebook.get_access_token_from_code(code, callback_url, self.app.config.get('fb_api_key'),
self.app.config.get('fb_secret'))
access_token = token['access_token']
fb = facebook.GraphAPI(access_token)
user_data = fb.get_object('me')
logging.info('facebook user_data: ' + str(user_data))
if self.user:
# new association with facebook
user_info = models.User.get_by_id(long(self.user_id))
if models.SocialUser.check_unique(user_info.key, 'facebook', str(user_data['id'])):
social_user = models.SocialUser(
user=user_info.key,
provider='facebook',
uid=str(user_data['id']),
extra_data=user_data
)
social_user.put()
message = _('Facebook association added!')
self.add_message(message, 'success')
else:
message = _('This Facebook account is already in use!')
self.add_message(message, 'error')
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('edit-profile')
else:
# login with Facebook
social_user = models.SocialUser.get_by_provider_and_uid('facebook',
str(user_data['id']))
if social_user:
# Social user exists. Need authenticate related site account
user = social_user.user.get()
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if self.app.config['log_visit']:
try:
logVisit = models.LogVisit(
user=user.key,
uastring=self.request.user_agent,
ip=self.request.remote_addr,
timestamp=utils.get_date_time()
)
logVisit.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Visit Log in datastore")
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('home')
else:
uid = str(user_data['id'])
email = str(user_data.get('email'))
self.create_account_from_social_provider(provider_name, uid, email, continue_url, user_data)
# end facebook
# association with linkedin
elif provider_name == "linkedin":
callback_url = "%s/social_login/%s/complete" % (self.request.host_url, provider_name)
authentication = linkedin.LinkedInAuthentication(
self.app.config.get('linkedin_api'),
self.app.config.get('linkedin_secret'),
callback_url,
[linkedin.PERMISSIONS.BASIC_PROFILE, linkedin.PERMISSIONS.EMAIL_ADDRESS])
authentication.authorization_code = self.request.get('code')
access_token = authentication.get_access_token()
link = linkedin.LinkedInApplication(authentication)
u_data = link.get_profile(selectors=['id', 'first-name', 'last-name', 'email-address'])
user_data = {
'first_name': u_data.get('firstName'),
'last_name': u_data.get('lastName'),
'id': u_data.get('id'),
'email': u_data.get('emailAddress')}
self.session['linkedin'] = json.dumps(user_data)
logging.info('linkedin user_data: ' + str(user_data))
if self.user:
# new association with linkedin
user_info = models.User.get_by_id(long(self.user_id))
if models.SocialUser.check_unique(user_info.key, 'linkedin', str(user_data['id'])):
social_user = models.SocialUser(
user=user_info.key,
provider='linkedin',
uid=str(user_data['id']),
extra_data=user_data
)
social_user.put()
message = _('Linkedin association added!')
self.add_message(message, 'success')
else:
message = _('This Linkedin account is already in use!')
self.add_message(message, 'error')
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('edit-profile')
else:
# login with Linkedin
social_user = models.SocialUser.get_by_provider_and_uid('linkedin',
str(user_data['id']))
if social_user:
# Social user exists. Need authenticate related site account
user = social_user.user.get()
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if self.app.config['log_visit']:
try:
logVisit = models.LogVisit(
user=user.key,
uastring=self.request.user_agent,
ip=self.request.remote_addr,
timestamp=utils.get_date_time()
)
logVisit.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Visit Log in datastore")
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('home')
else:
uid = str(user_data['id'])
email = str(user_data.get('email'))
self.create_account_from_social_provider(provider_name, uid, email, continue_url, user_data)
#end linkedin
# google, myopenid, yahoo OpenID Providers
elif provider_name in models.SocialUser.open_id_providers():
provider_display_name = models.SocialUser.PROVIDERS_INFO[provider_name]['label']
# get info passed from OpenID Provider
from google.appengine.api import users
current_user = users.get_current_user()
if current_user:
if current_user.federated_identity():
uid = current_user.federated_identity()
else:
uid = current_user.user_id()
email = current_user.email()
else:
message = _('No user authentication information received from %s. '
'Please ensure you are logging in from an authorized OpenID Provider (OP).'
% provider_display_name)
self.add_message(message, 'error')
return self.redirect_to('login', continue_url=continue_url) if continue_url else self.redirect_to(
'login')
if self.user:
# add social account to user
user_info = models.User.get_by_id(long(self.user_id))
if models.SocialUser.check_unique(user_info.key, provider_name, uid):
social_user = models.SocialUser(
user=user_info.key,
provider=provider_name,
uid=uid
)
social_user.put()
message = _('%s association successfully added.' % provider_display_name)
self.add_message(message, 'success')
else:
message = _('This %s account is already in use.' % provider_display_name)
self.add_message(message, 'error')
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('edit-profile')
else:
# login with OpenID Provider
social_user = models.SocialUser.get_by_provider_and_uid(provider_name, uid)
if social_user:
# Social user found. Authenticate the user
user = social_user.user.get()
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if self.app.config['log_visit']:
try:
logVisit = models.LogVisit(
user=user.key,
uastring=self.request.user_agent,
ip=self.request.remote_addr,
timestamp=utils.get_date_time()
)
logVisit.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Visit Log in datastore")
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('home')
else:
self.create_account_from_social_provider(provider_name, uid, email, continue_url)
else:
message = _('This authentication method is not yet implemented.')
self.add_message(message, 'warning')
self.redirect_to('login', continue_url=continue_url) if continue_url else self.redirect_to('login')
    def create_account_from_social_provider(self, provider_name, uid, email=None, continue_url=None, user_data=None):
        """Social user does not exist yet so create it with the federated identity provided (uid)
        and create prerequisite user and log the user account in

        :param provider_name: key into models.SocialUser.PROVIDERS_INFO (e.g. 'github', 'facebook')
        :param uid: provider-unique user id, stored as the SocialUser uid
        :param email: optional email reported by the provider; treated as already verified
        :param continue_url: optional URL to redirect to when done
        :param user_data: optional dict of extra provider data stored on the SocialUser
        """
        provider_display_name = models.SocialUser.PROVIDERS_INFO[provider_name]['label']
        if models.SocialUser.check_unique_uid(provider_name, uid):
            # create user
            # Returns a tuple, where first value is BOOL.
            # If True ok, If False no new user is created
            # Assume provider has already verified email address
            # if email is provided so set activated to True
            auth_id = "%s:%s" % (provider_name, uid)
            if email:
                unique_properties = ['email']
                user_info = self.auth.store.user_model.create_user(
                    auth_id, unique_properties, email=email,
                    activated=True
                )
            else:
                user_info = self.auth.store.user_model.create_user(
                    auth_id, activated=True
                )
            if not user_info[0]: #user is a tuple
                message = _('The account %s is already in use.' % provider_display_name)
                self.add_message(message, 'error')
                return self.redirect_to('register')

            user = user_info[1]

            # create social user and associate with user
            social_user = models.SocialUser(
                user=user.key,
                provider=provider_name,
                uid=uid,
            )
            if user_data:
                social_user.extra_data = user_data
                self.session[provider_name] = json.dumps(user_data) # TODO is this needed?
            social_user.put()

            # authenticate user
            self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
            if self.app.config['log_visit']:
                try:
                    logVisit = models.LogVisit(
                        user=user.key,
                        uastring=self.request.user_agent,
                        ip=self.request.remote_addr,
                        timestamp=utils.get_date_time()
                    )
                    logVisit.put()
                except (apiproxy_errors.OverQuotaError, BadValueError):
                    # visit logging is best-effort; never block the login on datastore errors
                    logging.error("Error saving Visit Log in datastore")
            message = _('Welcome! You have been registered as a new user '
                        'and logged in through {}.').format(provider_display_name)
            self.add_message(message, 'success')
        else:
            # the provider uid is already linked to some account
            message = _('This %s account is already in use.' % provider_display_name)
            self.add_message(message, 'error')
        if continue_url:
            self.redirect(continue_url)
        else:
            self.redirect_to('edit-profile')
class DeleteSocialProviderHandler(BaseHandler):
    """
    Delete Social association with an account
    """

    @user_required
    def post(self, provider_name):
        """Remove the SocialUser link for provider_name from the logged-in user.

        Deletion is refused when it would leave the account with no other way
        to log in (no password and no other social provider).
        """
        if self.user:
            user_info = models.User.get_by_id(long(self.user_id))
            # allow deletion only if another login method remains
            if len(user_info.get_social_providers_info()['used']) > 1 or (user_info.password is not None):
                social_user = models.SocialUser.get_by_user_and_provider(user_info.key, provider_name)
                if social_user:
                    social_user.key.delete()
                    message = _('%s successfully disassociated.' % provider_name)
                    self.add_message(message, 'success')
                else:
                    message = _('Social account on %s not found for this user.' % provider_name)
                    self.add_message(message, 'error')
            else:
                # fix: wrapped in _() for translation, consistent with every other message
                message = _('Social account on %s cannot be deleted for user.'
                            ' Please create a username and password to delete social account.' % provider_name)
                self.add_message(message, 'error')
        self.redirect_to('edit-profile')
class LogoutHandler(BaseHandler):
    """
    Destroy user session and redirect to login
    """

    def get(self):
        """Clear the auth session and send the user to the configured login page."""
        if self.user:
            message = _("You've signed out successfully. Warning: Please clear all cookies and logout "
                        "of OpenID providers too if you logged in on a public computer.")
            self.add_message(message, 'info')

        self.auth.unset_session()
        # User is logged out, let's try redirecting to login page
        try:
            self.redirect(self.auth_config['login_url'])
        except (AttributeError, KeyError) as e:
            # modernized from "except ..., e" — same behavior, Python 2.6+/3 compatible
            logging.error("Error logging out: %s" % e)
            message = _("User is logged out, but there was an error on the redirection.")
            self.add_message(message, 'error')
            return self.redirect_to('home')
class RegisterHandler(BaseHandler):
    """
    Handler for Sign Up Users
    """

    def get(self):
        """ Returns a simple HTML form for create a new user """
        if self.user:
            # fix: return — without it the register page was still rendered
            # for an already-logged-in user after scheduling the redirect
            return self.redirect_to('home')
        params = {}
        return self.render_template('register.html', **params)

    def post(self):
        """ Get fields from POST dict """
        if not self.form.validate():
            return self.get()
        username = self.form.username.data.lower()
        name = self.form.name.data.strip()
        last_name = self.form.last_name.data.strip()
        email = self.form.email.data.lower()
        password = self.form.password.data.strip()
        country = self.form.country.data
        tz = self.form.tz.data

        # Password to SHA512
        password = utils.hashing(password, self.app.config.get('salt'))

        # Passing password_raw=password so password will be hashed
        # Returns a tuple, where first value is BOOL.
        # If True ok, If False no new user is created
        unique_properties = ['username', 'email']
        auth_id = "own:%s" % username
        user = self.auth.store.user_model.create_user(
            auth_id, unique_properties, password_raw=password,
            username=username, name=name, last_name=last_name, email=email,
            ip=self.request.remote_addr, country=country, tz=tz
        )

        if not user[0]: #user is a tuple
            if "username" in str(user[1]):
                message = _(
                    'Sorry, The username <strong>{}</strong> is already registered.').format(username)
            elif "email" in str(user[1]):
                message = _('Sorry, The email <strong>{}</strong> is already registered.').format(email)
            else:
                message = _('Sorry, The user is already registered.')
            self.add_message(message, 'error')
            return self.redirect_to('register')
        else:
            # User registered successfully
            # But if the user registered using the form, the user has to check their email to activate the account ???
            try:
                if not user[1].activated:
                    # send activation email
                    subject = _("%s Account Verification" % self.app.config.get('app_name'))
                    confirmation_url = self.uri_for("account-activation",
                                                    user_id=user[1].get_id(),
                                                    token=models.User.create_auth_token(user[1].get_id()),
                                                    _full=True)

                    # load email's template
                    template_val = {
                        "app_name": self.app.config.get('app_name'),
                        "username": username,
                        "confirmation_url": confirmation_url,
                        "support_url": self.uri_for("contact", _full=True)
                    }
                    body_path = "emails/account_activation.txt"
                    body = self.jinja2.render_template(body_path, **template_val)

                    email_url = self.uri_for('taskqueue-send-email')
                    taskqueue.add(url=email_url, params={
                        'to': str(email),
                        'subject': subject,
                        'body': body,
                    })

                    message = _('You were successfully registered. '
                                'Please check your email to activate your account.')
                    self.add_message(message, 'success')
                    return self.redirect_to('home')

                # If the user didn't register using registration form ???
                db_user = self.auth.get_user_by_password(user[1].auth_ids[0], password)

                # Check Twitter association in session
                twitter_helper = twitter.TwitterAuth(self)
                twitter_association_data = twitter_helper.get_association_data()
                if twitter_association_data is not None:
                    if models.SocialUser.check_unique(user[1].key, 'twitter', str(twitter_association_data['id'])):
                        social_user = models.SocialUser(
                            user=user[1].key,
                            provider='twitter',
                            uid=str(twitter_association_data['id']),
                            extra_data=twitter_association_data
                        )
                        social_user.put()

                # check Facebook association; fix: the session key only exists when the
                # user came through the Facebook flow — avoids a KeyError that previously
                # sent a successfully-registered user into the error branch
                fb_data = json.loads(self.session['facebook']) if 'facebook' in self.session else None
                if fb_data is not None:
                    # fix: user is the (bool, model) tuple — use user[1].key as the
                    # twitter branch does, not user.key (AttributeError on a tuple)
                    if models.SocialUser.check_unique(user[1].key, 'facebook', str(fb_data['id'])):
                        social_user = models.SocialUser(
                            user=user[1].key,
                            provider='facebook',
                            uid=str(fb_data['id']),
                            extra_data=fb_data
                        )
                        social_user.put()

                # check LinkedIn association (same session-presence and tuple fixes)
                li_data = json.loads(self.session['linkedin']) if 'linkedin' in self.session else None
                if li_data is not None:
                    if models.SocialUser.check_unique(user[1].key, 'linkedin', str(li_data['id'])):
                        social_user = models.SocialUser(
                            user=user[1].key,
                            provider='linkedin',
                            uid=str(li_data['id']),
                            extra_data=li_data
                        )
                        social_user.put()

                message = _('Welcome <strong>{}</strong>, you are now logged in.').format(username)
                self.add_message(message, 'success')
                return self.redirect_to('home')
            except (AttributeError, KeyError) as e:
                # modernized from "except ..., e" — Python 2.6+/3 compatible
                logging.error('Unexpected error creating the user %s: %s' % (username, e))
                message = _('Unexpected error creating the user %s' % username)
                self.add_message(message, 'error')
                return self.redirect_to('home')

    @webapp2.cached_property
    def form(self):
        # choices must be populated per-request from the handler's locale data
        f = forms.RegisterForm(self)
        f.country.choices = self.countries_tuple
        f.tz.choices = self.tz
        return f
class AccountActivationHandler(BaseHandler):
    """
    Handler for account activation
    """

    def get(self, user_id, token):
        """Activate the account for user_id when token is valid, then log the user in.

        The activation token is deleted after use so the emailed link is single-use.
        """
        try:
            if not models.User.validate_auth_token(user_id, token):
                message = _('The link is invalid.')
                self.add_message(message, 'error')
                return self.redirect_to('home')

            user = models.User.get_by_id(long(user_id))
            # activate the user's account
            user.activated = True
            user.put()

            # Login User
            self.auth.get_user_by_token(int(user_id), token)

            # Delete token so the activation link cannot be replayed
            models.User.delete_auth_token(user_id, token)

            message = _('Congratulations, Your account <strong>{}</strong> has been successfully activated.').format(
                user.username)
            self.add_message(message, 'success')
            self.redirect_to('home')

        except (AttributeError, KeyError, InvalidAuthIdError, NameError) as e:
            # modernized from "except ..., e" — same behavior, Python 2.6+/3 compatible
            logging.error("Error activating an account: %s" % e)
            message = _('Sorry, Some error occurred.')
            self.add_message(message, 'error')
            return self.redirect_to('home')
class ResendActivationEmailHandler(BaseHandler):
    """
    Handler to resend activation email
    """

    def get(self, user_id, token):
        """Resend the activation email for user_id when the resend token is valid.

        The resend token is deleted after a successful send, making the link single-use.
        """
        try:
            if not models.User.validate_resend_token(user_id, token):
                message = _('The link is invalid.')
                self.add_message(message, 'error')
                return self.redirect_to('home')

            user = models.User.get_by_id(long(user_id))
            email = user.email

            # NOTE(review): "== False" is kept deliberately — if activated can be
            # None this branch must NOT resend; confirm the property's default
            if user.activated == False:
                # send email
                subject = _("%s Account Verification" % self.app.config.get('app_name'))
                confirmation_url = self.uri_for("account-activation",
                                                user_id=user.get_id(),
                                                token=models.User.create_auth_token(user.get_id()),
                                                _full=True)

                # load email's template
                template_val = {
                    "app_name": self.app.config.get('app_name'),
                    "username": user.username,
                    "confirmation_url": confirmation_url,
                    "support_url": self.uri_for("contact", _full=True)
                }
                body_path = "emails/account_activation.txt"
                body = self.jinja2.render_template(body_path, **template_val)

                email_url = self.uri_for('taskqueue-send-email')
                taskqueue.add(url=email_url, params={
                    'to': str(email),
                    'subject': subject,
                    'body': body,
                })

                # the resend token is single-use
                models.User.delete_resend_token(user_id, token)

                message = _('The verification email has been resent to %s. '
                            'Please check your email to activate your account.' % email)
                self.add_message(message, 'success')
                return self.redirect_to('home')
            else:
                message = _('Your account has been activated. Please <a href="/login/">sign in</a> to your account.')
                self.add_message(message, 'warning')
                return self.redirect_to('home')

        except (KeyError, AttributeError) as e:
            # modernized from "except ..., e" — same behavior, Python 2.6+/3 compatible
            logging.error("Error resending activation email: %s" % e)
            message = _('Sorry, Some error occurred.')
            self.add_message(message, 'error')
            return self.redirect_to('home')
class ContactHandler(BaseHandler):
    """
    Handler for Contact Form
    """

    def get(self):
        """ Returns a simple HTML for contact form """
        if self.user:
            # pre-fill name/email from the logged-in user's profile
            user_info = models.User.get_by_id(long(self.user_id))
            if user_info.name or user_info.last_name:
                self.form.name.data = user_info.name + " " + user_info.last_name
            if user_info.email:
                self.form.email.data = user_info.email
        params = {
            "exception": self.request.get('exception')
        }
        return self.render_template('contact.html', **params)

    def post(self):
        """ validate contact form """
        if not self.form.validate():
            return self.get()
        remoteip = self.request.remote_addr
        user_agent = self.request.user_agent
        exception = self.request.POST.get('exception')
        name = self.form.name.data.strip()
        email = self.form.email.data.lower()
        message = self.form.message.data.strip()

        # fix: initialize up-front with fallback values — if user-agent parsing
        # below raises, template_val was previously undefined and the send block
        # crashed with an uncaught NameError
        template_val = {
            "name": name,
            "email": email,
            "browser": "-",
            "browser_version": "-",
            "operating_system": "-",
            "ip": remoteip,
            "message": message
        }
        try:
            # parsing user_agent and getting which os key to use
            # windows uses 'os' while other os use 'flavor'
            ua = httpagentparser.detect(user_agent)
            # fix: deprecated dict.has_key() + and/or ternary hack replaced with
            # a conditional expression — same result, Python 3 compatible
            _os = 'flavor' if 'flavor' in ua else 'os'

            operating_system = str(ua[_os]['name']) if "name" in ua[_os] else "-"
            if 'version' in ua[_os]:
                operating_system += ' ' + str(ua[_os]['version'])
            if 'dist' in ua:
                operating_system += ' ' + str(ua['dist'])

            template_val["browser"] = str(ua['browser']['name']) if 'browser' in ua else "-"
            template_val["browser_version"] = str(ua['browser']['version']) if 'browser' in ua else "-"
            template_val["operating_system"] = operating_system
        except Exception as e:
            # best-effort: a malformed user agent should not block the message
            logging.error("error getting user agent info: %s" % e)

        try:
            subject = _("Contact") + " " + self.app.config.get('app_name')
            # exceptions for error pages that redirect to contact
            if exception != "":
                subject = subject + " (Exception error: %s)" % exception
            body_path = "emails/contact.txt"
            body = self.jinja2.render_template(body_path, **template_val)
            email_url = self.uri_for('taskqueue-send-email')
            taskqueue.add(url=email_url, params={
                'to': self.app.config.get('contact_recipient'),
                'subject': subject,
                'body': body,
                'sender': self.app.config.get('contact_sender'),
            })

            message = _('Your message was sent successfully.')
            self.add_message(message, 'success')
            return self.redirect_to('contact')

        except (AttributeError, KeyError) as e:
            # modernized from "except ..., e" — Python 2.6+/3 compatible
            logging.error('Error sending contact form: %s' % e)
            message = _('Error sending the message. Please try again later.')
            self.add_message(message, 'error')
            return self.redirect_to('contact')

    @webapp2.cached_property
    def form(self):
        return forms.ContactForm(self)
class EditProfileHandler(BaseHandler):
    """
    Handler for Edit User Profile
    """

    @user_required
    def get(self):
        """ Returns a simple HTML form for edit profile """
        params = {}
        if self.user:
            user_info = models.User.get_by_id(long(self.user_id))
            self.form.username.data = user_info.username
            self.form.name.data = user_info.name
            self.form.last_name.data = user_info.last_name
            self.form.country.data = user_info.country
            self.form.tz.data = user_info.tz
            providers_info = user_info.get_social_providers_info()
            # local_account tells the template whether a password login exists
            if not user_info.password:
                params['local_account'] = False
            else:
                params['local_account'] = True
            params['used_providers'] = providers_info['used']
            params['unused_providers'] = providers_info['unused']
            params['country'] = user_info.country
            params['tz'] = user_info.tz
        return self.render_template('edit_profile.html', **params)

    def post(self):
        """ Get fields from POST dict """
        if not self.form.validate():
            return self.get()
        username = self.form.username.data.lower()
        name = self.form.name.data.strip()
        last_name = self.form.last_name.data.strip()
        country = self.form.country.data
        tz = self.form.tz.data

        try:
            user_info = models.User.get_by_id(long(self.user_id))

            try:
                message = ''
                # update username if it has changed and it isn't already taken
                if username != user_info.username:
                    user_info.unique_properties = ['username', 'email']
                    uniques = [
                        'User.username:%s' % username,
                        'User.auth_id:own:%s' % username,
                    ]
                    # Create the unique username and auth_id.
                    success, existing = Unique.create_multi(uniques)
                    if success:
                        # free old uniques
                        Unique.delete_multi(
                            ['User.username:%s' % user_info.username, 'User.auth_id:own:%s' % user_info.username])
                        # The unique values were created, so we can save the user.
                        user_info.username = username
                        user_info.auth_ids[0] = 'own:%s' % username
                        message += _('Your new username is <strong>{}</strong>').format(username)
                    else:
                        message += _(
                            'The username <strong>{}</strong> is already taken. Please choose another.').format(
                            username)
                        # At least one of the values is not unique.
                        self.add_message(message, 'error')
                        return self.get()
                user_info.name = name
                user_info.last_name = last_name
                user_info.country = country
                user_info.tz = tz
                user_info.put()
                message += " " + _('Thanks, your settings have been saved.')
                self.add_message(message, 'success')
                return self.get()

            except (AttributeError, KeyError, ValueError) as e:
                # fix: '...' + e raised TypeError (str + exception); use %-formatting
                logging.error('Error updating profile: %s' % e)
                message = _('Unable to update profile. Please try again later.')
                self.add_message(message, 'error')
                return self.get()

        except (AttributeError, TypeError):
            # modernized from "except ..., e"; the bound exception was unused
            login_error_message = _('Sorry you are not logged in.')
            self.add_message(login_error_message, 'error')
            self.redirect_to('login')

    @webapp2.cached_property
    def form(self):
        f = forms.EditProfileForm(self)
        f.country.choices = self.countries_tuple
        f.tz.choices = self.tz
        return f
class EditPasswordHandler(BaseHandler):
    """
    Handler for Edit User Password
    """

    @user_required
    def get(self):
        """ Returns a simple HTML form for editing password """
        params = {}
        return self.render_template('edit_password.html', **params)

    def post(self):
        """Change the password after re-authenticating with the current one.

        Sends a notification email and re-logs the user in with the new credentials.
        """
        if not self.form.validate():
            return self.get()
        current_password = self.form.current_password.data.strip()
        password = self.form.password.data.strip()

        try:
            user_info = models.User.get_by_id(long(self.user_id))
            auth_id = "own:%s" % user_info.username

            # Password to SHA512
            current_password = utils.hashing(current_password, self.app.config.get('salt'))
            try:
                # re-authenticate with the current password before changing anything
                user = models.User.get_by_auth_password(auth_id, current_password)
                # Password to SHA512
                password = utils.hashing(password, self.app.config.get('salt'))
                user.password = security.generate_password_hash(password, length=12)
                user.put()

                # notify the user by email that the password was changed
                subject = self.app.config.get('app_name') + " Account Password Changed"

                # load email's template
                template_val = {
                    "app_name": self.app.config.get('app_name'),
                    "first_name": user.name,
                    "username": user.username,
                    "email": user.email,
                    "reset_password_url": self.uri_for("password-reset", _full=True)
                }
                email_body_path = "emails/password_changed.txt"
                email_body = self.jinja2.render_template(email_body_path, **template_val)
                email_url = self.uri_for('taskqueue-send-email')
                taskqueue.add(url=email_url, params={
                    'to': user.email,
                    'subject': subject,
                    'body': email_body,
                    'sender': self.app.config.get('contact_sender'),
                })

                # Login User with the new credentials
                self.auth.get_user_by_password(user.auth_ids[0], password)
                self.add_message(_('Password changed successfully.'), 'success')
                return self.redirect_to('edit-profile')
            except (InvalidAuthIdError, InvalidPasswordError):
                # Returns error message to self.response.write in
                # the BaseHandler.dispatcher
                # (modernized from "except ..., e"; the exception value was unused)
                message = _("Incorrect password! Please enter your current password to change your account settings.")
                self.add_message(message, 'error')
                return self.redirect_to('edit-password')
        except (AttributeError, TypeError):
            login_error_message = _('Sorry you are not logged in.')
            self.add_message(login_error_message, 'error')
            self.redirect_to('login')

    @webapp2.cached_property
    def form(self):
        return forms.EditPasswordForm(self)
class EditEmailHandler(BaseHandler):
    """
    Handler for Edit User's Email
    """

    @user_required
    def get(self):
        """ Returns a simple HTML form for edit email """
        params = {}
        if self.user:
            user_info = models.User.get_by_id(long(self.user_id))
            params['current_email'] = user_info.email
        return self.render_template('edit_email.html', **params)

    def post(self):
        """Start an email change: verify the password, then email a confirmation link.

        The address itself is only updated by EmailChangedCompleteHandler after
        the user clicks the link sent to the new address.
        """
        if not self.form.validate():
            return self.get()
        new_email = self.form.new_email.data.strip()
        password = self.form.password.data.strip()

        try:
            user_info = models.User.get_by_id(long(self.user_id))
            auth_id = "own:%s" % user_info.username
            # Password to SHA512
            password = utils.hashing(password, self.app.config.get('salt'))

            try:
                # authenticate user by its password
                user = models.User.get_by_auth_password(auth_id, password)

                # if the user change his/her email address
                if new_email != user.email:

                    # check whether the new email has been used by another user
                    aUser = models.User.get_by_email(new_email)
                    if aUser is not None:
                        message = _("The email %s is already registered." % new_email)
                        self.add_message(message, 'error')
                        return self.redirect_to("edit-email")

                    # build the single-use confirmation link for the change
                    subject = _("%s Email Changed Notification" % self.app.config.get('app_name'))
                    user_token = models.User.create_auth_token(self.user_id)
                    confirmation_url = self.uri_for("email-changed-check",
                                                    user_id=user_info.get_id(),
                                                    encoded_email=utils.encode(new_email),
                                                    token=user_token,
                                                    _full=True)

                    # load email's template
                    template_val = {
                        "app_name": self.app.config.get('app_name'),
                        "first_name": user.name,
                        "username": user.username,
                        "new_email": new_email,
                        "confirmation_url": confirmation_url,
                        "support_url": self.uri_for("contact", _full=True)
                    }

                    old_body_path = "emails/email_changed_notification_old.txt"
                    old_body = self.jinja2.render_template(old_body_path, **template_val)

                    new_body_path = "emails/email_changed_notification_new.txt"
                    new_body = self.jinja2.render_template(new_body_path, **template_val)

                    # notify the old address; send the confirmation link to the new one
                    email_url = self.uri_for('taskqueue-send-email')
                    taskqueue.add(url=email_url, params={
                        'to': user.email,
                        'subject': subject,
                        'body': old_body,
                    })
                    taskqueue.add(url=email_url, params={
                        'to': new_email,
                        'subject': subject,
                        'body': new_body,
                    })

                    # display successful message
                    msg = _(
                        "Please check your new email for confirmation. Your email will be updated after confirmation.")
                    self.add_message(msg, 'success')
                    return self.redirect_to('edit-profile')

                else:
                    self.add_message(_("You didn't change your email."), "warning")
                    return self.redirect_to("edit-email")

            except (InvalidAuthIdError, InvalidPasswordError):
                # Returns error message to self.response.write in
                # the BaseHandler.dispatcher
                # (modernized from "except ..., e"; the exception value was unused)
                message = _("Incorrect password! Please enter your current password to change your account settings.")
                self.add_message(message, 'error')
                return self.redirect_to('edit-email')
        except (AttributeError, TypeError):
            login_error_message = _('Sorry you are not logged in.')
            self.add_message(login_error_message, 'error')
            self.redirect_to('login')

    @webapp2.cached_property
    def form(self):
        return forms.EditEmailForm(self)
class PasswordResetHandler(BaseHandler):
    """
    Password Reset Handler with Captcha
    """

    def get(self):
        # Render the reset form together with a reCAPTCHA challenge.
        chtml = captcha.displayhtml(
            public_key=self.app.config.get('captcha_public_key'),
            use_ssl=(self.request.scheme == 'https'),
            error=None)
        # NOTE(review): the second comparison checks the *private* key against the
        # PUBLIC placeholder text — presumably a copy-paste slip; verify the default
        # value of 'captcha_private_key' in the config before changing it.
        if self.app.config.get('captcha_public_key') == "PUT_YOUR_RECAPCHA_PUBLIC_KEY_HERE" or \
                self.app.config.get('captcha_private_key') == "PUT_YOUR_RECAPCHA_PUBLIC_KEY_HERE":
            chtml = '<div class="alert alert-error"><strong>Error</strong>: You have to ' \
                    '<a href="http://www.google.com/recaptcha/whyrecaptcha" target="_blank">sign up ' \
                    'for API keys</a> in order to use reCAPTCHA.</div>' \
                    '<input type="hidden" name="recaptcha_challenge_field" value="manual_challenge" />' \
                    '<input type="hidden" name="recaptcha_response_field" value="manual_challenge" />'
        params = {
            'captchahtml': chtml,
        }
        return self.render_template('password_reset.html', **params)

    def post(self):
        # check captcha
        challenge = self.request.POST.get('recaptcha_challenge_field')
        response = self.request.POST.get('recaptcha_response_field')
        remoteip = self.request.remote_addr

        cResponse = captcha.submit(
            challenge,
            response,
            self.app.config.get('captcha_private_key'),
            remoteip)

        if cResponse.is_valid:
            # captcha was valid... carry on..nothing to see here
            pass
        else:
            _message = _('Wrong image verification code. Please try again.')
            self.add_message(_message, 'error')
            return self.redirect_to('password-reset')

        # check if we got an email or username
        email_or_username = str(self.request.POST.get('email_or_username')).lower().strip()
        if utils.is_email_valid(email_or_username):
            user = models.User.get_by_email(email_or_username)
            _message = _("If the email address you entered") + " (<strong>%s</strong>) " % email_or_username
        else:
            auth_id = "own:%s" % email_or_username
            user = models.User.get_by_auth_id(auth_id)
            _message = _("If the username you entered") + " (<strong>%s</strong>) " % email_or_username

        # The same non-committal message is shown whether or not the account
        # exists, so this form cannot be used to probe for registered users.
        _message = _message + _("is associated with an account in our records, you will receive "
                                "an email from us with instructions for resetting your password. "
                                "<br>If you don't receive instructions within a minute or two, "
                                "check your email's spam and junk filters, or ") + \
                   '<a href="' + self.uri_for('contact') + '">' + _('contact us') + '</a> ' + _(
            "for further assistance.")

        if user is not None:
            # queue the reset email only when a matching account exists
            user_id = user.get_id()
            token = models.User.create_auth_token(user_id)
            email_url = self.uri_for('taskqueue-send-email')
            reset_url = self.uri_for('password-reset-check', user_id=user_id, token=token, _full=True)
            subject = _("%s Password Assistance" % self.app.config.get('app_name'))
            # load email's template
            template_val = {
                "username": user.username,
                "email": user.email,
                "reset_password_url": reset_url,
                "support_url": self.uri_for("contact", _full=True),
                "app_name": self.app.config.get('app_name'),
            }
            body_path = "emails/reset_password.txt"
            body = self.jinja2.render_template(body_path, **template_val)
            taskqueue.add(url=email_url, params={
                'to': user.email,
                'subject': subject,
                'body': body,
                'sender': self.app.config.get('contact_sender'),
            })
        self.add_message(_message, 'warning')
        return self.redirect_to('login')
class PasswordResetCompleteHandler(BaseHandler):
    """
    Handler to process the link of reset password that received the user
    """

    def get(self, user_id, token):
        """Show the new-password form if the reset token still resolves to a user."""
        lookup = models.User.get_by_auth_token(int(user_id), token)
        if lookup[0] is None:
            # token expired or never existed: send the user back to request a new link
            warning = _('The URL you tried to use is either incorrect or no longer valid. '
                        'Enter your details again below to get a new one.')
            self.add_message(warning, 'warning')
            return self.redirect_to('password-reset')
        return self.render_template('password_reset_complete.html')

    def post(self, user_id, token):
        """Store the new password when the token is valid and the form validates."""
        lookup = models.User.get_by_auth_token(int(user_id), token)
        account = lookup[0]
        new_password = self.form.password.data.strip()
        # form validation is only attempted when the token resolved to a user
        if not (account and self.form.validate()):
            self.add_message(_('The two passwords must match.'), 'error')
            return self.redirect_to('password-reset-check', user_id=user_id, token=token)
        # Password to SHA512, then store the salted hash
        new_password = utils.hashing(new_password, self.app.config.get('salt'))
        account.password = security.generate_password_hash(new_password, length=12)
        account.put()
        # the one-time reset token has served its purpose
        models.User.delete_auth_token(int(user_id), token)
        # log the user straight in with the fresh credentials
        self.auth.get_user_by_password(account.auth_ids[0], new_password)
        self.add_message(_('Password changed successfully.'), 'success')
        return self.redirect_to('home')

    @webapp2.cached_property
    def form(self):
        return forms.PasswordResetCompleteForm(self)
class EmailChangedCompleteHandler(BaseHandler):
    """
    Handler for completed email change
    Will be called when the user click confirmation link from email
    """

    def get(self, user_id, encoded_email, token):
        lookup = models.User.get_by_auth_token(int(user_id), token)
        new_email = utils.decode(encoded_email)

        if lookup[0] is None:
            # Invalid or expired token: warn and go back home.
            self.add_message(
                _('The URL you tried to use is either incorrect or no longer valid.'),
                'warning')
            self.redirect_to('home')
        else:
            # save new email
            account = lookup[0]
            account.email = new_email
            account.put()
            # delete token
            models.User.delete_auth_token(int(user_id), token)
            # add successful message and redirect
            self.add_message(_('Your email has been successfully updated.'), 'success')
            self.redirect_to('edit-profile')
class HomeRequestHandler(RegisterBaseHandler):
    """
    Handler to show the home page
    """

    def get(self):
        """ Returns a simple HTML form for home """
        return self.render_template('home.html', **{})
| mats116/ElasticBigQuery | boilerplate/handlers.py | Python | lgpl-3.0 | 65,902 | [
"VisIt"
] | f6c4b3477a8524b906cee2cd0e74cc3b37e32d603f9145394d2f252ab5d40ca9 |
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
# Standard Library
import json
# Django
from django.core import mail
from django.core.cache import cache
from django.core.cache.utils import make_template_fragment_key
from django.template import (
Context,
Template,
)
from django.urls import reverse
# wger
from wger.core.tests import api_base_test
from wger.core.tests.base_testcase import (
STATUS_CODES_FAIL,
WgerDeleteTestCase,
WgerTestCase,
)
from wger.exercises.models import (
Exercise,
ExerciseCategory,
Muscle,
)
from wger.utils.cache import cache_mapper
from wger.utils.constants import WORKOUT_TAB
from wger.utils.helpers import random_string
class ExerciseRepresentationTestCase(WgerTestCase):
    """
    Test the representation of a model
    """

    def test_representation(self):
        """
        Test that the representation of an object is correct
        """
        exercise = Exercise.objects.get(pk=1)
        self.assertEqual("{0}".format(exercise), 'An exercise')
class ExerciseShareButtonTestCase(WgerTestCase):
    """
    Test that the share button is correctly displayed and hidden
    """

    def test_share_button(self):
        detail_url = Exercise.objects.get(pk=1).get_absolute_url()

        # Anonymous visitors, admins and regular users all see the button
        # (same order of requests as before: logged out, admin, test).
        for username in (None, 'admin', 'test'):
            if username is not None:
                self.user_login(username)
            response = self.client.get(detail_url)
            self.assertTrue(response.context['show_shariff'])
class ExerciseIndexTestCase(WgerTestCase):
    """Tests for the exercise overview (index) page."""

    def exercise_index(self, logged_in=True, demo=False, admin=False):
        """
        Tests the exercise overview page
        """
        response = self.client.get(reverse('exercise:exercise:overview'))

        # Page exists
        self.assertEqual(response.status_code, 200)

        # Correct tab is selected
        self.assertEqual(response.context['active_tab'], WORKOUT_TAB)

        # Correct categories are shown (ids/names come from the test fixtures)
        category_1 = response.context['exercises'][0].exercise_base.category
        self.assertEqual(category_1.id, 2)
        self.assertEqual(category_1.name, "Another category")
        category_2 = response.context['exercises'][1].exercise_base.category
        self.assertEqual(category_2.id, 3)
        self.assertEqual(category_2.name, "Yet another category")

        # Correct exercises in the categories
        exercise_bases_1 = category_1.exercisebase_set.all()
        exercises_1 = exercise_bases_1[0].exercises.all()
        exercises_2 = exercise_bases_1[1].exercises.all()
        exercise_1 = exercises_1[0]
        exercise_2 = exercises_2[0]
        self.assertEqual(exercise_1.id, 1)
        self.assertEqual(exercise_1.name, "An exercise")
        self.assertEqual(exercise_2.id, 2)
        self.assertEqual(exercise_2.name, "Very cool exercise")
        self.assertContains(response, 'Add new exercise')

        # Only authorized users see the edit links.
        # NOTE: admins and regular logged-in users assert the same thing;
        # only demo users should see the "registered users only" notice.
        if admin:
            self.assertNotContains(response, 'Only registered users can do this')
        if logged_in and not demo:
            self.assertNotContains(response, 'Only registered users can do this')
        if logged_in and demo:
            self.assertContains(response, 'Only registered users can do this')

    def test_exercise_index_editor(self):
        """
        Tests the exercise overview page as a logged in user with editor rights
        """
        self.user_login('admin')
        self.exercise_index(admin=True)

    def test_exercise_index_non_editor(self):
        """
        Tests the exercise overview page as a logged in user without editor rights
        """
        self.user_login('test')
        self.exercise_index()

    def test_exercise_index_demo_user(self):
        """
        Tests the exercise overview page as a logged in demo user
        """
        self.user_login('demo')
        self.exercise_index(demo=True)

    def test_exercise_index_logged_out(self):
        """
        Tests the exercise overview page as an anonymous (logged out) user
        """
        self.exercise_index(logged_in=False)

    def test_empty_exercise_index(self):
        """
        Test the index when there are no categories
        """
        self.user_login('admin')
        # Deleting the categories cascades to the exercises as well
        ExerciseCategory.objects.all().delete()
        response = self.client.get(reverse('exercise:exercise:overview'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'No categories')
class ExerciseDetailTestCase(WgerTestCase):
    """
    Tests the exercise details page
    """

    def exercise_detail(self, editor=False):
        """
        Tests the exercise details page
        """
        response = self.client.get(reverse('exercise:exercise:view', kwargs={'id': 1}))
        self.assertEqual(response.status_code, 200)

        # Correct tab is selected
        self.assertEqual(response.context['active_tab'], WORKOUT_TAB)

        # Exercise loaded correct muscles (ids from the fixtures)
        exercise_1 = response.context['exercise']
        self.assertEqual(exercise_1.id, 1)
        muscles = exercise_1.exercise_base.muscles.all()
        muscle_1 = muscles[0]
        muscle_2 = muscles[1]
        self.assertEqual(muscle_1.id, 1)
        self.assertEqual(muscle_2.id, 2)

        # Only authorized users see the edit links
        if editor:
            self.assertContains(response, 'Edit')
            self.assertContains(response, 'Delete')
            self.assertContains(response, 'Add new comment')
            self.assertNotContains(response, 'Exercise is pending review')
        else:
            self.assertNotContains(response, 'Edit')
            self.assertNotContains(response, 'Delete')
            self.assertNotContains(response, 'Add new comment')
            self.assertNotContains(response, 'Exercise is pending review')

        # Ensure that non-existent exercises throw a 404.
        response = self.client.get(reverse('exercise:exercise:view', kwargs={'id': 42}))
        self.assertEqual(response.status_code, 404)

    def test_exercise_detail_editor(self):
        """
        Tests the exercise details page as a logged in user with editor rights
        """
        self.user_login('admin')
        self.exercise_detail(editor=True)

    def test_exercise_detail_non_editor(self):
        """
        Tests the exercise details page as a logged in user without editor rights
        """
        self.user_login('test')
        self.exercise_detail(editor=False)

    def test_exercise_detail_logged_out(self):
        """
        Tests the exercise details page as an anonymous (logged out) user
        """
        self.exercise_detail(editor=False)
class ExercisesTestCase(WgerTestCase):
    """
    Exercise test case: adding, editing and searching exercises
    """

    def add_exercise_user_fail(self):
        """
        Helper function to test adding exercises by users that aren't authorized
        """
        # Add an exercise
        count_before = Exercise.objects.count()
        response = self.client.post(
            reverse('exercise:exercise:add'), {
                'name_original': random_string(),
                'license': 1,
                'exercise_base': {
                    'category': 2,
                    'muscles': [1, 2]
                }
            }
        )
        count_after = Exercise.objects.count()
        self.assertIn(response.status_code, STATUS_CODES_FAIL)

        # Exercise was not added
        self.assertEqual(count_before, count_after)

    def test_add_exercise_temp_user(self):
        """
        Tests adding an exercise with a logged in demo user
        """
        self.user_login('demo')
        self.add_exercise_user_fail()

    def test_add_exercise_no_user(self):
        """
        Tests adding an exercise with a logged out (anonymous) user
        """
        self.user_logout()
        self.add_exercise_user_fail()
        self.user_logout()

    def test_add_exercise_name_too_similar_fail(self):
        """
        Tests that adding an exercise with a name that is too similar
        to an existing exercise fails
        """
        count_before = Exercise.objects.count()
        response = self.client.post(
            reverse('exercise:exercise:add'), {
                'category': 2,
                'name_original': 'Squats',
                'license': 1,
                'muscles': [1, 2]
            }
        )
        count_after = Exercise.objects.count()
        self.assertIn(response.status_code, STATUS_CODES_FAIL)

        # Exercise was not added
        self.assertEqual(count_before, count_after)

    def add_exercise_success(self, admin=False):
        """
        Tests adding/editing an exercise with a user with enough rights to do this
        """
        # Add an exercise
        count_before = Exercise.objects.count()
        description = 'a nice, long and accurate description for the exercise'
        name_original = random_string()
        response = self.client.post(
            reverse('exercise:exercise:add'), {
                'name_original': name_original,
                'license': 1,
                'description': description,
                'category': 2,
                'muscles': [1, 2]
            }
        )
        count_after = Exercise.objects.count()
        self.assertEqual(response.status_code, 302)
        new_location = response['Location']
        self.assertEqual(count_before + 1, count_after, 'Exercise was not added')
        response = self.client.get(new_location)
        exercise_id = response.context['exercise'].id

        # Exercise was saved
        exercise = Exercise.objects.get(pk=exercise_id)
        if admin:
            # Admin submissions are accepted immediately
            self.assertEqual(exercise.license_author, 'testserver')
            self.assertEqual(exercise.status, Exercise.STATUS_ACCEPTED)
        else:
            # Regular submissions await review
            self.assertEqual(exercise.license_author, 'test')
            self.assertEqual(exercise.status, Exercise.STATUS_PENDING)

        response = self.client.get(reverse('exercise:exercise:view', kwargs={'id': exercise_id}))
        self.assertEqual(response.status_code, 200)

        # Navigation tab
        self.assertEqual(response.context['active_tab'], WORKOUT_TAB)

        exercise_1 = Exercise.objects.get(pk=exercise_id)
        self.assertEqual(exercise_1.name, name_original)

        # Wrong category - adding
        # (the original literal repeated the 'category' key; duplicate keys
        # in a dict literal silently collapse, so only one is kept here)
        response = self.client.post(
            reverse('exercise:exercise:add'), {
                'category': 111,
                'name_original': random_string(),
                'license': 1,
                'muscles': [1, 2]
            }
        )
        self.assertTrue(response.context['form'].errors['category'])

        # Wrong category - editing
        response = self.client.post(
            reverse('exercise:exercise:edit', kwargs={'pk': '1'}), {
                'category': 111,
                'name_original': random_string(),
                'license': 1,
                'muscles': [1, 2]
            }
        )
        if admin:
            self.assertTrue(response.context['form'].errors['category'])
        else:
            self.assertIn(response.status_code, STATUS_CODES_FAIL)

        # No muscles - adding (allowed: the form accepts an empty selection)
        response = self.client.post(
            reverse('exercise:exercise:add'), {
                'category': 1,
                'name_original': random_string(),
                'license': 1,
                'muscles': []
            }
        )
        self.assertEqual(response.status_code, 302)

        # No muscles - editing
        response = self.client.post(
            reverse('exercise:exercise:edit', kwargs={'pk': '1'}), {
                'category': 1,
                'name_original': random_string(),
                'license': 1,
                'muscles': []
            }
        )
        if admin:
            self.assertEqual(response.status_code, 302)
        else:
            self.assertIn(response.status_code, STATUS_CODES_FAIL)

    def test_add_exercise_success(self):
        """
        Tests adding/editing an exercise with a user with enough rights to do this
        """
        self.user_login('admin')
        self.add_exercise_success(admin=True)

    def test_add_exercise_user_no_rights(self):
        """
        Tests adding an exercise with a user without enough rights to do this
        """
        self.user_login('test')
        self.add_exercise_success(admin=False)
        self.assertEqual(len(mail.outbox), 2)

    def search_exercise(self, fail=True):
        """
        Helper function to test searching for exercises
        """
        # 1 hit, "Very cool exercise"
        response = self.client.get(reverse('exercise-search'), {'term': 'cool'})
        self.assertEqual(response.status_code, 200)
        result = json.loads(response.content.decode('utf8'))
        self.assertEqual(len(result), 1)
        self.assertEqual(result['suggestions'][0]['value'], 'Very cool exercise')
        self.assertEqual(result['suggestions'][0]['data']['id'], 2)
        self.assertEqual(result['suggestions'][0]['data']['category'], 'Another category')
        self.assertEqual(result['suggestions'][0]['data']['image'], None)
        self.assertEqual(result['suggestions'][0]['data']['image_thumbnail'], None)

        # 0 hits, "Pending exercise"
        response = self.client.get(reverse('exercise-search'), {'term': 'Pending'})
        self.assertEqual(response.status_code, 200)
        result = json.loads(response.content.decode('utf8'))
        self.assertEqual(len(result['suggestions']), 0)

    def test_search_exercise_anonymous(self):
        """
        Test searching for an exercise by an anonymous user
        """
        self.search_exercise()

    def test_search_exercise_logged_in(self):
        """
        Test searching for an exercise by a logged in user
        """
        self.user_login('test')
        self.search_exercise()
class DeleteExercisesTestCase(WgerDeleteTestCase):
    """
    Exercise test case
    """

    object_class = Exercise           # model under test
    url = 'exercise:exercise:delete'  # named URL used for the delete request
    pk = 2                            # fixture primary key to delete
    user_success = 'admin'            # user allowed to delete
    user_fail = 'test'                # user that must be rejected
class ExercisesCacheTestCase(WgerTestCase):
    """
    Exercise cache test case
    """

    def test_exercise_overview(self):
        """
        Test the exercise overview cache is correctly generated on visit
        """
        self.assertFalse(cache.get(make_template_fragment_key('exercise-overview', [2])))
        self.client.get(reverse('exercise:exercise:overview'))
        self.assertTrue(cache.get(make_template_fragment_key('exercise-overview', [2])))

    def test_exercise_detail(self):
        """
        Test that the exercise detail cache is correctly generated on visit
        """
        # The original test had a docstring but no body. The muscle fragment
        # is the cached part of the detail page; it uses the same key as the
        # tests below ("<exercise pk>-<language pk>").
        self.assertFalse(cache.get(make_template_fragment_key('exercise-detail-muscles', ["2-2"])))
        self.client.get(reverse('exercise:exercise:view', kwargs={'id': 2}))
        self.assertTrue(cache.get(make_template_fragment_key('exercise-detail-muscles', ["2-2"])))

    def test_overview_cache_update(self):
        """
        Test that the template cache for the overview is correctly reset when
        performing certain operations
        """
        self.assertFalse(cache.get(make_template_fragment_key('muscle-overview', [2])))
        self.assertFalse(cache.get(make_template_fragment_key('muscle-overview-search', [2])))
        self.assertFalse(cache.get(make_template_fragment_key('exercise-overview', [2])))

        self.client.get(reverse('exercise:exercise:overview'))
        self.client.get(reverse('exercise:exercise:view', kwargs={'id': 2}))

        old_muscle_overview = cache.get(make_template_fragment_key('muscle-overview', [2]))
        old_exercise_overview = cache.get(make_template_fragment_key('exercise-overview', [2]))

        exercise = Exercise.objects.get(pk=2)
        exercise.name = 'Very cool exercise 2'
        exercise.description = 'New description'
        exercise.exercise_base.muscles_secondary.add(Muscle.objects.get(pk=2))
        exercise.save()

        # Saving the exercise must invalidate the cached fragments
        self.assertFalse(cache.get(make_template_fragment_key('muscle-overview', [2])))
        self.assertFalse(cache.get(make_template_fragment_key('exercise-overview', [2])))

        self.client.get(reverse('exercise:exercise:overview'))
        self.client.get(reverse('exercise:muscle:overview'))
        self.client.get(reverse('exercise:exercise:view', kwargs={'id': 2}))

        new_muscle_overview = cache.get(make_template_fragment_key('muscle-overview', [2]))
        new_exercise_overview = cache.get(make_template_fragment_key('exercise-overview', [2]))

        self.assertNotEqual(old_exercise_overview, new_exercise_overview)
        self.assertNotEqual(old_muscle_overview, new_muscle_overview)

    def test_muscles_cache_update_on_delete(self):
        """
        Test that the muscle fragment cache is correctly reset when a
        muscle is deleted
        """
        self.assertFalse(cache.get(make_template_fragment_key('exercise-detail-muscles', ["2-2"])))
        self.client.get(reverse('exercise:exercise:view', kwargs={'id': 2}))
        self.assertTrue(cache.get(make_template_fragment_key('exercise-detail-muscles', ["2-2"])))

        muscle = Muscle.objects.get(pk=2)
        muscle.delete()
        self.assertFalse(cache.get(make_template_fragment_key('exercise-detail-muscles', ["2-2"])))

    def test_muscles_cache_update_on_update(self):
        """
        Test that the muscle fragment cache is correctly reset when a
        muscle is updated
        """
        self.assertFalse(cache.get(make_template_fragment_key('exercise-detail-muscles', ["2-2"])))
        self.client.get(reverse('exercise:exercise:view', kwargs={'id': 2}))
        self.assertTrue(cache.get(make_template_fragment_key('exercise-detail-muscles', ["2-2"])))

        muscle = Muscle.objects.get(pk=2)
        muscle.name = 'foo'
        muscle.save()
        self.assertFalse(cache.get(make_template_fragment_key('exercise-detail-muscles', ["2-2"])))
class MuscleTemplateTagTest(WgerTestCase):
    """Tests for the {% render_muscles %} template tag."""

    def test_render_main_muscles(self):
        """
        Test that the tag renders only the main muscles
        """
        context = Context({'muscles': Muscle.objects.get(pk=2)})
        template = Template('{% load wger_extras %}'
                            '{% render_muscles muscles %}')
        rendered_template = template.render(context)
        self.assertIn('images/muscles/main/muscle-2.svg', rendered_template)
        self.assertNotIn('images/muscles/secondary/', rendered_template)
        # Muscle 2 is a back muscle per the fixtures
        self.assertIn('images/muscles/muscular_system_back.svg', rendered_template)

    def test_render_main_muscles_empty_secondary(self):
        """
        Test that the tag works when given main muscles and empty secondary ones
        """
        context = Context({"muscles": Muscle.objects.get(pk=2), "muscles_sec": []})
        template = Template('{% load wger_extras %}'
                            '{% render_muscles muscles muscles_sec %}')
        rendered_template = template.render(context)
        self.assertIn('images/muscles/main/muscle-2.svg', rendered_template)
        self.assertNotIn('images/muscles/secondary/', rendered_template)
        self.assertIn('images/muscles/muscular_system_back.svg', rendered_template)

    def test_render_secondary_muscles(self):
        """
        Test that the tag renders only the secondary muscles
        """
        context = Context({'muscles': Muscle.objects.get(pk=1)})
        template = Template('{% load wger_extras %}'
                            '{% render_muscles muscles_sec=muscles %}')
        rendered_template = template.render(context)
        self.assertIn('images/muscles/secondary/muscle-1.svg', rendered_template)
        self.assertNotIn('images/muscles/main/', rendered_template)
        self.assertIn('images/muscles/muscular_system_front.svg', rendered_template)

    def test_render_secondary_muscles_empty_primary(self):
        """
        Test that the tag works when given secondary muscles and empty main ones
        """
        context = Context({'muscles_sec': Muscle.objects.get(pk=1), 'muscles': []})
        template = Template('{% load wger_extras %}'
                            '{% render_muscles muscles muscles_sec %}')
        rendered_template = template.render(context)
        self.assertIn('images/muscles/secondary/muscle-1.svg', rendered_template)
        self.assertNotIn('images/muscles/main/', rendered_template)
        self.assertIn('images/muscles/muscular_system_front.svg', rendered_template)

    def test_render_secondary_muscles_list(self):
        """
        Test that the tag works when given a list for secondary muscles and empty main ones
        """
        context = Context({'muscles_sec': Muscle.objects.filter(is_front=True), 'muscles': []})
        template = Template('{% load wger_extras %}'
                            '{% render_muscles muscles muscles_sec %}')
        rendered_template = template.render(context)
        self.assertIn('images/muscles/secondary/muscle-1.svg', rendered_template)
        self.assertNotIn('images/muscles/secondary/muscle-2.svg', rendered_template)
        self.assertNotIn('images/muscles/secondary/muscle-3.svg', rendered_template)
        # Only the front background is used since all muscles are front ones
        self.assertIn('images/muscles/muscular_system_front.svg', rendered_template)
        self.assertNotIn('images/muscles/muscular_system_back.svg', rendered_template)

    def test_render_muscle_list(self):
        """
        Test that the tag works when given a list for main and secondary muscles
        """
        context = Context(
            {
                'muscles_sec': Muscle.objects.filter(id__in=[5, 6]),
                'muscles': Muscle.objects.filter(id__in=[1, 4])
            }
        )
        template = Template('{% load wger_extras %}'
                            '{% render_muscles muscles muscles_sec %}')
        rendered_template = template.render(context)
        self.assertIn('images/muscles/main/muscle-1.svg', rendered_template)
        self.assertNotIn('images/muscles/main/muscle-2.svg', rendered_template)
        self.assertNotIn('images/muscles/main/muscle-3.svg', rendered_template)
        self.assertIn('images/muscles/main/muscle-4.svg', rendered_template)
        self.assertIn('images/muscles/secondary/muscle-5.svg', rendered_template)
        self.assertIn('images/muscles/secondary/muscle-6.svg', rendered_template)
        self.assertIn('images/muscles/muscular_system_front.svg', rendered_template)
        self.assertNotIn('images/muscles/muscular_system_back.svg', rendered_template)

    def test_render_empty(self):
        """
        Test that the tag works when given empty input
        """
        context = Context({'muscles': [], 'muscles_sec': []})
        template = Template('{% load wger_extras %}'
                            '{% render_muscles muscles muscles_sec %}')
        rendered_template = template.render(context)
        self.assertEqual(rendered_template, "\n\n")

    def test_render_no_parameters(self):
        """
        Test that the tag works when given no parameters
        """
        template = Template('{% load wger_extras %}'
                            '{% render_muscles %}')
        rendered_template = template.render(Context({}))
        self.assertEqual(rendered_template, "\n\n")
class WorkoutCacheTestCase(WgerTestCase):
    """
    Workout cache test case
    """

    def test_canonical_form_cache_save(self):
        """
        Tests the workout cache when saving
        """
        exercise = Exercise.objects.get(pk=2)
        for setting in exercise.setting_set.all():
            # Accessing the property populates the workout cache as a
            # side effect
            setting.set.exerciseday.training.canonical_representation
            workout_id = setting.set.exerciseday.training_id
            self.assertTrue(cache.get(cache_mapper.get_workout_canonical(workout_id)))

            # Saving the exercise must invalidate the cached workout
            exercise.save()
            self.assertFalse(cache.get(cache_mapper.get_workout_canonical(workout_id)))

    def test_canonical_form_cache_delete(self):
        """
        Tests the workout cache when deleting
        """
        exercise = Exercise.objects.get(pk=2)

        workout_ids = []
        for setting in exercise.setting_set.all():
            workout_id = setting.set.exerciseday.training_id
            workout_ids.append(workout_id)
            # Populate the cache for every workout using this exercise
            setting.set.exerciseday.training.canonical_representation
            self.assertTrue(cache.get(cache_mapper.get_workout_canonical(workout_id)))

        # Deleting the exercise must invalidate all affected workouts
        exercise.delete()
        for workout_id in workout_ids:
            self.assertFalse(cache.get(cache_mapper.get_workout_canonical(workout_id)))
# TODO: fix test, all registered users can upload exercises
class ExerciseApiTestCase(
    api_base_test.BaseTestCase, api_base_test.ApiBaseTestCase, api_base_test.ApiGetTestCase
):
    """
    Tests the exercise overview resource
    """

    pk = 1                    # fixture primary key used by the GET tests
    resource = Exercise       # model exposed by the API resource
    private_resource = False  # resource is readable without authentication
class ExerciseInfoApiTestCase(
    api_base_test.BaseTestCase,
    api_base_test.ApiBaseTestCase,
    api_base_test.ApiGetTestCase,
):
    """
    Tests the exercise info resource
    """

    pk = 1                    # fixture primary key used by the GET tests
    private_resource = False  # resource is readable without authentication

    def get_resource_name(self):
        """Return the API resource name used to build the endpoint URL."""
        return 'exerciseinfo'
| wger-project/wger | wger/exercises/tests/test_exercise.py | Python | agpl-3.0 | 25,873 | [
"VisIt"
] | 20592ca8c4e0b1430d7d38d0ce1f103b29dab12823631b2fc24798759f1a6b06 |
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Ansible module metadata: maturity ('preview') and support channel
# ('community'), consumed by ansible-doc and the module validator.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_pool
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of Pool Avi RESTful Object
description:
- This module is used to configure Pool object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.3"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
a_pool:
description:
- Name of container cloud application that constitutes a pool in a a-b pool configuration, if different from vs app.
ab_pool:
description:
- A/b pool configuration.
ab_priority:
description:
- Priority of this pool in a a-b pool pair.
- Internally used.
apic_epg_name:
description:
- Synchronize cisco apic epg members with pool servers.
application_persistence_profile_ref:
description:
- Persistence will ensure the same user sticks to the same server for a desired duration of time.
- It is a reference to an object of type applicationpersistenceprofile.
autoscale_launch_config_ref:
description:
- If configured then avi will trigger orchestration of pool server creation and deletion.
- It is only supported for container clouds like mesos, openshift, kubernetes, docker etc.
- It is a reference to an object of type autoscalelaunchconfig.
autoscale_networks:
description:
- Network ids for the launch configuration.
autoscale_policy_ref:
description:
- Reference to server autoscale policy.
- It is a reference to an object of type serverautoscalepolicy.
capacity_estimation:
description:
- Inline estimation of capacity of servers.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
capacity_estimation_ttfb_thresh:
description:
- The maximum time-to-first-byte of a server.
- Allowed values are 1-5000.
- Special values are 0 - 'automatic'.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
cloud_config_cksum:
description:
- Checksum of cloud configuration for pool.
- Internally set by cloud connector.
cloud_ref:
description:
- It is a reference to an object of type cloud.
connection_ramp_duration:
description:
- Duration for which new connections will be gradually ramped up to a server recently brought online.
- Useful for lb algorithms that are least connection based.
- Allowed values are 1-300.
- Special values are 0 - 'immediate'.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.
created_by:
description:
- Creator name.
default_server_port:
description:
- Traffic sent to servers will use this destination server port unless overridden by the server's specific port attribute.
- The ssl checkbox enables avi to server encryption.
- Allowed values are 1-65535.
- Default value when not specified in API or module is interpreted by Avi Controller as 80.
description:
description:
- A description of the pool.
domain_name:
description:
- Comma separated list of domain names which will be used to verify the common names or subject alternative names presented by server certificates.
- It is performed only when common name check host_check_enabled is enabled.
east_west:
description:
- Inherited config from virtualservice.
enabled:
description:
- Enable or disable the pool.
- Disabling will terminate all open connections and pause health monitors.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
external_autoscale_groups:
description:
- Names of external auto-scale groups for pool servers.
- Currently available only for aws.
- Field introduced in 17.1.2.
fail_action:
description:
- Enable an action - close connection, http redirect or local http response - when a pool failure happens.
- By default, a connection will be closed, in case the pool experiences a failure.
fewest_tasks_feedback_delay:
description:
- Periodicity of feedback for fewest tasks server selection algorithm.
- Allowed values are 1-300.
- Default value when not specified in API or module is interpreted by Avi Controller as 10.
graceful_disable_timeout:
description:
- Used to gracefully disable a server.
- Virtual service waits for the specified time before terminating the existing connections to the servers that are disabled.
- Allowed values are 1-60.
- Special values are 0 - 'immediate', -1 - 'infinite'.
- Default value when not specified in API or module is interpreted by Avi Controller as 1.
health_monitor_refs:
description:
- Verify server health by applying one or more health monitors.
- Active monitors generate synthetic traffic from each service engine and mark a server up or down based on the response.
- The passive monitor listens only to client to server communication.
- It raises or lowers the ratio of traffic destined to a server based on successful responses.
- It is a reference to an object of type healthmonitor.
host_check_enabled:
description:
- Enable common name check for server certificate.
- If enabled and no explicit domain name is specified, avi will use the incoming host header to do the match.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
inline_health_monitor:
description:
- The passive monitor will monitor client to server connections and requests and adjust traffic load to servers based on successful responses.
- This may alter the expected behavior of the lb method, such as round robin.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
ipaddrgroup_ref:
description:
- Use list of servers from ip address group.
- It is a reference to an object of type ipaddrgroup.
lb_algorithm:
description:
- The load balancing algorithm will pick a server within the pool's list of available servers.
- Enum options - LB_ALGORITHM_LEAST_CONNECTIONS, LB_ALGORITHM_ROUND_ROBIN, LB_ALGORITHM_FASTEST_RESPONSE, LB_ALGORITHM_CONSISTENT_HASH,
- LB_ALGORITHM_LEAST_LOAD, LB_ALGORITHM_FEWEST_SERVERS, LB_ALGORITHM_RANDOM, LB_ALGORITHM_FEWEST_TASKS, LB_ALGORITHM_NEAREST_SERVER,
- LB_ALGORITHM_CORE_AFFINITY.
- Default value when not specified in API or module is interpreted by Avi Controller as LB_ALGORITHM_LEAST_CONNECTIONS.
lb_algorithm_consistent_hash_hdr:
description:
- Http header name to be used for the hash key.
lb_algorithm_core_nonaffinity:
description:
- Degree of non-affinity for core affinity based server selection.
- Allowed values are 1-65535.
- Field introduced in 17.1.3.
- Default value when not specified in API or module is interpreted by Avi Controller as 2.
version_added: "2.4"
lb_algorithm_hash:
description:
- Criteria used as a key for determining the hash between the client and server.
- Enum options - LB_ALGORITHM_CONSISTENT_HASH_SOURCE_IP_ADDRESS, LB_ALGORITHM_CONSISTENT_HASH_SOURCE_IP_ADDRESS_AND_PORT,
- LB_ALGORITHM_CONSISTENT_HASH_URI, LB_ALGORITHM_CONSISTENT_HASH_CUSTOM_HEADER.
- Default value when not specified in API or module is interpreted by Avi Controller as LB_ALGORITHM_CONSISTENT_HASH_SOURCE_IP_ADDRESS.
max_concurrent_connections_per_server:
description:
- The maximum number of concurrent connections allowed to each server within the pool.
- Note applied value will be no less than the number of service engines that the pool is placed on.
- If set to 0, no limit is applied.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
max_conn_rate_per_server:
description:
- Rate limit connections to each server.
name:
description:
- The name of the pool.
required: true
networks:
description:
- (internal-use) networks designated as containing servers for this pool.
- The servers may be further narrowed down by a filter.
- This field is used internally by avi, not editable by the user.
nsx_securitygroup:
description:
- A list of nsx service groups where the servers for the pool are created.
- Field introduced in 17.1.1.
pki_profile_ref:
description:
- Avi will validate the ssl certificate present by a server against the selected pki profile.
- It is a reference to an object of type pkiprofile.
placement_networks:
description:
- Manually select the networks and subnets used to provide reachability to the pool's servers.
- Specify the subnet using the following syntax 10-1-1-0/24.
- Use static routes in vrf configuration when pool servers are not directly connected but routable from the service engine.
prst_hdr_name:
description:
- Header name for custom header persistence.
request_queue_depth:
description:
- Minimum number of requests to be queued when pool is full.
- Default value when not specified in API or module is interpreted by Avi Controller as 128.
request_queue_enabled:
description:
- Enable request queue when pool is full.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
rewrite_host_header_to_server_name:
description:
- Rewrite incoming host header to server name of the server to which the request is proxied.
- Enabling this feature rewrites host header for requests to all servers in the pool.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
rewrite_host_header_to_sni:
description:
- If sni server name is specified, rewrite incoming host header to the sni server name.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
server_auto_scale:
description:
- Server autoscale.
- Not used anymore.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
server_count:
description:
- Number of server_count.
- Default value when not specified in API or module is interpreted by Avi Controller as 0.
server_name:
description:
- Fully qualified dns hostname which will be used in the tls sni extension in server connections if sni is enabled.
- If no value is specified, avi will use the incoming host header instead.
server_reselect:
description:
- Server reselect configuration for http requests.
servers:
description:
- The pool directs load balanced traffic to this list of destination servers.
- The servers can be configured by ip address, name, network or via ip address group.
sni_enabled:
description:
- Enable tls sni for server connections.
- If disabled, avi will not send the sni extension as part of the handshake.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
ssl_key_and_certificate_ref:
description:
- Service engines will present a client ssl certificate to the server.
- It is a reference to an object of type sslkeyandcertificate.
ssl_profile_ref:
description:
- When enabled, avi re-encrypts traffic to the backend servers.
- The specific ssl profile defines which ciphers and ssl versions will be supported.
- It is a reference to an object of type sslprofile.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
use_service_port:
description:
- Do not translate the client's destination port when sending the connection to the server.
- The pool or servers specified service port will still be used for health monitoring.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
uuid:
description:
- Uuid of the pool.
vrf_ref:
description:
- Virtual routing context that the pool is bound to.
- This is used to provide the isolation of the set of networks the pool is attached to.
- The pool inherits the virtual routing context of the virtual service, and this field is used only internally, and is set by pb-transform.
- It is a reference to an object of type vrfcontext.
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create a Pool with two servers and HTTP monitor
avi_pool:
controller: 10.10.1.20
username: avi_user
password: avi_password
name: testpool1
description: testpool1
state: present
health_monitor_refs:
- '/api/healthmonitor?name=System-HTTP'
servers:
- ip:
addr: 10.10.2.20
type: V4
- ip:
addr: 10.10.2.21
type: V4
'''
RETURN = '''
obj:
description: Pool (api/pool) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
a_pool=dict(type='str',),
ab_pool=dict(type='dict',),
ab_priority=dict(type='int',),
apic_epg_name=dict(type='str',),
application_persistence_profile_ref=dict(type='str',),
autoscale_launch_config_ref=dict(type='str',),
autoscale_networks=dict(type='list',),
autoscale_policy_ref=dict(type='str',),
capacity_estimation=dict(type='bool',),
capacity_estimation_ttfb_thresh=dict(type='int',),
cloud_config_cksum=dict(type='str',),
cloud_ref=dict(type='str',),
connection_ramp_duration=dict(type='int',),
created_by=dict(type='str',),
default_server_port=dict(type='int',),
description=dict(type='str',),
domain_name=dict(type='list',),
east_west=dict(type='bool',),
enabled=dict(type='bool',),
external_autoscale_groups=dict(type='list',),
fail_action=dict(type='dict',),
fewest_tasks_feedback_delay=dict(type='int',),
graceful_disable_timeout=dict(type='int',),
health_monitor_refs=dict(type='list',),
host_check_enabled=dict(type='bool',),
inline_health_monitor=dict(type='bool',),
ipaddrgroup_ref=dict(type='str',),
lb_algorithm=dict(type='str',),
lb_algorithm_consistent_hash_hdr=dict(type='str',),
lb_algorithm_core_nonaffinity=dict(type='int',),
lb_algorithm_hash=dict(type='str',),
max_concurrent_connections_per_server=dict(type='int',),
max_conn_rate_per_server=dict(type='dict',),
name=dict(type='str', required=True),
networks=dict(type='list',),
nsx_securitygroup=dict(type='list',),
pki_profile_ref=dict(type='str',),
placement_networks=dict(type='list',),
prst_hdr_name=dict(type='str',),
request_queue_depth=dict(type='int',),
request_queue_enabled=dict(type='bool',),
rewrite_host_header_to_server_name=dict(type='bool',),
rewrite_host_header_to_sni=dict(type='bool',),
server_auto_scale=dict(type='bool',),
server_count=dict(type='int',),
server_name=dict(type='str',),
server_reselect=dict(type='dict',),
servers=dict(type='list',),
sni_enabled=dict(type='bool',),
ssl_key_and_certificate_ref=dict(type='str',),
ssl_profile_ref=dict(type='str',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
use_service_port=dict(type='bool',),
uuid=dict(type='str',),
vrf_ref=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'pool',
set([]))
if __name__ == '__main__':
main()
| fernandezcuesta/ansible | lib/ansible/modules/network/avi/avi_pool.py | Python | gpl-3.0 | 18,914 | [
"VisIt"
] | ed724c3a096fba5d3e865f561d6563e348327cf0e80d049433184ef3f15505b5 |
#!/usr/bin/env python3
#
# Copyright (C) 2013-2017(H)
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# -*- coding: utf-8 -*-
import espressopp
import math
import unittest
# initial parameters of the simulation
L = 5
box = (L, L, L)
rc = 1.5
sigma = 1.
phi = 0.
class makeConf(unittest.TestCase):
def setUp(self):
system, integrator = espressopp.standard_system.Default(box, rc=rc, skin=0.3, dt=0.005, temperature=1.)
system.storage.addParticles([[1,0,espressopp.Real3D(0,0,1)]],'id','type','pos')
system.storage.addParticles([[2,0,espressopp.Real3D(0,0,1+math.pow(2,1./6.))]],'id','type','pos')
system.storage.decompose()
# non-bonded LJcos potential
vl = espressopp.VerletList(system, cutoff=rc)
LJcos = espressopp.interaction.LJcos(phi=phi)
LJcosInter = espressopp.interaction.VerletListLJcos(vl)
LJcosInter.setPotential(type1=0, type2=0, potential=LJcos)
system.addInteraction(LJcosInter)
# set self
self.system = system
self.LJcosInter = LJcosInter
class TestLJcos(makeConf):
def test_ljcos(self):
for phi in range (0, 11):
phi = 0.1 * phi
# update potential
LJcos = espressopp.interaction.LJcos(phi=phi)
self.LJcosInter.setPotential(type1=0, type2=0, potential=LJcos)
Epot = espressopp.analysis.EnergyPot(self.system)
print('phi =', phi, 'Epot = ', Epot.compute())
self.assertAlmostEqual(Epot.compute(), -phi, places=10)
if __name__ == '__main__':
unittest.main()
| espressopp/espressopp | testsuite/ljcos/testLJCos.py | Python | gpl-3.0 | 2,331 | [
"ESPResSo"
] | 50daca67ef5024b96cf416efcd96e029afa3711475d14ac5e0b4d759e4ff5a43 |
import pyparsing as pp
from nmodl.terminals import NEURON, LBRACE, RBRACE, FLOAT, ID
SUFFIX = pp.Keyword('SUFFIX')
USEION = pp.Keyword('USEION')
READ = pp.Keyword('READ')
WRITE = pp.Keyword('WRITE')
RANGE = pp.Keyword('RANGE')
GLOBAL = pp.Keyword('GLOBAL')
POINTER = pp.Keyword('POINTER')
NONSPECIFIC = pp.Keyword('NONSPECIFIC_CURRENT')
EXTERNAL = pp.Keyword('EXTERNAL')
VALENCE = pp.Keyword('VALENCE')
suffix_stmt = (SUFFIX + ID)
global_stmt = (GLOBAL + pp.Group(pp.delimitedList(ID)))
range_stmt = RANGE + pp.Group(pp.delimitedList(ID))
pointer_stmt = (POINTER + pp.Group(pp.delimitedList(ID)))
ext_stmt = (EXTERNAL + pp.Group(pp.delimitedList(ID)))
nonspec_stmt = (NONSPECIFIC + pp.Group(pp.delimitedList(ID)))
read = READ + pp.Group(pp.delimitedList(ID))
write = WRITE + pp.Group(pp.delimitedList(ID))
valence = VALENCE + FLOAT
rwv = pp.Optional(read) & pp.Optional(write) & pp.Optional(valence)
useion_stmt = (USEION + ID + rwv)
neuron_stmt = pp.Group(suffix_stmt | global_stmt | range_stmt | pointer_stmt |
ext_stmt | nonspec_stmt | useion_stmt)
neuron_blk = NEURON + LBRACE + pp.ZeroOrMore(neuron_stmt) + RBRACE
| borismarin/nmodl-parse | nmodl/neuron.py | Python | gpl-3.0 | 1,140 | [
"NEURON"
] | 51c1ef62f861c7ec6cc00e4455c686c7ab6e2606c85acb71e83dc1941dd924ba |
#!/usr/bin/python
"""
Run VASP tests to ensure that relaxation with the VASP calculator works.
This is conditional on the existence of the VASP_COMMAND or VASP_SCRIPT
environment variables.
"""
from ase.test import NotAvailable
import os
vcmd = os.getenv('VASP_COMMAND')
vscr = os.getenv('VASP_SCRIPT')
if vcmd == None and vscr == None:
raise NotAvailable('Neither VASP_COMMAND nor VASP_SCRIPT defined')
import numpy as np
from ase import io
# QuasiNewton nowadays is an alias for BFGSLineSearch, which is
# broken. Use BFGS instead.
from ase.optimize import BFGS as QuasiNewton
from ase.lattice import bulk
from ase.calculators.vasp import Vasp
# -- Perform Volume relaxation within Vasp
def vasp_vol_relax():
Al = bulk('Al', 'fcc', a=4.5, cubic=True)
calc = Vasp(xc='LDA', isif=7, nsw=5,
ibrion=1, ediffg=-1e-3, lwave=False, lcharg=False)
calc.calculate(Al)
# Explicitly parse atomic position output file from Vasp
CONTCAR_Al = io.read('CONTCAR', format='vasp')
print 'Stress after relaxation:\n', calc.read_stress()
print 'Al cell post relaxation from calc:\n', calc.get_atoms().get_cell()
print 'Al cell post relaxation from atoms:\n', Al.get_cell()
print 'Al cell post relaxation from CONTCAR:\n', CONTCAR_Al.get_cell()
# All the cells should be the same.
assert (calc.get_atoms().get_cell() == CONTCAR_Al.get_cell()).all()
assert (Al.get_cell() == CONTCAR_Al.get_cell()).all()
return Al
# -- Perform Volume relaxation using ASE with Vasp as force/stress calculator
def ase_vol_relax():
Al = bulk('Al', 'fcc', a=4.5, cubic=True)
calc = Vasp(xc='LDA')
Al.set_calculator(calc)
from ase.constraints import StrainFilter
sf = StrainFilter(Al)
qn = QuasiNewton(sf, logfile='relaxation.log')
qn.run(fmax=0.1, steps=5)
print 'Stress:\n', calc.read_stress()
print 'Al post ASE volume relaxation\n', calc.get_atoms().get_cell()
return Al
# Test function for comparing two cells
def cells_almost_equal(cellA, cellB, tol=0.01):
return (np.abs(cellA - cellB) < tol).all()
# Correct LDA relaxed cell
a_rel = 4.18
LDA_cell = np.diag([a_rel, a_rel, a_rel])
Al_vasp = vasp_vol_relax()
Al_ase = ase_vol_relax()
assert cells_almost_equal(LDA_cell, Al_vasp.get_cell())
assert cells_almost_equal(LDA_cell, Al_ase.get_cell())
# Cleanup
Al_ase.get_calculator().clean()
| JConwayAWT/PGSS14CC | lib/python/multimetallics/ase/test/vasp/vasp_Al_volrelax.py | Python | gpl-2.0 | 2,388 | [
"ASE",
"VASP"
] | ee65a800f58c251e353064a6c7ab12c8a5eb416b311c79b5f900fc3a4c3c1342 |
#! /usr/bin/env python
import sys
from math import sqrt, pi
import numpy as np
from ase import Atoms
from ase.units import Bohr, Hartree
from ase.parallel import rank
from gpaw import GPAW
from gpaw.external_potential import ConstantElectricField
###
field = 0.01
dx = 2.0
vac = 3.0
nsteps = 3
nbands = 4
a0 = dx+2*vac
b0 = 2*vac
debug = False
if debug:
txt = 'gpaw.out'
else:
txt = None
###
a = Atoms('Be', positions=[ [ b0/2, b0/2, a0/2 ] ], cell=[ b0, b0, a0 ])
z_list = np.linspace(a0/2-dx/2, a0/2+dx/2, nsteps)
if True:
c = GPAW(
h = 0.30,
width = 0.0,
nbands = nbands,
spinpol = False,
xc = 'LDA',
txt = txt,
)
a.set_calculator(c)
###
e_no_field = [ ]
for z in z_list:
if rank == 0 and debug:
print z
a[0].z = z
e_no_field += [ a.get_potential_energy() ]
e_no_field = np.array(e_no_field)
###
if True:
c = GPAW(
h = 0.30,
width = 0.0,
nbands = nbands,
spinpol = False,
xc = 'LDA',
txt = txt,
external = ConstantElectricField(field * Bohr/Hartree)
)
a.set_calculator(c)
e_with_field = [ ]
for z in z_list:
if rank == 0 and debug:
print z
a[0].z = z
e_with_field += [ a.get_potential_energy() ]
e_with_field = np.array(e_with_field)
###
np.savetxt("e.out", np.transpose( [ z_list, e_with_field-e_no_field, e_no_field, e_with_field ] ))
c1, c2 = np.polyfit(z_list, e_with_field-e_no_field, 1)
# 4*field because the charge of the nuclei is not considered
# in ExternalPotential
err = abs(c1-a[0].number*field)
if rank == 0 and debug:
print c1
print err
assert err < 0.001
| ajylee/gpaw-rtxs | gpaw/test/constant_electric_field.py | Python | gpl-3.0 | 1,861 | [
"ASE",
"GPAW"
] | db842aab8e51aa1a071321ddef4fd282ee857ee618dea13afb98259e0f57ccc4 |
from __future__ import annotations
import json
import procrunner
import pytest
import iotbx.cif
from cctbx import sgtbx, uctbx
from dxtbx.model.experiment_list import ExperimentListFactory
from dxtbx.serialize import load
from iotbx import mtz
from dials.array_family import flex
from dials.command_line.slice_sequence import slice_experiments, slice_reflections
from dials.util.multi_dataset_handling import assign_unique_identifiers
def run_export(export_format, dials_data, tmp_path):
result = procrunner.run(
[
"dials.export",
"format=" + export_format,
dials_data("centroid_test_data", pathlib=True) / "experiments.json",
dials_data("centroid_test_data", pathlib=True) / "integrated.pickle",
],
working_directory=tmp_path,
)
assert not result.returncode and not result.stderr
assert (tmp_path / f"integrated.{export_format}").is_file()
def test_nxs(dials_data, tmp_path):
run_export("nxs", dials_data, tmp_path)
def test_mtz(dials_data, tmp_path):
result = procrunner.run(
[
"dials.export",
"format=mtz",
"project_name=ham",
"crystal_name=spam",
dials_data("centroid_test_data", pathlib=True) / "experiments.json",
dials_data("centroid_test_data", pathlib=True) / "integrated.pickle",
],
working_directory=tmp_path,
)
assert not result.returncode and not result.stderr
assert (tmp_path / "integrated.mtz").is_file()
mtz_obj = mtz.object(str(tmp_path / "integrated.mtz"))
assert mtz_obj.crystals()[1].name() == "spam"
assert mtz_obj.crystals()[1].project_name() == "ham"
def test_mtz_recalculated_cell(dials_data, tmp_path):
# First run dials.two_theta_refine to ensure that the crystals have
# recalculated_unit_cell set
scaled_expt = (
dials_data("x4wide_processed", pathlib=True) / "AUTOMATIC_DEFAULT_scaled.expt"
)
scaled_refl = (
dials_data("x4wide_processed", pathlib=True) / "AUTOMATIC_DEFAULT_scaled.refl"
)
result = procrunner.run(
["dials.two_theta_refine", scaled_expt, scaled_refl],
working_directory=tmp_path,
)
assert (tmp_path / "refined_cell.expt").is_file()
refined_expt = load.experiment_list(
tmp_path / "refined_cell.expt", check_format=False
)
ttr_cell = refined_expt.crystals()[0].get_recalculated_unit_cell()
d_min = 1.3
result = procrunner.run(
[
"dials.export",
"format=mtz",
tmp_path / "refined_cell.expt",
scaled_refl,
f"d_min={d_min:f}",
],
working_directory=tmp_path,
)
assert not result.returncode and not result.stderr
assert (tmp_path / "scaled.mtz").is_file()
# The resulting mtz should have the same unit cell set as the recalculated_unit_cell
# from dials.two_theta_refine
for ma in mtz.object(str(tmp_path / "scaled.mtz")).as_miller_arrays():
assert ttr_cell.parameters() == pytest.approx(ma.unit_cell().parameters())
assert ma.d_min() >= d_min
def test_mtz_best_unit_cell(dials_data, tmp_path):
scaled_expt = (
dials_data("x4wide_processed", pathlib=True) / "AUTOMATIC_DEFAULT_scaled.expt"
)
scaled_refl = (
dials_data("x4wide_processed", pathlib=True) / "AUTOMATIC_DEFAULT_scaled.refl"
)
best_unit_cell = uctbx.unit_cell((42, 42, 39, 90, 90, 90))
d_min = 1.5
result = procrunner.run(
[
"dials.export",
"format=mtz",
scaled_expt,
scaled_refl,
f"d_min={d_min:f}",
"best_unit_cell=%g,%g,%g,%g,%g,%g" % best_unit_cell.parameters(),
],
working_directory=tmp_path,
)
assert not result.returncode and not result.stderr
assert (tmp_path / "scaled.mtz").is_file()
# The resulting mtz should have the best_unit_cell as input to dials.export
for ma in mtz.object(str(tmp_path / "scaled.mtz")).as_miller_arrays():
assert best_unit_cell.parameters() == pytest.approx(ma.unit_cell().parameters())
assert ma.d_min() >= d_min
def test_multi_sequence_integrated_mtz(dials_data, tmp_path):
"""Test dials.export on multi-sequence integrated data."""
# first combine two integrated files
result = procrunner.run(
[
"dials.combine_experiments",
dials_data("multi_crystal_proteinase_k") / "experiments_1.json",
dials_data("multi_crystal_proteinase_k") / "reflections_1.pickle",
dials_data("multi_crystal_proteinase_k") / "experiments_2.json",
dials_data("multi_crystal_proteinase_k") / "reflections_2.pickle",
],
working_directory=tmp_path,
)
assert not result.returncode and not result.stderr
assert (tmp_path / "combined.refl").is_file()
assert (tmp_path / "combined.expt").is_file()
# now export
result = procrunner.run(
[
"dials.export",
"format=mtz",
"mtz.hklout=integrated.mtz",
tmp_path / "combined.refl",
tmp_path / "combined.expt",
],
working_directory=tmp_path,
)
assert not result.returncode and not result.stderr
assert (tmp_path / "integrated.mtz").is_file()
def test_mtz_multi_wavelength(dials_data, tmp_path):
"""Test multi-wavelength mtz export"""
# First make suitable input - multi datasets experiment list and reflection
# table with different wavelengths
mcp = dials_data("multi_crystal_proteinase_k", pathlib=True)
exp_1 = load.experiment_list(mcp / "experiments_1.json", check_format=False)
exp_2 = load.experiment_list(mcp / "experiments_2.json", check_format=False)
refl_1 = flex.reflection_table.from_file(mcp / "reflections_1.pickle")
refl_2 = flex.reflection_table.from_file(mcp / "reflections_2.pickle")
exp_1[0].beam.set_wavelength(0.5)
exp_2[0].beam.set_wavelength(1.0)
exp_1.extend(exp_2)
reflection_list = [refl_1, refl_2]
exps, refls = assign_unique_identifiers(exp_1, reflection_list)
joint_refl = flex.reflection_table()
for r in refls:
joint_refl.extend(r)
exps.as_json(tmp_path / "tmp_exp.expt")
joint_refl.as_file(tmp_path / "tmp_refl.refl")
# Now run
result = procrunner.run(
[
"dials.export",
tmp_path / "tmp_exp.expt",
tmp_path / "tmp_refl.refl",
"format=mtz",
"mtz.hklout=unmerged.mtz",
],
environment_override={"DIALS_EXPORT_DO_NOT_CHECK_FORMAT": "True"},
working_directory=tmp_path,
)
assert not result.returncode and not result.stderr
assert (tmp_path / "unmerged.mtz").is_file()
# Inspect output
m = mtz.object(str(tmp_path / "unmerged.mtz")).crystals()
n_batches = []
wavelengths = []
for crystal in m:
for dataset in crystal.datasets():
wavelengths.append(dataset.wavelength())
n_batches.append(dataset.n_batches())
assert n_batches == [0, 25, 25] # base, dataset1, dataset2
assert wavelengths == [0, 0.5, 1.0] # base, dataset1, dataset2
def test_mtz_primitive_cell(dials_data, tmp_path):
scaled_expt = dials_data("insulin_processed", pathlib=True) / "scaled.expt"
scaled_refl = dials_data("insulin_processed", pathlib=True) / "scaled.refl"
# First reindex to the primitive setting
expts = load.experiment_list(scaled_expt, check_format=False)
cs = expts[0].crystal.get_crystal_symmetry()
cb_op = cs.change_of_basis_op_to_primitive_setting()
procrunner.run(
[
"dials.reindex",
scaled_expt,
scaled_refl,
f'change_of_basis_op="{cb_op}"',
],
working_directory=tmp_path,
)
# Now export the reindexed experiments/reflections
procrunner.run(
["dials.export", tmp_path / "reindexed.expt", tmp_path / "reindexed.refl"],
working_directory=tmp_path,
)
mtz_obj = mtz.object(str(tmp_path / "scaled.mtz"))
cs_primitive = cs.change_basis(cb_op)
assert mtz_obj.space_group() == cs_primitive.space_group()
refl = flex.reflection_table.from_file(scaled_refl)
refl = refl.select(~refl.get_flags(refl.flags.bad_for_scaling, all=False))
for ma in mtz_obj.as_miller_arrays():
assert ma.crystal_symmetry().is_similar_symmetry(cs_primitive)
assert ma.d_max_min() == pytest.approx(
(flex.max(refl["d"]), flex.min(refl["d"]))
)
@pytest.mark.parametrize("compress", [None, "gz", "bz2", "xz"])
@pytest.mark.parametrize("hklout", [None, "my.cif"])
def test_mmcif(compress, hklout, dials_data, tmp_path):
# Call dials.export after integration
command = [
"dials.export",
"format=mmcif",
dials_data("centroid_test_data", pathlib=True) / "experiments.json",
dials_data("centroid_test_data", pathlib=True) / "integrated.pickle",
]
if hklout is not None:
command.append(f"mmcif.hklout={hklout}")
else:
hklout = "integrated.cif"
if compress is not None:
command.append(f"mmcif.compress={compress}")
hklin = hklout + "." + compress
else:
hklin = hklout
result = procrunner.run(command, working_directory=tmp_path)
assert not result.returncode and not result.stderr
assert (tmp_path / hklin).is_file()
@pytest.mark.parametrize("pdb_version", ["v5", "v5_next"])
def test_mmcif_on_scaled_data(dials_data, tmp_path, pdb_version):
"""Call dials.export format=mmcif after scaling"""
scaled_expt = (
dials_data("x4wide_processed", pathlib=True) / "AUTOMATIC_DEFAULT_scaled.expt"
)
scaled_refl = (
dials_data("x4wide_processed", pathlib=True) / "AUTOMATIC_DEFAULT_scaled.refl"
)
command = [
"dials.export",
"format=mmcif",
scaled_expt,
scaled_refl,
"mmcif.hklout=scaled.mmcif",
"compress=None",
f"pdb_version={pdb_version}",
]
result = procrunner.run(command, working_directory=tmp_path)
assert not result.returncode and not result.stderr
assert (tmp_path / "scaled.mmcif").is_file()
model = iotbx.cif.reader(file_path=str(tmp_path / "scaled.mmcif")).model()
if pdb_version == "v5":
assert "_pdbx_diffrn_data_section.id" not in model["dials"].keys()
elif pdb_version == "v5_next":
assert "_pdbx_diffrn_data_section.id" in model["dials"].keys()
def test_mmcif_p1_narrow_wedge(dials_data, tmp_path):
"""Call dials.export format=mmcif after scaling"""
data_dir = dials_data("x4wide_processed", pathlib=True)
refl = flex.reflection_table.from_file(data_dir / "AUTOMATIC_DEFAULT_scaled.refl")
refl = slice_reflections(refl, [(1, 3)])
refl.as_file(tmp_path / "p1_narrow.refl")
expts = load.experiment_list(
data_dir / "AUTOMATIC_DEFAULT_scaled.expt", check_format=False
)
expts = slice_experiments(expts, [(1, 3)])
expts[0].crystal.set_space_group(sgtbx.space_group())
expts.as_file(tmp_path / "p1_narrow.expt")
command = [
"dials.export",
"format=mmcif",
tmp_path / "p1_narrow.expt",
tmp_path / "p1_narrow.refl",
"mmcif.hklout=scaled.mmcif",
"compress=None",
]
result = procrunner.run(command, working_directory=tmp_path)
assert not result.returncode and not result.stderr
assert (tmp_path / "scaled.mmcif").is_file()
model = iotbx.cif.reader(file_path=str(tmp_path / "scaled.mmcif")).model()
assert model["dials"]["_reflns.pdbx_redundancy"] == "1.0"
assert model["dials"]["_reflns.pdbx_CC_half"] == "0.0"
def test_xds_ascii(dials_data, tmp_path):
# Call dials.export
result = procrunner.run(
[
"dials.export",
"intensity=sum",
"format=xds_ascii",
dials_data("centroid_test_data", pathlib=True) / "experiments.json",
dials_data("centroid_test_data", pathlib=True) / "integrated.pickle",
],
working_directory=tmp_path,
)
assert not result.returncode and not result.stderr
assert (tmp_path / "DIALS.HKL").is_file()
psi_values = {
(-9, 7, -10): 153.430361,
(-5, 11, -26): 175.559441,
(-4, 23, 24): 129.468070,
(2, 10, 20): 147.947274,
}
with (tmp_path / "DIALS.HKL").open() as fh:
for record in fh:
if record.startswith("!"):
continue
tokens = record.split()
hkl = tuple(map(int, tokens[:3]))
if hkl not in psi_values:
continue
psi = float(tokens[-1])
assert psi == pytest.approx(psi_values[hkl], abs=0.1)
def test_sadabs(dials_data, tmp_path):
# Call dials.export
result = procrunner.run(
[
"dials.export",
"intensity=sum",
"mtz.partiality_threshold=0.99",
"format=sadabs",
dials_data("centroid_test_data", pathlib=True) / "experiments.json",
dials_data("centroid_test_data", pathlib=True) / "integrated.pickle",
],
working_directory=tmp_path,
)
assert not result.returncode and not result.stderr
assert (tmp_path / "integrated.sad").is_file()
direction_cosines = {
(-9, 7, -10): (0.51253, -0.72107, 0.84696, -0.68476, -0.14130, -0.10561),
(-5, 11, -26): (0.51310, -0.62895, 0.84711, -0.59223, -0.13830, -0.50366),
(-4, 23, 24): (0.51308, -0.60578, 0.84711, -0.31416, -0.13840, 0.73099),
(2, 10, 20): (0.51239, -0.46605, 0.84693, -0.61521, -0.14204, 0.63586),
}
with (tmp_path / "integrated.sad").open() as fh:
for record in fh:
record = record.replace("-", " -")
tokens = record.split()
hkl = tuple(map(int, tokens[:3]))
cosines = tuple(map(float, tokens[6:12]))
if hkl not in direction_cosines:
continue
assert cosines == pytest.approx(direction_cosines[hkl], abs=0.001)
def test_json(dials_data, tmp_path):
# Call dials.export
result = procrunner.run(
[
"dials.export",
"format=json",
dials_data("centroid_test_data", pathlib=True)
/ "imported_experiments.json",
dials_data("centroid_test_data", pathlib=True) / "strong.pickle",
],
working_directory=tmp_path,
)
assert not result.returncode and not result.stderr
assert (tmp_path / "rlp.json").is_file()
d = json.load((tmp_path / "rlp.json").open("rb"))
assert set(d) == {"imageset_id", "experiments", "rlp", "experiment_id"}
assert d["rlp"][:3] == [0.123413, 0.576679, 0.186326], d["rlp"][:3]
assert d["imageset_id"][0] == 0
assert d["experiment_id"][0] == 0
experiments = ExperimentListFactory.from_dict(d["experiments"])
imgset = experiments.imagesets()
assert len(imgset) == 1
def test_json_shortened(dials_data, tmp_path):
# Call dials.export
result = procrunner.run(
[
"dials.export",
"format=json",
dials_data("centroid_test_data", pathlib=True) / "experiments.json",
dials_data("centroid_test_data", pathlib=True) / "integrated.pickle",
"json.filename=integrated.json",
"n_digits=4",
"compact=False",
],
working_directory=tmp_path,
)
assert not result.returncode and not result.stderr
assert (tmp_path / "integrated.json").is_file()
d = json.load((tmp_path / "integrated.json").open("rb"))
assert "imageset_id" in d
assert "rlp" in d
assert "experiment_id" in d
assert d["rlp"][:3] == [-0.5975, -0.6141, 0.4702], d["rlp"][:3]
assert d["imageset_id"][0] == 0
assert d["experiment_id"][0] == 0
def test_shelx(dials_data, tmp_path):
# Call dials.export
result = procrunner.run(
[
"dials.export",
"intensity=scale",
"format=shelx",
dials_data("l_cysteine_4_sweeps_scaled", pathlib=True)
/ "scaled_20_25.expt",
dials_data("l_cysteine_4_sweeps_scaled", pathlib=True)
/ "scaled_20_25.refl",
],
working_directory=tmp_path,
)
assert not result.returncode and not result.stderr
assert (tmp_path / "dials.hkl").is_file()
intensities_sigmas = {
(4, 2, 4): (173.14, 15.39),
(3, -3, -3): (324.13, 25.92),
(4, 0, 2): (876.02, 69.34),
(3, -2, -1): (463.11, 36.76),
}
with (tmp_path / "dials.hkl").open() as fh:
max_intensity = -9999.0
for record in fh:
tokens = record.split()
hkl = tuple(map(int, tokens[:3]))
i_sigi = tuple(map(float, tokens[3:5]))
if hkl not in intensities_sigmas:
if i_sigi[0] > max_intensity:
max_intensity = i_sigi[0]
continue
assert i_sigi == pytest.approx(intensities_sigmas[hkl], abs=0.001)
assert max_intensity == pytest.approx(9999.00, abs=0.001)
def test_shelx_ins(dials_data, tmp_path):
# Call dials.export
result = procrunner.run(
[
"dials.export",
"intensity=scale",
"format=shelx",
dials_data("l_cysteine_4_sweeps_scaled", pathlib=True)
/ "scaled_20_25.expt",
dials_data("l_cysteine_4_sweeps_scaled", pathlib=True)
/ "scaled_20_25.refl",
],
working_directory=tmp_path,
)
assert not result.returncode and not result.stderr
assert (tmp_path / "dials.ins").is_file()
cell_esds = {
"CELL": (5.4815, 8.2158, 12.1457, 90.000, 90.000, 90.000),
"ZERR": (0.0005, 0.0007, 0.0011, 0.003, 0.004, 0.004),
}
with (tmp_path / "dials.ins").open() as fh:
for line in fh:
tokens = line.split()
instruction = tokens[0]
if instruction in cell_esds:
result = tuple(map(float, tokens[2:8]))
assert result == pytest.approx(cell_esds[instruction], abs=0.001)
def test_shelx_ins_best_unit_cell(dials_data, tmp_path):
# Call dials.export
result = procrunner.run(
[
"dials.export",
"intensity=scale",
"format=shelx",
"best_unit_cell=5,8,12,90,90,90",
dials_data("l_cysteine_4_sweeps_scaled", pathlib=True)
/ "scaled_20_25.expt",
dials_data("l_cysteine_4_sweeps_scaled", pathlib=True)
/ "scaled_20_25.refl",
],
working_directory=tmp_path,
)
assert not result.returncode and not result.stderr
assert (tmp_path / "dials.ins").is_file()
cell_esds = {
"CELL": (5.0, 8.0, 12.0, 90.0, 90.0, 90.0),
}
with (tmp_path / "dials.ins").open() as fh:
for line in fh:
tokens = line.split()
instruction = tokens[0]
assert instruction != "ZERR"
if instruction in cell_esds:
result = tuple(map(float, tokens[2:8]))
assert result == pytest.approx(cell_esds[instruction], abs=0.001)
def test_export_sum_or_profile_only(dials_data, tmp_path):
expt = dials_data("insulin_processed", pathlib=True) / "integrated.expt"
refl = dials_data("insulin_processed", pathlib=True) / "integrated.refl"
for remove in "prf", "sum":
removed = tmp_path / f"removed_{remove}.refl"
data = flex.reflection_table.from_file(refl)
del data[f"intensity.{remove}.value"]
del data[f"intensity.{remove}.variance"]
data.as_file(removed)
result = procrunner.run(
["dials.export", expt, removed, f"mtz.hklout=removed_{remove}.mtz"],
working_directory=tmp_path,
)
assert not result.returncode and not result.stderr
assert (tmp_path / f"removed_{remove}.mtz").is_file()
| dials/dials | tests/command_line/test_export.py | Python | bsd-3-clause | 20,011 | [
"CRYSTAL"
] | 0a4c876874ac0f54fc26d2ade573a5bdd267498f0beb0563c67ffcc73bd630e8 |
# -*- coding: utf-8 -*-
#
# MNE documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 11 10:45:48 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from datetime import date
from distutils.version import LooseVersion
import os
import os.path as op
import sys
import time
import warnings
import sphinx_gallery
from sphinx_gallery.sorting import FileNameSortKey, ExplicitOrder
from numpydoc import docscrape
import matplotlib
import mne
from mne.viz import Brain
from mne.utils import (linkcode_resolve, # noqa, analysis:ignore
_assert_no_instances, sizeof_fmt)
if LooseVersion(sphinx_gallery.__version__) < LooseVersion('0.2'):
raise ImportError('Must have at least version 0.2 of sphinx-gallery, got '
'%s' % (sphinx_gallery.__version__,))
matplotlib.use('agg')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
curdir = os.path.dirname(__file__)
sys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne')))
sys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext')))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '2.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.linkcode',
'sphinx.ext.mathjax',
'sphinx.ext.todo',
'sphinx.ext.graphviz',
'numpydoc',
'sphinx_gallery.gen_gallery',
'sphinx_fontawesome',
'gen_commands',
'gh_substitutions',
'mne_substitutions',
'sphinx_bootstrap_theme',
'sphinx_bootstrap_divs',
'sphinxcontrib.bibtex',
'sphinxcontrib.bibtex2',
]
linkcheck_ignore = [
'https://doi.org/10.1088/0031-9155/57/7/1937', # noqa 403 Client Error: Forbidden for url: http://iopscience.iop.org/article/10.1088/0031-9155/57/7/1937/meta
'https://doi.org/10.1088/0031-9155/51/7/008', # noqa 403 Client Error: Forbidden for url: https://iopscience.iop.org/article/10.1088/0031-9155/51/7/008
'https://sccn.ucsd.edu/wiki/.*', # noqa HTTPSConnectionPool(host='sccn.ucsd.edu', port=443): Max retries exceeded with url: /wiki/Firfilt_FAQ (Caused by SSLError(SSLError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:847)'),))
'https://docs.python.org/dev/howto/logging.html', # noqa ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))
'https://docs.python.org/3/library/.*', # noqa ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))
'https://hal.archives-ouvertes.fr/hal-01848442/', # noqa Sometimes: 503 Server Error: Service Unavailable for url: https://hal.archives-ouvertes.fr/hal-01848442/
]
linkcheck_anchors = False # saves a bit of time
autosummary_generate = True
autodoc_default_options = {'inherited-members': None}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_includes']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'MNE'
td = date.today()
copyright = u'2012-%s, MNE Developers. Last updated on %s' % (td.year,
                                                              td.isoformat())

# Fail the build on broken cross-references, except for the boilerplate
# descriptions below, which come from inherited ``dict`` method docstrings.
nitpicky = True
nitpick_ignore = [
    ("py:class", "None. Remove all items from D."),
    ("py:class", "a set-like object providing a view on D's items"),
    ("py:class", "a set-like object providing a view on D's keys"),
    ("py:class", "v, remove specified key and return the corresponding value."),  # noqa: E501
    ("py:class", "None. Update D from dict/iterable E and F."),
    ("py:class", "an object providing a view on D's values"),
    ("py:class", "a shallow copy of D"),
    ("py:class", "(k, v), remove and return some (key, value) pair as a"),
]
# ``__hash__`` on these classes is inherited ``object`` boilerplate that
# cannot be cross-referenced; silence nitpick warnings for each of them.
for key in ('AcqParserFIF', 'BiHemiLabel', 'Dipole', 'DipoleFixed', 'Label',
            'MixedSourceEstimate', 'MixedVectorSourceEstimate', 'Report',
            'SourceEstimate', 'SourceMorph', 'VectorSourceEstimate',
            'VolSourceEstimate', 'VolVectorSourceEstimate',
            'channels.DigMontage', 'channels.Layout',
            'decoding.CSP', 'decoding.EMS', 'decoding.FilterEstimator',
            'decoding.GeneralizingEstimator', 'decoding.LinearModel',
            'decoding.PSDEstimator', 'decoding.ReceptiveField', 'decoding.SSD',
            'decoding.SPoC', 'decoding.Scaler', 'decoding.SlidingEstimator',
            'decoding.TemporalFilter', 'decoding.TimeDelayingRidge',
            'decoding.TimeFrequency', 'decoding.UnsupervisedSpatialFilter',
            'decoding.Vectorizer',
            'preprocessing.ICA', 'preprocessing.Xdawn',
            'simulation.SourceSimulator',
            'time_frequency.CrossSpectralDensity',
            'utils.deprecated',
            'viz.ClickableImage'):
    nitpick_ignore.append(('py:obj', f'mne.{key}.__hash__'))
suppress_warnings = ['image.nonlocal_uri']  # we intentionally link outside

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = mne.__version__
# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
# NOTE(review): ``unused_docs`` and ``exclude_trees`` are legacy Sphinx 0.x
# options superseded by ``exclude_patterns`` — presumably harmless but dead.
unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = "py:obj"

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'

# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['mne.']

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    'navbar_title': ' ',  # we replace this with an image
    'source_link_position': "nav",  # default
    'bootswatch_theme': "flatly",  # yeti paper lumen
    'navbar_sidebarrel': False,  # Render the next/prev links in navbar?
    'navbar_pagenav': False,
    'navbar_class': "navbar",
    'bootstrap_version': "3",  # default
    'navbar_links': [
        ("Install", "install/index"),
        ("Overview", "overview/index"),
        ("Tutorials", "auto_tutorials/index"),
        ("Examples", "auto_examples/index"),
        ("Glossary", "glossary"),
        ("API", "python_reference"),
    ],
}

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/mne_logo_small.svg"

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/favicon.ico"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
html_extra_path = [
    'contributing.html',
    'documentation.html',
    'getting_started.html',
    'install_mne_python.html',
]

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}

# If false, no module index is generated.
# html_domain_indices = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
html_copy_source = False

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# variables to pass to HTML templating engine
# BUILD_DEV_HTML is expected to be "0"/"1"; int(False) == 0 covers the
# unset case, so this defaults to False when the env var is absent.
build_dev_html = bool(int(os.environ.get('BUILD_DEV_HTML', False)))
html_context = {'use_google_analytics': True,
                'use_media_buttons': True, 'build_dev_html': build_dev_html}

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'mne-doc'

# -- Options for LaTeX output ---------------------------------------------

# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    # ('index', 'MNE.tex', u'MNE Manual',
    #  u'MNE Contributors', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "_static/logo.png"

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_toplevel_sectioning = 'part'

# Additional stuff for the LaTeX preamble.
# latex_preamble = ''

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_domain_indices = True

trim_doctests_flags = True

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3', None),
    'numpy': ('https://numpy.org/devdocs', None),
    'scipy': ('https://scipy.github.io/devdocs', None),
    'matplotlib': ('https://matplotlib.org', None),
    'sklearn': ('https://scikit-learn.org/stable', None),
    'numba': ('https://numba.pydata.org/numba-doc/latest', None),
    'joblib': ('https://joblib.readthedocs.io/en/latest', None),
    'mayavi': ('http://docs.enthought.com/mayavi/mayavi', None),
    'nibabel': ('https://nipy.org/nibabel', None),
    'nilearn': ('http://nilearn.github.io', None),
    'surfer': ('https://pysurfer.github.io/', None),
    'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),
    'seaborn': ('https://seaborn.pydata.org/', None),
    'statsmodels': ('https://www.statsmodels.org/dev', None),
    'patsy': ('https://patsy.readthedocs.io/en/latest', None),
    'pyvista': ('https://docs.pyvista.org', None),
    'imageio': ('https://imageio.readthedocs.io/en/latest', None),
    # We need to stick with 1.2.0 for now:
    # https://github.com/dipy/dipy/issues/2290
    # (trailing dot in the URL below appears deliberate for dipy's scheme —
    # confirm before "fixing" it)
    'dipy': ('https://dipy.org/documentation/1.2.0.', None),
    'mne_realtime': ('https://mne.tools/mne-realtime', None),
    'picard': ('https://pierreablin.github.io/picard/', None),
}
##############################################################################
# sphinxcontrib-bibtex

bibtex_bibfiles = ['./references.bib']
bibtex_style = 'unsrt'
bibtex_footbibliography_header = ''

##############################################################################
# sphinx-gallery

examples_dirs = ['../tutorials', '../examples']
gallery_dirs = ['auto_tutorials', 'auto_examples']
# Let mne-python know the docs are being built (changes some behavior).
os.environ['_MNE_BUILDING_DOC'] = 'true'

# Build the tuple of sphinx-gallery image scrapers, adding 3D backends only
# when they import and initialize cleanly (best-effort by design).
scrapers = ('matplotlib',)
try:
    mlab = mne.utils._import_mlab()
    # Do not pop up any mayavi windows while running the
    # examples. These are very annoying since they steal the focus.
    mlab.options.offscreen = True
    # hack to initialize the Mayavi Engine
    mlab.test_plot3d()
    mlab.close()
except Exception:
    pass
else:
    scrapers += ('mayavi',)
try:
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=DeprecationWarning)
        import pyvista
    # NOTE(review): OFF_SCREEN = False looks inconsistent with the mayavi
    # offscreen setting above — confirm this is intended for doc builds.
    pyvista.OFF_SCREEN = False
except Exception:
    pass
else:
    scrapers += ('pyvista',)
if any(x in scrapers for x in ('pyvista', 'mayavi')):
    from traits.api import push_exception_handler
    # Make traits raise instead of swallowing exceptions during the build.
    push_exception_handler(reraise_exceptions=True)
    report_scraper = mne.report._ReportScraper()
    scrapers += (report_scraper,)
else:
    report_scraper = None
if 'pyvista' in scrapers:
    # The Brain scraper must run before the generic pyvista scraper.
    brain_scraper = mne.viz._brain._BrainScraper()
    scrapers = list(scrapers)
    scrapers.insert(scrapers.index('pyvista'), brain_scraper)
    scrapers = tuple(scrapers)
def append_attr_meth_examples(app, what, name, obj, options, lines):
    """Append SG examples backreferences to method and attr docstrings."""
    # NumpyDoc nicely embeds method and attribute docstrings for us, but it
    # does not respect the autodoc templates that would otherwise insert
    # the .. include:: lines, so we need to do it.
    # Eventually this could perhaps live in SG.
    if what in ('attribute', 'method'):
        # sphinx-gallery writes a ``<name>.examples`` stub next to the
        # generated pages; a non-empty file means backreferences exist.
        # (Raises if the stub is missing — presumably generation always
        # runs first; confirm against the build order.)
        size = os.path.getsize(op.join(
            op.dirname(__file__), 'generated', '%s.examples' % (name,)))
        if size > 0:
            # NOTE(review): blank lines normally separating these rST
            # directives seem lost in this copy — confirm against upstream.
            lines += """
.. _sphx_glr_backreferences_{1}:
.. rubric:: Examples using ``{0}``:
.. minigallery:: {1}
""".format(name.split('.')[-1], name).split('\n')
def setup(app):
    """Register MNE-specific hooks on the Sphinx application."""
    # Inject sphinx-gallery backreference sections into attr/method docs.
    app.connect('autodoc-process-docstring', append_attr_meth_examples)
    scraper = report_scraper
    if scraper is not None:
        # The report scraper needs the app to copy its files at the end.
        scraper.app = app
        app.connect('build-finished', scraper.copyfiles)
class Resetter(object):
    """Simple class to make the str(obj) static for Sphinx build env hash."""

    def __init__(self):
        # Wall-clock start time; used for optional memory/time stamps below.
        self.t0 = time.time()

    def __repr__(self):
        # Static repr so the Sphinx environment hash does not change
        # between builds just because this object moved in memory.
        return '<%s>' % (self.__class__.__name__,)

    def __call__(self, gallery_conf, fname):
        """Reset global state between sphinx-gallery example scripts."""
        import matplotlib.pyplot as plt
        try:
            from pyvista import Plotter
        except ImportError:
            Plotter = None
        reset_warnings(gallery_conf, fname)
        # in case users have interactive mode turned on in matplotlibrc,
        # turn it off here (otherwise the build can be very slow)
        plt.ioff()
        plt.rcParams['animation.embed_limit'] = 30.
        # ``Brain`` and ``_assert_no_instances`` are presumably imported
        # earlier in this conf.py (not visible here) — fail if an example
        # leaked a Brain/Plotter instance. _assert_no_instances() also
        # calls gc.collect() per its name/comment.
        _assert_no_instances(Brain, 'running')  # calls gc.collect()
        if Plotter is not None:
            _assert_no_instances(Plotter, 'running')
        # This will overwrite some Sphinx printing but it's useful
        # for memory timestamps
        if os.getenv('SG_STAMP_STARTS', '').lower() == 'true':
            import psutil
            process = psutil.Process(os.getpid())
            mem = sizeof_fmt(process.memory_info().rss)
            print(f'{time.time() - self.t0:6.1f} s : {mem}'.ljust(22))
def reset_warnings(gallery_conf, fname):
    """Ensure we are future compatible and ignore silly warnings."""
    # In principle, our examples should produce no warnings.
    # Here we cause warnings to become errors, with a few exceptions.
    # This list should be considered alongside
    # setup.cfg -> [tool:pytest] -> filterwarnings

    # Drop tweaks left over from other module imports or example runs,
    # then turn every warning into an error by default.
    warnings.resetwarnings()
    warnings.filterwarnings('error')

    # Allow these, but keep showing them.
    shown_patterns = (
        '.*non-standard config type: "foo".*',
        '.*config type: "MNEE_USE_CUUDAA".*',
        '.*cannot make axes width small.*',
        '.*Axes that are not compatible.*',
        '.*FastICA did not converge.*',
        # xhemi morph (should probably update sample)
        '.*does not exist, creating it and saving it.*',
    )
    for pattern in shown_patterns:
        warnings.filterwarnings('always', pattern)
    warnings.filterwarnings('default', module='sphinx')  # internal warnings
    warnings.filterwarnings(
        'always', '.*converting a masked element to nan.*')  # matplotlib?

    # Allow these warnings, but don't show them.
    warnings.filterwarnings('ignore', '.*OpenSSL\\.rand is deprecated.*')
    warnings.filterwarnings('ignore', '.*is currently using agg.*')
    warnings.filterwarnings(  # SciPy-related warning (maybe 1.2.0 will fix it)
        'ignore', '.*the matrix subclass is not the recommended.*')
    warnings.filterwarnings(  # some joblib warning
        'ignore', '.*semaphore_tracker: process died unexpectedly.*')
    warnings.filterwarnings(  # needed until SciPy 1.2.0 is released
        'ignore', '.*will be interpreted as an array index.*', module='scipy')

    # Deal with other modules having bad imports (DeprecationWarnings).
    deprecation_fragments = (
        'HasTraits', r'numpy\.testing', 'importlib', r'np\.loads',
        'Using or importing the ABCs from',  # internal modules on 3.7
        r"it will be an error for 'np\.bool_'",  # ndimage
        "DocumenterBridge requires a state object",  # sphinx dev
        "'U' mode is deprecated",  # sphinx io
        r"joblib is deprecated in 0\.21",  # nilearn
        'The usage of `cmp` is deprecated and will',  # sklearn/pytest
        'scipy.* is deprecated and will be removed in',  # dipy
        r'Converting `np\.character` to a dtype is deprecated',  # vtk
        r'sphinx\.util\.smartypants is deprecated',
        'is a deprecated alias for the builtin',  # NumPy
    )
    for fragment in deprecation_fragments:
        warnings.filterwarnings(
            'ignore', message='.*%s.*' % fragment,
            category=DeprecationWarning)

    # Assorted third-party noise, matched by (message, category).
    warnings.filterwarnings(  # deal with bootstrap-theme bug
        'ignore', message='.*modify script_files in the theme.*',
        category=Warning)
    warnings.filterwarnings(  # nilearn
        'ignore', message=r'sklearn\.externals\.joblib is deprecated.*',
        category=FutureWarning)
    warnings.filterwarnings(  # nilearn
        'ignore', message=r'The sklearn.* module is.*', category=FutureWarning)
    warnings.filterwarnings(  # deal with other modules having bad imports
        'ignore', message='.*ufunc size changed.*', category=RuntimeWarning)
    warnings.filterwarnings(  # realtime
        'ignore', message='.*unclosed file.*', category=ResourceWarning)
    warnings.filterwarnings('ignore', message='Exception ignored in.*')
    # allow this ImportWarning, but don't show it
    warnings.filterwarnings(
        'ignore', message="can't resolve package from", category=ImportWarning)
    warnings.filterwarnings(
        'ignore', message='.*mne-realtime.*', category=DeprecationWarning)
# Apply the strict warning policy to the conf.py process itself.
reset_warnings(None, None)

sphinx_gallery_conf = {
    'doc_module': ('mne',),
    'reference_url': dict(mne=None),
    'examples_dirs': examples_dirs,
    # Explicit ordering of example/tutorial subsections in the gallery.
    'subsection_order': ExplicitOrder(['../examples/io/',
                                       '../examples/simulation/',
                                       '../examples/preprocessing/',
                                       '../examples/visualization/',
                                       '../examples/time_frequency/',
                                       '../examples/stats/',
                                       '../examples/decoding/',
                                       '../examples/connectivity/',
                                       '../examples/forward/',
                                       '../examples/inverse/',
                                       '../examples/realtime/',
                                       '../examples/datasets/',
                                       '../tutorials/intro/',
                                       '../tutorials/io/',
                                       '../tutorials/raw/',
                                       '../tutorials/preprocessing/',
                                       '../tutorials/epochs/',
                                       '../tutorials/evoked/',
                                       '../tutorials/time-freq/',
                                       '../tutorials/source-modeling/',
                                       '../tutorials/stats-sensor-space/',
                                       '../tutorials/stats-source-space/',
                                       '../tutorials/machine-learning/',
                                       '../tutorials/simulation/',
                                       '../tutorials/sample-datasets/',
                                       '../tutorials/discussions/',
                                       '../tutorials/misc/']),
    'gallery_dirs': gallery_dirs,
    'default_thumb_file': os.path.join('_static', 'mne_helmet.png'),
    'backreferences_dir': 'generated',
    'plot_gallery': 'True',  # Avoid annoying Unicode/bool default warning
    'download_section_examples': False,
    'thumbnail_size': (160, 112),
    'remove_config_comments': True,
    'min_reported_time': 1.,
    'abort_on_example_error': False,
    'reset_modules': ('matplotlib', Resetter()),  # called w/each script
    'image_scrapers': scrapers,
    'show_memory': not sys.platform.startswith('win'),
    'line_numbers': False,  # XXX currently (0.3.dev0) messes with style
    'within_subsection_order': FileNameSortKey,
    'capture_repr': ('_repr_html_',),
    'junit': op.join('..', 'test-results', 'sphinx-gallery', 'junit.xml'),
    'matplotlib_animations': True,
    'compress_images': ('images', 'thumbnails'),
}
##############################################################################
# numpydoc

# XXX This hack defines what extra methods numpydoc will document
docscrape.ClassDoc.extra_public_methods = mne.utils._doc_special_members
numpydoc_class_members_toctree = False
numpydoc_attributes_as_param_list = True
numpydoc_xref_param_type = True
# Map short type names used in docstrings to their cross-reference targets.
# NOTE(review): 'SourceMorph' and 'Forward' are each listed twice below; the
# later entry silently wins — consider deduplicating.
numpydoc_xref_aliases = {
    # Python
    'file-like': ':term:`file-like <python:file object>`',
    # Matplotlib
    'colormap': ':doc:`colormap <matplotlib:tutorials/colors/colormaps>`',
    'color': ':doc:`color <matplotlib:api/colors_api>`',
    'collection': ':doc:`collections <matplotlib:api/collections_api>`',
    'Axes': 'matplotlib.axes.Axes',
    'Figure': 'matplotlib.figure.Figure',
    'Axes3D': 'mpl_toolkits.mplot3d.axes3d.Axes3D',
    'ColorbarBase': 'matplotlib.colorbar.ColorbarBase',
    # Mayavi
    'mayavi.mlab.Figure': 'mayavi.core.api.Scene',
    'mlab.Figure': 'mayavi.core.api.Scene',
    # sklearn
    'LeaveOneOut': 'sklearn.model_selection.LeaveOneOut',
    # joblib
    'joblib.Parallel': 'joblib.Parallel',
    # nibabel
    'Nifti1Image': 'nibabel.nifti1.Nifti1Image',
    'Nifti2Image': 'nibabel.nifti2.Nifti2Image',
    'SpatialImage': 'nibabel.spatialimages.SpatialImage',
    # MNE
    'Label': 'mne.Label', 'Forward': 'mne.Forward', 'Evoked': 'mne.Evoked',
    'Info': 'mne.Info', 'SourceSpaces': 'mne.SourceSpaces',
    'SourceMorph': 'mne.SourceMorph',
    'Epochs': 'mne.Epochs', 'Layout': 'mne.channels.Layout',
    'EvokedArray': 'mne.EvokedArray', 'BiHemiLabel': 'mne.BiHemiLabel',
    'AverageTFR': 'mne.time_frequency.AverageTFR',
    'EpochsTFR': 'mne.time_frequency.EpochsTFR',
    'Raw': 'mne.io.Raw', 'ICA': 'mne.preprocessing.ICA',
    'Covariance': 'mne.Covariance', 'Annotations': 'mne.Annotations',
    'DigMontage': 'mne.channels.DigMontage',
    'VectorSourceEstimate': 'mne.VectorSourceEstimate',
    'VolSourceEstimate': 'mne.VolSourceEstimate',
    'VolVectorSourceEstimate': 'mne.VolVectorSourceEstimate',
    'MixedSourceEstimate': 'mne.MixedSourceEstimate',
    'MixedVectorSourceEstimate': 'mne.MixedVectorSourceEstimate',
    'SourceEstimate': 'mne.SourceEstimate', 'Projection': 'mne.Projection',
    'ConductorModel': 'mne.bem.ConductorModel',
    'Dipole': 'mne.Dipole', 'DipoleFixed': 'mne.DipoleFixed',
    'InverseOperator': 'mne.minimum_norm.InverseOperator',
    'CrossSpectralDensity': 'mne.time_frequency.CrossSpectralDensity',
    'SourceMorph': 'mne.SourceMorph',
    'Xdawn': 'mne.preprocessing.Xdawn',
    'Report': 'mne.Report', 'Forward': 'mne.Forward',
    'TimeDelayingRidge': 'mne.decoding.TimeDelayingRidge',
    'Vectorizer': 'mne.decoding.Vectorizer',
    'UnsupervisedSpatialFilter': 'mne.decoding.UnsupervisedSpatialFilter',
    'TemporalFilter': 'mne.decoding.TemporalFilter',
    'SSD': 'mne.decoding.SSD',
    'Scaler': 'mne.decoding.Scaler', 'SPoC': 'mne.decoding.SPoC',
    'PSDEstimator': 'mne.decoding.PSDEstimator',
    'LinearModel': 'mne.decoding.LinearModel',
    'FilterEstimator': 'mne.decoding.FilterEstimator',
    'EMS': 'mne.decoding.EMS', 'CSP': 'mne.decoding.CSP',
    'Beamformer': 'mne.beamformer.Beamformer',
    'Transform': 'mne.transforms.Transform',
}
# Words and shape names that should never be treated as cross-references.
numpydoc_xref_ignore = {
    # words
    'instance', 'instances', 'of', 'default', 'shape', 'or',
    'with', 'length', 'pair', 'matplotlib', 'optional', 'kwargs', 'in',
    'dtype', 'object', 'self.verbose',
    # shapes
    'n_vertices', 'n_faces', 'n_channels', 'm', 'n', 'n_events', 'n_colors',
    'n_times', 'obj', 'n_chan', 'n_epochs', 'n_picks', 'n_ch_groups',
    'n_dipoles', 'n_ica_components', 'n_pos', 'n_node_names', 'n_tapers',
    'n_signals', 'n_step', 'n_freqs', 'wsize', 'Tx', 'M', 'N', 'p', 'q',
    'n_observations', 'n_regressors', 'n_cols', 'n_frequencies', 'n_tests',
    'n_samples', 'n_permutations', 'nchan', 'n_points', 'n_features',
    'n_parts', 'n_features_new', 'n_components', 'n_labels', 'n_events_in',
    'n_splits', 'n_scores', 'n_outputs', 'n_trials', 'n_estimators', 'n_tasks',
    'nd_features', 'n_classes', 'n_targets', 'n_slices', 'n_hpi', 'n_fids',
    'n_elp', 'n_pts', 'n_tris', 'n_nodes', 'n_nonzero', 'n_events_out',
    'n_segments', 'n_orient_inv', 'n_orient_fwd', 'n_orient', 'n_dipoles_lcmv',
    'n_dipoles_fwd', 'n_picks_ref', 'n_coords',
    # Undocumented (on purpose)
    'RawKIT', 'RawEximia', 'RawEGI', 'RawEEGLAB', 'RawEDF', 'RawCTF', 'RawBTi',
    'RawBrainVision', 'RawCurry', 'RawNIRX', 'RawGDF', 'RawSNIRF', 'RawBOXY',
    'RawPersyst', 'RawNihon',
    # sklearn subclasses
    'mapping', 'to', 'any',
    # unlinkable
    'mayavi.mlab.pipeline.surface',
    'CoregFrame', 'Kit2FiffFrame', 'FiducialsFrame',
}
| olafhauk/mne-python | doc/conf.py | Python | bsd-3-clause | 28,784 | [
"Mayavi",
"VTK"
] | ac594740a4eaf0ad5892cefa7a3dce2b13d8a381e3083efc7a453d1ac733daa1 |
from PyQt4 import QtCore, QtGui
import re
from config import Settings
import util
import hashlib
from client import ClientState
class LoginWizard(QtGui.QWizard):
    """Modal wizard that collects FAF login credentials from the user."""

    def __init__(self, client):
        QtGui.QWizard.__init__(self)

        self.client = client
        self.login = client.login
        self.password = client.password

        self.addPage(loginPage(self))
        self.setWizardStyle(QtGui.QWizard.ModernStyle)
        self.setModal(True)
        self.setButtonLayout([QtGui.QWizard.CancelButton,
                              QtGui.QWizard.FinishButton])
        self.setWindowTitle("Login")

    def accept(self):
        """Copy the entered credentials onto the client and close."""
        entered_login = self.field("login").strip()
        self.login = entered_login
        # "!!!password!!!" is the placeholder shown when a stored hash
        # already exists; only re-hash when the user typed a new password.
        if self.field("password") != "!!!password!!!":
            self.password = hashlib.sha256(
                self.field("password").strip().encode("utf-8")).hexdigest()
        self.client.login = entered_login
        self.client.password = self.password  # the hash, not the placeholder
        self.client.remember = self.field("remember")
        self.client.autologin = self.field("autologin")
        QtGui.QWizard.accept(self)

    def reject(self):
        QtGui.QWizard.reject(self)
class loginPage(QtGui.QWizardPage):
    """Single wizard page with the login form and account-service buttons."""

    def __init__(self, parent=None, *args, **kwargs):
        QtGui.QWizardPage.__init__(self, *args, **kwargs)
        self.parent = parent
        self.client = parent.client

        self.setButtonText(QtGui.QWizard.CancelButton, "Quit")
        self.setButtonText(QtGui.QWizard.FinishButton, "Login")
        self.setTitle("ACU ready for combat.")
        self.setSubTitle("Log yourself in, commander.")
        self.setPixmap(QtGui.QWizard.WatermarkPixmap, util.pixmap("client/login_watermark.png"))

        loginLabel = QtGui.QLabel("&User name :")
        self.loginLineEdit = QtGui.QLineEdit()
        loginLabel.setBuddy(self.loginLineEdit)
        self.loginLineEdit.setText(self.client.login)

        passwordLabel = QtGui.QLabel("&Password :")
        self.passwordLineEdit = QtGui.QLineEdit()
        passwordLabel.setBuddy(self.passwordLineEdit)
        self.passwordLineEdit.setEchoMode(QtGui.QLineEdit.Password)
        # When a stored hash exists, show a placeholder instead of the real
        # password; clicking/selecting it clears the field so the user can
        # type a fresh one (see LoginWizard.accept for the matching check).
        if (self.client.password):
            self.passwordLineEdit.setText("!!!password!!!")
            self.passwordLineEdit.selectionChanged.connect(self.passwordLineEdit.clear)

        self.rememberCheckBox = QtGui.QCheckBox("&Remember password")
        self.rememberCheckBox.setChecked(self.client.remember)
        # Autologin only makes sense when the password is remembered.
        self.autologinCheckBox = QtGui.QCheckBox("&Automatic Login")
        self.autologinCheckBox.setChecked(self.client.autologin)
        self.autologinCheckBox.setEnabled(self.client.remember)
        self.rememberCheckBox.clicked.connect(self.rememberCheck)
        self.rememberCheckBox.clicked.connect(self.autologinCheckBox.setChecked)
        self.rememberCheckBox.clicked.connect(self.autologinCheckBox.setEnabled)

        # Buttons that open external account-management pages / sub-wizards.
        self.createAccountBtn = QtGui.QPushButton("Create new Account")
        self.renameAccountBtn = QtGui.QPushButton("Rename your account")
        self.linkAccountBtn = QtGui.QPushButton("Link your account to Steam")
        self.forgotPasswordBtn = QtGui.QPushButton("Forgot Login or Password")
        self.reportBugBtn = QtGui.QPushButton("Report a Bug")

        self.createAccountBtn.released.connect(self.createAccount)
        self.renameAccountBtn.released.connect(self.renameAccount)
        self.linkAccountBtn.released.connect(self.linkAccount)
        self.forgotPasswordBtn.released.connect(self.forgotPassword)
        self.reportBugBtn.released.connect(self.reportBug)

        # Expose the widgets' values as wizard fields (read in accept()).
        self.registerField('login', self.loginLineEdit)
        self.registerField('password', self.passwordLineEdit)
        self.registerField('remember', self.rememberCheckBox)
        self.registerField('autologin', self.autologinCheckBox)

        layout = QtGui.QGridLayout()
        layout.addWidget(loginLabel, 1, 0)
        layout.addWidget(self.loginLineEdit, 1, 1)
        layout.addWidget(passwordLabel, 2, 0)
        layout.addWidget(self.passwordLineEdit, 2, 1)
        layout.addWidget(self.rememberCheckBox, 3, 0, 1, 3)
        layout.addWidget(self.autologinCheckBox, 4, 0, 1, 3)
        layout.addWidget(self.createAccountBtn, 5, 0, 1, 3)
        layout.addWidget(self.renameAccountBtn, 6, 0, 1, 3)
        layout.addWidget(self.linkAccountBtn, 7, 0, 1, 3)
        layout.addWidget(self.forgotPasswordBtn, 8, 0, 1, 3)
        layout.addWidget(self.reportBugBtn, 10, 0, 1, 3)
        self.setLayout(layout)

    def rememberCheck(self):
        # Mirror the checkbox state onto the client immediately.
        self.client.remember = self.rememberCheckBox.isChecked()

    @QtCore.pyqtSlot()
    def createAccount(self):
        wizard = creationAccountWizard(self)
        if wizard.exec_():
            # Re-load credentials after successful creation.
            self.loginLineEdit.setText(self.client.login)
            self.setField('password', "!!!password!!!")
            self.parent.password = self.client.password  # This is needed because we're writing the field in accept()

    @QtCore.pyqtSlot()
    def linkAccount(self):
        QtGui.QDesktopServices.openUrl(QtCore.QUrl(Settings.get("STEAMLINK_URL")))

    @QtCore.pyqtSlot()
    def renameAccount(self):
        QtGui.QDesktopServices.openUrl(QtCore.QUrl(Settings.get("NAME_CHANGE_URL")))

    @QtCore.pyqtSlot()
    def forgotPassword(self):
        QtGui.QDesktopServices.openUrl(QtCore.QUrl(Settings.get("PASSWORD_RECOVERY_URL")))

    @QtCore.pyqtSlot()
    def reportBug(self):
        QtGui.QDesktopServices.openUrl(QtCore.QUrl(Settings.get("TICKET_URL")))
class creationAccountWizard(QtGui.QWizard):
    """Wizard that walks the user through creating a new FAF account."""

    def __init__(self, parent=None):
        super(creationAccountWizard, self).__init__(parent)
        self.client = parent.client

        self.setOption(QtGui.QWizard.DisabledBackButtonOnLastPage)
        for page in (IntroPage(), AccountCreationPage(self), AccountCreated()):
            self.addPage(page)

        self.setWizardStyle(QtGui.QWizard.ModernStyle)
        self.setPixmap(QtGui.QWizard.BannerPixmap,
                       QtGui.QPixmap('client/banner.png'))
        self.setPixmap(QtGui.QWizard.BackgroundPixmap,
                       QtGui.QPixmap('client/background.png'))
        self.setWindowTitle("Create Account")
class gameSettingsWizard(QtGui.QWizard):
    """Single-page wizard for choosing the game port and UPnP usage."""

    def __init__(self, client, *args, **kwargs):
        QtGui.QWizard.__init__(self, *args, **kwargs)
        self.client = client

        page = GameSettings()
        page.gamePortSpin.setValue(self.client.gamePort)
        page.checkUPnP.setChecked(self.client.useUPnP)
        self.settings = page
        self.addPage(page)

        self.setWizardStyle(1)
        self.setPixmap(QtGui.QWizard.BannerPixmap,
                       QtGui.QPixmap('client/banner.png'))
        self.setPixmap(QtGui.QWizard.BackgroundPixmap,
                       QtGui.QPixmap('client/background.png'))
        self.setWindowTitle("Set Game Port")

    def accept(self):
        """Persist the chosen port and UPnP flag on the client."""
        page = self.settings
        self.client.gamePort = page.gamePortSpin.value()
        self.client.useUPnP = page.checkUPnP.isChecked()
        self.client.savePort()
        QtGui.QWizard.accept(self)
class mumbleOptionsWizard(QtGui.QWizard):
    """Single-page wizard toggling the Mumble voice integration."""

    def __init__(self, client, *args, **kwargs):
        QtGui.QWizard.__init__(self, *args, **kwargs)
        self.client = client

        page = MumbleSettings()
        page.checkEnableMumble.setChecked(self.client.enableMumble)
        self.settings = page
        self.addPage(page)

        self.setWizardStyle(1)
        self.setPixmap(QtGui.QWizard.BannerPixmap,
                       QtGui.QPixmap('client/banner.png'))
        self.setPixmap(QtGui.QWizard.BackgroundPixmap,
                       QtGui.QPixmap('client/background.png'))
        self.setWindowTitle("Configure Voice")

    def accept(self):
        """Persist the Mumble enable flag on the client."""
        self.client.enableMumble = self.settings.checkEnableMumble.isChecked()
        self.client.saveMumble()
        QtGui.QWizard.accept(self)
class IntroPage(QtGui.QWizardPage):
    """First page of the account-creation wizard: a short welcome blurb."""

    def __init__(self, parent=None):
        super(IntroPage, self).__init__(parent)

        self.setTitle("Welcome to FA Forever.")
        self.setSubTitle("In order to play, you first need to create an account.")
        self.setPixmap(QtGui.QWizard.WatermarkPixmap,
                       util.pixmap("client/account_watermark_intro.png"))

        blurb = QtGui.QLabel(
            "This wizard will help you in the process of account creation."
            "<br/><br/><b>At this time, we only allow one account per computer.</b>")
        blurb.setWordWrap(True)

        layout = QtGui.QVBoxLayout()
        layout.addWidget(blurb)
        self.setLayout(layout)
class AccountCreationPage(QtGui.QWizardPage):
    """Form page collecting login, password and email for a new account.

    On validation the page sends a ``create_account`` request to the server
    and blocks until the client state changes, rejecting the page on failure.
    """

    def __init__(self, parent=None):
        super(AccountCreationPage, self).__init__(parent)

        self.parent = parent
        self.client = parent.client
        self.setTitle("Account Creation")
        self.setSubTitle("Please enter your desired login and password. Note that your password will not be stored on our server. Please specify a working email address in case you need to change it.")
        self.setPixmap(QtGui.QWizard.WatermarkPixmap, util.pixmap("client/account_watermark_input.png"))

        loginLabel = QtGui.QLabel("&User name :")
        self.loginLineEdit = QtGui.QLineEdit()
        # Login: one letter, then up to 15 letters/digits/underscore/hyphen.
        rxLog = QtCore.QRegExp("[A-Z,a-z]{1}[A-Z,a-z,0-9,_,-]{0,15}")
        validLog = QtGui.QRegExpValidator(rxLog, self)
        self.loginLineEdit.setValidator(validLog)
        loginLabel.setBuddy(self.loginLineEdit)

        passwordLabel = QtGui.QLabel("&Password :")
        self.passwordLineEdit = QtGui.QLineEdit()
        passwordLabel.setBuddy(self.passwordLineEdit)
        # Use the symbolic constant instead of the magic value 2.
        self.passwordLineEdit.setEchoMode(QtGui.QLineEdit.Password)

        passwordCheckLabel = QtGui.QLabel("&Re-type Password :")
        self.passwordCheckLineEdit = QtGui.QLineEdit()
        passwordCheckLabel.setBuddy(self.passwordCheckLineEdit)
        self.passwordCheckLineEdit.setEchoMode(QtGui.QLineEdit.Password)

        EmailLabel = QtGui.QLabel("E-mail :")
        self.EmailLineEdit = QtGui.QLineEdit()
        # BUG FIX: the '.' before the TLD was unescaped, so it matched any
        # character (e.g. "a@bXcom" validated). Escape it.
        rxMail = QtCore.QRegExp(r"^[a-zA-Z0-9]{1}[a-zA-Z0-9._%-]+@[a-zA-Z0-9._%-]+\.[a-zA-Z]{2,6}$")
        validMail = QtGui.QRegExpValidator(rxMail, self)
        self.EmailLineEdit.setValidator(validMail)

        # Mandatory wizard fields (the '*' suffix requires non-empty input).
        self.registerField('login*', self.loginLineEdit)
        self.registerField('password*', self.passwordLineEdit)
        self.registerField('passwordCheck*', self.passwordCheckLineEdit)
        self.registerField('email*', self.EmailLineEdit)

        layout = QtGui.QGridLayout()
        layout.addWidget(loginLabel, 1, 0)
        layout.addWidget(self.loginLineEdit, 1, 1)
        layout.addWidget(passwordLabel, 2, 0)
        layout.addWidget(self.passwordLineEdit, 2, 1)
        layout.addWidget(passwordCheckLabel, 3, 0)
        layout.addWidget(self.passwordCheckLineEdit, 3, 1)
        layout.addWidget(EmailLabel, 4, 0)
        layout.addWidget(self.EmailLineEdit, 4, 1)
        self.setLayout(layout)

    def validateEmail(self, email):
        """Return True if *email* looks like a valid address.

        BUG FIX: the '.' before the TLD was unescaped (matched any char).
        """
        return re.match(r"^[a-zA-Z0-9._%-]+@[a-zA-Z0-9._%-]+\.[a-zA-Z]{2,6}$", email) is not None

    def validatePage(self):
        """Check the form, then ask the server to create the account.

        Returns True (advancing the wizard) only when the server accepts.
        """
        password = self.passwordLineEdit.text().encode("utf-8")
        confirm_password = self.passwordCheckLineEdit.text().encode("utf-8")
        if password != confirm_password:
            QtGui.QMessageBox.information(self, "Create account", "Passwords don't match!")
            return False

        # Hashing the password client-side is not an effective way of ensuring
        # security, but now we have a database full of sha256(password) we have
        # to start considering sha256(password) to _be_ the user's password,
        # and enforce a saner policy atop this.
        #
        # Soon. We promise. Hopefully before large scale identity theft takes place.
        #
        # BUG FIX: ``password`` is already utf-8 encoded bytes; the previous
        # ``password.encode("utf-8")`` double-encoded it, which breaks (Py2:
        # implicit ASCII decode fails on non-ASCII; Py3: bytes has no encode).
        hashed_password = hashlib.sha256(password).hexdigest()

        email = self.EmailLineEdit.text()
        if not self.validateEmail(email):
            QtGui.QMessageBox.information(self, "Create account", "Invalid Email address!")
            return False

        login = self.loginLineEdit.text().strip()
        self.client.send({
            "command": "create_account",
            "login": login,
            "email": email,
            "password": hashed_password
        })

        # Wait for client state to change.
        util.wait(lambda: self.client.state)

        if self.client.state == ClientState.REJECTED:
            QtGui.QMessageBox.information(self, "Create account", "Sorry, this Login is not available, or the email address was already used.")
            return False
        else:
            self.client.login = login
            self.client.password = hashed_password
            return True
class GameSettings(QtGui.QWizardPage):
    """Wizard page for the game's network settings (UDP port and UPnP)."""
    def __init__(self, parent=None):
        """Build the static widgets: explanation label, port spin box
        (1024-65535, default 6112) and the UPnP checkbox."""
        super(GameSettings, self).__init__(parent)
        self.parent = parent
        self.setTitle("Network Settings")
        self.setPixmap(QtGui.QWizard.WatermarkPixmap, util.pixmap("client/settings_watermark.png"))
        # Explanatory text with a link to the tech-support forum.
        self.label = QtGui.QLabel()
        self.label.setText('Forged Alliance needs an open UDP port to play. If you have trouble connecting to other players, try the UPnP option first. If that fails, you should try to open or forward the port on your router and firewall.<br/><br/>Visit the <a href="http://forums.faforever.com/forums/viewforum.php?f=3">Tech Support Forum</a> if you need help.<br/><br/>')
        self.label.setOpenExternalLinks(True)
        self.label.setWordWrap(True)
        self.labelport = QtGui.QLabel()
        self.labelport.setText("<b>UDP Port</b> (default 6112)")
        self.labelport.setWordWrap(True)
        # Restrict to non-privileged ports (>= 1024) up to the UDP maximum.
        self.gamePortSpin = QtGui.QSpinBox()
        self.gamePortSpin.setMinimum(1024)
        self.gamePortSpin.setMaximum(65535)
        self.gamePortSpin.setValue(6112)
        self.checkUPnP = QtGui.QCheckBox("use UPnP")
        self.checkUPnP.setToolTip("FAF can try to open and forward your game port automatically using UPnP.<br/><b>Caution: This doesn't work for all connections, but may help with some routers.</b>")
        layout = QtGui.QVBoxLayout()
        layout.addWidget(self.label)
        layout.addWidget(self.labelport)
        layout.addWidget(self.gamePortSpin)
        layout.addWidget(self.checkUPnP)
        self.setLayout(layout)
    def validatePage(self):
        # Nothing to validate here; always allow advancing to the next page.
        return 1
class MumbleSettings(QtGui.QWizardPage):
    """Wizard page offering to enable the Mumble voice connector."""
    def __init__(self, parent=None):
        """Build the explanation label and the enable-Mumble checkbox."""
        super(MumbleSettings, self).__init__(parent)
        self.parent = parent
        self.setTitle("Voice Settings")
        self.setPixmap(QtGui.QWizard.WatermarkPixmap, util.pixmap("client/settings_watermark.png"))
        # Explanatory text with a download link for Mumble.
        self.label = QtGui.QLabel()
        self.label.setText('FAF supports the automatic setup of voice connections between you and your team mates. It will automatically move you into a channel with your team mates anytime you enter a game lobby or start a game. To enable, download and install <a href="http://mumble.sourceforge.net/">Mumble</a> and tick the checkbox below.')
        self.label.setOpenExternalLinks(True)
        self.label.setWordWrap(True)
        self.checkEnableMumble = QtGui.QCheckBox("Enable Mumble Connector")
        layout = QtGui.QVBoxLayout()
        layout.addWidget(self.label)
        layout.addWidget(self.checkEnableMumble)
        self.setLayout(layout)
    def validatePage(self):
        # Nothing to validate here; always allow advancing to the next page.
        return 1
class AccountCreated(QtGui.QWizardPage):
    """Final wizard page shown after the account was created successfully."""
    def __init__(self, *args, **kwargs):
        QtGui.QWizardPage.__init__(self, *args, **kwargs)
        # Mark as final so the wizard shows "Finish" instead of "Next".
        self.setFinalPage(True)
        self.setTitle("Congratulations!")
        self.setSubTitle("Your Account has been created.")
        self.setPixmap(QtGui.QWizard.WatermarkPixmap, util.pixmap("client/account_watermark_created.png"))
        self.label = QtGui.QLabel()
        self.label.setWordWrap(True)
        layout = QtGui.QVBoxLayout()
        layout.addWidget(self.label)
        self.setLayout(layout)
    def initializePage(self):
        # Called by QWizard each time this page becomes the current page.
        self.label.setText("You will be redirected to the login page.")
| Sheeo/client | src/client/loginwizards.py | Python | gpl-3.0 | 16,575 | [
"VisIt"
] | 6c10e46d1d90446722475c113a8192c83ce4a74ba6c256b36f22b1ed78a42f15 |
# Renormalize EPW (electron-phonon Wannier) results: for a range of q-point
# grids, read the calculated coupling strength lambda from the data files and
# print the derived quasi-particle mass factor.
from __future__ import print_function
from bose_einstein import bose_einstein
from constant import htr_to_meV
import argparser
import mass_factor
import numpy as np
import sys

args = argparser.read_argument('Renormalize EPW calculation')

# Band-dependent energy offset (units per 'constant' module) and band index:
# 36 = valence band, 37 = conduction band.
if args.vb:
    offset = -8.75333295715961e-03
    band_str = '36'
else:
    offset = 8.53193322468371e-03
    band_str = '37'

temp_str = '%03dK' % args.temp
# Select the q-point grid range for the requested mode/temperature.
if args.acoustic:
    rng_qpt = range(8000, 10001, 500)
elif args.temp == 1:
    rng_qpt = range(40000, 50001, 1000)
elif args.temp == 150:
    rng_qpt = range(80000, 100001, 5000)
elif args.temp == 300:
    rng_qpt = range(80000, 100001, 5000)
else:
    print("temperature " + str(args.temp) + " not available")
    # Exit with a nonzero status on this error path (the original used a
    # bare exit(), which reported success).
    sys.exit(1)

dir_str = 'gx'
for qpt in rng_qpt:
    qpt_str = '%06d' % qpt
    if args.acoustic:
        # Acoustic-only data uses an unpadded temperature/q-point naming scheme.
        temp_str = '%dK' % args.temp
        qpt_str = str(qpt)
        filename = ('data/epw_all_28424_' + temp_str + '_5meV_acoustic_only/data_'
                    + dir_str + '_' + band_str + '_' + qpt_str + '.dat')
    else:
        filename = 'data/res_' + temp_str + '_1meV/data_' + dir_str + '_' + band_str + '_' + qpt_str + '.dat'
    # Only the first line is needed; 'with' guarantees the handle is closed
    # (the original opened the file and never closed it).
    with open(filename, 'r') as file_epw:
        line = file_epw.readline()
    data = line.split()
    # lambda is the fifth column.  np.float was removed in NumPy 1.24; the
    # builtin float is equivalent here.
    lam = float(data[4])
    mf = mass_factor.eval(lam, args.method)
    print(args.temp, mf, lam)
| mmdg-oxford/papers | Schlipf-PRL-2018/model/epw_mass.py | Python | gpl-3.0 | 1,237 | [
"EPW"
] | f5b89ca0419b533ed854f4ca47bca338ff558d74ce4f3e3697b18b3dd80ab096 |
import os
import logging
# Module-level logger at DEBUG so the backend checks below are visible.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
import tensorflow as tf
# Force the TensorFlow backend; must be set before 'keras' is imported so
# Keras picks it up at import time.
os.environ['KERAS_BACKEND'] = 'tensorflow'
import keras
from packaging import version
# The autoencoder relies on Keras >= 2.2 APIs (e.g. Model.add_loss usage below).
assert version.parse(keras.__version__) >= version.parse("2.2.0"), \
    "Keras version too old for the autoencoder, need at least 2.2.x"
from keras import backend as K
logger.info(f"Backend: {K.backend()}")
# Fail fast if the environment variable above did not take effect.
assert K.backend() == "tensorflow", f"Keras should use the tensorflow backend, not {K.backend()}"
from keras.layers import Lambda, Input, Dense
from keras.models import Model
from keras.losses import mse, binary_crossentropy
# plot_model requires that pydot is installed
#from keras.utils import plot_model
#from toolbox import toolbox
from network import VariationalAutoencoder
from network.keras_tensorflow import Network as KerasNetwork
class KerasAutoencoder(VariationalAutoencoder, KerasNetwork):
    """A (variational) autoencoder implemented in Keras.

    The encoder maps an input vector to the parameters (z_mean, z_log_var)
    of a diagonal Gaussian over the latent space and to a sample z drawn
    from it; the decoder maps latent vectors back to input space.  The
    training loss is the reconstruction error (MSE or binary cross-entropy,
    selected by the ``loss`` constructor argument) plus the KL divergence
    of the latent distribution from a standard Gaussian.

    Attributes
    ----------
    _vae
        The combined encoder+decoder model used for training.
    _encoder
        Model mapping inputs to [z_mean, z_log_var, z].
    _decoder
        Model mapping latent vectors to reconstructions.
    _inputs, _outputs
        Input/output tensors of the combined model.
    _epochs, _batch_size, _weights_file, _mse
        Presumably managed by the base classes; not set in this file --
        TODO confirm.
    """

    def __init__(self, original_dim, *args, intermediate_dim: int=512,
                 latent_dim: int=2, loss: str='mse', **kwargs):
        """Construct a new, fully connected (dense) autoencoder.

        Both encoder and decoder will have one intermediate layer of the
        given dimension.

        Parameters
        ----------
        original_dim:
            Dimensionality of the (flattened) input vectors.
        intermediate_dim:
            Width of the single hidden layer of encoder and decoder.
        latent_dim:
            Dimensionality of the latent code z.
        loss:
            'mse' selects mean squared error; any other value selects
            binary cross-entropy.
        """
        logger.info(f"New VAE: {original_dim}/{intermediate_dim}/{latent_dim}")
        super().__init__(*args, **kwargs)
        self._original_dim = original_dim
        self._intermediate_dim = intermediate_dim
        self._latent_dim = latent_dim
        self._loss = loss

    def _compute_layer_ids(self):
        # FIXME[concept]: what layer ids do we want to provide here?
        return []

    def _prepare(self):
        """Build encoder, decoder and the combined VAE model.

        Runs inside ``self._graph`` (provided by the base class) so the
        model lives in this network's private TensorFlow graph.
        """
        super()._prepare()
        # network parameters
        input_shape = (self._original_dim, )
        # VAE model = encoder + decoder
        with self._graph.as_default():
            #
            # (1) build encoder model
            #
            self._inputs = Input(shape=input_shape, name='encoder_input')
            x = Dense(self._intermediate_dim, activation='relu')(self._inputs)
            self._z_mean = Dense(self._latent_dim, name='z_mean')(x)
            self._z_log_var = Dense(self._latent_dim, name='z_log_var')(x)
            # Use reparameterization trick to push the sampling out as
            # input (note that "output_shape" isn't necessary with the
            # TensorFlow backend)
            self._z = Lambda(self._sampling, output_shape=(self._latent_dim,),
                             name='z')([self._z_mean, self._z_log_var])
            # instantiate encoder model. It provides two kinds of outputs:
            # - (z_mean, z_log_var): a pair describing the mean and (log)
            #   variance of the code variable z (for input x)
            # - z: a value sampled from that distribution
            self._encoder = Model(self._inputs,
                                  [self._z_mean, self._z_log_var, self._z],
                                  name='encoder')
            self._encoder.summary(print_fn=self._print_fn)
            # plot_model requires pydot
            #plot_model(self._encoder, to_file='vae_mlp_encoder.png',
            #           show_shapes=True)
            #
            # (2) build decoder model
            #
            latent_inputs = Input(shape=(self._latent_dim,), name='z_sampling')
            x = Dense(self._intermediate_dim, activation='relu')(latent_inputs)
            self._outputs = Dense(self._original_dim, activation='sigmoid')(x)
            # instantiate decoder model
            self._decoder = Model(latent_inputs, self._outputs, name='decoder')
            self._decoder.summary(print_fn=self._print_fn)
            # plot_model requires pydot installed
            #plot_model(self._decoder, to_file='vae_mlp_decoder.png', show_shapes=True)
            #
            # (3) define the loss function
            #
            # Index [2] selects the sampled z from the encoder's outputs.
            self._outputs = self._decoder(self._encoder(self._inputs)[2])
            if self._loss == 'mse':
                reconstruction_loss = mse(self._inputs, self._outputs)
            else:
                reconstruction_loss = binary_crossentropy(self._inputs,
                                                          self._outputs)
            # VAE loss = mse_loss or xent_loss + kl_loss
            reconstruction_loss *= self._original_dim
            kl_loss = (1 + self._z_log_var -
                       K.square(self._z_mean) - K.exp(self._z_log_var))
            kl_loss = K.sum(kl_loss, axis=-1)
            kl_loss *= -0.5
            vae_loss = K.mean(reconstruction_loss + kl_loss)
            #
            # (4) instantiate VAE model
            #
            self._vae = Model(self._inputs, self._outputs, name='vae_mlp')
            self._vae.add_loss(vae_loss)
            self._vae.compile(optimizer='adam')
            self._vae.summary(print_fn=self._print_fn)
            self._model = self._vae

    def _unprepare(self) -> None:
        """Drop all model references so the graph can be rebuilt or freed."""
        self._model = None
        self._vae = None
        self._inputs = None
        self._z_mean = None
        self._z_log_var = None
        self._z = None
        self._encoder = None
        self._outputs = None
        self._decoder = None
        super()._unprepare()

    def _print_fn(self, line):
        # Route Keras' summary() output through our logger.
        logger.info(line)

    # use the reparameterization trick:
    # instead of sampling from Q(z|X), sample eps = N(0,I)
    # z = z_mean + sqrt(var)*eps
    def _sampling(self, args):
        """Reparameterization trick by sampling from an isotropic unit
        Gaussian.

        Arguments
        ---------
        args (tensor): mean and log of variance of Q(z|X)

        Returns
        -------
        z (tensor): sampled latent vector
        """
        z_mean, z_log_var = args
        batch = K.shape(z_mean)[0]
        dim = K.int_shape(z_mean)[1]
        # by default, random_normal has mean=0 and std=1.0
        epsilon = K.random_normal(shape=(batch, dim))
        return z_mean + K.exp(0.5 * z_log_var) * epsilon

    def train(self, data, validation, epochs, batch_size, progress):
        """Fit the VAE on *data*, validating on *validation*.

        *progress* is passed to Keras as a callback -- presumably a
        keras.callbacks.Callback implementation; TODO confirm.
        """
        #toolbox.acquire()
        with self._graph.as_default():
            with self._session.as_default():
                self._vae.fit(data,
                              epochs=epochs,
                              verbose=0,
                              batch_size=batch_size,
                              validation_data=(validation, None),
                              callbacks=[progress])
        #toolbox.release()

    def encode(self, data, batch_size=None):
        """Return the latent mean (z_mean) for each row of *data*."""
        with self._graph.as_default():
            with self._session.as_default():
                z_mean, _, _ = \
                    self._encoder.predict(data, batch_size=batch_size)
                return z_mean

    def decode(self, data, batch_size=None):
        """Decode latent vectors *data* back to input space."""
        with self._graph.as_default():
            with self._session.as_default():
                x_decoded = \
                    self._decoder.predict(data, batch_size=batch_size)
                return x_decoded

    def reconstruct(self, data, batch_size=None):
        """Encode and decode *data* through the full VAE."""
        with self._graph.as_default():
            with self._session.as_default():
                reconstruction = self._vae.predict(data, batch_size=batch_size)
                return reconstruction

    def sample_code(self, input=None, params=None, n=1, batch_size=None):
        """Sample code values, either for given input values, or for given
        distribution parameters.

        Parameters
        ----------
        input:
            Optional batch of inputs; codes are sampled by the encoder.
        params:
            Optional dict with 'z_mean' and 'z_log_var' entries (scalars or
            arrays) describing the latent distribution to sample from.
        n:
            Number of samples when scalar parameters are given.

        Raises
        ------
        ValueError
            If neither *input* nor *params* is given (previously this fell
            through to an UnboundLocalError).
        """
        # numpy is not imported at this file's module level, so import it
        # locally where it is actually needed.
        import numpy as np

        with self._graph.as_default():
            with self._session.as_default():
                feed_dict = {}
                if params is not None:
                    z_mean = params['z_mean']
                    # BUG FIX: the original used 'instanceof', which is not a
                    # Python builtin and raised NameError.
                    if not isinstance(z_mean, np.ndarray):
                        z_mean = np.full(n, z_mean)
                    feed_dict[self._z_mean] = z_mean
                    z_log_var = params['z_log_var']
                    if not isinstance(z_log_var, np.ndarray):
                        z_log_var = np.full(n, z_log_var)
                    # BUG FIX: previously assigned to feed_dict[self._z_mean],
                    # overwriting the mean instead of feeding the variance.
                    feed_dict[self._z_log_var] = z_log_var
                    z = self._z.eval(feed_dict=feed_dict)
                elif input is not None:
                    _, _, z = \
                        self._encoder.predict(input, batch_size=batch_size)
                else:
                    raise ValueError("sample_code requires either 'input' "
                                     "or 'params'")
                return z
| Petr-By/qtpyvis | models/example_keras_vae_mnist.py | Python | mit | 8,641 | [
"Gaussian"
] | 88db618a4e6a744c125c180c092edd18d74a9776178958acb1ebe5ca9194405e |
#! /usr/bin/env python
#
# @BEGIN LICENSE
#
# versioner.py: defines look-ahead auto-versioning from metadata.py
#
# Copyright (c) 2017 The Psi4 Developers
#
# All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
#
# @END LICENSE
#
from __future__ import print_function
import argparse
import os
import re
import subprocess
import sys
def collect_version_input_from_fallback(meta_file='metadata.py'):
    """From *meta_file*, collect lines matching ``__version_{key} = '{value}'``
    and return them as a dictionary keyed by ``{key}``.

    The file is looked up relative to this script's directory.  The entry
    for ``__version__`` itself (captured under the key ``'_'``) is discarded.
    """
    cwd = os.path.dirname(os.path.abspath(__file__))
    # 'with' closes the handle promptly (the original leaked it); the raw
    # string avoids an invalid-escape warning for '\s' on newer Pythons.
    with open(os.path.join(cwd, meta_file)) as handle:
        contents = handle.read()
    res = dict(re.findall(r"__version_([a-z_]+)\s*=\s*'([^']+)'", contents))
    res.pop('_')
    return res
def is_git_repo(cwd='./', dot_git_qualifies=False, no_git_cmd_result=False):
    """Report whether *cwd* lies inside a git working tree.

    Falls back to *no_git_cmd_result* when the ``git`` executable cannot
    be run at all, and to *dot_git_qualifies* when *cwd* is inside the
    ``.git`` directory of a repository.
    """
    try:
        proc = subprocess.Popen(
            ['git', 'rev-parse', '--is-inside-work-tree'],
            stderr=subprocess.PIPE, stdout=subprocess.PIPE,
            cwd=cwd, universal_newlines=True)
    except EnvironmentError:
        # Most likely the git command is not installed / not on PATH.
        return no_git_cmd_result
    out, _err = proc.communicate()
    if proc.returncode != 0:
        # e.g. "fatal: Not a git repository (or any of the parent directories)"
        return False
    answer = out.strip()
    if answer == 'true':
        # Inside a working tree proper.
        return True
    if answer == 'false':
        # Inside the repository's .git directory.
        return dot_git_qualifies
def collect_version_input_from_git():
    """Returns a dictionary filled with ``git describe`` results, clean/dirty
    flag, and branch status. *cwd* should already be confirmed as a git
    repository; this doesn't catch returncodes or EnvironmentErrors because the
    raised errors are preferred to incomplete return dictionary.
    """
    cwd = os.path.dirname(os.path.abspath(__file__))
    res = {}
    # * only want annotated tags, so not --all
    # * in case *no* tags (impossible in Psi4), --always gets at least hash
    # * get commits & hash info even if on tag using --long
    command = 'git describe --abbrev=7 --long --always HEAD'
    process = subprocess.Popen(
        command.split(), stderr=subprocess.PIPE, stdout=subprocess.PIPE, cwd=cwd, universal_newlines=True)
    (out, err) = process.communicate()
    # Matches either "v1.1-62-ga68d223"-style describe output or a bare hash.
    sane_describe = re.compile(
        """^v?(?P<tag>(?P<forwardseries>(?P<major>\d+)\.(?P<minor>\d+))[\.]?(?P<patch>\d+)?[-]?(?P<prere>((a)|(b)|(rc))\d+)?)[-]?(?P<commits>\d+)?[-]?(?P<gsha>\w+)?|(?P<sha>\w+)?$"""
    )
    mobj = sane_describe.match(out)
    if mobj.group('tag'):
        # We got a tag!
        # normal: 0.1-62-ga68d223
        # NOTE(review): [:-1] strips the trailing '-' captured inside the tag
        # group (the leading "v" is already excluded by the pattern).
        res['latest_annotated_v_tag'] = mobj.group('tag')[:-1] # drop the "v"; tag mismatch caught later
        res['commits_since_tag'] = mobj.group('commits')
        res['seven_char_hash'] = mobj.group('gsha')[1:] # drop the "g" git identifier
    else:
        # no tag present: a68d223
        res['latest_annotated_v_tag'] = ''
        res['commits_since_tag'] = ''
        res['seven_char_hash'] = mobj.group('sha') # no prepended "g"
    # A non-empty diff-index means tracked files carry uncommitted changes.
    command = 'git diff-index --name-only HEAD'
    process = subprocess.Popen(
        command.split(), stderr=subprocess.PIPE, stdout=subprocess.PIPE, cwd=cwd, universal_newlines=True)
    (out, err) = process.communicate()
    res['is_clean'] = False if str(out).rstrip() else True
    command = 'git rev-parse --abbrev-ref HEAD' # returns HEAD when detached
    process = subprocess.Popen(
        command.split(), stderr=subprocess.PIPE, stdout=subprocess.PIPE, cwd=cwd, universal_newlines=True)
    (out, err) = process.communicate()
    res['branch_name'] = str(out).rstrip()
    return res
def reconcile_and_compute_version_output(quiet=False):
    """Reconcile the version recorded in metadata.py with live git data.

    Decides whether the checkout corresponds to a release, a prerelease or
    a development snapshot, and returns a dict of ``__version*`` entries
    (version strings, cleanliness, branch name, a CMake-comparable
    M.m.p.t version and release flags).
    """
    res = collect_version_input_from_fallback(meta_file='metadata.py')
    # 'long' looks like "1.1+abc1234": split into recorded tag and hash.
    meta_latest_annotated_v_tag, _, meta_seven_char_hash = res['long'].partition('+')
    meta_most_recent_release = res['most_recent_release']
    # this is the tag format (PEP440 compliant) that our machinery is expecting.
    # let's catch any deviations with Travis before it can corrupt versioning.
    sane_tag = re.compile(
        """^(?P<tag>(?P<forwardseries>(?P<major>\d+)\.(?P<minor>\d+))[\.]?(?P<patch>[0-9]+)?[-]?(?P<prere>((a)|(b)|(rc))\d+)?)$"""
    )
    mobj = sane_tag.match(meta_latest_annotated_v_tag)
    if mobj:
        # some versioning machinery (looking at you, CMake) does strictly
        # numerical comparisons such as M.m.p.t and thus can't handle
        # prereleases and dev snapshots. We compute a Most Recent Ancestral
        # Release tag (e.g., 1.0 or 1.12.1) for a backward release series.
        backwardseries = mobj.group('tag')
        if mobj.group('prere'):
            backwardseries = meta_most_recent_release
    else:
        print("""Tag in {} is malformed: {}""".format('metadata.py', meta_latest_annotated_v_tag))
        sys.exit()
    cwd = os.path.dirname(os.path.abspath(__file__))
    if is_git_repo(cwd=cwd):
        res.update(collect_version_input_from_git())
        # establish the default response
        project_release = False
        project_prerelease = False
        project_version = 'undefined'
        project_version_long = 'undefined+' + res['seven_char_hash']
        if res['latest_annotated_v_tag'] == meta_latest_annotated_v_tag:
            trial_version_long_release = res['latest_annotated_v_tag'] + '+' + res['seven_char_hash']
            # NOTE(review): 'upcoming_annotated_v_tag' is not set anywhere in
            # this file; presumably recorded in metadata.py -- verify there.
            trial_version_devel = res['upcoming_annotated_v_tag'] + '.dev' + res['commits_since_tag']
            trial_version_long_devel = trial_version_devel + '+' + res['seven_char_hash']
            if int(res['commits_since_tag']) == 0:
                # Sitting exactly on the annotated tag: a (pre)release build.
                if trial_version_long_release == res['long']:
                    print("""Amazing, this can't actually happen that git hash stored at git commit.""")
                    sys.exit()
                else:
                    # 'zzzzzzz' is the placeholder hash recorded at tag time.
                    if meta_seven_char_hash == 'zzzzzzz':
                        if not quiet:
                            print("""Defining {} version: {} (recorded and computed)""".format(
                                'prerelease' if mobj.group('prere') else 'release', trial_version_long_release))
                        project_release = res['is_clean'] and not mobj.group('prere')
                        project_prerelease = res['is_clean'] and mobj.group('prere')
                        project_version = meta_latest_annotated_v_tag
                        project_version_long = trial_version_long_release
                    else:
                        print(
                            """Undefining version for irreconcilable hashes: {} (computed) vs {} (recorded)""".format(
                                trial_version_long_release, res['long']))
            else:
                # Commits exist on top of the tag: a development snapshot.
                if res['branch_name'].endswith('.x'):
                    print(
                        """Undefining version as development snapshots not allowed on maintenance branch: {} (rejected computed)""".
                        format(trial_version_long_devel))
                    # TODO prob should be undef unless on master
                else:
                    if not quiet:
                        print("""Defining development snapshot version: {} (computed)""".format(
                            trial_version_long_devel))
                    project_version = trial_version_devel
                    project_version_long = trial_version_long_devel
        else:
            print("""Undefining version for irreconcilable tags: {} (computed) vs {} (recorded)""".format(
                res['latest_annotated_v_tag'], meta_latest_annotated_v_tag))
    else:
        print("""Blindly (no git) accepting release version: {} (recorded)""".format(res['long']))
        # assumes that zip only comes from [pre]release. GitHub hides others, but they're there.
        project_release = not bool(mobj.group('prere'))
        project_prerelease = bool(mobj.group('prere'))
        project_version = meta_latest_annotated_v_tag
        project_version_long = res['long']
        res['is_clean'] = True
        res['branch_name'] = ''
    def mapped_cmake_version(last_release, is_release):
        """CMake expects MAJOR.MINOR.PATCH.TWEAK. The ancestral *last_release*
        is padded into the first three roles. If not *is_release*, the tweak role
        collects all postrelease states (prereleases and devel snapshots) into
        dummy 999 that at least gets them sorted correctly between releases and
        allows EXACT CMake version comparisons. Returns, for example, 1.1.0.0 for
        release 1.1, 1.3.4.0 for maintenance release 1.3.4, and 1.0.0.999 for
        prerelease 1.1a1 or snapshot 1.1.dev600
        """
        cm = last_release.split('.')
        cm += ['0'] * (4 - len(cm))
        if not is_release:
            cm[-1] = '999'
        cm = '.'.join(cm)
        return cm
    return {
        '__version__': project_version,
        '__version_long': project_version_long,
        '__version_is_clean': res['is_clean'],
        '__version_branch_name': res['branch_name'],
        '__version_last_release': backwardseries,
        '__version_cmake': mapped_cmake_version(backwardseries, project_release),
        '__version_release': project_release,
        '__version_prerelease': project_prerelease
    }
def write_new_metafile(versdata, outfile='metadata.out.py'):
    """Write *versdata* to *outfile* as a small importable Python module.

    The module defines one constant per entry (sorted by key), a
    ``version_formatter`` helper and a ``__main__`` hook that prints the
    full version string.
    """
    formatter_fn = """
def version_formatter(formatstring='{version}'):
    if formatstring == 'all':
        formatstring = '{version} {{{branch}}} {githash} {cmake} {clean} {release} {lastrel} <-- {versionlong}'
    release = 'release' if (__version_release == 'True') else ('prerelease' if (__version_prerelease == 'True') else '')
    ans = formatstring.format(version=__version__,
                              versionlong=__version_long,
                              githash=__version_long[len(__version__)+1:],
                              clean='' if __version_is_clean == 'True' else 'dirty',
                              branch=__version_branch_name,
                              lastrel=__version_last_release,
                              cmake=__version_cmake,
                              release=release)
    return ans
"""
    main_fn = """
if __name__ == '__main__':
    print(version_formatter(formatstring='all'))
"""
    # One "key = 'value'" assignment per entry, in sorted key order.
    assignments = ["{} = '{}'\n".format(key, versdata[key])
                   for key in sorted(versdata)]
    with open(os.path.abspath(outfile), 'w') as fh:
        fh.writelines(assignments)
        fh.write(formatter_fn)
        fh.write(main_fn)
def write_new_cmake_metafile(versdata, outfile='metadata.out.cmake'):
    """Write a CMake snippet generating ``<PN>ConfigVersion.cmake`` for the
    four-part version stored under ``__version_cmake``."""
    # Doubled braces render as literal ${WTO}/${PN} CMake variables.
    main_fn = """
include(CMakePackageConfigHelpers)
write_basic_package_version_file(
${{WTO}}/${{PN}}ConfigVersion.cmake
VERSION {ver}
COMPATIBILITY SameMajorVersion)
"""
    rendered = main_fn.format(ver=versdata['__version_cmake'])
    with open(os.path.abspath(outfile), 'w') as fh:
        fh.write(rendered)
def write_new_header_metafile(versdata, project_name, outfile='metadata.out.h'):
    """Write a C header exposing the project version as preprocessor macros.

    When the long version does not split into MAJOR.MINOR.PATCH.DESCRIBE,
    the numeric parts fall back to zeros and the describe field to
    placeholder 'uuuuuuu'.
    """
    main_fn = """
#pragma once
#define PROJECT_VERSION_MAJOR {major}
#define PROJECT_VERSION_MINOR {minor}
#define PROJECT_VERSION_PATCH {patch}
#define PROJECT_VERSION_DESCRIBE \"{describe}\"
#define {pn_allcaps}_VERSION ((PROJECT_VERSION_MAJOR << 16) | PROJECT_VERSION_MINOR | PROJECT_VERSION_PATCH)
#define PROJECT_VERSION \"{version}\"
#define GIT_COMMIT_HASH \"{githash}\"
#define GIT_COMMIT_BRANCH \"{branch}\"
"""
    short_version = versdata['__version__']
    branch = versdata['__version_branch_name']
    long_version = versdata['__version_long']
    try:
        major, minor, patch, describe = long_version.split('.')
    except ValueError:
        major, minor, patch, describe = (0, 0, 0, 'uuuuuuu')
    rendered = main_fn.format(
        major=major,
        minor=minor,
        patch=patch,
        pn_allcaps=project_name.upper(),
        describe=describe,
        version=long_version,
        githash=long_version[len(short_version) + 1:],
        branch=branch)
    with open(os.path.abspath(outfile), 'w') as fh:
        fh.write(rendered)
def version_formatter(versdata, formatstring="""{version}"""):
    """Render version information from *versdata* according to *formatstring*.

    Recognised placeholders: version, versionlong, githash, branch, clean,
    release, lastrel, cmake.  The special value ``'all'`` expands to a
    one-line summary of everything, e.g. '{branch}@{githash}' returns
    something like 'fix200@1234567'.
    """
    if formatstring == 'all':
        formatstring = '{version} {{{branch}}} {githash} {cmake} {clean} {release} {lastrel} <-- {versionlong}'
    short_version = versdata['__version__']
    long_version = versdata['__version_long']
    if versdata['__version_release']:
        release = 'release'
    elif versdata['__version_prerelease']:
        release = 'prerelease'
    else:
        release = ''
    return formatstring.format(
        version=short_version,
        versionlong=long_version,
        # The hash is everything after "<version>+" in the long form.
        githash=long_version[len(short_version) + 1:],
        clean='' if versdata['__version_is_clean'] else 'dirty',
        branch=versdata['__version_branch_name'],
        lastrel=versdata['__version_last_release'],
        cmake=versdata['__version_cmake'],
        release=release)
if __name__ == '__main__':
    # Command-line entry point: compute the reconciled version, write the
    # Python, CMake and C-header metadata files, then print the formatted
    # version string.
    parser = argparse.ArgumentParser(
        description=
        'Script to extract PCMSolver version from source. Use pcmsolver.version_formatter(fmt_string) after build.')
    parser.add_argument('--metaout', default='metadata.out.py', help='file to which the computed version info written')
    parser.add_argument(
        '--cmakeout', default='metadata.out.cmake', help='file to which the CMake ConfigVersion generator written')
    parser.add_argument('--headerout', default='metadata.out.h', help='header file to which the version info written')
    parser.add_argument(
        '--format', default='all', help='string like "{version} {githash}" to be filled in and returned')
    parser.add_argument(
        '--formatonly', action='store_true', help='print only the format string, not the detection info')
    args = parser.parse_args()
    # quiet=True suppresses the informational prints in the reconciler.
    ans = reconcile_and_compute_version_output(quiet=args.formatonly)
    write_new_metafile(ans, args.metaout)
    write_new_cmake_metafile(ans, args.cmakeout)
    write_new_header_metafile(ans, 'PCMSolver', args.headerout)
    ans2 = version_formatter(ans, formatstring=args.format)
    print(ans2)
| robertodr/pcmsolver | tools/versioner.py | Python | lgpl-3.0 | 14,918 | [
"Psi4"
] | 4e713dad43bad478ad93b8488240217144c766a72d7d1d91a016f7c8cf020fbc |
from datetime import datetime
from django.contrib.auth import authenticate, login
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import render
from rango.bing_search import run_query
from rango.forms import CategoryForm
from rango.forms import PageForm
from rango.models import Category
from rango.models import Page, User, UserProfile
from django.shortcuts import redirect
def index(request):
    """Render the Rango homepage with the top-5 categories and pages and a
    session-based visit counter that increments at most once per day."""
    # Query the database for a list of ALL categories currently stored.
    # Order the categories by no. likes in descending order.
    # Retrieve the top 5 only - or all if less than 5.
    # Place the list in our context_dict dictionary which will be passed to the template engine.
    category_list = Category.objects.order_by('-likes')[:5]
    page_list = Page.objects.order_by('-views')[:5]
    context_dict = {'categories': category_list, 'pages': page_list}
    # Get the number of visits to the site.
    # We use the COOKIES.get() function to obtain the visits cookie.
    # If the cookie exists, the value returned is casted to an integer.
    # If the cookie doesn't exist, we default to zero and cast that.
    visits = request.session.get('visits')
    if not visits:
        visits = 1
    reset_last_visit_time = False
    last_visit = request.session.get('last_visit')
    if last_visit:
        # The stored value is str(datetime.now()); [:-7] strips the
        # microseconds suffix before parsing.
        last_visit_time = datetime.strptime(last_visit[:-7], "%Y-%m-%d %H:%M:%S")
        if (datetime.now() - last_visit_time).days > 0:
            # ...reassign the value of the cookie to +1 of what it was before...
            visits = visits + 1
            # ...and update the last visit cookie, too.
            reset_last_visit_time = True
    else:
        # Cookie last_visit doesn't exist, so create it to the current date/time.
        reset_last_visit_time = True
    if reset_last_visit_time:
        request.session['last_visit'] = str(datetime.now())
        request.session['visits'] = visits
    context_dict['visits'] = visits
    response = render(request,'rango/index.html', context_dict)
    return response
def about(request):
    """Render the about page, passing the session's visit counter along."""
    # {{ standardmessage }} and {{ visits }} are consumed by the template.
    context_dict = {
        'standardmessage': "This tutorial has been put together by Enzo Roiz, 2161561.",
        # A missing or falsy session counter means the site was not visited yet.
        'visits': request.session.get('visits') or 0,
    }
    return render(request, 'rango/about.html', context_dict)
def category(request, category_name_slug):
    """Show a single category: its pages ordered by views, plus optional
    Bing search results when a query was POSTed."""
    # Create a context dictionary which we can pass to the template rendering engine.
    context_dict = {}
    context_dict['result_list'] = None
    context_dict['query'] = None
    if request.method == 'POST':
        query = request.POST['query'].strip()
        if query:
            # Run our Bing function to get the results list!
            result_list = run_query(query)
            context_dict['result_list'] = result_list
            context_dict['query'] = query
    try:
        # Can we find a category name slug with the given name?
        # If we can't, the .get() method raises a DoesNotExist exception.
        # So the .get() method returns one model instance or raises an exception.
        category = Category.objects.get(slug=category_name_slug)
        context_dict['category_name'] = category.name
        # Count the category views
        # NOTE(review): the bare except deliberately swallows save() failures
        # so a broken counter never blocks rendering the page.
        try:
            category.views = category.views + 1
            category.save()
        except:
            pass
        # Retrieve all of the associated pages.
        # Note that filter returns >= 1 model instance.
        pages = Page.objects.filter(category=category).order_by('-views')
        # Adds our results list to the template context under name pages.
        context_dict['pages'] = pages
        # We also add the category object from the database to the context dictionary.
        # We'll use this in the template to verify that the category exists.
        context_dict['category'] = category
        #Category Slug
        context_dict['category_slug'] = category.slug
    except Category.DoesNotExist:
        # We get here if we didn't find the specified category.
        # Don't do anything - the template displays the "no category" message for us.
        return render(request, 'rango/category.html', context_dict)
    if not context_dict['query']:
        # No (non-empty) search was made: default the search box to the
        # category name.
        context_dict['query'] = category.name
    # Go render the response and return it to the client.
    return render(request, 'rango/category.html', context_dict)
@login_required
def add_category(request):
    """Display and process the form for creating a new Category
    (login required)."""
    # A HTTP POST?
    if request.method == 'POST':
        form = CategoryForm(request.POST)
        # Have we been provided with a valid form?
        if form.is_valid():
            # Save the new category to the database.
            form.save(commit=True)
            # Now call the index() view.
            # The user will be shown the homepage.
            return index(request)
        else:
            # The supplied form contained errors - just print them to the terminal.
            print form.errors
    else:
        # If the request was not a POST, display the form to enter details.
        form = CategoryForm()
    # Bad form (or form details), no form supplied...
    # Render the form with error messages (if any).
    return render(request, 'rango/add_category.html', {'form': form})
@login_required
def add_page(request, category_name_slug):
    """Add a new page to the category identified by *category_name_slug*
    (login required).

    Renders the page form on GET; on a valid POST saves the page (with its
    view counter initialised to zero) and shows the category view.  If the
    slug does not resolve, the form is rendered with category=None so the
    template can report the problem.
    """
    try:
        cat = Category.objects.get(slug=category_name_slug)
    except Category.DoesNotExist:
        cat = None
    if request.method == 'POST':
        form = PageForm(request.POST)
        if form.is_valid():
            if cat:
                page = form.save(commit=False)
                page.category = cat
                page.views = 0
                page.save()
                # probably better to use a redirect here.
                return category(request, category_name_slug)
        else:
            # print(x) is identical to the py2 print statement for a single
            # argument, and keeps this line valid on Python 3.
            print(form.errors)
    else:
        form = PageForm()
    context_dict = {'form': form, 'category': cat}
    # NOTE: a dead nested 'clean(self)' function was defined here in the
    # original; it was never called and belongs on the PageForm class, so
    # it has been removed.
    return render(request, 'rango/add_page.html', context_dict)
# def register(request):
#
# # A boolean value for telling the template whether the registration was successful.
# # Set to False initially. Code changes value to True when registration succeeds.
# registered = False
#
# # If it's a HTTP POST, we're interested in processing form data.
# if request.method == 'POST':
# # Attempt to grab information from the raw form information.
# # Note that we make use of both UserForm and UserProfileForm.
# user_form = UserForm(data=request.POST)
# profile_form = UserProfileForm(data=request.POST)
#
# # If the two forms are valid...
# if user_form.is_valid() and profile_form.is_valid():
# # Save the user's form data to the database.
# user = user_form.save()
#
# # Now we hash the password with the set_password method.
# # Once hashed, we can update the user object.
# user.set_password(user.password)
# user.save()
#
# # Now sort out the UserProfile instance.
# # Since we need to set the user attribute ourselves, we set commit=False.
# # This delays saving the model until we're ready to avoid integrity problems.
# profile = profile_form.save(commit=False)
# profile.user = user
#
# # Did the user provide a profile picture?
# # If so, we need to get it from the input form and put it in the UserProfile model.
# if 'picture' in request.FILES:
# profile.picture = request.FILES['picture']
#
# # Now we save the UserProfile model instance.
# profile.save()
#
# # Update our variable to tell the template registration was successful.
# registered = True
#
# # Invalid form or forms - mistakes or something else?
# # Print problems to the terminal.
# # They'll also be shown to the user.
# else:
# print user_form.errors, profile_form.errors
#
# # Not a HTTP POST, so we render our form using two ModelForm instances.
# # These forms will be blank, ready for user input.
# else:
# user_form = UserForm()
# profile_form = UserProfileForm()
#
# # Render the template depending on the context.
# return render(request,
# 'rango/register.html',
# {'user_form': user_form, 'profile_form': profile_form, 'registered': registered} )
# def user_login(request):
#
# # If the request is a HTTP POST, try to pull out the relevant information.
# if request.method == 'POST':
# # Gather the username and password provided by the user.
# # This information is obtained from the login form.
# username = request.POST['username']
# password = request.POST['password']
#
# # Use Django's machinery to attempt to see if the username/password
# # combination is valid - a User object is returned if it is.
# user = authenticate(username=username, password=password)
#
# # If we have a User object, the details are correct.
# # If None (Python's way of representing the absence of a value), no user
# # with matching credentials was found.
# if user:
# # Is the account active? It could have been disabled.
# if user.is_active:
# # If the account is valid and active, we can log the user in.
# # We'll send the user back to the homepage.
# login(request, user)
# return HttpResponseRedirect('/rango/')
# else:
# # An inactive account was used - no logging in!
# return HttpResponse("Your Rango account is disabled.")
# else:
# # Bad login details were provided. So we can't log the user in.
# if not username or not password:
# invalid_login = "Username and/or Password fields empty"
# else:
# invalid_login = "The login information for the user \"{0}\" doesn't match our database.".format(username)
# return render(request, 'rango/login.html', {'invalid_login': invalid_login})
#
# # The request is not a HTTP POST, so display the login form.
# # This scenario would most likely be a HTTP GET.
# else:
# # No context variables to pass to the template system, hence the
# # blank dictionary object...
# return render(request, 'rango/login.html')
@login_required
def restricted(request):
    """Simple page that is only visible to authenticated users."""
    return render(request, 'rango/restricted.html')
def search(request):
    """Render the search page; on POST, run the Bing query and show results."""
    results = []
    if request.method == 'POST':
        search_terms = request.POST['query'].strip()
        if search_terms:
            # Delegate the actual lookup to the Bing helper.
            results = run_query(search_terms)
    return render(request, 'rango/search.html', {'result_list': results})
# Use the login_required() decorator to ensure only those logged in can access the view.
# @login_required
# def user_logout(request):
# # Since we know the user is logged in, we can now just log them out.
# logout(request)
#
# # Take the user back to the homepage.
# return HttpResponseRedirect('/rango/')
def track_url(request):
    """Record a click on a page link, then redirect to the page's real URL.

    Expects ``page_id`` as a GET parameter.  On success the page's view
    counter is incremented and the browser is redirected to ``page.url``;
    on any lookup failure the user is sent back to the Rango homepage.
    """
    url = '/rango/'
    if request.method == 'GET':
        if 'page_id' in request.GET:
            page_id = request.GET['page_id']
            try:
                page = Page.objects.get(id=page_id)
                page.views += 1
                page.save()
                url = page.url
            # Narrowed from a bare ``except:`` (which also swallowed
            # KeyboardInterrupt/SystemExit): unknown id -> DoesNotExist,
            # non-numeric id -> ValueError.  Either way fall back home.
            except (Page.DoesNotExist, ValueError):
                pass
    return redirect(url)
@login_required
def like_category(request):
    """AJAX endpoint: increment the like counter of a category.

    Expects ``category_id`` as a GET parameter and returns the new like
    count as a plain-text HTTP response (0 if the category is unknown).
    """
    likes = 0
    if request.method == 'GET':
        # .get() instead of ['category_id'] so a missing parameter no
        # longer raises KeyError (HTTP 500).
        cat_id = request.GET.get('category_id')
        if cat_id:
            try:
                cat = Category.objects.get(id=int(cat_id))
            # Previously an unknown or non-numeric id produced an
            # unhandled DoesNotExist/ValueError; now it returns 0 likes.
            except (Category.DoesNotExist, ValueError):
                cat = None
            if cat:
                likes = cat.likes + 1
                cat.likes = likes
                cat.save()
    return HttpResponse(likes)
@login_required
def category_search(request):
    """Run a Bing search issued from a category page (GET) and render results."""
    context = {'result_list': None, 'query': None}
    if request.method == 'GET':
        terms = request.GET['query'].strip()
        if terms:
            # Delegate the actual lookup to the Bing helper.
            context['result_list'] = run_query(terms)
            context['query'] = terms
    return render(request, 'rango/category_search.html', context)
def suggest_category(request):
    """AJAX endpoint: return up to 8 categories matching a typed prefix."""
    def _matching_categories(limit=0, prefix=''):
        # Case-insensitive prefix filter; an empty prefix returns everything.
        if prefix:
            qs = Category.objects.filter(name__istartswith=prefix)
        else:
            qs = Category.objects.all()
        if limit > 0 and len(qs) > limit:
            qs = qs[:limit]
        return qs

    prefix = ''
    active_category = None
    if request.method == 'GET':
        prefix = request.GET['suggestion']
        # An explicit catid marks which category is currently active.
        if request.GET['catid'] != '':
            active_category = Category.objects.get(id=request.GET['catid'])
    cats = _matching_categories(8, prefix)
    return render(request, 'rango/cats.html', {'cats': cats, 'act_cat': active_category})
@login_required
def edit_profile(request):
    """Let the logged-in user update the website/picture on their profile."""
    # Get actual user
    user = request.user
    # Create the profile row on first visit so editing always has a target.
    user_profile, created = UserProfile.objects.get_or_create(user=user)
    # Check if request is by POST or GET method
    if request.method == 'POST':
        if 'website' in request.POST:
            user_profile.website = request.POST["website"]
        if 'picture' in request.FILES:
            user_profile.picture = request.FILES["picture"]
        user_profile.save()
        return redirect(index)
    else:
        return render(request, 'rango/edit_profile.html', {'user_profile':user_profile})
@login_required
def profile(request, username):
    """Show the profile page of *username*, viewed by the current user."""
    user = request.user
    # NOTE(review): an unknown username raises User.DoesNotExist (HTTP 500)
    # -- confirm this is intended rather than a 404.
    act_user = User.objects.get(username=username)
    # Ensure the viewed user has a profile row.
    user_profile, created = UserProfile.objects.get_or_create(user=act_user)
    return render(request, 'rango/profile.html', {'user_profile': user_profile, 'act_user': act_user, 'user':user })
@login_required
def users_profiles(request):
    """List every registered user together with all user profiles."""
    context = {
        'user_list': User.objects.all(),
        'userprofile_list': UserProfile.objects.all(),
    }
    return render(request, 'rango/users_profiles.html', context)
"VisIt"
] | 17e2bc0de9866ff80576603de7d52fc578ce4d403d8f462dbff0312b4f719c34 |
#!/usr/bin/env python
import pysam
import argparse
from collections import defaultdict as dd
from bx.intervals.intersection import Intersecter, Interval # pip install bx-python
import logging
FORMAT = '%(asctime)s %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def main(args):
    """Filter a TEBreak table, keeping the best-supported call per cluster.

    Two detections are neighbours when their (Left_Extreme, Right_Extreme)
    intervals, padded by ``--window`` bp, overlap on the same chromosome.
    A detection is printed only if no neighbour has strictly greater
    split-read support; ties are all retained (same rule as before).

    The original implementation read and re-parsed the input file twice;
    this version parses once and replays the cached records, producing
    byte-identical output (header first, then surviving rows in file order).
    """
    window = int(args.window)
    support = {}               # UUID -> total 5'+3' split-read support
    forest = dd(Intersecter)   # chromosome -> interval index of detections
    records = []               # (stripped line, uuid, chrom, start, end)

    with open(args.table, 'r') as table:
        header = []
        for i, line in enumerate(table):
            if i == 0:
                header = line.strip().split('\t')
                print(line.strip())
                continue
            # Map column names to values for this row.
            rec = dict(zip(header, line.strip().split('\t')))
            uuid = rec['UUID']
            chrom = rec['Chromosome']
            start = int(rec['Left_Extreme'])
            end = int(rec['Right_Extreme'])
            support[uuid] = int(rec['Split_reads_5prime']) + int(rec['Split_reads_3prime'])
            forest[chrom].add_interval(Interval(start, end, value=uuid))
            records.append((line.strip(), uuid, chrom, start, end))

    for line, uuid, chrom, start, end in records:
        biggest = True
        # Query the padded interval for competing detections.
        for prox in forest[chrom].find(start - window, end + window):
            if prox.value != uuid and support[prox.value] > support[uuid]:
                biggest = False
        if biggest:
            print(line)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Remove adjacent TE detections')
    parser.add_argument('-t', '--table', required=True, help='TEBreak table')
    # type=int validates the window at parse time; main() still calls
    # int(args.window), so passing a string programmatically keeps working.
    parser.add_argument('-w', '--window', type=int, default=500,
                        help='clustering window in bp (default: 500)')
    args = parser.parse_args()
    main(args)
| adamewing/tebreak | scripts/misc/decluster.py | Python | mit | 2,311 | [
"pysam"
] | d17ce394fdc90862d755c0e70f215d61099e4d61ea735f48e0d5bf73b6203f7d |
# Set up a basic scene for rendering.
# ParaView regression test (Python 2): renders two sources through a
# PythonView whose script scatter-plots their points with matplotlib,
# then compares the result against a baseline image.
from paraview.simple import *
import os
import sys

# Script executed inside ParaView's PythonView.  It is a *runtime string*,
# not module code; it must define setup_data() and render() for the view.
script = """
import paraview.numpy_support

# Utility to get next color
def getNextColor():
    colors = 'bgrcmykw'
    for c in colors:
        yield c

# This function must be defined. It is where specific data arrays are requested.
def setup_data(view):
    print "Setting up data"

# This function must be defined. It is where the actual rendering commands for matplotlib go.
def render(view,width,height):
    from paraview import python_view
    figure = python_view.matplotlib_figure(width,height)
    ax = figure.add_subplot(111)
    ax.hold = True
    numObjects = view.GetNumberOfVisibleDataObjects()
    print "num visible objects: ", numObjects
    for i, color in zip(xrange(0,numObjects), getNextColor()):
        dataObject = view.GetVisibleDataObjectForRendering(i)
        if dataObject:
            vtk_points = dataObject.GetPoints()
            if vtk_points:
                vtk_points_data = vtk_points.GetData()
                pts = paraview.numpy_support.vtk_to_numpy(vtk_points_data)
                x, y = pts[:,0], pts[:,1]
                ax.scatter(x, y, color=color)
    ax.hold = False
    return python_view.figure_to_image(figure)
"""

view = CreateView("PythonView")
view.Script = script

# Two visible sources so the view script has data to plot.
cone = Cone()
Show(cone, view)
sphere = Sphere()
Show(sphere, view)
Render()

# Locate the baseline image directory from the -B command-line argument.
try:
    baselineIndex = sys.argv.index('-B')+1
    baselinePath = sys.argv[baselineIndex]
except:
    print "Could not get baseline directory. Test failed."
# NOTE(review): when -B is missing, execution continues past the except and
# the next line raises NameError on baselinePath -- a sys.exit() after the
# message was presumably intended.
baseline_file = os.path.join(baselinePath, "TestPythonViewMatplotlibScript.png")

import vtk.test.Testing
vtk.test.Testing.VTK_TEMP_DIR = vtk.util.misc.vtkGetTempDir()
vtk.test.Testing.compareImage(view.GetRenderWindow(), baseline_file, threshold=25)
vtk.test.Testing.interact()

# Clean up proxies created by this test.
Delete(cone)
del cone
Delete(sphere)
del sphere
| HopeFOAM/HopeFOAM | ThirdParty-0.1/ParaView-5.0.1/Applications/ParaView/Testing/Python/TestPythonViewMatplotlibScript.py | Python | gpl-3.0 | 1,798 | [
"ParaView",
"VTK"
] | 895fb9b8631a1d1a2d9d1c2173c667ec80bd45e5873cefb5409350bf7c39ce9f |
""" Base Storage Class provides the base interface for all storage plug-ins
exists()
These are the methods for manipulating files:
isFile()
getFile()
putFile()
removeFile()
getFileMetadata()
getFileSize()
prestageFile()
getTransportURL()
These are the methods for manipulating directories:
isDirectory()
getDirectory()
putDirectory()
createDirectory()
removeDirectory()
listDirectory()
getDirectoryMetadata()
getDirectorySize()
These are the methods for manipulating the client:
changeDirectory()
getCurrentDirectory()
getName()
getParameters()
getCurrentURL()
These are the methods for getting information about the Storage:
getOccupancy()
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import errno
import json
import os
import shutil
import tempfile
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.Pfn import pfnparse, pfnunparse
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC.Resources.Storage.Utilities import checkArgumentFormat
class StorageBase(object):
    """
    .. class:: StorageBase

    Base class for all storage plug-ins.  Most file/directory operations
    are stubs returning S_ERROR and must be overridden by concrete
    plug-ins; the URL/path bookkeeping implemented here is shared.
    """

    # Parameters every protocol description carries (copied from the CS).
    PROTOCOL_PARAMETERS = ["Protocol", "Host", "Path", "Port", "SpaceToken", "WSUrl"]
    # Options to be prepended in the URL
    # keys are the name of the parameters in the CS
    # values are the name of the options as they appear in the URL
    DYNAMIC_OPTIONS = {}

    def __init__(self, name, parameterDict):
        self.name = name
        self.pluginName = ''
        self.protocolParameters = {}

        self.__updateParameters(parameterDict)

        # Keep the list of all parameters passed for constructions
        # Taken from the CS
        # In a further major release, this could be merged together
        # with protocolParameters. There is no reason for it to
        # be so strict about the possible content.
        self._allProtocolParameters = parameterDict

        # Input/output protocol lists default to the plug-in's own protocol
        # unless the subclass declares _INPUT_PROTOCOLS/_OUTPUT_PROTOCOLS.
        if hasattr(self, '_INPUT_PROTOCOLS'):
            self.protocolParameters['InputProtocols'] = getattr(self, '_INPUT_PROTOCOLS')
        else:
            self.protocolParameters['InputProtocols'] = [self.protocolParameters['Protocol'], 'file']

        if hasattr(self, '_OUTPUT_PROTOCOLS'):
            self.protocolParameters['OutputProtocols'] = getattr(self, '_OUTPUT_PROTOCOLS')
        else:
            self.protocolParameters['OutputProtocols'] = [self.protocolParameters['Protocol']]

        self.basePath = parameterDict['Path']
        self.cwd = self.basePath
        self.se = None
        self.isok = True

        # use True for backward compatibility
        self.srmSpecificParse = True

    def setStorageElement(self, se):
        """ Attach the owning StorageElement (used e.g. for VO checks). """
        self.se = se

    def setParameters(self, parameterDict):
        """ Set standard parameters, method can be overriden in subclasses
        to process specific parameters
        """
        self.__updateParameters(parameterDict)

    def __updateParameters(self, parameterDict):
        """ setParameters implementation method
        """
        for item in self.PROTOCOL_PARAMETERS:
            self.protocolParameters[item] = parameterDict.get(item, '')

    def getParameters(self):
        """ Get the parameters with which the storage was instantiated
        """
        parameterDict = dict(self.protocolParameters)
        parameterDict["StorageName"] = self.name
        parameterDict["PluginName"] = self.pluginName
        parameterDict['URLBase'] = self.getURLBase().get('Value', '')
        parameterDict['Endpoint'] = self.getEndpoint().get('Value', '')
        return parameterDict

    def exists(self, *parms, **kws):
        """Check if the given path exists
        """
        return S_ERROR("Storage.exists: implement me!")

    #############################################################
    #
    # These are the methods for file manipulation
    #

    def isFile(self, *parms, **kws):
        """Check if the given path exists and it is a file
        """
        return S_ERROR("Storage.isFile: implement me!")

    def getFile(self, *parms, **kws):
        """Get a local copy of the file specified by its path
        """
        return S_ERROR("Storage.getFile: implement me!")

    def putFile(self, *parms, **kws):
        """Put a copy of the local file to the current directory on the
        physical storage
        """
        return S_ERROR("Storage.putFile: implement me!")

    def removeFile(self, *parms, **kws):
        """Remove physically the file specified by its path
        """
        return S_ERROR("Storage.removeFile: implement me!")

    def getFileMetadata(self, *parms, **kws):
        """ Get metadata associated to the file
        """
        return S_ERROR("Storage.getFileMetadata: implement me!")

    def getFileSize(self, *parms, **kws):
        """Get the physical size of the given file
        """
        return S_ERROR("Storage.getFileSize: implement me!")

    def prestageFile(self, *parms, **kws):
        """ Issue prestage request for file
        """
        return S_ERROR("Storage.prestageFile: implement me!")

    def prestageFileStatus(self, *parms, **kws):
        """ Obtain the status of the prestage request
        """
        return S_ERROR("Storage.prestageFileStatus: implement me!")

    def pinFile(self, *parms, **kws):
        """ Pin the file on the destination storage element
        """
        return S_ERROR("Storage.pinFile: implement me!")

    def releaseFile(self, *parms, **kws):
        """ Release the file on the destination storage element
        """
        return S_ERROR("Storage.releaseFile: implement me!")

    #############################################################
    #
    # These are the methods for directory manipulation
    #

    def isDirectory(self, *parms, **kws):
        """Check if the given path exists and it is a directory
        """
        return S_ERROR("Storage.isDirectory: implement me!")

    def getDirectory(self, *parms, **kws):
        """Get locally a directory from the physical storage together with all its
        files and subdirectories.
        """
        return S_ERROR("Storage.getDirectory: implement me!")

    def putDirectory(self, *parms, **kws):
        """Put a local directory to the physical storage together with all its
        files and subdirectories.
        """
        return S_ERROR("Storage.putDirectory: implement me!")

    def createDirectory(self, *parms, **kws):
        """ Make a new directory on the physical storage
        """
        return S_ERROR("Storage.createDirectory: implement me!")

    def removeDirectory(self, *parms, **kws):
        """Remove a directory on the physical storage together with all its files and
        subdirectories.
        """
        return S_ERROR("Storage.removeDirectory: implement me!")

    def listDirectory(self, *parms, **kws):
        """ List the supplied path
        """
        return S_ERROR("Storage.listDirectory: implement me!")

    def getDirectoryMetadata(self, *parms, **kws):
        """ Get the metadata for the directory
        """
        return S_ERROR("Storage.getDirectoryMetadata: implement me!")

    def getDirectorySize(self, *parms, **kws):
        """ Get the size of the directory on the storage
        """
        return S_ERROR("Storage.getDirectorySize: implement me!")

    #############################################################
    #
    # These are the methods for manipulating the client
    #

    def isOK(self):
        return self.isok

    def resetCurrentDirectory(self):
        """ Reset the working directory to the base dir
        """
        self.cwd = self.basePath

    def changeDirectory(self, directory):
        """ Change the directory to the supplied directory
        """
        if directory.startswith('/'):
            self.cwd = "%s/%s" % (self.basePath, directory)
        else:
            self.cwd = '%s/%s' % (self.cwd, directory)

    def getCurrentDirectory(self):
        """ Get the current directory
        """
        return self.cwd

    def getCurrentURL(self, fileName):
        """ Obtain the current file URL from the current working directory and the filename

        :param self: self reference
        :param str fileName: path on storage
        """
        urlDict = dict(self.protocolParameters)
        if not fileName.startswith('/'):
            # Relative path is given
            urlDict['Path'] = self.cwd
        result = pfnunparse(urlDict, srmSpecific=self.srmSpecificParse)
        if not result['OK']:
            return result
        cwdUrl = result['Value']
        fullUrl = '%s%s' % (cwdUrl, fileName)
        return S_OK(fullUrl)

    def getName(self):
        """ The name with which the storage was instantiated
        """
        return self.name

    def getURLBase(self, withWSUrl=False):
        """ This will get the URL base. This is then appended with the LFN in DIRAC convention.

        :param self: self reference
        :param bool withWSUrl: flag to include Web Service part of the url
        :returns: URL
        """
        urlDict = dict(self.protocolParameters)
        if not withWSUrl:
            urlDict['WSUrl'] = ''
        return pfnunparse(urlDict, srmSpecific=self.srmSpecificParse)

    def getEndpoint(self):
        """ This will get endpoint of the storage. It basically is the same as :py:meth:`getURLBase`
        but without the basePath

        :returns: 'proto://hostname<:port>'
        """
        urlDict = dict(self.protocolParameters)
        # We remove the basePath
        urlDict['Path'] = ''
        return pfnunparse(urlDict, srmSpecific=self.srmSpecificParse)

    def isURL(self, path):
        """ Guess if the path looks like a URL

        :param self: self reference
        :param string path: input file LFN or URL
        :returns boolean: True if URL, False otherwise
        """
        if self.basePath and path.startswith(self.basePath):
            return S_OK(True)

        result = pfnparse(path, srmSpecific=self.srmSpecificParse)
        if not result['OK']:
            return result

        if len(result['Value']['Protocol']) != 0:
            return S_OK(True)

        if result['Value']['Path'].startswith(self.basePath):
            return S_OK(True)

        return S_OK(False)

    def getTransportURL(self, pathDict, protocols):
        """ Get a transport URL for a given URL. For a simple storage plugin
        it is just returning input URL if the plugin protocol is one of the
        requested protocols

        :param dict pathDict: URL obtained from File Catalog or constructed according
                        to convention
        :param protocols: a list of acceptable transport protocols in priority order
        :type protocols: `python:list`
        """
        res = checkArgumentFormat(pathDict)
        if not res['OK']:
            return res
        urls = res['Value']
        successful = {}
        failed = {}

        if protocols and not self.protocolParameters['Protocol'] in protocols:
            return S_ERROR(errno.EPROTONOSUPPORT, 'No native protocol requested')

        for url in urls:
            successful[url] = url

        resDict = {'Failed': failed, 'Successful': successful}
        return S_OK(resDict)

    def constructURLFromLFN(self, lfn, withWSUrl=False):
        """ Construct URL from the given LFN according to the VO convention for the
        primary protocol of the storage plagin

        :param str lfn: file LFN
        :param boolean withWSUrl: flag to include the web service part into the resulting URL
        :return result: result['Value'] - resulting URL
        """
        # Check the LFN convention:
        # 1. LFN must start with the VO name as the top level directory
        # 2. VO name must not appear as any subdirectory or file name
        lfnSplitList = lfn.split('/')
        voLFN = lfnSplitList[1]
        # TODO comparison to Sandbox below is for backward compatibility, should
        # be removed in the next release
        if voLFN != self.se.vo and voLFN != "SandBox" and voLFN != "Sandbox":
            return S_ERROR('LFN (%s) path must start with VO name (%s)' % (lfn, self.se.vo))

        urlDict = dict(self.protocolParameters)
        urlDict['Options'] = '&'.join("%s=%s" % (optionName, urlDict[paramName])
                                      for paramName, optionName in self.DYNAMIC_OPTIONS.items()
                                      if urlDict.get(paramName))
        if not withWSUrl:
            urlDict['WSUrl'] = ''
        urlDict['FileName'] = lfn.lstrip('/')

        return pfnunparse(urlDict, srmSpecific=self.srmSpecificParse)

    def updateURL(self, url, withWSUrl=False):
        """ Update the URL according to the current SE parameters
        """
        result = pfnparse(url, srmSpecific=self.srmSpecificParse)
        if not result['OK']:
            return result
        urlDict = result['Value']
        urlDict['Protocol'] = self.protocolParameters['Protocol']
        urlDict['Host'] = self.protocolParameters['Host']
        urlDict['Port'] = self.protocolParameters['Port']
        urlDict['WSUrl'] = ''
        if withWSUrl:
            urlDict['WSUrl'] = self.protocolParameters['WSUrl']

        return pfnunparse(urlDict, srmSpecific=self.srmSpecificParse)

    def isNativeURL(self, url):
        """ Check if URL :url: is valid for :self.protocol:

        :param self: self reference
        :param str url: URL
        """
        res = pfnparse(url, srmSpecific=self.srmSpecificParse)
        if not res['OK']:
            return res
        urlDict = res['Value']
        return S_OK(urlDict['Protocol'] == self.protocolParameters['Protocol'])

    @staticmethod
    def _addCommonMetadata(metadataDict):
        """ To make the output of getFileMetadata uniform throughout the protocols
        this returns a minimum set of metadata with default value,
        that are then complemented with the protocol specific metadata

        :param metadataDict: specific metadata of the protocol
        :returns: dictionnary with all the metadata (specific and basic)
        """
        commonMetadata = {'Checksum': '',
                          'Directory': False,
                          'File': False,
                          'Mode': 0o000,
                          'Size': 0,
                          'Accessible': True,
                          }
        commonMetadata.update(metadataDict)
        return commonMetadata

    def _isInputURL(self, url):
        """ Check if the given url can be taken as input

        :param self: self reference
        :param str url: URL
        """
        # NOTE(review): unlike the other helpers this calls pfnparse without
        # srmSpecific=self.srmSpecificParse -- confirm that is intentional.
        res = pfnparse(url)
        if not res['OK']:
            return res
        urlDict = res['Value']

        # Special case of 'file' protocol which can be just a URL
        if not urlDict['Protocol'] and 'file' in self.protocolParameters['InputProtocols']:
            return S_OK(True)

        return S_OK(urlDict['Protocol'] == self.protocolParameters['Protocol'])

    #############################################################
    #
    # These are the methods for getting information about the Storage element:
    #

    def getOccupancy(self, **kwargs):
        """ Get the StorageElement occupancy info in MB.

        This generic implementation download a json file supposed to contain the necessary info.

        :param occupancyLFN: (mandatory named argument) LFN of the json file.

        :returns: S_OK/S_ERROR dictionary. The S_OK value should contain a dictionary with Total and Free space in MB
        """
        # Build the URL for the occupancyLFN:
        occupancyLFN = kwargs['occupancyLFN']
        res = self.constructURLFromLFN(occupancyLFN)
        if not res['OK']:
            return res
        occupancyURL = res['Value']

        # BUGFIX: create the temp dir *before* entering the try block.
        # Previously tmpDirName was assigned inside the try, so a failure in
        # mkdtemp itself made the finally clause raise NameError.
        tmpDirName = tempfile.mkdtemp()
        try:
            # download the file locally
            res = returnSingleResult(self.getFile(occupancyURL, localPath=tmpDirName))
            if not res['OK']:
                return res
            filePath = os.path.join(tmpDirName, os.path.basename(occupancyLFN))
            # Read its json content
            with open(filePath, 'r') as occupancyFile:
                return S_OK(json.load(occupancyFile))
        except Exception as e:
            return S_ERROR(repr(e))
        finally:
            # Clean the temporary dir
            shutil.rmtree(tmpDirName)
| yujikato/DIRAC | src/DIRAC/Resources/Storage/StorageBase.py | Python | gpl-3.0 | 15,318 | [
"DIRAC"
] | f062b48286a8b3ac106d9f001f615e443214521ce02318793477702a671ccbd3 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Chord model and helper utilities"""
import librosa
import sklearn
import sklearn.hmm
import numpy as np
def beats_to_chords(beat_times, chord_times, chord_labels):
    r'''Assign a chord label to every beat interval.

    :parameters:
      - beat_times : np.ndarray [shape=(n, 2)]
          Beat intervals in seconds: the ``i`` th beat spans
          ``beat_times[i, 0]`` to ``beat_times[i, 1]``.
          ``beat_times[0, 0]`` should be 0 and ``beat_times[-1, 1]``
          the track duration.

      - chord_times : np.ndarray [shape=(m, 2)]
          Annotation intervals in seconds, same convention as above.

      - chord_labels : list of str [shape=(m,)]
          Annotation strings, one per row of ``chord_times``.

    :returns:
      - beat_labels : list of str [shape=(n,)]
          Chord annotation propagated to each beat.
    '''
    # Match every beat interval to its best-overlapping annotation interval.
    matches = librosa.util.match_intervals(beat_times, chord_times)
    return [chord_labels[idx] for idx in matches]
class ChordHMM(sklearn.hmm.GaussianHMM):
    '''Gaussian-HMM chord model'''

    def __init__(self,
                 chord_names,
                 covariance_type='full',
                 startprob=None,
                 transmat=None,
                 startprob_prior=None,
                 transmat_prior=None,
                 algorithm='viterbi',
                 means_prior=None,
                 means_weight=0,
                 covars_prior=0.01,
                 covars_weight=1,
                 random_state=None):
        '''Construct a new Gaussian-HMM chord model.

        :parameters:
          - chord_names : list of str
              List of the names of chords in the model

          - remaining parameters:
              See :class:`sklearn.hmm.GaussianHMM`
        '''
        # One hidden state per chord label.
        n_components = len(chord_names)

        sklearn.hmm.GaussianHMM.__init__(self,
                                         n_components=n_components,
                                         covariance_type=covariance_type,
                                         startprob=startprob,
                                         transmat=transmat,
                                         startprob_prior=startprob_prior,
                                         transmat_prior=transmat_prior,
                                         algorithm=algorithm,
                                         means_prior=means_prior,
                                         means_weight=means_weight,
                                         covars_prior=covars_prior,
                                         covars_weight=covars_weight,
                                         random_state=random_state)

        # Observation dimensionality is unknown until fit() sees data.
        self.n_features = None

        # Build the chord mappings
        self.chord_to_id_ = {}
        self.id_to_chord_ = []

        for index, value in enumerate(chord_names):
            self.chord_to_id_[value] = index
            self.id_to_chord_.append(value)

    def predict_chords(self, obs):
        '''Predict chords from an observation sequence

        :parameters:
          - obs : np.ndarray [shape=(n, d)]
              Observation sequence, e.g., transposed beat-synchronous
              chromagram.

        :returns:
          - labels : list of str [shape=(n,)]
              For each row of ``obs``, the most likely chord label.
        '''
        # decode() returns (logprob, state_sequence); map states back to names.
        return [self.id_to_chord_[s] for s in self.decode(obs)[1]]

    def fit(self, obs, labels):
        '''Supervised training.

        - obs : list, obs[i] : np.ndarray [shape=(n_beats, n_features)]
            A collection of observation sequences, e.g., ``obs[i]`` is a
            chromagram

        - labels : list-like (n_songs)
            - ``labels[i]`` is list-like, (n_beats)
            - ``labels[i][t]`` is a str
            list or array of labels for the observations
        '''
        self.n_features = obs[0].shape[1]

        sklearn.hmm.GaussianHMM._init(self, obs, 'stmc')
        stats = sklearn.hmm.GaussianHMM._initialize_sufficient_statistics(self)

        for obs_i, chords_i in zip(obs, labels):
            # Synthesize a deterministic frame log-probability
            # (one-hot posteriors from the supervised labels).
            framelogprob = np.empty((obs_i.shape[0], self.n_components))
            posteriors = np.empty_like(framelogprob)

            # NOTE(review): -np.log(EPS) is a large *positive* value, so the
            # non-labelled states get higher log-probability than the
            # labelled state (-EPS ~ 0).  Presumably np.log(EPS) was
            # intended -- confirm against the forward/backward results.
            framelogprob.fill(-np.log(sklearn.hmm.EPS))
            posteriors.fill(sklearn.hmm.EPS)

            for t, chord in enumerate(chords_i):
                state = self.chord_to_id_[chord]
                framelogprob[t, state] = -sklearn.hmm.EPS
                posteriors[t, state] = 1.0

            _, fwdlattice = self._do_forward_pass(framelogprob)
            bwdlattice = self._do_backward_pass(framelogprob)

            # Accumulate start/transition/mean/covariance statistics.
            self._accumulate_sufficient_statistics(stats,
                                                   obs_i,
                                                   framelogprob,
                                                   posteriors,
                                                   fwdlattice,
                                                   bwdlattice,
                                                   'stmc')

        self._do_mstep(stats, params='stmc')
| ebattenberg/librosa | librosa/chord.py | Python | isc | 5,446 | [
"Gaussian"
] | f5c83834476c83fdb5380b5047dfeb4ebde28e837c847a0b1ad170bd7f1570dd |
# stdlib imports
import math
# ROOT/rootpy imports
import ROOT
from rootpy.plotting import Legend, Hist, HistStack
from rootpy.plotting.shapes import Arrow
import rootpy.plotting.utils as rootpy_utils
# local imports
from .. import PLOTS_DIR, save_canvas
from .templates import RatioPlot, SimplePlot
from ..utils import fold_overflow
from .utils import label_plot, legend_params, set_colors
from . import log
def uncertainty_band(model, systematics): #, systematics_components):
    """Build the total model and its up/down systematic uncertainty bands.

    For every systematic *term* the varied model histograms are compared
    to the nominal one; the per-bin max/min envelopes are collected, the
    statistical error is treated as one more variation, and all
    deviations are summed in quadrature bin-by-bin.

    Returns (total_model, high_band, low_band) where the bands hold the
    per-bin upward/downward deviations (not absolute values).
    """
    # TODO determine systematics from model itself
    if not isinstance(model, (list, tuple)):
        model = [model]
    # add separate variations in quadrature
    # also include stat error in quadrature
    total_model = sum(model)
    var_high = []
    var_low = []
    for term, variations in systematics.items():
        if len(variations) == 2:
            high, low = variations
        elif len(variations) == 1:
            # One-sided systematic: the other side is the nominal.
            high = variations[0]
            low = 'NOMINAL'
        else:
            raise ValueError(
                "only one or two variations "
                "per term are allowed: {0}".format(variations))
        """
        if systematics_components is not None:
            if high not in systematics_components:
                log.warning("filtering out {0}".format(high))
                high = 'NOMINAL'
            if low not in systematics_components:
                log.warning("filtering out {0}".format(low))
                low = 'NOMINAL'
        """
        if high == 'NOMINAL' and low == 'NOMINAL':
            continue
        # Empty clones with the same binning as the model.
        total_high = model[0].Clone()
        total_high.Reset()
        total_low = total_high.Clone()
        total_max = total_high.Clone()
        total_min = total_high.Clone()
        for m in model:
            # Components lacking this variation contribute their nominal shape.
            if high == 'NOMINAL' or high not in m.systematics:
                total_high += m.Clone()
            else:
                #print m.title, high, list(m.systematics[high])
                total_high += m.systematics[high]
            if low == 'NOMINAL' or low not in m.systematics:
                total_low += m.Clone()
            else:
                #print m.title, low, list(m.systematics[low])
                total_low += m.systematics[low]
        if total_low.Integral() <= 0:
            log.warning("{0}_DOWN is non-positive".format(term))
        if total_high.Integral() <= 0:
            log.warning("{0}_UP is non-positive".format(term))
        # Per-bin envelope over {up, down, nominal}, so a variation can
        # contribute to either side regardless of its label.
        for i in total_high.bins_range(overflow=True):
            total_max[i].value = max(total_high[i].value, total_low[i].value, total_model[i].value)
            total_min[i].value = min(total_high[i].value, total_low[i].value, total_model[i].value)
        if total_min.Integral() <= 0:
            log.warning("{0}: lower bound is non-positive".format(term))
        if total_max.Integral() <= 0:
            log.warning("{0}: upper bound is non-positive".format(term))
        var_high.append(total_max)
        var_low.append(total_min)
        log.debug("{0} {1}".format(str(term), str(variations)))
        log.debug("{0} {1} {2}".format(
            total_max.integral(),
            total_model.integral(),
            total_min.integral()))
        #log.debug(str(systematics_components))
    # include stat error variation
    total_model_stat_high = total_model.Clone()
    total_model_stat_low = total_model.Clone()
    for i in total_model.bins_range(overflow=True):
        total_model_stat_high[i].value += total_model.yerrh(i)
        total_model_stat_low[i].value -= total_model.yerrl(i)
    var_high.append(total_model_stat_high)
    var_low.append(total_model_stat_low)
    # sum variations in quadrature bin-by-bin
    high_band = total_model.Clone()
    high_band.Reset()
    low_band = high_band.Clone()
    for i in high_band.bins_range(overflow=True):
        sum_high = math.sqrt(
            sum([(v[i].value - total_model[i].value)**2 for v in var_high]))
        sum_low = math.sqrt(
            sum([(v[i].value - total_model[i].value)**2 for v in var_low]))
        high_band[i].value = sum_high
        low_band[i].value = sum_low
    return total_model, high_band, low_band
def draw(name,
         category,
         data=None,
         data_info=None,
         model=None,
         model_colors=None,
         signal=None,
         signal_odd=None,
         signal_scale=1.,
         signal_on_top=False,
         signal_linestyles=None,
         signal_odd_linestyles=None,
         signal_colors=None,
         signal_odd_colors=None,
         show_signal_error=False,
         fill_signal=False,
         stack_signal=True,
         units=None,
         plot_label=None,
         ylabel='Events',
         blind=False,
         show_ratio=False,
         ratio_range=(0, 2),
         ratio_height=0.15,
         ratio_margin=0.06,
         output_formats=None,
         systematics=None,
         #systematics_components=None,
         integer=False,
         textsize=22,
         logy=False,
         logy_min=None,
         separate_legends=False,
         ypadding=None,
         legend_position='right',
         range=None,
         output_name=None,
         output_dir=PLOTS_DIR,
         arrow_values=None,
         overflow=True,
         show_pvalue=False,
         top_label=None,
         poisson_errors=True ):
    """Draw a data / model / signal comparison plot for one variable.

    Builds the stacked model histograms, optional (scaled) signal histograms,
    systematic + statistical uncertainty bands, the data points and an
    optional data/model ratio pad, then saves the canvas when *output_name*
    is given.

    :param name: x-axis label for the plotted variable
    :param category: object providing ``.name`` and ``.label`` used for
        labelling and for the output filename
    :param data: data histogram (optional)
    :param model, signal, signal_odd: histogram or list of histograms
    :param signal_scale: multiplicative factor applied to signal histograms
    :param blind: if True the data are not drawn; if a ``(low, high)`` tuple,
        data bins overlapping that x-window are zeroed instead.
        NOTE(review): see forced override below.
    :param range: only referenced inside a dead (string-commented) block; it
        shadows the builtin ``range`` within this function.
    :return: the figure (a RatioPlot when show_ratio else a SimplePlot)

    NOTE(review): ratio_height and ratio_margin are currently unused; the
    RatioPlot below hard-codes its geometry.
    """
    # NOTE(review): this unconditionally overrides the caller's `blind`
    # argument -- looks like a temporary blinding hack; confirm before removal.
    blind=True
    if model is None and data is None and signal is None:
        # insufficient input
        raise ValueError(
            "at least one of model, data, "
            "or signal must be specified")
    # normalize each input to a list and optionally fold the overflow bins
    # into the outermost visible bins
    if model is not None:
        if not isinstance(model, (list, tuple)):
            model = [model]
        if overflow:
            for hist in model:
                fold_overflow(hist)
    if signal is not None:
        if not isinstance(signal, (list, tuple)):
            signal = [signal]
        if overflow:
            for hist in signal:
                fold_overflow(hist)
    if signal_odd is not None:
        if not isinstance(signal_odd, (list, tuple)):
            signal_odd = [signal_odd]
        if overflow:
            for hist in signal_odd:
                fold_overflow(hist)
    if data is not None and overflow:
        fold_overflow(data)
    # objects will be populated with all histograms in the main pad
    objects = []
    legends = []
    if show_ratio and (data is None or model is None):
        # cannot show the ratio if data or model was not specified
        show_ratio=False
    if ypadding is None:
        # select good defaults for log or linear scales
        if logy:
            ypadding = (.6, .0)
        else:
            ypadding = (.35, .0)
    # NOTE(review): if only `signal` is supplied, model is None and this line
    # raises -- the template assumes data or model exists.
    template = data or model[0]
    xdivisions = min(template.nbins(), 7) if integer else 507
    if show_ratio:
        # NOTE(review): ratio_limits/ratio_margin are hard-coded here;
        # the ratio_range/ratio_height/ratio_margin parameters are ignored.
        fig = RatioPlot(
            logy=logy,
            ratio_title='Data / Model',
            ratio_limits=(0, 2),
            offset=-72,
            ratio_margin=22,
            prune_ratio_ticks=True)
    else:
        fig = SimplePlot(logy=logy)
    if signal is not None:
        if signal_scale != 1.:
            # make scaled copies so the caller's histograms stay untouched
            scaled_signal = []
            for sig in signal:
                scaled_h = sig * signal_scale
                scaled_h.SetTitle(r'%g #times %s' % (
                    signal_scale,
                    sig.GetTitle()))
                scaled_signal.append(scaled_h)
        else:
            scaled_signal = signal
        if signal_colors is not None:
            set_colors(scaled_signal, signal_colors)
        for i, s in enumerate(scaled_signal):
            s.drawstyle = 'HIST'
            if fill_signal:
                s.fillstyle = 'solid'
                s.fillcolor = s.linecolor
                s.linewidth = 0
                s.linestyle = 'solid'
                alpha = .75
            else:
                s.fillstyle = 'hollow'
                s.linewidth = 3
                if signal_linestyles is not None:
                    s.linestyle = signal_linestyles[i]
                else:
                    s.linestyle = 'solid'
                alpha = 1.
    if signal_odd is not None:
        if signal_scale != 1.:
            scaled_signal_odd = []
            for sig in signal_odd:
                scaled_h = sig * signal_scale
                scaled_h.SetTitle(r'%g #times %s' % (
                    signal_scale,
                    sig.GetTitle()))
                scaled_signal_odd.append(scaled_h)
        else:
            scaled_signal_odd = signal_odd
        if signal_colors is not None:
            set_colors(scaled_signal_odd, signal_odd_colors)
        for i, s in enumerate(scaled_signal_odd):
            s.drawstyle = 'HIST'
            # bugfix: the original assigned `sum = s.GetSum()`, shadowing the
            # builtin sum() that is called later (total_model = sum(model)),
            # which raised a TypeError whenever signal_odd was combined with
            # show_ratio or show_pvalue.  The value was never read.
            _ = s.GetSum()
            # NOTE(review): ad-hoc rescaling of the first two bins
            # (x0.7 / x1.3) looks like a leftover experiment -- confirm
            # that it is intentional.
            s.SetBinContent(1, s.GetBinContent(1) * 0.7)
            s.SetBinContent(2, s.GetBinContent(2) * 1.3)
            if fill_signal:
                s.fillstyle = 'dashed'
                s.fillcolor = s.linecolor
                s.linewidth = 0
                s.linestyle = 'dashed'
                alpha = .75
            else:
                s.fillstyle = 'hollow'
                s.linewidth = 3
                if signal_linestyles is not None:
                    s.linestyle = signal_linestyles[i]
                else:
                    s.linestyle = 'dashed'
                alpha = 1.
    if model is not None:
        if model_colors is not None:
            set_colors(model, model_colors)
        # create the model stack
        model_stack = HistStack()
        for hist in model:
            hist.SetLineWidth(0)
            hist.drawstyle = 'hist'
            model_stack.Add(hist)
        if signal is not None and signal_on_top:
            for s in scaled_signal:
                model_stack.Add(s)
        if signal_odd is not None and signal_on_top:
            # "stack" the odd signal on the model by adding every model
            # histogram into each odd-signal histogram
            signal_odd_stack = HistStack()
            for s in scaled_signal_odd:
                for hist in model:
                    s.Add(hist)
            objects.extend(scaled_signal_odd)
        objects.append(model_stack)
    if signal is not None and not signal_on_top:
        if stack_signal:
            # create the signal stack
            signal_stack = HistStack()
            for hist in scaled_signal:
                signal_stack.Add(hist)
            objects.append(signal_stack)
        else:
            objects.extend(scaled_signal)
    if signal_odd is not None and not signal_on_top:
        if stack_signal:
            # create the signal stack
            signal_odd_stack = HistStack()
            for hist in scaled_signal_odd:
                signal_odd_stack.Add(hist)
            objects.append(signal_odd_stack)
        else:
            objects.extend(scaled_signal_odd)
    if model is not None:
        # draw uncertainty band
        total_model, high_band_model, low_band_model = uncertainty_band(
            model, systematics) #, systematics_components)
        high = total_model + high_band_model
        low = total_model - low_band_model
        error_band_model = rootpy_utils.get_band(
            low, high,
            middle_hist=total_model)
        error_band_model.fillstyle = '/'
        error_band_model.fillcolor = 13
        error_band_model.linecolor = 10
        error_band_model.markersize = 0
        error_band_model.markercolor = 10
        error_band_model.drawstyle = 'e2'
        objects.append(error_band_model)
    if signal is not None and show_signal_error:
        total_signal, high_band_signal, low_band_signal = uncertainty_band(
            signal, systematics) #, systematics_components)
        high = (total_signal + high_band_signal) * signal_scale
        low = (total_signal - low_band_signal) * signal_scale
        if signal_on_top:
            high += total_model
            low += total_model
        error_band_signal = rootpy_utils.get_band(
            low, high,
            middle_hist=total_signal * signal_scale)
        error_band_signal.fillstyle = '\\'
        error_band_signal.fillcolor = 13
        error_band_signal.linecolor = 10
        error_band_signal.markersize = 0
        error_band_signal.markercolor = 10
        error_band_signal.drawstyle = 'e2'
        objects.append(error_band_signal)
    if data is not None and blind is not True:
        # create the data histogram
        if isinstance(blind, tuple):
            low, high = blind
            # zero out bins in blind region
            for bin in data.bins():
                if (low < bin.x.high <= high or low <= bin.x.low < high):
                    data[bin.idx] = (0., 0.)
        if poisson_errors:
            # convert data to TGraphAsymmErrors with Poisson errors
            data_poisson = data.poisson_errors()
            data_poisson.markersize = 1.2
            data_poisson.drawstyle = 'PZ'
            objects.append(data_poisson)
        else:
            # Gaussian errors
            data.drawstyle = 'E0'
            objects.append(data)
    # draw ratio plot
    if model is not None and show_ratio:
        fig.cd('ratio')
        total_model = sum(model)
        ratio_hist = Hist.divide(data, total_model)
        # remove bins where data is zero
        max_dev = 0
        for bin in data.bins():
            if bin.value <= 0:
                ratio_hist[bin.idx] = (-100, 0)
            else:
                ratio_value = ratio_hist[bin.idx].value
                dev = abs(ratio_value - 1)
                if dev > max_dev:
                    max_dev = dev
        # tighten the displayed ratio range when the deviations are small
        if max_dev < 0.2:
            ratio_range = (0.8, 1.2)
        elif max_dev < 0.4:
            ratio_range = (0.6, 1.4)
        ruler_high = (ratio_range[1] + 1.) / 2.
        ruler_low = (ratio_range[0] + 1.) / 2.
        ratio_hist.linecolor = 'black'
        ratio_hist.linewidth = 2
        ratio_hist.fillstyle = 'hollow'
        ratio_hist.drawstyle = 'E0'
        """
        # draw empty copy of ratio_hist first so lines will show
        ratio_hist_tmp = ratio_hist.Clone()
        ratio_hist_tmp.Reset()
        ratio_hist_tmp.Draw()
        ratio_hist_tmp.yaxis.SetLimits(*ratio_range)
        ratio_hist_tmp.yaxis.SetRangeUser(*ratio_range)
        ratio_hist_tmp.yaxis.SetTitle('Data / Model')
        ratio_hist_tmp.yaxis.SetNdivisions(4)
        # not certain why the following is needed
        ratio_hist_tmp.yaxis.SetTitleOffset(style.GetTitleYOffset())
        ratio_xrange = range or ratio_hist.bounds()
        ratio_hist_tmp.xaxis.SetLimits(*ratio_xrange)
        #ratio_hist_tmp.xaxis.SetRangeUser(*ratio_xrange)
        ratio_hist_tmp.xaxis.SetTickLength(
            ratio_hist_tmp.xaxis.GetTickLength() * 2)
        # draw ratio=1 line
        line = Line(ratio_xrange[0], 1,
                    ratio_xrange[1], 1)
        line.linestyle = 'dashed'
        line.linewidth = 2
        line.Draw()
        # draw high ratio line
        line_up = Line(ratio_xrange[0], ruler_high,
                       ratio_xrange[1], ruler_high)
        line_up.linestyle = 'dashed'
        line_up.linewidth = 2
        line_up.Draw()
        # draw low ratio line
        line_dn = Line(ratio_xrange[0], ruler_low,
                       ratio_xrange[1], ruler_low)
        line_dn.linestyle = 'dashed'
        line_dn.linewidth = 2
        line_dn.Draw()
        """
        # draw band below points on ratio plot
        ratio_hist_high = Hist.divide(
            total_model + high_band_model, total_model)
        ratio_hist_low = Hist.divide(
            total_model - low_band_model, total_model)
        fig.cd('ratio')
        error_band = rootpy_utils.get_band(
            ratio_hist_high, ratio_hist_low)
        error_band.fillstyle = '/'
        error_band.fillcolor = '#858585'
        error_band.drawstyle = 'E2'
        fig.draw('ratio', [error_band, ratio_hist], xdivisions=xdivisions)
    if separate_legends:
        fig.cd('main')
        right_legend = Legend(len(signal) + 1 if signal is not None else 1,
                              pad=fig.pad('main'),
                              **legend_params('right', textsize))
        right_legend.AddEntry(data, style='lep')
        if signal is not None:
            for s in reversed(scaled_signal):
                right_legend.AddEntry(s, style='F' if fill_signal else 'L')
        if signal_odd is not None:
            for s in reversed(scaled_signal_odd):
                right_legend.AddEntry(s, style='F' if fill_signal else 'L')
        legends.append(right_legend)
        if model is not None:
            n_entries = len(model)
            if systematics:
                n_entries += 1
            model_legend = Legend(n_entries,
                                  pad=fig.pad('main'),
                                  **legend_params('left', textsize))
            for hist in reversed(model):
                model_legend.AddEntry(hist, style='F')
            if systematics:
                model_err_band = error_band_model.Clone()
                model_err_band.linewidth = 0
                model_err_band.linecolor = 'white'
                model_err_band.fillcolor = '#858585'
                model_err_band.title = 'Uncert.'
                model_legend.AddEntry(model_err_band, style='F')
            legends.append(model_legend)
    else:
        n_entries = 1
        if signal is not None:
            n_entries += len(scaled_signal)
        if model is not None:
            n_entries += len(model)
            if systematics:
                n_entries += 1
        fig.cd('main')
        legend = Legend(
            n_entries,
            pad=fig.pad('main'),
            **legend_params(legend_position, 20))
        if data is not None:
            legend.AddEntry(data, style='lep')
        if signal is not None:
            for s in reversed(scaled_signal):
                legend.AddEntry(s, style='F' if fill_signal else 'L')
        if signal_odd is not None:
            for s in reversed(scaled_signal_odd):
                legend.AddEntry(s, style='F' if fill_signal else 'L')
        if model:
            for hist in reversed(model):
                legend.AddEntry(hist, style='F')
            model_err_band = error_band_model.Clone()
            model_err_band.linewidth = 0
            model_err_band.linecolor = 'white'
            model_err_band.fillcolor = '#858585'
            model_err_band.title = 'Uncert.'
            legend.AddEntry(model_err_band, style='F')
        legends.append(legend)
    # draw the objects
    bounds = fig.draw('main', objects, ypadding=ypadding,
                      logy_crop_value=1E-1,
                      xdivisions=xdivisions)
    xaxis, yaxis = fig.axes('main')
    base_xaxis = xaxis
    base_xaxis.range_user = template.bounds()
    base_xaxis.limits = template.bounds()
    xmin, xmax, ymin, ymax = bounds
    if show_ratio:
        base_xaxis = fig.axes('ratio')[0]
        base_xaxis.range_user = template.bounds()
        base_xaxis.limits = template.bounds()
    # draw the legends
    fig.cd('main')
    for legend in legends:
        legend.Draw()
    label_plot(fig.pad('main'), template=template,
               xaxis=base_xaxis, yaxis=yaxis,
               xlabel=name, ylabel=ylabel, units=units,
               category_label=category.label,
               extra_label=plot_label,
               extra_label_position='right' if legend_position == 'left' else 'left',
               data_info=data_info)
    if logy and logy_min is not None:
        yaxis.min = logy_min
        ymin = logy_min
    # draw arrows
    # if arrow_values is not None:
    #    arrow_top = ymin + (ymax - ymin) / 2.
    #    fig.cd('main')
    #    for value in arrow_values:
    #        arrow = Arrow(value, arrow_top, value, ymin, 0.05, '|>')
    #        arrow.SetAngle(30)
    #        arrow.SetLineWidth(2)
    #        arrow.Draw()
    if show_pvalue and data is not None and model:
        fig.cd('main')
        total_model = sum(model)
        # show p-value and chi^2
        pvalue = total_model.Chi2Test(data, 'WW')
        pvalue_label = ROOT.TLatex(
            0.2, 0.97,
            "p-value={0:.2f}".format(pvalue))
        pvalue_label.SetNDC(True)
        pvalue_label.SetTextFont(43)
        pvalue_label.SetTextSize(16)
        pvalue_label.Draw()
        chi2 = total_model.Chi2Test(data, 'WW CHI2/NDF')
        chi2_label = ROOT.TLatex(
            0.38, 0.97,
            "#chi^{{2}}/ndf={0:.2f}".format(chi2))
        chi2_label.SetNDC(True)
        chi2_label.SetTextFont(43)
        chi2_label.SetTextSize(16)
        chi2_label.Draw()
    if top_label is not None:
        fig.cd('main')
        label = ROOT.TLatex(
            fig.pad('main').GetLeftMargin() + 0.08, 0.97,
            top_label)
        label.SetNDC(True)
        label.SetTextFont(43)
        label.SetTextSize(16)
        label.Draw()
    if output_name is not None:
        # create the output filename
        filename = 'var_{0}_{1}'.format(
            category.name,
            output_name.lower().replace(' ', '_'))
        if logy:
            filename += '_logy'
        filename += '_root'
        if output_formats is None:
            output_formats = ('png',)
        # save the figure
        save_canvas(fig, output_dir, filename, formats=output_formats)
    return fig
| yukisakurai/hhana | mva/plotting/draw.py | Python | gpl-3.0 | 21,459 | [
"Gaussian"
] | 6962dd7d98e9450af38919e8dcd569a0e8151831db2bdae49a12077d72150f4f |
#!/usr/bin/env python
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# Written by Matt Chisholm and Greg Hazel
from __future__ import division
app_name = "BitTorrent"
import os
import sys
# The translation import doubles as an installation sanity check: if the
# BitTorrent package itself is missing or installed under the wrong Python
# version, explain the situation to the user instead of crashing later.
try:
    from BitTorrent.translation import _
except ImportError, e:
    if os.name == 'posix':
        # Ugly Idiot-proofing -- this should stop ALL bug reports from
        # people unable to run BitTorrent after installation on Debian
        # and RedHat based systems.
        pythonversion = sys.version[:3]
        py24 = os.path.exists('/usr/lib/python2.4/site-packages/BitTorrent/')
        py23 = os.path.exists('/usr/lib/python2.3/site-packages/BitTorrent/')
        if not py24 and not py23:
            print "There is no BitTorrent package installed on this system."
        elif py24 and py23:
            print """
There is more than one BitTorrent package installed on this system,
at least one under Python 2.3 and at least one under Python 2.4."""
        else:
            print """
A BitTorrent package for the wrong version of Python is installed on this
system. The default version of Python on this system is %s. However, the
BitTorrent package is installed under Python %s.""" % (pythonversion, (py24 and '2.4' or '2.3'))
        print """
To install BitTorrent correctly you must first:
 * Remove *all* versions of BitTorrent currently installed.
Then, you have two options:
 * Download and install the .deb or .rpm package for
   BitTorrent & Python %s
 * Download the source .tar.gz and follow the directions for
   installing under Python %s
Visit http://www.bittorrent.com/ to download BitTorrent.
""" % (pythonversion, pythonversion)
        sys.exit(1)
    else:
        # non-posix: nothing helpful to say, re-raise the original ImportError
        raise
import time
import BTL.stackthreading as threading
import logging
# NOTE(review): debug is forced on here (it disables UPnP and the
# one-connection-per-ip limit below) -- confirm this is intentional
# before shipping.
debug=True
#debug=True
from BTL import atexit_threads
assert sys.version_info >= (2, 3), _("Install Python %s or greater") % '2.3'
from BitTorrent import BTFailure, inject_main_logfile
from BitTorrent import configfile
from BTL.defer import DeferredEvent, wrap_task
from BitTorrent.defaultargs import get_defaults
from BitTorrent.IPC import ipc_interface
from BitTorrent.prefs import Preferences
from BitTorrent.RawServer_twisted import RawServer
if os.name == 'nt':
    from BitTorrent.platform import win_version_num
from BitTorrent import zurllib
# base option set, extended with two persisted UI bookkeeping values
defaults = get_defaults('bittorrent')
defaults.extend((('donated' , '', ''), # the version that the user last donated for
                 ('notified', '', ''), # the version that the user was last notified of
                 ))
# map option name -> default value; the loop variables are deleted to keep
# the module namespace clean
defconfig = dict([(name, value) for (name, value, doc) in defaults])
del name, value, doc
inject_main_logfile()
global_logger = logging.getLogger('')
rawserver = None
if __name__ == '__main__':
    psyco = None
    # Optional psyco JIT acceleration.  The deliberately-misspelled module
    # name (psyco_BROKEN) makes this import always fail, so the whole branch
    # is effectively disabled and psyco stays None.
    try:
        # 95, 98, and ME seem to have problems with psyco
        # so only import it on NT and up
        # and only if we're not using python 2.5, becuase it's broken
        if (os.name == 'nt' and win_version_num >= (2, 4, 0) and
            sys.version_info < (2, 5)):
            import psyco_BROKEN
            import traceback
            psyco.cannotcompile(traceback.print_stack)
            psyco.cannotcompile(traceback.format_stack)
            psyco.cannotcompile(traceback.extract_stack)
            #psyco.full(memory=10)
            psyco.bind(RawServer.listen_forever)
            from BTL import sparse_set
            psyco.bind(sparse_set.SparseSet)
            from BitTorrent import PiecePicker
            psyco.bind(PiecePicker.PieceBuckets)
            psyco.bind(PiecePicker.PiecePicker)
            from BitTorrent import PieceSetBuckets
            psyco.bind(PieceSetBuckets.PieceSetBuckets)
            psyco.bind(PieceSetBuckets.SortedPieceBuckets)
            psyco.profile(memorymax=30000) # that's 30MB for the whole process
            #psyco.log()
            # see below for more
    except ImportError:
        pass
    zurllib.add_unsafe_thread()
    # parse config file + command-line arguments into (config, args)
    try:
        config, args = configfile.parse_configuration_and_args(defaults,
                                        'bittorrent', sys.argv[1:], 0, None)
        if debug:
            config['upnp'] = False
            config['one_connection_per_ip'] = False
    except BTFailure, e:
        print unicode(e.args[0])
        sys.exit(1)
    config = Preferences().initWithDict(config)
    # bug set in DownloadInfoFrame
    rawserver = RawServer(config)
    zurllib.set_zurllib_rawserver(rawserver)
    rawserver.install_sigint_handler()
    ipc = ipc_interface(rawserver, config, "controlsocket")
    # make sure we clean up the ipc when everything is done
    atexit_threads.register_verbose(ipc.stop)
    # this could be on the ipc object
    ipc_master = True
    # If another BitTorrent instance already owns the control socket,
    # forward the requested action to it over IPC and exit.
    try:
        if not config['use_factory_defaults']:
            ipc.create()
    except BTFailure, e:
        ipc_master = False
        try:
            ipc.send_command('no-op')
            if config['publish']:
                assert len(args) == 1
                ipc.send_command('publish_torrent', args[0], config['publish'])
                sys.exit(0)
            elif args:
                for arg in args:
                    ipc.send_command('start_torrent', arg)
                sys.exit(0)
            ipc.send_command('show_error', _("%s already running")%app_name)
        except BTFailure:
            global_logger.error((_("Failed to communicate with another %s process "
                                   "but one seems to be running.") % app_name) +
                                (_(" Closing all %s windows may fix the problem.")
                                 % app_name))
        sys.exit(1)
from BitTorrent.MultiTorrent import MultiTorrent
from BTL.ThreadProxy import ThreadProxy
from BitTorrent.TorrentButler import DownloadTorrentButler, SeedTorrentButler
from BitTorrent.AutoUpdateButler import AutoUpdateButler
from BitTorrent.GUI_wx.DownloadManager import MainLoop
from BitTorrent.GUI_wx import gui_wrap
def gmtime():
    """Return the current UTC time as a Unix-style timestamp (seconds)."""
    utc_struct = time.gmtime()
    return time.mktime(utc_struct)
if __name__ == '__main__':
    #import memleak_detection
    #memleak_detection.begin_sampling('memleak_sample.log')
    # psyco is always None here (see the psyco_BROKEN import above), so this
    # bind never runs in practice
    if psyco:
        psyco.bind(MainLoop.run)
    config['start_time'] = gmtime()
    mainloop = MainLoop(config)
    def init_core(mainloop):
        """Start the torrent core on a worker thread and attach it to the GUI.

        Runs the rawserver event loop until core_doneflag fires; on any
        startup failure it logs, tears the GUI down and re-raises.
        """
        core_doneflag = DeferredEvent()
        # forward core log records into the GUI (marshalled via gui_wrap)
        class UILogger(logging.Handler):
            def emit(self, record):
                msg = "[%s] %s" % (record.name, self.format(record))
                gui_wrap(mainloop.do_log, record.levelno, msg)
        logging.getLogger('').addHandler(UILogger())
        try:
            multitorrent = MultiTorrent(config, rawserver, config['data_dir'],
                                        listen_fail_ok=True,
                                        init_torrents=False)
            # Butlers
            multitorrent.add_policy(DownloadTorrentButler(multitorrent))
            multitorrent.add_policy(SeedTorrentButler(multitorrent))
            auto_update_butler = AutoUpdateButler(multitorrent, rawserver,
                                                 test_new_version=config['new_version'],
                                                 test_current_version=config['current_version'])
            multitorrent.add_auto_update_policy(auto_update_butler)
            # attach to the UI
            tpm = ThreadProxy(multitorrent,
                              gui_wrap,
                              wrap_task(rawserver.external_add_task))
            mainloop.attach_multitorrent(tpm, core_doneflag)
            ipc.start(mainloop.external_command)
            #rawserver.associate_thread()
            # register shutdown action
            def shutdown():
                # stop the rawserver once the core has finished shutting down
                df = multitorrent.shutdown()
                stop_rawserver = lambda r : rawserver.stop()
                df.addCallbacks(stop_rawserver, stop_rawserver)
            rawserver.add_task(0, core_doneflag.addCallback,
                               lambda r: rawserver.external_add_task(0, shutdown))
            rawserver.listen_forever()
        except:
            # oops, we failed.
            # one message for the log w/ exception info
            global_logger.exception("BitTorrent core initialization failed!")
            # one message for the user w/o info
            global_logger.critical("BitTorrent core initialization failed!")
            core_doneflag.set()
            rawserver.stop()
            # best-effort GUI teardown; the GUI may already be gone
            try:
                gui_wrap(mainloop.ExitMainLoop)
            except:
                pass
            try:
                gui_wrap(mainloop.doneflag.set)
            except:
                pass
            raise
    threading.Thread(target=init_core, args=(mainloop,)).start()
    mainloop.append_external_torrents(*args)
    ## # cause memleak stuff to be imported
    ## import code
    ## import sizer
    ##
    ## from sizer import annotate
    ## from sizer import formatting
    ## from sizer import operations
    ## from sizer import rules
    ## from sizer import scanner
    ## from sizer import set
    ## from sizer import sizes
    ## from sizer import wrapper
    # run the GUI on the main thread; Ctrl-C is handled inside MainLoop
    try:
        mainloop.run()
    except KeyboardInterrupt:
        # the gui main loop is closed in MainLoop
        pass
| rabimba/p2pScrapper | BitTorrent-5.2.2/bittorrent.py | Python | mit | 9,860 | [
"VisIt"
] | fd5c4ece0071037bc2c5a6723eeb763b17a2869b5d80f2396cab0c6fadb1b035 |
"""Sutils: simulation unilities, some of them are molecule-specific (implemented as methods in subclasses)
"""
from config import *
import random, mdtraj as md
from coordinates_data_files_list import *
from sklearn.cluster import KMeans
from helper_func import *
from functools import reduce
class Sutils(object):
def __init__(self):
return
@staticmethod
def get_num_of_non_overlapping_hyperspheres_that_filled_explored_phase_space(
pdb_file_list, atom_selection, radius, step_interval=1, shuffle_list=True,
distance_metric='RMSD'):
"""
This functions is used to count how many non-overlapping hyperspheres are needed to fill the explored phase
space, to estimate volumn of explored region
:param atom_selection: atom selection statement for MDAnalysis
:param radius: radius of hyperspheres
:param distance_metric: distance metric of two frames
:return: number of hyperspheres
"""
if shuffle_list: random.shuffle(pdb_file_list)
index = 0
positions_list = []
for sample_file in pdb_file_list:
sample = Universe(sample_file)
sample_atom_selection = sample.select_atoms(atom_selection)
frame_index_list = list(range(sample.trajectory.n_frames))
if shuffle_list: random.shuffle(frame_index_list)
for item_index in frame_index_list:
sample.trajectory[item_index]
if index % step_interval == 0:
current_positions = sample_atom_selection.positions
distances_to_previous_frames = np.array(
[Sutils.get_RMSD_after_alignment(item, current_positions)
for item in positions_list])
if len(distances_to_previous_frames) == 0 or np.all(distances_to_previous_frames > radius):
# need to include a new hypershere
positions_list.append(current_positions)
index += 1
return len(positions_list), np.array(positions_list)
@staticmethod
def mark_and_modify_pdb_for_calculating_RMSD_for_plumed(pdb_file, out_pdb,
atom_index_list, start_idx, item_positions=None):
"""
:param pdb_file: input pdb
:param out_pdb: output reference pdb
:param atom_index_list: index list used to calculate RMSD
:param item_positions: reference positions of selected atoms, set it None if we do not want to modify positions
"""
indices = np.array(atom_index_list) - start_idx # explicitly specify start_idx, to avoid confusion
temp_sample = Universe(pdb_file)
temp_atoms = temp_sample.select_atoms('all')
if not item_positions is None:
item_positions = item_positions.reshape((item_positions.shape[0] // 3, 3))
temp_positions = temp_atoms.positions
temp_positions[indices] = item_positions
temp_atoms.positions = temp_positions
temp_bfactors = np.zeros(len(temp_atoms))
temp_bfactors[indices] = 1
temp_atoms.tempfactors = temp_bfactors
temp_atoms.occupancies = temp_bfactors
temp_atoms.write(out_pdb)
return out_pdb
@staticmethod
def get_plumed_script_that_generate_a_segment_connecting_two_configs(
pdb_1, pdb_2, atom_selection_statement, num_steps, force_constant):
"""
This function uses targeted MD to generate a segment connecting two configurations
:param pdb_1, pdb_2: two ends of segment
:param atom_selection_statement: atoms for calculating RMSD in targeted MD
"""
atom_list = get_index_list_with_selection_statement(pdb_1, atom_selection_statement)
ref_pdb = pdb_2.replace('.pdb', '_ref.pdb')
Sutils.mark_and_modify_pdb_for_calculating_RMSD_for_plumed(pdb_2, ref_pdb, atom_list, None)
rmsd_diff = Sutils.metric_RMSD_of_atoms([pdb_1], ref_file=ref_pdb,
atom_selection_statement=atom_selection_statement, step_interval=100)[0] # TODO: check units
plumed_script = """rmsd: RMSD REFERENCE=%s TYPE=OPTIMAL
restraint: MOVINGRESTRAINT ARG=rmsd AT0=%f STEP0=0 KAPPA0=%f AT1=0 STEP1=%d KAPPA1=%f
PRINT STRIDE=500 ARG=* FILE=COLVAR
""" % (ref_pdb, rmsd_diff, force_constant, num_steps, force_constant)
return plumed_script
@staticmethod
def prepare_output_Cartesian_coor_with_multiple_ref_structures(
folder_list,
alignment_coor_file_suffix_list,
scaling_factor
):
my_coor_data_obj = coordinates_data_files_list(list_of_dir_of_coor_data_files=folder_list)
coor_data_obj_input = my_coor_data_obj.create_sub_coor_data_files_list_using_filter_conditional(
lambda x: not 'aligned' in x)
assert (len(alignment_coor_file_suffix_list) == CONFIG_55)
coor_data_obj_output_list = [my_coor_data_obj.create_sub_coor_data_files_list_using_filter_conditional(
lambda x: item in x) for item in alignment_coor_file_suffix_list]
for item in range(len(alignment_coor_file_suffix_list)):
for _1, _2 in zip(coor_data_obj_input.get_list_of_coor_data_files(),
coor_data_obj_output_list[item].get_list_of_coor_data_files()):
assert (_2 == _1.replace('_coordinates.npy', alignment_coor_file_suffix_list[item])), (_2, _1)
output_data_set = np.concatenate([Sutils.remove_translation(item.get_coor_data(scaling_factor))
for item in coor_data_obj_output_list] , axis=1)
return output_data_set
@staticmethod
def select_representative_points(data_set, output_data_set):
# clustering, pick representative points for training, two purposes:
# 1. avoid that training results are too good for densely-sampled regions, but bad for others.
# 2. reduce computation cost
print ("selecting representative points...")
kmeans = KMeans(init='k-means++', n_clusters=min(CONFIG_59, output_data_set.shape[0]), n_init=10)
kmeans.fit(output_data_set)
indices_of_representative_points = np.array([np.where(kmeans.labels_ == ii)[0][0]
for ii in range(kmeans.n_clusters)])
return data_set[indices_of_representative_points], output_data_set[indices_of_representative_points]
@staticmethod
def create_subclass_instance_using_name(name):
return {'Alanine_dipeptide': Alanine_dipeptide(), 'Trp_cage': Trp_cage()}[name]
    @staticmethod
    def load_object_from_pkl_file(file_path):
        """Load a pickled object from *file_path* (thin delegation to Helper_func)."""
        return Helper_func.load_object_from_pkl_file(file_path)
@staticmethod
def write_some_frames_into_a_new_file_based_on_index_list_for_pdb_file_list(list_of_files, index_list, new_pdb_file_name):
print("note that order may not be preserved!")
remaining_index_list = index_list
for _1 in list_of_files:
remaining_index_list = Sutils.write_some_frames_into_a_new_file_based_on_index_list(_1, remaining_index_list, new_pdb_file_name)
# check number of frames to be correct
with open(new_pdb_file_name, 'r') as f_in:
content = f_in.read().strip().split('MODEL')[1:]
assert (len(content) == len(index_list)), (len(content), len(index_list))
return
@staticmethod
def write_some_frames_into_a_new_file_based_on_index_list(pdb_file_name, index_list, new_pdb_file_name=None,
overwrite=False):
if os.stat(pdb_file_name).st_size > 1000000000: raise Exception('file may be too large, try to use other tools')
if new_pdb_file_name is None:
new_pdb_file_name = pdb_file_name.strip().split('.pdb')[0] + '_someframes.pdb'
with open(pdb_file_name, 'r') as f_in:
content = [item for item in f_in.readlines() if (not 'REMARK' in item) and (not 'END\n' in item)]
content = ''.join(content)
content = content.split('MODEL')[1:] # remove header
num_of_frames_in_current_file = len(content)
index_for_this_file = [_2 for _2 in index_list if _2 < num_of_frames_in_current_file]
remaining_index_list = [_2 - num_of_frames_in_current_file for _2 in index_list if
_2 >= num_of_frames_in_current_file]
content_to_write = [content[_2] for _2 in index_for_this_file]
write_flag = 'w' if overwrite else 'a'
with open(new_pdb_file_name, write_flag) as f_out:
for item in content_to_write:
f_out.write("MODEL")
f_out.write(item)
return remaining_index_list
@staticmethod
def concat_first_frame_in_all_pdb_files(list_of_pdb_files, new_pdb_file_name):
for item in list_of_pdb_files:
Sutils.write_some_frames_into_a_new_file_based_on_index_list(item, [0], new_pdb_file_name)
return
@staticmethod
def write_some_frames_into_a_new_file(pdb_file_name, start_index, end_index, step_interval = 1, # start_index included, end_index not included
new_pdb_file_name=None, method=1):
print('writing frames of %s: [%d:%d:%d]...' % (pdb_file_name, start_index, end_index, step_interval))
if new_pdb_file_name is None:
new_pdb_file_name = pdb_file_name.strip().split('.pdb')[0] + '_frame_%d_%d_%d.pdb' % (start_index, end_index, step_interval)
if method == 0:
if os.stat(pdb_file_name).st_size > 1000000000: raise Exception('file may be too large, try to use other tools')
with open(pdb_file_name, 'r') as f_in:
content = [item for item in f_in.readlines() if (not 'REMARK' in item) and (not 'END\n' in item)]
content = ''.join(content)
content = content.split('MODEL')[1:] # remove header
if end_index == 0:
content_to_write = content[start_index::step_interval] # for selecting last few frames
else:
content_to_write = content[start_index:end_index:step_interval]
with open(new_pdb_file_name, 'w') as f_out:
for item in content_to_write:
f_out.write("MODEL")
f_out.write(item)
elif method == 1:
index = -1
with open(pdb_file_name, 'r') as f_in, open(new_pdb_file_name, 'w') as f_out:
for item in f_in:
if 'MODEL' in item: index += 1
if (not 'REMARK' in item) and (not 'END\n' in item) and (index % step_interval == 0) \
and (
(end_index != 0 and (start_index <= index < end_index))
or (end_index == 0 and index >= start_index)):
f_out.write(item)
return
@staticmethod
def data_augmentation(data_set, output_data_set, num_of_copies, is_output_reconstructed_Cartesian=True):
"""
assume that center of mass motion of data_set and output_data_set should be removed.
"""
assert (Sutils.check_center_of_mass_is_at_origin(data_set))
if is_output_reconstructed_Cartesian:
assert (Sutils.check_center_of_mass_is_at_origin(output_data_set))
num_of_data = data_set.shape[0]
output_data_set = np.array(output_data_set.tolist() * num_of_copies)
num_atoms = len(data_set[0]) // 3
data_set = data_set.reshape((num_of_data, num_atoms, 3))
temp_data_set = []
for _ in range(num_of_copies):
temp_data_set.append([Sutils.rotating_randomly_around_center_of_mass(x) for x in data_set])
data_set = np.concatenate(temp_data_set, axis=0)
data_set = data_set.reshape((num_of_copies * num_of_data, num_atoms * 3))
return data_set, output_data_set
    @staticmethod
    def check_center_of_mass_is_at_origin(result):
        """Delegate to Helper_func: check whether each configuration's center of mass sits at the origin."""
        return Helper_func.check_center_of_mass_is_at_origin(result=result)
    @staticmethod
    def remove_translation(coords): # remove the translational degree of freedom
        """Delegate to Helper_func: remove the translational degree of freedom from *coords*."""
        return Helper_func.remove_translation(coords=coords)
@staticmethod
def rotating_randomly_around_center_of_mass(coords):
axis_vector = np.random.uniform(0, 1, 3)
angle = np.random.uniform(0, 2 * np.pi)
return Sutils.rotating_around_center_of_mass(coords, axis_vector, angle)
@staticmethod
def rotating_around_center_of_mass(coords, axis_vector, angle):
center_of_mass = coords.mean(axis=0)
return Sutils.rotating_coordinates(coords, center_of_mass, axis_vector, angle)
@staticmethod
def rotating_coordinates(coords, fixed_coord, axis_vector, angle):
indices_atoms = list(range(len(coords)))
return Sutils.rotating_group_of_atoms(coords, indices_atoms, fixed_coord, axis_vector, angle)
@staticmethod
def rotating_group_of_atoms(coords, indices_atoms, fixed_coord, axis_vector, angle):
"""
:param coords: coordinates of all atoms
:param indices_atoms: indices of atoms to rotate
:param fixed_coord: coordinates of fixed point
:param axis_vector: rotation axis
:param angle: rotation angle
:return: coordinates of all atoms after rotation
"""
result = copy.deepcopy(coords) # avoid modifying original input
temp_coords = coords[indices_atoms] - fixed_coord # coordinates for rotation
temp_coords = np.array(temp_coords)
cos_value = np.cos(angle); sin_value = np.sin(angle)
axis_vector_length = np.sqrt(np.sum(np.array(axis_vector) ** 2))
ux = axis_vector[0] / axis_vector_length; uy = axis_vector[1] / axis_vector_length; uz = axis_vector[2] / axis_vector_length
rotation_matrix = np.array([[cos_value + ux ** 2 * (1 - cos_value),
ux * uy * (1 - cos_value) - uz * sin_value,
ux * uz * (1 - cos_value) + uy * sin_value],
[ux * uy * (1 - cos_value) + uz * sin_value,
cos_value + uy ** 2 * (1 - cos_value),
uy * uz * (1 - cos_value) - ux * sin_value],
[ux * uz * (1 - cos_value) - uy * sin_value,
uy * uz * (1 - cos_value) + ux * sin_value,
cos_value + uz ** 2 * (1 - cos_value)]])
result[indices_atoms] = np.dot(temp_coords, rotation_matrix) + fixed_coord
return result
    @staticmethod
    def _generate_coordinates_from_pdb_files(atom_index, file_path=CONFIG_12, format='npy'):
        """For every .pdb/.dcd file found under file_path, extract Cartesian
        coordinates of the atoms in atom_index and save them next to the input
        file as '<name>_coordinates.<format>'.

        :param atom_index: 1-based atom indices (converted to 0-based below)
        :param file_path: directory searched recursively via the `find` command
        :param format: 'npy' or 'txt'
        :return: list of output file paths (both regenerated and already-existing ones)
        """
        atom_index = [int(_1) for _1 in atom_index]
        atom_index = np.array(atom_index) - 1 # note that atom index starts from 1
        filenames = subprocess.check_output([
            'find', file_path, '-name', '*.pdb', '-o', '-name', '*.dcd']).decode("utf-8").strip().split('\n')
        output_file_list = []
        for input_file in filenames:
            output_file = input_file[:-4] + '_coordinates.' + format
            output_file_list += [output_file]
            if os.path.exists(output_file) and os.path.getmtime(input_file) < os.path.getmtime(output_file): # check modified time
                print("coordinate file already exists: %s (remove previous one if needed)" % output_file)
            else:
                print('generating coordinates of ' + input_file)
                mdxyz = md.load(input_file, top=CONFIG_62[0]).xyz
                # flatten per-frame (num_atoms, 3) coordinates into one row per frame
                mdxyz = mdxyz[:, atom_index, :].reshape(mdxyz.shape[0], len(atom_index) * 3)
                if format == 'txt': np.savetxt(output_file, mdxyz)
                elif format == 'npy': np.save(output_file, mdxyz)
        print("Done generating coordinates files\n")
        return output_file_list
    @staticmethod
    def _get_plumed_script_with_pairwise_dis_as_input(index_atoms, scaling_factor):
        """Build a PLUMED script exposing scaled pairwise distances between index_atoms as inputs."""
        return Plumed_helper.get_pairwise_dis(index_atoms, scaling_factor=scaling_factor,
                                              unit_scaling=1.0, out_var_prefix='l_0_out_')
    @staticmethod
    def remove_water_mol_and_Cl_from_pdb_file(folder_for_pdb = CONFIG_12, preserve_original_file=True):
        """
        This is used to remove water molecule from pdb file, purposes:
        - save storage space
        - reduce processing time of pdb file

        :param folder_for_pdb: directory searched recursively for .pdb files
        :param preserve_original_file: if False, the stripped '_rm_tmp.pdb' file
            replaces the original (only when at least one line was removed);
            otherwise the stripped copy is kept alongside the original
        """
        filenames = subprocess.check_output(['find', folder_for_pdb, '-name', '*.pdb']).decode("utf-8").split('\n')[:-1]
        for item in filenames:
            print('removing water molecules from pdb file: ' + item)
            output_file = item[:-4] + '_rm_tmp.pdb'
            is_line_removed_flag = False
            with open(item, 'r') as f_in, open(output_file, 'w') as f_out:
                for line in f_in:
                    # NOTE(review): plain substring tests -- any line containing
                    # 'HOH', 'CL', 'NA', 'SPC' or 'pseu' anywhere is dropped, which
                    # could also match unrelated residue/atom names (e.g. 'CLA');
                    # confirm this is safe for the PDB files used here
                    if not 'HOH' in line and not 'CL' in line and not "NA" in line and not 'SPC' in line and not 'pseu' in line:
                        f_out.write(line)
                    else: is_line_removed_flag = True
            if not preserve_original_file:
                if is_line_removed_flag:
                    subprocess.check_output(['mv', output_file, item])
                else:
                    subprocess.check_output(['rm', output_file])
        print('Done removing water molecules from all pdb files!')
        return
    @staticmethod
    def get_boundary_points(list_of_points,
                            range_of_PCs = CONFIG_26,
                            num_of_bins = CONFIG_10,
                            num_of_boundary_points = CONFIG_11,
                            is_circular_boundary = CONFIG_18,
                            preprocessing = True,
                            auto_range_for_histogram = CONFIG_39, # set the range of histogram based on min,max values in each dimension
                            reverse_sorting_mode = CONFIG_41 # whether we reverse the order of sorting of diff_with_neighbors values
                            ):
        """
        Find under-populated ("boundary"/hole) histogram cells of a point cloud in PC space.

        A D-dimensional histogram is built from list_of_points; a cell is a candidate
        when its (optionally preprocessed) count is below the average of its axis
        neighbors.  Candidate cells are ranked by that deficit and the centers of the
        top num_of_boundary_points cells are returned.

        :param preprocessing: if True, then more weight is not linear, this would be better based on experience
        :param is_circular_boundary: treat every dimension as periodic (neighbors wrap via np.roll)
        :param reverse_sorting_mode: reverse the deficit-sorted candidate order before truncating
        :return: list of cell-center coordinates, each component rounded to 2 decimals
        """
        dimensionality = len(list_of_points[0])
        list_of_points = list(zip(*list_of_points))  # transpose to the (D, N) form accepted by np.histogramdd
        assert (len(list_of_points) == dimensionality)
        if is_circular_boundary or not auto_range_for_histogram:
            hist_matrix, edges = np.histogramdd(list_of_points, bins= num_of_bins * np.ones(dimensionality), range = range_of_PCs)
        else:
            # pad the automatic range slightly beyond the data min/max so the
            # outermost points do not sit exactly on a histogram edge
            temp_hist_range = [[min(item) - (max(item) - min(item)) / (num_of_bins - 2), max(item) + (max(item) - min(item)) / (num_of_bins - 2)]\
                                for item in list_of_points]
            hist_matrix, edges = np.histogramdd(list_of_points, bins=num_of_bins * np.ones(dimensionality), range=temp_hist_range)
        # following is the main algorithm to find boundary and holes
        # simply find the points that are lower than average of its 4 neighbors
        if preprocessing:
            hist_matrix = np.array([[- np.exp(- y) for y in x] for x in hist_matrix])   # preprocessing process
        if is_circular_boundary:  # typically works for circular autoencoder
            diff_with_neighbors = hist_matrix - 1.0 / (2 * dimensionality) \
                                            * sum(
                                                [np.roll(hist_matrix, 1, axis=x) + np.roll(hist_matrix, -1, axis=x) for x in list(range(dimensionality))]
                                                )
        else:
            # TODO: code not concise and general enough, fix this later
            diff_with_neighbors = np.zeros(hist_matrix.shape)
            temp_1 = [list(range(item)) for item in hist_matrix.shape]
            for grid_index in itertools.product(*temp_1):
                # +/- unit steps along each axis give the 2*D axis neighbors
                neighbor_index_list = [(np.array(grid_index) + temp_2).astype(int) for temp_2 in np.eye(dimensionality)]
                neighbor_index_list += [(np.array(grid_index) - temp_2).astype(int) for temp_2 in np.eye(dimensionality)]
                neighbor_index_list = [x for x in neighbor_index_list if np.all(x >= 0) and np.all(x < num_of_bins)]  # drop out-of-grid neighbors
                diff_with_neighbors[tuple(grid_index)] = hist_matrix[tuple(grid_index)] - np.average(
                    [hist_matrix[tuple(temp_2)] for temp_2 in neighbor_index_list]
                )
        # get grid centers
        edge_centers = [0.5 * (np.array(x[1:]) + np.array(x[:-1])) for x in edges]
        grid_centers = np.array(list(itertools.product(*edge_centers)))  # "itertools.product" gives Cartesian/direct product of several lists
        grid_centers = np.reshape(grid_centers, np.append(num_of_bins * np.ones(dimensionality), dimensionality).astype(int))
        potential_centers = []
        # now sort these grids (that has no points in it)
        # based on total number of points in its neighbors
        temp_seperate_index = []
        for _ in range(dimensionality):
            temp_seperate_index.append(list(range(num_of_bins)))
        index_of_grids = list(itertools.product(
                        *temp_seperate_index
                        ))
        index_of_grids = [x for x in index_of_grids if diff_with_neighbors[x] < 0]     # only apply to grids with diff_with_neighbors value < 0
        sorted_index_of_grids = sorted(index_of_grids, key = lambda x: diff_with_neighbors[x]) # sort based on histogram, return index values
        if reverse_sorting_mode:
            sorted_index_of_grids.reverse()
        for index in sorted_index_of_grids[:num_of_boundary_points]:  # note index can be of dimension >= 2
            temp_potential_center = [round(x, 2) for x in grid_centers[index]]
            potential_centers.append(temp_potential_center)
        return potential_centers
@staticmethod
def L_method(evaluation_values, num):
evaluation_values = np.array(evaluation_values)
num = np.array(num)
assert (evaluation_values.shape == num.shape)
min_weighted_err = float('inf')
optimal_num = 0
best_regr = None
for item in range(1, len(num) - 1):
y_left = evaluation_values[:item]
x_left = num[:item].reshape(item, 1)
y_right = evaluation_values[item - 1:]
x_right = num[item - 1:].reshape(len(num) - item + 1, 1)
regr_left = linear_model.LinearRegression()
regr_left.fit(x_left, y_left)
y_left_pred = regr_left.predict(x_left)
regr_right = linear_model.LinearRegression()
regr_right.fit(x_right, y_right)
y_right_pred = regr_right.predict(x_right)
err_left = mean_squared_error(y_left, y_left_pred)
err_right = mean_squared_error(y_right, y_right_pred)
weighted_err = (err_left * item + err_right * (len(num) - item + 1)) / (len(num) + 1)
if weighted_err < min_weighted_err:
optimal_num = num[item]
min_weighted_err = weighted_err
best_regr = [regr_left, regr_right]
x_data = np.linspace(min(num), max(num), 100).reshape(100, 1)
y_data_left = best_regr[0].predict(x_data)
y_data_right = best_regr[1].predict(x_data)
return optimal_num, x_data, y_data_left, y_data_right
    @staticmethod
    def get_RMSD_after_alignment(position_1, position_2):
        """Best-fit RMSD between two coordinate sets (centered and superimposed before comparison)."""
        return rmsd(position_1, position_2, center=True, superposition=True)
    @staticmethod
    def metric_RMSD_of_atoms(list_of_files, ref_file='../resources/1l2y.pdb', ref_index=0,
                             atom_selection_statement="name CA", step_interval=1):
        """
        Best-fit RMSD of each (subsampled) frame in list_of_files with respect to
        frame ref_index of ref_file.

        :param list_of_files: trajectory files to score (ref_file supplies their topology)
        :param atom_selection_statement: could be either
        - "name CA" for alpha-carbon atoms only
        - "protein" for all atoms
        - "backbone" for backbone atoms
        - others: see more information here: https://pythonhosted.org/MDAnalysis/documentation_pages/selections.html
        :param step_interval: keep every step_interval-th frame, counted across all files
        :return: 1D numpy array of RMSD values
        """
        ref = Universe(ref_file)
        ref_atom_selection = ref.select_atoms(atom_selection_statement)
        ref.trajectory[ref_index]  # position the reference trajectory on the requested frame
        ref_positions = ref_atom_selection.positions
        result_rmsd_of_atoms = []
        index = 0
        for sample_file in list_of_files:
            sample = Universe(ref_file, sample_file)  # ref_file acts as the topology file
            sample_atom_selection = sample.select_atoms(atom_selection_statement)
            for _ in sample.trajectory:
                if index % step_interval == 0:
                    result_rmsd_of_atoms.append(Sutils.get_RMSD_after_alignment(ref_positions,
                                                            sample_atom_selection.positions))
                index += 1
        return np.array(result_rmsd_of_atoms)
@staticmethod
def get_positions_from_list_of_pdb(pdb_file_list, atom_selection_statement='name CA'):
positions = []
for sample_file in pdb_file_list:
sample = Universe(sample_file)
sample_atom_selection = sample.select_atoms(atom_selection_statement)
for _ in sample.trajectory:
positions.append(sample_atom_selection.positions)
return positions
    @staticmethod
    def get_RMSD_of_a_point_wrt_neighbors_in_PC_space_with_list_of_pdb(PCs, pdb_file_list, radius=0.1):
        """This function calculates RMSD of a configuration with respect to its neighbors in PC space,
        the purpose is to see if similar structures (small RMSD) are projected to points close to each other
        in PC space.
        wrt = with respect to

        :param PCs: projected coordinates, one row per configuration (same ordering as the pdb frames)
        :param pdb_file_list: pdb files providing the configurations
        :param radius: neighborhood radius in PC space
        :return: list of per-configuration average RMSD over its neighbors
        """
        from sklearn.metrics.pairwise import euclidean_distances
        positions = Sutils.get_positions_from_list_of_pdb(pdb_file_list)
        pairwise_dis_in_PC = euclidean_distances(PCs)
        neighbor_matrix = pairwise_dis_in_PC < radius
        RMSD_diff_of_neighbors = np.zeros(neighbor_matrix.shape)
        for ii in range(len(PCs)):
            for jj in range(ii + 1, len(PCs)):
                if neighbor_matrix[ii][jj]:
                    RMSD_diff_of_neighbors[ii, jj] = RMSD_diff_of_neighbors[jj, ii] \
                        = Sutils.get_RMSD_after_alignment(positions[ii], positions[jj])
        # NOTE(review): the `if x` filter drops zero entries -- it removes
        # non-neighbors, but would also drop an exact-zero RMSD neighbor, and a
        # point with no neighbors yields np.average([]) == nan; confirm intended
        average_RMSD_wrt_neighbors = [np.average([x for x in RMSD_diff_of_neighbors[ii] if x])
                                      for ii in range(len(PCs))]
        return average_RMSD_wrt_neighbors
    @staticmethod
    def get_pairwise_distance_matrices_of_selected_atoms(list_of_files, step_interval=1, atom_selection='name CA'):
        """Per-frame square pairwise-distance matrices of the selected atoms.

        :param list_of_files: structure/trajectory files to process
        :param step_interval: keep every step_interval-th frame, counted across all files
        :param atom_selection: MDAnalysis selection string
        :return: numpy array of square distance matrices, one per kept frame
        """
        distances_list = []
        index = 0
        for sample_file in list_of_files:
            sample = Universe(sample_file)
            sample_atom_selection = sample.select_atoms(atom_selection)
            for _ in sample.trajectory:
                if index % step_interval == 0:
                    distances_list.append(
                        distance_array(sample_atom_selection.positions, sample_atom_selection.positions))
                index += 1
        return np.array(distances_list)
@staticmethod
def get_non_repeated_pairwise_distance(list_of_files, step_interval=1, atom_selection='name CA'):
"""each element in this result is a list, not a matrix"""
dis_matrix_list = Sutils.get_pairwise_distance_matrices_of_selected_atoms(list_of_files, step_interval,
atom_selection)
num_atoms = dis_matrix_list[0].shape[0]
result = []
for mat in dis_matrix_list:
p_distances = []
for item_1 in range(num_atoms):
for item_2 in range(item_1 + 1, num_atoms):
p_distances += [mat[item_1][item_2]]
assert (len(p_distances) == num_atoms * (num_atoms - 1) // 2)
result += [p_distances]
return np.array(result)
@staticmethod
def get_non_repeated_pairwise_distance_from_pos_npy(pos_npy):
from sklearn.metrics.pairwise import pairwise_distances
num_atoms = pos_npy.shape[1] // 3
temp_pos_npy = pos_npy.reshape(pos_npy.shape[0], num_atoms, 3)
pairwise_dis = np.array([pairwise_distances(item, item) for item in temp_pos_npy])
temp_result = np.array(
[[item[_1][_2] for _1 in range(num_atoms) for _2 in range(_1 + 1, num_atoms)] for item in pairwise_dis])
return temp_result
    @staticmethod
    def get_residue_relative_position_list(sample_file):
        """For every frame, compute each residue's heavy-atom positions relative to that residue's CA atom.

        :param sample_file: structure/trajectory file readable by MDAnalysis
        :return: list (one entry per frame) of lists (one per residue) of arrays
            of heavy-atom positions minus the residue's CA position
        """
        sample = Universe(sample_file)
        temp_heavy_atoms = sample.select_atoms('not name H*')
        temp_CA_atoms = sample.select_atoms('name CA')
        residue_relative_position_list = []
        for _ in sample.trajectory:
            temp_residue_relative_position_list = []
            for temp_residue_index in sample.residues.resnums:
                temp_residue_relative_position_list.append(
                    temp_heavy_atoms[temp_heavy_atoms.resnums == temp_residue_index].positions \
                    - temp_CA_atoms[temp_CA_atoms.resnums == temp_residue_index].positions)
            residue_relative_position_list.append(temp_residue_relative_position_list)
        return residue_relative_position_list
class Alanine_dipeptide(Sutils):
    """Molecule-specific helpers for alanine dipeptide: dihedral featurization,
    coordinate extraction and PLUMED script generation (shared logic in Sutils)."""
    def __init__(self):
        super(Alanine_dipeptide, self).__init__()
        return
    @staticmethod
    def get_cossin_from_a_coordinate(a_coordinate):
        """Return [cos, sin] pairs of all backbone dihedrals of one flattened frame.

        :param a_coordinate: flat sequence of length 3*N (xyz of N backbone atoms)
        :return: [cos(d1), sin(d1), cos(d2), sin(d2), ...] for the N-3 dihedrals
        """
        num_of_coordinates = len(list(a_coordinate)) // 3
        a_coordinate = np.array(a_coordinate).reshape(num_of_coordinates, 3)
        diff_coordinates = a_coordinate[1:num_of_coordinates, :] - a_coordinate[0:num_of_coordinates - 1,:] # bond vectors
        diff_coordinates_1=diff_coordinates[0:num_of_coordinates-2,:];diff_coordinates_2=diff_coordinates[1:num_of_coordinates-1,:]
        normal_vectors = np.cross(diff_coordinates_1, diff_coordinates_2)
        normal_vectors_normalized = np.array([x / sqrt(np.dot(x,x)) for x in normal_vectors])
        normal_vectors_normalized_1 = normal_vectors_normalized[0:num_of_coordinates-3, :]; normal_vectors_normalized_2 = normal_vectors_normalized[1:num_of_coordinates-2,:]
        diff_coordinates_mid = diff_coordinates[1:num_of_coordinates-2] # these are bond vectors in the middle (remove the first and last one), they should be perpendicular to adjacent normal vectors
        cos_of_angles = list(range(len(normal_vectors_normalized_1)))
        sin_of_angles_vec = list(range(len(normal_vectors_normalized_1)))
        sin_of_angles = list(range(len(normal_vectors_normalized_1))) # initialization
        result = []
        for index in range(len(normal_vectors_normalized_1)):
            cos_of_angles[index] = np.dot(normal_vectors_normalized_1[index], normal_vectors_normalized_2[index])
            sin_of_angles_vec[index] = np.cross(normal_vectors_normalized_1[index], normal_vectors_normalized_2[index])
            # NOTE(review): the sign below is a heuristic using component sums of the
            # cross product and the middle bond vector; it can misbehave when either
            # sum is (near) zero -- confirm acceptable for this molecule
            sin_of_angles[index] = sqrt(np.dot(sin_of_angles_vec[index], sin_of_angles_vec[index])) * np.sign(sum(sin_of_angles_vec[index]) * sum(diff_coordinates_mid[index]))
            result += [cos_of_angles[index], sin_of_angles[index]]
        return result
    @staticmethod
    def get_many_cossin_from_coordinates(coordinates):
        """Apply get_cossin_from_a_coordinate() to every coordinate row."""
        return list(map(Alanine_dipeptide.get_cossin_from_a_coordinate, coordinates))
    @staticmethod
    def get_many_cossin_from_coordinates_in_list_of_files(list_of_files, step_interval=1, format='npy'):
        """Load coordinate files, subsample frames, and convert them to dihedral cos/sin features.

        :param list_of_files: coordinate files produced by generate_coordinates_from_pdb_files()
        :param step_interval: keep every step_interval-th frame (after concatenating all files)
        :param format: file format passed through to Helper_func.load_npy()
        """
        coordinates = []
        for item in list_of_files:
            temp_coordinates = Helper_func.load_npy(item, format=format)
            # the result could be 1D or 2D numpy array, need further checking
            if temp_coordinates.shape[0] != 0:  # remove info from empty files
                if len(temp_coordinates.shape) == 1:  # if 1D numpy array, convert it to 2D array for consistency
                    temp_coordinates = temp_coordinates[:, None].T
                coordinates += list(temp_coordinates)
        coordinates = coordinates[::step_interval]
        result = Alanine_dipeptide.get_many_cossin_from_coordinates(coordinates)
        return result
    @staticmethod
    def get_many_dihedrals_from_coordinates_in_file (list_of_files):
        """Dihedral angles (radians) for every frame stored in the given coordinate files."""
        # why we need to get dihedrals from a list of coordinate files?
        # because we will probably need to plot other files outside self._list_of_coor_data_files
        temp = Alanine_dipeptide.get_many_cossin_from_coordinates_in_list_of_files(list_of_files)
        return Alanine_dipeptide.get_many_dihedrals_from_cossin(temp)
    @staticmethod
    def get_many_dihedrals_from_cossin(cossin):
        """Convert per-frame [cos, sin] pairs (4 dihedrals -> 8 values) back to angles via arctan2."""
        result = []
        for item in cossin:
            assert (len(item) == 8)
            temp_angle = []
            for ii in range(4):
                temp_angle += [np.arctan2(item[2 * ii + 1], item[2 * ii])]
            result += [list(temp_angle)]
        return result
    @staticmethod
    def generate_coordinates_from_pdb_files(path_for_pdb=CONFIG_12):
        """Extract backbone-atom coordinates (atom indices CONFIG_57[0]) from all pdb/dcd files under path_for_pdb."""
        index_of_backbone_atoms = [str(item) for item in CONFIG_57[0]]
        output_file_list = Sutils._generate_coordinates_from_pdb_files(index_of_backbone_atoms, file_path=path_for_pdb)
        return output_file_list
    @staticmethod
    def get_expression_script_for_plumed(scaling_factor=CONFIG_49):
        """PLUMED script exposing the scaled backbone-atom positions as inputs."""
        index_of_backbone_atoms = CONFIG_57[0]
        return Plumed_helper.get_atom_positions(index_of_backbone_atoms, scaling_factor, unit_scaling=1.0)
class Trp_cage(Sutils):
    """Molecule-specific helpers for the Trp-cage miniprotein (20 residues):
    dihedral featurization, folding metrics, clustering, and dihedral rotation
    (shared logic in Sutils)."""
    def __init__(self):
        super(Trp_cage, self).__init__()
        return
    @staticmethod
    def get_cossin_of_a_dihedral_from_four_atoms(coord_1, coord_2, coord_3, coord_4):
        """each parameter is a 3D Cartesian coordinates of an atom

        :return: [cos, sin] of the dihedral angle defined by the four atoms
        """
        coords_of_four = np.array([coord_1, coord_2, coord_3, coord_4])
        num_of_coordinates = 4
        diff_coordinates = coords_of_four[1:num_of_coordinates, :] - coords_of_four[0:num_of_coordinates - 1,:] # bond vectors
        diff_coordinates_1=diff_coordinates[0:num_of_coordinates-2,:];diff_coordinates_2=diff_coordinates[1:num_of_coordinates-1,:]
        normal_vectors = np.cross(diff_coordinates_1, diff_coordinates_2)
        normal_vectors_normalized = np.array([x / sqrt(np.dot(x,x)) for x in normal_vectors])
        normal_vectors_normalized_1 = normal_vectors_normalized[0:num_of_coordinates-3, :]; normal_vectors_normalized_2 = normal_vectors_normalized[1:num_of_coordinates-2,:]
        diff_coordinates_mid = diff_coordinates[1:num_of_coordinates-2] # these are bond vectors in the middle (remove the first and last one), they should be perpendicular to adjacent normal vectors
        index = 0
        cos_of_angle = np.dot(normal_vectors_normalized_1[index], normal_vectors_normalized_2[index])
        sin_of_angle_vec = np.cross(normal_vectors_normalized_1[index], normal_vectors_normalized_2[index])
        # pick a component where both the cross product and the middle bond vector
        # are nonzero, to read off the sign of the dihedral
        if sin_of_angle_vec[0] != 0 and diff_coordinates_mid[index][0] != 0:
            component_index = 0
        elif sin_of_angle_vec[1] != 0 and diff_coordinates_mid[index][1] != 0:
            component_index = 1
        else:
            component_index = 2
        sin_of_angle = sqrt(np.dot(sin_of_angle_vec, sin_of_angle_vec)) * np.sign(sin_of_angle_vec[component_index] * diff_coordinates_mid[index][component_index])
        # sanity check: a valid (cos, sin) pair must satisfy cos^2 + sin^2 == 1.
        # Previously this was a one-sided assert (`... - 1 < 0.0001`) wrapped in a
        # bare `except:`; made two-sided and explicit, same warning message.
        if abs(cos_of_angle ** 2 + sin_of_angle ** 2 - 1) > 0.0001:
            print("error: cos^2 x+ sin^2 x != 1, it is %f" %(cos_of_angle ** 2 + sin_of_angle ** 2))
        return [cos_of_angle, sin_of_angle]
    @staticmethod
    def get_coordinates_of_atom_with_index(a_coodinate, index):
        """:param a_coodinate is coordinate of all 20 atoms
        :param index: 0-based atom index
        :return: [x, y, z] of that atom
        """
        return [a_coodinate[3 * index], a_coodinate[3 * index + 1], a_coodinate[3 * index + 2]]
    @staticmethod
    def get_cossin_from_a_coordinate(a_coordinate):
        """Return [cos, sin] pairs of the 38 backbone dihedrals of one flattened Trp-cage frame."""
        total_num_of_residues = 20
        # for residue x the two backbone dihedrals use the atom-index quadruples
        # (3x-1, 3x, 3x+1, 3x+2) and (3x, 3x+1, 3x+2, 3x+3); out-of-range
        # quadruples at the chain ends are filtered out below
        list_of_idx_four_atoms = [[[3 * x - 1, 3 * x, 3 * x + 1, 3 * x + 2],
                                   [3 * x, 3 * x + 1, 3 * x + 2, 3 * x + 3]] for x in list(range(total_num_of_residues))]
        list_of_idx_four_atoms = reduce(lambda x, y: x + y, list_of_idx_four_atoms)
        list_of_idx_four_atoms = [x for x in list_of_idx_four_atoms if x[0] >= 0 and x[3] < 3 * total_num_of_residues]
        assert (len(list_of_idx_four_atoms) == 38)
        result = []
        for item in list_of_idx_four_atoms:
            parameter_list = [Trp_cage.get_coordinates_of_atom_with_index(a_coordinate, x) for x in item]
            [cos_value, sin_value] = Trp_cage.get_cossin_of_a_dihedral_from_four_atoms(*parameter_list)
            result += [cos_value, sin_value]
        return result
    @staticmethod
    def get_many_cossin_from_coordinates(coordinates):
        """Apply get_cossin_from_a_coordinate() to every coordinate row."""
        return list(map(Trp_cage.get_cossin_from_a_coordinate, coordinates))
    @staticmethod
    def get_many_cossin_from_coordinates_in_list_of_files(list_of_files, step_interval=1, format='npy'):
        """Load coordinate files, subsample frames, and convert them to dihedral cos/sin features."""
        coordinates = []
        for item in list_of_files:
            temp_coordinates = Helper_func.load_npy(item, format=format)  # the result could be 1D or 2D numpy array, need further checking
            if temp_coordinates.shape[0] != 0:  # remove info from empty files
                if len(temp_coordinates.shape) == 1:  # if 1D numpy array, convert it to 2D array for consistency
                    temp_coordinates = temp_coordinates[:, None].T
                coordinates += list(temp_coordinates)
        coordinates = coordinates[::step_interval]
        result = Trp_cage.get_many_cossin_from_coordinates(coordinates)
        return result
    @staticmethod
    def get_many_dihedrals_from_coordinates_in_file (list_of_files, step_interval=1):
        """Dihedral angles (radians) for every (subsampled) frame in the given coordinate files."""
        # why we need to get dihedrals from a list of coordinate files?
        # because we will probably need to plot other files outside self._list_of_coor_data_files
        temp = Trp_cage.get_many_cossin_from_coordinates_in_list_of_files(list_of_files, step_interval)
        return Trp_cage.get_many_dihedrals_from_cossin(temp)
    @staticmethod
    def get_many_dihedrals_from_cossin(cossin):
        """Convert per-frame [cos, sin] pairs (38 dihedrals -> 76 values) back to angles via arctan2."""
        result = []
        for item in cossin:
            temp_angle = []
            len_of_cos_sin = 76
            assert (len(item) == len_of_cos_sin), (len(item), len_of_cos_sin)
            for idx_of_angle in range(len_of_cos_sin // 2):
                temp_angle += [np.arctan2(item[2 * idx_of_angle + 1], item[2 * idx_of_angle])]
            assert (len(temp_angle) == len_of_cos_sin // 2)
            result += [temp_angle]
        assert (len(result) == len(cossin))
        return result
    @staticmethod
    def generate_coordinates_from_pdb_files(path_for_pdb = CONFIG_12):
        """Extract backbone-atom coordinates (atom indices CONFIG_57[1]) from all pdb/dcd files under path_for_pdb."""
        index_of_backbone_atoms = [str(item) for item in CONFIG_57[1]]
        assert (len(index_of_backbone_atoms) % 3 == 0)  # three backbone atoms per residue
        output_file_list = Sutils._generate_coordinates_from_pdb_files(index_of_backbone_atoms, file_path=path_for_pdb)
        return output_file_list
    @staticmethod
    def metric_get_diff_pairwise_distance_matrices_of_alpha_carbon(list_of_files, ref_file ='../resources/1l2y.pdb', step_interval = 1):
        """Frobenius-norm difference between each frame's CA pairwise-distance matrix and the reference's."""
        ref = Trp_cage.get_pairwise_distance_matrices_of_selected_atoms([ref_file])
        sample = Trp_cage.get_pairwise_distance_matrices_of_selected_atoms(list_of_files, step_interval)
        diff = [np.linalg.norm(ref[0] - x) for x in sample]
        return diff
    @staticmethod
    def metric_get_residue_9_16_salt_bridge_distance(list_of_files, step_interval = 1):
        """Distance of the Asp9(OD2)-Arg16(NH2) salt bridge for every (subsampled) frame."""
        distances_list = []
        index = 0
        for sample_file in list_of_files:
            sample = Universe(sample_file)
            sample_atom_selection_1 = sample.select_atoms("name OD2 and resid 9")
            sample_atom_selection_2 = sample.select_atoms("name NH2 and resid 16")
            for _ in sample.trajectory:
                if index % step_interval == 0:
                    distances_list.append(
                        distance_array(sample_atom_selection_1.positions, sample_atom_selection_2.positions))
                index += 1
        return np.array(distances_list).flatten()
    @staticmethod
    def metric_chirality(list_of_files, step_interval=1):
        """Chirality indicator per frame: sin of the pseudo-dihedral of CA atoms of residues 1, 9, 14, 20."""
        result = []
        index = 0
        for temp_file in list_of_files:
            temp_universe = Universe(temp_file)
            for _ in temp_universe.trajectory:
                if index % step_interval == 0:
                    atom_list = [temp_universe.select_atoms('name CA and resid %d' % item).positions[0]
                                 for item in [1, 9, 14, 20]]
                    # [1] selects the sin component of the [cos, sin] pair
                    result.append(Trp_cage.get_cossin_of_a_dihedral_from_four_atoms(
                        atom_list[0], atom_list[1], atom_list[2], atom_list[3])[1])
                index += 1
        return np.array(result)
    @staticmethod
    def metric_vertical_shift(list_of_files, step_interval=1):
        """Per frame: |CA1 - CA11| - |CA20 - CA11|, an asymmetry measure of the chain ends around residue 11."""
        result = []
        index = 0
        for temp_file in list_of_files:
            temp_universe = Universe(temp_file)
            for _ in temp_universe.trajectory:
                if index % step_interval == 0:
                    atom_list = [temp_universe.select_atoms('name CA and resid %d' % item).positions[0]
                                 for item in [1, 11, 20]]
                    result.append(np.linalg.norm(atom_list[0] - atom_list[1]) - np.linalg.norm(atom_list[2] - atom_list[1]))
                index += 1
        return np.array(result)
    @staticmethod
    def metric_get_number_of_native_contacts(list_of_files, ref_file ='../resources/1l2y.pdb', threshold = 8, step_interval = 1):
        """Count, per frame, the CA pairs closer than threshold in both the frame and the reference (native contacts)."""
        ref = Trp_cage.get_pairwise_distance_matrices_of_selected_atoms([ref_file])
        sample = Trp_cage.get_pairwise_distance_matrices_of_selected_atoms(list_of_files, step_interval)
        # note: counts over the full symmetric matrix, so each contact is counted twice
        result = [sum(sum(((x < threshold) & (ref[0] < threshold)).astype(int))) for x in sample]
        return result
    @staticmethod
    def metric_radius_of_gyration(list_of_files, step_interval = 1, atom_selection_statement = "name CA"):
        """Radius of gyration of the selected atoms for every (subsampled) frame."""
        result = []
        index = 0
        for item_file in list_of_files:
            temp_sample = Universe(item_file)
            temp_atoms = temp_sample.select_atoms(atom_selection_statement)
            for _ in temp_sample.trajectory:
                if index % step_interval == 0:
                    result.append(temp_atoms.radius_of_gyration())
                index += 1
        return result
    @staticmethod
    def get_pairwise_RMSD_after_alignment_for_a_file(sample_file, atom_selection_statement = 'name CA'):
        """Square matrix of best-fit RMSD between every pair of frames in sample_file."""
        sample_1 = Universe(sample_file); sample_2 = Universe(sample_file)  # should use two variables here, otherwise it will be 0, might be related to iterator issue?
        sel_1 = sample_1.select_atoms(atom_selection_statement); sel_2 = sample_2.select_atoms(atom_selection_statement)
        return [[rmsd(sel_1.positions, sel_2.positions, center=True, superposition=True) for _2 in sample_2.trajectory] for _1 in sample_1.trajectory]
    @staticmethod
    def structure_clustering_in_a_file(sample_file, atom_selection_statement = 'name CA',
                                       write_most_common_class_into_file = False,
                                       output_file_name = None,
                                       eps=0.5,
                                       min_num_of_neighboring_samples = 2
                                       ):
        """Cluster the frames of a multi-model PDB by pairwise RMSD using DBSCAN.

        :param eps: DBSCAN neighborhood radius (in RMSD units)
        :param min_num_of_neighboring_samples: DBSCAN min_samples
        :param write_most_common_class_into_file: if True, write the frames of the
            largest cluster into output_file_name (defaults to '<file>_most_common.pdb')
        :return: (num_in_each_class, index_of_most_common_class, most_common_label)
        :raises Exception: if outliers (label -1) form the largest class
        """
        pairwise_RMSD = Trp_cage.get_pairwise_RMSD_after_alignment_for_a_file(sample_file, atom_selection_statement=atom_selection_statement)
        from sklearn.cluster import DBSCAN
        dbscan_obj = DBSCAN(metric='precomputed', eps=eps, min_samples=min_num_of_neighboring_samples).fit(pairwise_RMSD)
        class_labels = dbscan_obj.labels_
        max_class_label = max(class_labels)
        num_in_each_class = {label: np.where(class_labels == label)[0].shape[0] for label in range(-1, max_class_label + 1)}
        most_common_class_labels = sorted(list(num_in_each_class.keys()), key=lambda x: num_in_each_class[x], reverse=True)
        with open(sample_file, 'r') as in_file:
            content = [item for item in in_file.readlines() if not 'REMARK' in item]
            content = ''.join(content)
            content = content.split('MODEL')[1:]   # remove header
        assert (len(content) == len(class_labels))
        if most_common_class_labels[0] == -1:
            raise Exception("too many outliers, check if there is actually a cluster, or adjust parameters")
        else:
            index_of_most_common_class = np.where(class_labels == most_common_class_labels[0])[0]
            if write_most_common_class_into_file:
                if output_file_name is None:
                    output_file_name = sample_file.replace('.pdb', '_most_common.pdb')
                frames_to_use = [content[ii] for ii in index_of_most_common_class]
                with open(output_file_name, 'w') as out_file:
                    for frame in frames_to_use:
                        out_file.write("MODEL" + frame)   # restore the 'MODEL' tag removed by split()
        return num_in_each_class, index_of_most_common_class, most_common_class_labels[0]
    @staticmethod
    def rotating_dihedral_angles_and_save_to_pdb(input_pdb, target_dihedrals, output_pdb):
        """Rotate the backbone dihedrals of every model in input_pdb to target_dihedrals and save to output_pdb.

        :param input_pdb: input (possibly multi-model) PDB file
        :param target_dihedrals: array (num_models, 38) of target dihedral angles in radians
        :param output_pdb: path of the PDB file to write
        """
        pdb_parser = PDB.PDBParser(QUIET=True)
        temp_structure = pdb_parser.get_structure('temp', input_pdb)
        coor_file = Trp_cage.generate_coordinates_from_pdb_files(input_pdb)[0]
        current_dihedrals = Trp_cage.get_many_dihedrals_from_coordinates_in_file([coor_file])
        rotation_angles = np.array(target_dihedrals) - np.array(current_dihedrals)
        # one entry per residue; None placeholders instead of the previous
        # `[[]] * 20`, which created 20 references to one shared list
        atom_indices_in_each_residue = [None] * 20
        temp_model = list(temp_structure.get_models())[0]
        for _1, item in list(enumerate(temp_model.get_residues())):
            atom_indices_in_each_residue[_1] = [int(_2.get_serial_number()) - 1 for _2 in item.get_atoms()]
        for temp_model in temp_structure.get_models():
            atoms_in_this_frame = list(temp_model.get_atoms())
            temp_coords = np.array([_1.get_coord() for _1 in atoms_in_this_frame])
            for item in range(19):    # 19 * 2 = 38 dihedrals in total
                C_atom_in_this_residue = list(filter(lambda x: x.get_name() == "C", atoms_in_this_frame))[item]
                CA_atom_in_this_residue = list(filter(lambda x: x.get_name() == "CA", atoms_in_this_frame))[item]
                CA_atom_in_next_residue = list(filter(lambda x: x.get_name() == "CA", atoms_in_this_frame))[item + 1]
                N_atom_in_next_residue = list(filter(lambda x: x.get_name() == "N", atoms_in_this_frame))[item + 1]
                axis_vector_0 = C_atom_in_this_residue.get_coord() - CA_atom_in_this_residue.get_coord()
                axis_vector_1 = CA_atom_in_next_residue.get_coord() - N_atom_in_next_residue.get_coord()
                fixed_coord_0 = temp_coords[int(C_atom_in_this_residue.get_serial_number()) - 1]
                fixed_coord_1 = temp_coords[int(N_atom_in_next_residue.get_serial_number()) - 1]
                # rotate everything up to and including the current residue
                indices_atom_to_rotate = reduce(lambda x, y: x + y, atom_indices_in_each_residue[:item + 1])
                temp_coords = Sutils.rotating_group_of_atoms(temp_coords, indices_atom_to_rotate, fixed_coord_0,
                                                             axis_vector_0, rotation_angles[temp_model.get_id()][2 * item])
                temp_coords = Sutils.rotating_group_of_atoms(temp_coords, indices_atom_to_rotate, fixed_coord_1,
                                                             axis_vector_1, rotation_angles[temp_model.get_id()][2 * item + 1])
            # save coordinates into structure
            for _1, item in enumerate(temp_model.get_atoms()):
                item.set_coord(temp_coords[_1])
        io = PDB.PDBIO()
        io.set_structure(temp_structure)
        io.save(output_pdb)
        return
    @staticmethod
    def get_expression_script_for_plumed(scaling_factor=CONFIG_49):
        """PLUMED script exposing the scaled backbone-atom positions as inputs."""
        index_of_backbone_atoms = CONFIG_57[1]
        return Plumed_helper.get_atom_positions(index_of_backbone_atoms, scaling_factor, unit_scaling=1.0)
| weiHelloWorld/accelerated_sampling_with_autoencoder | MD_simulation_on_alanine_dipeptide/current_work/src/molecule_spec_sutils.py | Python | mit | 49,057 | [
"MDAnalysis",
"MDTraj"
] | 8a8c5b33e34f5a3498d7d9423f8ebfc7e2640362810f1f9fef284e631525f1e4 |
"""Provide dependency graph"""
import itertools
from collections import deque
from UserDict import IterableUserDict
try:
import z3
except ImportError:
pass
import miasm2.expression.expression as m2_expr
from miasm2.core.graph import DiGraph
from miasm2.core.asmbloc import asm_label, expr_is_label
from miasm2.expression.simplifications import expr_simp
from miasm2.ir.symbexec import symbexec
from miasm2.ir.ir import irbloc
from miasm2.ir.translators import Translator
class DependencyNode(object):
    """Node elements of a DependencyGraph
    A dependency node stands for the dependency on the @element at line number
    @line_nb in the IRblock named @label, *before* the evaluation of this
    line.

    Note: Python 2 code (relies on __cmp__/cmp for ordering).
    """
    __slots__ = ["_label", "_element", "_line_nb", "_modifier",
                 "_step", "_nostep_repr", "_hash"]
    def __init__(self, label, element, line_nb, step, modifier=False):
        """Create a dependency node with:
        @label: asm_label instance
        @element: Expr instance
        @line_nb: int
        @step: int, algorithm step this node was created at
        @modifier: bool
        """
        self._label = label
        self._element = element
        self._line_nb = line_nb
        self._modifier = modifier
        self._step = step
        # precomputed step-less identity and hash (attributes are immutable)
        self._nostep_repr = (self._label, self._line_nb, self._element)
        self._hash = hash(
            (self._label, self._element, self._line_nb, self._step))
    def __hash__(self):
        """Returns a hash of @self to uniquely identify @self"""
        return self._hash
    def __eq__(self, depnode):
        """Returns True if @self and @depnode are equals.
        Unlike __cmp__, the attribute 'step' IS taken into account here (and in
        __hash__): two nodes differing only by step compare unequal.
        """
        if not isinstance(depnode, self.__class__):
            return False
        return (self.label == depnode.label and
                self.element == depnode.element and
                self.line_nb == depnode.line_nb and
                self.step == depnode.step)
    def __cmp__(self, node):
        """Compares @self with @node. The step attribute is not taken into
        account in the comparison.
        """
        if not isinstance(node, self.__class__):
            raise ValueError("Compare error between %s, %s" % (self.__class__,
                                                               node.__class__))
        return cmp((self.label, self.element, self.line_nb),
                   (node.label, node.element, node.line_nb))
    def __str__(self):
        """Returns a string representation of DependencyNode"""
        return "<%s %s %s %s M:%s S:%s>" % (self.__class__.__name__,
                                            self.label.name, self.element,
                                            self.line_nb, self.modifier,
                                            self.step)
    def __repr__(self):
        """Returns a string representation of DependencyNode"""
        return self.__str__()
    @property
    def nostep_repr(self):
        """Returns a representation of @self ignoring the step attribute"""
        return self._nostep_repr
    @property
    def label(self):
        "Name of the current IRBlock"
        return self._label
    @property
    def element(self):
        "Current tracked Expr"
        return self._element
    @property
    def line_nb(self):
        "Line in the current IRBlock"
        return self._line_nb
    @property
    def step(self):
        "Step of the current node"
        return self._step
    @property
    def modifier(self):
        """Evaluating the current line involves a modification of tracked
        dependencies"""
        return self._modifier
    @modifier.setter
    def modifier(self, value):
        """Evaluating the current line involves a modification of tracked
        dependencies if @value.
        @value: boolean"""
        self._modifier = value
class CacheWrapper(IterableUserDict):
    """Wrapper class for cache dictionnary

    Maps DependencyNode -> set(DependencyNode) and compares equal to another
    CacheWrapper when their *step-less* views agree.  Python 2 only
    (IterableUserDict / iteritems).
    """
    def __init__(self, dct=None):
        """Create a CacheWrapper with value @dct."""
        IterableUserDict.__init__(self, dct)
        # lazily-built step-less views (see properties below)
        self._nostep_cache = None
        self._nostep_keys = None
    def __eq__(self, cache):
        """Returns True if the nostep caches are equals"""
        # NOTE(review): __ne__ is not defined, so `!=` falls back to default
        # comparison under Python 2 -- confirm callers only use `==`
        if self.nostep_keys != cache.nostep_keys:
            return False
        return self.nostep_cache == cache.nostep_cache
    @property
    def nostep_keys(self):
        """List of dictonnary keys without the step attribute.
        The list is generated once when the method is called and not updated
        afterward.
        """
        if self._nostep_keys is None:
            self._nostep_keys = set(key.nostep_repr for key in self.data)
        return self._nostep_keys
    @property
    def nostep_cache(self):
        """Dictionnary of DependencyNode and their dependencies,
        without the step attribute.
        The dictionnary is generated once when the method is called for the
        first time and not updated afterward.
        """
        if self._nostep_cache is None:
            self._nostep_cache = {}
            for (node, values) in self.data.iteritems():
                self._nostep_cache.setdefault(node.nostep_repr, set()).update(
                    set(val.nostep_repr for val in values))
        return self._nostep_cache
class DependencyDict(object):
    """Internal structure for the DependencyGraph algorithm

    Tracks, for one IRblock (@label): the resolved dependencies (@cache),
    the nodes still awaiting inter-block resolution (@pending), and the
    chain of DependencyDict already walked to reach this one (@history).
    """
    __slots__ = ["_label", "_history", "_pending", "_cache"]
    def __init__(self, label, history):
        """Create a DependencyDict
        @label: asm_label, current IRblock label
        @history: list of DependencyDict
        """
        self._label = label
        self._history = history
        self._pending = set()
        # DepNode -> set(DepNode)
        self._cache = CacheWrapper()
    def __eq__(self, depdict):
        # Relies on CacheWrapper.__eq__, which ignores the 'step' attribute
        # of the cached nodes.
        if not isinstance(depdict, self.__class__):
            return False
        return (self._label == depdict.label and
                self.cache == depdict.cache)
    def __cmp__(self, depdict):
        # Python 2 only: total ordering via the cmp() builtin.
        if not isinstance(depdict, self.__class__):
            raise ValueError("Compare error %s != %s" % (self.__class__,
                                                         depdict.__class__))
        return cmp((self._label, self._cache, self._pending),
                   (depdict.label, depdict.cache, depdict.pending))
    def is_head(self, depnode):
        """Return True iff @depnode is at the head of the current block
        @depnode: DependencyNode instance"""
        # A head is a node at line 0 of this very block.
        return (self.label == depnode.label and
                depnode.line_nb == 0)
    def copy(self):
        "Return a copy of itself"
        # Initialize
        new_history = list(self.history)
        depdict = DependencyDict(self.label, new_history)
        # Copy values (deep-copy the per-key sets, shallow-copy the nodes)
        for key, values in self.cache.iteritems():
            depdict.cache[key] = set(values)
        depdict.pending.update(self.pending)
        return depdict
    def extend(self, label):
        """Return a copy of itself, with itself in history and pending clean
        @label: asm_label instance for the new DependencyDict's label
        """
        # Unlike copy(), the pending set is intentionally NOT carried over.
        depdict = DependencyDict(label, list(self.history) + [self])
        for key, values in self.cache.iteritems():
            depdict.cache[key] = set(values)
        return depdict
    def heads(self):
        """Return an iterator on the list of heads as defined in 'is_head'"""
        for key in self.cache:
            if self.is_head(key):
                yield key
    @property
    def label(self):
        "Label of the current block"
        return self._label
    @property
    def history(self):
        """List of DependencyDict needed to reach the current DependencyDict
        The first is the oldest"""
        return self._history
    @property
    def cache(self):
        "Dictionnary of DependencyNode and their dependencies"
        return self._cache
    @property
    def pending(self):
        """Dictionnary of DependencyNode and their dependencies, waiting for
        resolution"""
        return self._pending
    def _get_modifiers_in_cache(self, nodes_heads):
        """Find modifier nodes in cache starting from @nodes_heads.
        Returns new cache"""
        # 'worklist_depnode' order is needed (depth first)
        worklist_depnodes = list(nodes_heads)
        # Temporary cache
        cache = {}
        # Partially resolved 'cache' elements
        worklist = []
        # Build worklist and cache for non modifiers
        while worklist_depnodes:
            depnode = worklist_depnodes.pop()
            # Resolve node dependencies
            if depnode in cache:
                # Depnode previously resolved
                continue
            if depnode not in self._cache:
                # Final node
                if not depnode.modifier:
                    cache[depnode] = []
                continue
            # Propagate to son
            dependencies = self._cache[depnode]
            for son in dependencies:
                worklist_depnodes.append(son)
            # Save partially resolved dependency
            worklist.append((depnode, dependencies))
        # Convert worklist to cache
        # Popping in reverse discovery order resolves sons before parents.
        while worklist:
            depnode, dependencies = worklist.pop()
            parallels = []
            for node in dependencies:
                if node.modifier:
                    parallels.append([node])
                else:
                    parallels.append(cache[node])
            out = set()
            # Empty parallel lists are dropped before taking the product,
            # otherwise the whole product would be empty.
            for parallel in itertools.product(*[p for p in parallels if p]):
                out.update(parallel)
            cache[depnode] = out
        return cache
    def clean_modifiers_in_cache(self, node_heads):
        """Remove intermediary states (non modifier depnodes) in the internal
        cache values"""
        self._cache = CacheWrapper(self._get_modifiers_in_cache(node_heads))
    def _build_depgraph(self, depnode):
        """Recursively build the final list of DiGraph, and clean up unmodifier
        nodes
        @depnode: starting node
        """
        if depnode not in self._cache or \
                not self._cache[depnode]:
            # There is no dependency
            graph = DiGraph()
            graph.add_node(depnode)
            return graph
        # Recursion
        dependencies = list(self._cache[depnode])
        graphs = []
        for sub_depnode in dependencies:
            graphs.append(self._build_depgraph(sub_depnode))
        # head(graphs[i]) == dependencies[i]
        graph = DiGraph()
        graph.add_node(depnode)
        for head in dependencies:
            graph.add_uniq_edge(head, depnode)
        # NOTE(review): itertools.product(graphs) yields 1-tuples, so this
        # nested loop is equivalent to iterating 'graphs' directly.
        for subgraphs in itertools.product(graphs):
            for sourcegraph in subgraphs:
                for node in sourcegraph.nodes():
                    graph.add_node(node)
                for edge in sourcegraph.edges():
                    graph.add_uniq_edge(*edge)
        # Update the running queue
        return graph
    def as_graph(self, starting_nodes):
        """Return a DiGraph corresponding to computed dependencies, with
        @starting_nodes as leafs
        @starting_nodes: set of DependencyNode instance
        """
        # Build subgraph for each starting_node
        subgraphs = []
        for starting_node in starting_nodes:
            subgraphs.append(self._build_depgraph(starting_node))
        # Merge subgraphs into a final DiGraph
        graph = DiGraph()
        for sourcegraph in subgraphs:
            for node in sourcegraph.nodes():
                graph.add_node(node)
            for edge in sourcegraph.edges():
                graph.add_uniq_edge(*edge)
        return graph
    def filter_used_nodes(self, node_heads):
        """Keep only depnodes which are in the path of @node_heads in the
        internal cache
        @node_heads: set of DependencyNode instance
        """
        # Init
        todo = set(node_heads)
        used_nodes = set()
        # Map: transitive closure of reachable nodes from @node_heads
        while todo:
            node = todo.pop()
            if node in used_nodes:
                continue
            used_nodes.add(node)
            if not node in self._cache:
                continue
            for sub_node in self._cache[node]:
                todo.add(sub_node)
        # Remove unused elements
        for key in list(self._cache.keys()):
            if key not in used_nodes:
                del self._cache[key]
    def filter_unmodifier_loops(self, implicit, irdst):
        """
        Remove unmodifier node creating dependency loops over
        pending elements in cache.
        @implicit: boolean
        @irdst: ExprId instance of IRDst register
        """
        previous_dict = None
        # Get pending nodes of last time the label was handled
        for hist_dict in reversed(self.history):
            if hist_dict.label == self.label:
                previous_dict = hist_dict
                break
        if not previous_dict:
            return
        nostep_pending = [node.nostep_repr for node in self.pending]
        to_remove = set()
        for depnode in previous_dict.pending:
            if (depnode.nostep_repr not in nostep_pending or
                    implicit and depnode.element == irdst):
                continue
            to_remove.update(self._non_modifier_in_loop(depnode))
        # Replace unused keys by previous ones
        # NOTE(review): 'depnode' below is the leftover value from the loop
        # above (its last iteration). If several pending nodes contributed to
        # 'to_remove', only keys matching that last node are rewired --
        # confirm this is the intended behaviour.
        for key in to_remove:
            if depnode.nostep_repr == key.nostep_repr:
                self._cache[depnode] = self._cache.get(key, set()).copy()
                self.pending.discard(key)
                self.pending.add(depnode)
            # Replace occurences of key to remove
            for dependencies in self._cache.itervalues():
                if key in dependencies:
                    dependencies.remove(key)
                    dependencies.add(depnode)
            # has_key: Python 2 only
            if self._cache.has_key(key):
                del self._cache[key]
    def _non_modifier_in_loop(self, depnode):
        """
        Walk from @depnode until a node with the same nostep_repr is
        encountered.
        Returns a set of unmodifier nodes met in the path if no modifier was
        found.
        Returns set() if there exist a modifier node on the path.
        """
        if not self.cache.has_key(depnode):
            return set()
        # Init
        todo = set(self.cache[depnode])
        # NOTE(review): despite the docstring, the success path returns a
        # list, not a set; callers using set.update() accept both.
        unmodifier_nodes = []
        # Map
        while todo:
            node = todo.pop()
            if node in unmodifier_nodes:
                continue
            if node.modifier:
                # A modifier on the path: nothing must be removed
                return set()
            unmodifier_nodes.append(node)
            if not node in self._cache:
                continue
            if node.nostep_repr == depnode.nostep_repr:
                # Loop closed: same element reached at another step
                unmodifier_nodes.append(node)
                break
            for sub_node in self._cache[node]:
                todo.add(sub_node)
        return unmodifier_nodes
class DependencyResult(object):
    """Container and methods for DependencyGraph results"""
    def __init__(self, ira, final_depdict, input_depnodes):
        """Instance a DependencyResult
        @ira: IRAnalysis instance
        @final_depdict: DependencyDict instance
        @input_depnodes: set of DependencyNode instance
        """
        # Store arguments
        self._ira = ira
        self._depdict = final_depdict
        self._input_depnodes = input_depnodes
        # Init lazy elements (computed on first property access)
        self._graph = None
        self._has_loop = None
    @property
    def graph(self):
        """Returns a DiGraph instance representing the DependencyGraph"""
        if self._graph is None:
            self._graph = self._depdict.as_graph(self._input_depnodes)
        return self._graph
    @property
    def history(self):
        """List of depdict corresponding to the blocks encountered in the
        analysis"""
        return list(self._depdict.history) + [self._depdict]
    @property
    def unresolved(self):
        """Set of nodes whose dependencies weren't found"""
        # IRDst pending nodes are expected leftovers, not real unresolved
        # dependencies
        return set(node.nostep_repr for node in self._depdict.pending
                   if node.element != self._ira.IRDst)
    @property
    def relevant_nodes(self):
        """Set of nodes directly and indirectly influencing
        @self.input_depnodes"""
        output = set()
        for depnodes in self._depdict.cache.values():
            output.update(depnodes)
        return output
    @property
    def relevant_labels(self):
        """List of labels containing nodes influencing @self.input_depnodes.
        The history order is preserved.
        """
        # Get used labels
        used_labels = set(depnode.label for depnode in self.relevant_nodes)
        # Keep history order
        output = []
        for label in [depdict.label for depdict in self.history]:
            if label in used_labels:
                output.append(label)
        return output
    @property
    def input(self):
        """Set of DependencyGraph start nodes"""
        return self._input_depnodes
    @property
    def has_loop(self):
        """True if current dictionnary has a loop"""
        if self._has_loop is None:
            # A label occurring twice in the relevant history implies a cycle
            self._has_loop = (len(self.relevant_labels) !=
                              len(set(self.relevant_labels)))
        return self._has_loop
    def emul(self, ctx=None, step=False):
        """Symbolic execution of relevant nodes according to the history
        Return the values of input nodes' elements
        @ctx: (optional) Initial context as dictionnary
        @step: (optional) Verbose execution
        Warning: The emulation is not sound if the input nodes depend on loop
        variant.
        """
        # Init
        # BUGFIX: work on a copy of regs_init. The previous code called
        # ctx_init.update(ctx) directly on the architecture-wide shared
        # 'regs_init' dictionnary, leaking @ctx values into every subsequent
        # emulation (and into any other user of regs_init).
        ctx_init = dict(self._ira.arch.regs.regs_init)
        if ctx is not None:
            ctx_init.update(ctx)
        depnodes = self.relevant_nodes
        affects = []
        # Build a single affectation block according to history
        for label in self.relevant_labels[::-1]:
            affected_lines = set(depnode.line_nb for depnode in depnodes
                                 if depnode.label == label)
            irs = self._ira.blocs[label].irs
            for line_nb in sorted(affected_lines):
                affects.append(irs[line_nb])
        # Eval the block
        temp_label = asm_label("Temp")
        symb_exec = symbexec(self._ira, ctx_init)
        symb_exec.emulbloc(irbloc(temp_label, affects), step=step)
        # Return only inputs values (others could be wrongs)
        return {depnode.element: symb_exec.symbols[depnode.element]
                for depnode in self.input}
class DependencyResultImplicit(DependencyResult):
    """Stand for a result of a DependencyGraph with implicit option
    Provide path constraints using the z3 solver"""
    # BUGFIX: "_solver" must not appear both in __slots__ and as a class
    # attribute: such a conflict raises ValueError at class creation time.
    # The slot entry is dropped; the class-level default below is kept.
    # (The parent class defines no __slots__, so instances still carry a
    # __dict__ and 'self._solver = ...' keeps working as before.)
    __slots__ = ["_ira", "_depdict", "_input_depnodes", "_graph",
                 "_has_loop"]
    # Z3 Solver instance
    _solver = None
    def emul(self, ctx=None, step=False):
        """Symbolic execution of relevant nodes, accumulating z3 path
        constraints between consecutive blocks of the history.
        Return the values of input nodes' elements
        @ctx: (optional) Initial context as dictionnary
        @step: (optional) Verbose execution
        """
        # Init
        # BUGFIX: copy regs_init instead of mutating the shared dictionnary
        # in place (same fix as DependencyResult.emul)
        ctx_init = dict(self._ira.arch.regs.regs_init)
        if ctx is not None:
            ctx_init.update(ctx)
        depnodes = self.relevant_nodes
        solver = z3.Solver()
        symb_exec = symbexec(self._ira, ctx_init)
        temp_label = asm_label("Temp")
        history = self.relevant_labels[::-1]
        history_size = len(history)
        for hist_nb, label in enumerate(history):
            # Build block with relevant lines only
            affected_lines = set(depnode.line_nb for depnode in depnodes
                                 if depnode.label == label)
            irs = self._ira.blocs[label].irs
            affects = []
            for line_nb in sorted(affected_lines):
                affects.append(irs[line_nb])
            # Emul the block and get back destination
            dst = symb_exec.emulbloc(irbloc(temp_label, affects), step=step)
            # Add constraint: the computed destination must reach the next
            # block of the history
            if hist_nb + 1 < history_size:
                next_label = history[hist_nb + 1]
                expected = symb_exec.eval_expr(m2_expr.ExprId(next_label, 32))
                constraint = m2_expr.ExprAff(dst, expected)
                solver.add(Translator.to_language("z3").from_expr(constraint))
        # Save the solver for is_satisfiable/constraints
        self._solver = solver
        # Return only inputs values (others could be wrongs)
        return {depnode.element: symb_exec.symbols[depnode.element]
                for depnode in self.input}
    @property
    def is_satisfiable(self):
        """Return True iff the solution path admits at least one solution
        PRE: 'emul'
        """
        return self._solver.check().r > 0
    @property
    def constraints(self):
        """If satisfiable, return a valid solution as a Z3 Model instance"""
        if not self.is_satisfiable:
            raise ValueError("Unsatisfiable")
        return self._solver.model()
class FollowExpr(object):
    "Stand for an element (expression, depnode, ...) to follow or not"
    __slots__ = ["follow", "element"]
    def __init__(self, follow, element):
        """@follow: boolean, whether @element must be tracked
        @element: wrapped object"""
        self.follow = follow
        self.element = element
    @staticmethod
    def to_depnodes(follow_exprs, label, line, modifier, step):
        """Build a set of FollowExpr(DependencyNode) from the @follow_exprs set
        of FollowExpr
        @follow_exprs: set of FollowExpr
        @label: asm_label instance
        @line: integer
        @modifier: boolean
        @step: integer
        """
        return set(FollowExpr(fexpr.follow,
                              DependencyNode(label,
                                             fexpr.element,
                                             line,
                                             step,
                                             modifier=modifier))
                   for fexpr in follow_exprs)
    @staticmethod
    def extract_depnodes(follow_exprs, only_follow=False):
        """Extract depnodes from a set of FollowExpr(Depnodes)
        @only_follow: (optional) extract only elements to follow"""
        extracted = set()
        for fexpr in follow_exprs:
            if fexpr.follow or not only_follow:
                extracted.add(fexpr.element)
        return extracted
class DependencyGraph(object):
    """Implementation of a dependency graph
    A dependency graph contains DependencyNode as nodes. The oriented edges
    stand for a dependency.
    The dependency graph is made of the lines of a group of IRblock
    *explicitely* or *implicitely* involved in the equation of given element.
    """
    def __init__(self, ira, implicit=False, apply_simp=True, follow_mem=True,
                 follow_call=True):
        """Create a DependencyGraph linked to @ira
        The IRA graph must have been computed
        @ira: IRAnalysis instance
        @implicit: (optional) Imply implicit dependencies
        Following arguments define filters used to generate dependencies
        @apply_simp: (optional) Apply expr_simp
        @follow_mem: (optional) Track memory syntactically
        @follow_call: (optional) Track through "call"
        """
        # Init
        self._ira = ira
        self._implicit = implicit
        # Monotonic counter used to tag DependencyNode creation steps
        self._step_counter = itertools.count()
        self._current_step = next(self._step_counter)
        # The IRA graph must be computed
        assert hasattr(self._ira, 'g')
        # Create callback filters. The order is relevant.
        self._cb_follow = []
        if apply_simp:
            self._cb_follow.append(self._follow_simp_expr)
        self._cb_follow.append(lambda exprs: self._follow_exprs(exprs,
                                                                follow_mem,
                                                                follow_call))
        self._cb_follow.append(self._follow_nolabel)
    @property
    def step_counter(self):
        "Iteration counter"
        return self._step_counter
    @property
    def current_step(self):
        "Current value of iteration counter"
        return self._current_step
    def inc_step(self):
        "Increment and return the current step"
        self._current_step = next(self._step_counter)
        return self._current_step
    @staticmethod
    def _follow_simp_expr(exprs):
        """Simplify expression so avoid tracking useless elements,
        as: XOR EAX, EAX
        """
        follow = set()
        for expr in exprs:
            follow.add(expr_simp(expr))
        return follow, set()
    @staticmethod
    def get_expr(expr, follow, nofollow):
        """Update @follow/@nofollow according to insteresting nodes
        Returns same expression (non modifier visitor).
        @expr: expression to handle
        @follow: set of nodes to follow
        @nofollow: set of nodes not to follow
        """
        if isinstance(expr, m2_expr.ExprId):
            follow.add(expr)
        elif isinstance(expr, m2_expr.ExprInt):
            # Constants never have dependencies
            nofollow.add(expr)
        return expr
    @staticmethod
    def follow_expr(expr, follow, nofollow, follow_mem=False, follow_call=False):
        """Returns True if we must visit sub expressions.
        @expr: expression to browse
        @follow: set of nodes to follow
        @nofollow: set of nodes not to follow
        @follow_mem: force the visit of memory sub expressions
        @follow_call: force the visit of call sub expressions
        """
        if not follow_mem and isinstance(expr, m2_expr.ExprMem):
            nofollow.add(expr)
            return False
        if not follow_call and expr.is_function_call():
            nofollow.add(expr)
            return False
        return True
    @classmethod
    def _follow_exprs(cls, exprs, follow_mem=False, follow_call=False):
        """Extracts subnodes from exprs and returns followed/non followed
        expressions according to @follow_mem/@follow_call
        """
        follow, nofollow = set(), set()
        for expr in exprs:
            # visit() fills 'follow'/'nofollow' as side effects; the visitor
            # callbacks do not modify the expression itself
            expr.visit(lambda x: cls.get_expr(x, follow, nofollow),
                       lambda x: cls.follow_expr(x, follow, nofollow,
                                                 follow_mem, follow_call))
        return follow, nofollow
    @staticmethod
    def _follow_nolabel(exprs):
        """Do not follow labels"""
        follow = set()
        for expr in exprs:
            if not expr_is_label(expr):
                follow.add(expr)
        return follow, set()
    def _follow_apply_cb(self, expr):
        """Apply callback functions to @expr
        @expr : FollowExpr instance"""
        # Each callback consumes the current 'follow' set and produces a new
        # (follow, nofollow) pair; 'nofollow' accumulates across callbacks.
        follow = set([expr])
        nofollow = set()
        for callback in self._cb_follow:
            follow, nofollow_tmp = callback(follow)
            nofollow.update(nofollow_tmp)
        out = set(FollowExpr(True, expr) for expr in follow)
        out.update(set(FollowExpr(False, expr) for expr in nofollow))
        return out
    def _get_irs(self, label):
        "Return the irs associated to @label"
        return self._ira.blocs[label].irs
    def _get_affblock(self, depnode):
        """Return the list of ExprAff associtiated to @depnode.
        LINE_NB must be > 0"""
        # line_nb - 1: dependencies of a node come from the line above it
        return self._get_irs(depnode.label)[depnode.line_nb - 1]
    def _direct_depnode_dependencies(self, depnode):
        """Compute and return the dependencies involved by @depnode,
        over the instruction at @depnode.line_nb - 1.
        Return a set of FollowExpr"""
        if isinstance(depnode.element, m2_expr.ExprInt):
            # A constant does not have any dependency
            output = set()
        elif depnode.line_nb == 0:
            # Beginning of a block, inter-block resolving is not done here
            output = set()
        else:
            # Intra-block resolving
            # Get dependencies
            read = set()
            modifier = False
            for affect in self._get_affblock(depnode):
                if affect.dst == depnode.element:
                    elements = self._follow_apply_cb(affect.src)
                    read.update(elements)
                    modifier = True
            # If it's not a modifier affblock, reinject current element
            if not modifier:
                read = set([FollowExpr(True, depnode.element)])
            # Build output
            output = FollowExpr.to_depnodes(read, depnode.label,
                                            depnode.line_nb - 1, modifier,
                                            self.current_step)
        return output
    def _resolve_intrablock_dep(self, depdict):
        """Resolve the dependencies of nodes in @depdict.pending inside
        @depdict.label until a fixed point is reached.
        @depdict: DependencyDict to update"""
        # Prepare the work list
        todo = set(depdict.pending)
        # Pending states will be handled
        depdict.pending.clear()
        while todo:
            depnode = todo.pop()
            if isinstance(depnode.element, m2_expr.ExprInt):
                # A constant does not have any dependency
                continue
            if depdict.is_head(depnode):
                depdict.pending.add(depnode)
                # A head cannot have dependencies inside the current IRblock
                continue
            # Find dependency of the current depnode
            sub_depnodes = self._direct_depnode_dependencies(depnode)
            depdict.cache[depnode] = FollowExpr.extract_depnodes(sub_depnodes)
            # Add to the worklist its dependencies (followed ones only)
            todo.update(FollowExpr.extract_depnodes(sub_depnodes,
                                                    only_follow=True))
        # Pending states will be overriden in cache
        for depnode in depdict.pending:
            try:
                del depdict.cache[depnode]
            except KeyError:
                continue
    def _get_previousblocks(self, label):
        """Return an iterator on predecessors blocks of @label, with their
        lengths"""
        preds = self._ira.g.predecessors_iter(label)
        for pred_label in preds:
            length = len(self._get_irs(pred_label))
            yield (pred_label, length)
    def _compute_interblock_dep(self, depnodes, heads):
        """Create a DependencyDict from @depnodes, and propagate
        DependencyDicts through all blocs
        """
        # Create a DependencyDict which will only contain our depnodes
        current_depdict = DependencyDict(list(depnodes)[0].label, [])
        current_depdict.pending.update(depnodes)
        # Init the work list
        done = {}
        todo = deque([current_depdict])
        while todo:
            depdict = todo.popleft()
            # Update the dependencydict until fixed point is reached
            self._resolve_intrablock_dep(depdict)
            self.inc_step()
            # Clean irrelevant path
            depdict.filter_unmodifier_loops(self._implicit, self._ira.IRDst)
            # Avoid infinite loops: skip depdicts already seen for this label
            label = depdict.label
            if depdict in done.get(label, []):
                continue
            done.setdefault(label, []).append(depdict)
            # No more dependencies
            if len(depdict.pending) == 0:
                yield depdict.copy()
                continue
            # Has a predecessor ?
            is_final = True
            # Propagate the DependencyDict to all parents
            for label, irb_len in self._get_previousblocks(depdict.label):
                is_final = False
                # Duplicate the DependencyDict
                new_depdict = depdict.extend(label)
                if self._implicit:
                    # Implicit dependencies: IRDst will be link with heads
                    implicit_depnode = DependencyNode(label, self._ira.IRDst,
                                                      irb_len,
                                                      self.current_step,
                                                      modifier=False)
                # Create links between DependencyDict
                for depnode_head in depdict.pending:
                    # Follow the head element in the parent
                    new_depnode = DependencyNode(label, depnode_head.element,
                                                 irb_len,
                                                 self.current_step)
                    # The new node has to be analysed
                    new_depdict.cache[depnode_head] = set([new_depnode])
                    new_depdict.pending.add(new_depnode)
                    # Handle implicit dependencies
                    if self._implicit:
                        new_depdict.cache[depnode_head].add(implicit_depnode)
                        new_depdict.pending.add(implicit_depnode)
                # Manage the new element
                todo.append(new_depdict)
            # Return the node if it's a final one, ie. it's a head (in graph
            # or defined by caller)
            if is_final or depdict.label in heads:
                yield depdict.copy()
    def get(self, label, elements, line_nb, heads):
        """Compute the dependencies of @elements at line number @line_nb in
        the block named @label in the current IRA, before the execution of
        this line. Dependency check stop if one of @heads is reached
        @label: asm_label instance
        @element: set of Expr instances
        @line_nb: int
        @heads: set of asm_label instances
        Return an iterator on DiGraph(DependencyNode)
        """
        # Init the algorithm
        input_depnodes = set()
        for element in elements:
            input_depnodes.add(DependencyNode(label, element, line_nb,
                                              self.current_step))
        # Compute final depdicts
        depdicts = self._compute_interblock_dep(input_depnodes, heads)
        # Unify solutions
        unified = []
        cls_res = DependencyResultImplicit if self._implicit else \
            DependencyResult
        for final_depdict in depdicts:
            # Keep only relevant nodes
            final_depdict.clean_modifiers_in_cache(input_depnodes)
            final_depdict.filter_used_nodes(input_depnodes)
            # Remove duplicate solutions
            if final_depdict not in unified:
                unified.append(final_depdict)
                # Return solutions as DiGraph
                yield cls_res(self._ira, final_depdict, input_depnodes)
    def get_from_depnodes(self, depnodes, heads):
        """Alias for the get() method. Use the attributes of @depnodes as
        argument.
        PRE: Labels and lines of depnodes have to be equals
        @depnodes: set of DependencyNode instances
        @heads: set of asm_label instances
        """
        lead = list(depnodes)[0]
        elements = set(depnode.element for depnode in depnodes)
        return self.get(lead.label, elements, lead.line_nb, heads)
    def get_from_end(self, label, elements, heads):
        """Alias for the get() method. Consider that the dependency is asked at
        the end of the block named @label.
        @label: asm_label instance
        @elements: set of Expr instances
        @heads: set of asm_label instances
        """
        return self.get(label, elements, len(self._get_irs(label)), heads)
| fmonjalet/miasm | miasm2/analysis/depgraph.py | Python | gpl-2.0 | 35,628 | [
"VisIt"
] | 309f6c641d45f36a5ce7eb00975a3e9d93693cb6c543ba8e3d1e6a9b725a6e3d |
#!/opt/local/bin/python
__author__ = "Andrew G. Clark"
__date__ = "7 May 2014"
__maintainer__ = "Andrew G. Clark"
__email__ = "andrew.clark@curie.fr"
""" This script analyzes linescans and extracts cortex thickness and density from actin/membrane linescan pairs.
The script can be run in a 'pair' mode (to analyze a single linescan pair)
or 'batch' mode (to analyze multiple directories full of linescan pairs).
The mode can be specified at the bottom ("main" function).
For batch mode:
Your parent directory should contain a file called 'dir_list.dat'
with the following information in row/column form, with only space as delimiters:
sub_dir px_size category ch_actin sigma_actin
stk_1 0.05 control 1 0.119
stk_2 0.04 siRNA 2 0.220
...
The first row must contain the column headers as shown
Definitions of input parameters:
sub_dir: The name of the sub-directory containing the linescan pairs (linescan pairs must end in '...average.dat')
px_size: The pixel size for the linescans in the given sub_dir
category: The category of the experiment in each sub_dir (can be used for plotting later)
ch_actin: The actin channel (either '1' or '2'; used for extracting cortex thickness/i_c)
sigma_actin: The sigma of the point spread function for the actin channel (used for extracting h/i_c)
Note: For the sub_dir entries in the dir_list, only those directories NOT appearing in 'completed_list_v4_1.dat' will be analyzed
Output:
In each sub-directory, a list called '.../ls_data/ls_fit_data.dat' will be created containing linescan and thickness data
-The columns are labeled according to channel number (ch1/ch2)
-delta is always the position of the peak intensity of channel 2 (ch2.x_peak) minus ch1.x_peak
In each sub-directory, plots of the linescans and the linescans with fits (if applicable) will be saved in '.../ls_plots/'
At the end, a master list of all of the data combined is be created in the parent_directory
For 'manual' mode:
When running the script, windows will pop up sequentially to request the following information:
-Channel 1 average linescan file
-Channel 2 average linescan file
-Pixel Size
-Actin Channel
-Sigma (Actin)
These parameters are defined above.
"""
import os
import math
from copy import deepcopy
from tkinter import *
from tkinter.filedialog import *
from tkinter.simpledialog import *
root = Tk()
import scipy
from scipy import optimize, stats
import pylab
import numpy as np
import utility_functions as uf
def gauss_func(p, x):
    """Definition of gaussian function used to fit linescan peaks.

    Args:
        p (list): fit parameters [a, sigma, mu, c] -- amplitude scale,
            standard deviation, mean, constant offset.
        x (float or array of floats): position(s) at which to evaluate.

    Returns:
        float or ndarray: value(s) of the gaussian at x.
    """
    a, sigma, mu, c = p #unpacks p (for readability)
    # np.exp replaces scipy.exp, which was a plain alias of numpy's exp,
    # deprecated and removed in SciPy >= 1.9. Results are identical.
    g = a / (sigma * math.sqrt(2 * math.pi)) * np.exp(-(x - mu)**2 / (2 * sigma**2)) + c
    return g
def convolved(p, x):
    """Defines convolved linescan. Args: x: float or list/iterable of floats,
    the position for which convolved intensity is calculated; p: list/iterable
    of floats, linecan parameters (p=[i_in, i_c, i_out, h, x_c, sigma]).
    Returns: i: float, intensity at x.
    """
    i_in, i_c, i_out, h, x_c, sigma = p #unpacks p (for readability)
    # Two smoothed steps (gaussian CDFs of width sigma) located at the
    # cortex edges x_c -/+ h/2, added on top of the inner plateau i_in.
    cdf = stats.norm.cdf
    rel = x - x_c
    rise = (i_c - i_in) * cdf(rel + h / 2., 0., sigma)
    fall = (i_out - i_c) * cdf(rel - h / 2., 0., sigma)
    return i_in + rise + fall
def unconvolved(p, x):
    """Defines unconvolved linescan. Args: x: float or list/iterable of floats,
    the position for which intensity is calculated; p: list/iterable of floats,
    linecan parameters (p=[i_in, i_c, i_out, h, x_c]). Returns: i: float
    ndarray, intensity at each x.
    """
    i_in, i_c, i_out, h, x_c = p #unpacks p (for readability)
    positions = np.asarray(x, dtype=float)
    lower = x_c - h / 2.
    upper = x_c + h / 2.
    # Piecewise-constant profile: i_in before the cortex ([..., lower)),
    # i_c inside ([lower, upper)), i_out at and beyond upper.
    return np.select([positions < lower, positions < upper],
                     [float(i_in), float(i_c)], default=float(i_out))
def sort_ls_list(list):
    """Sorts (in place) a list of linescan filenames by frame number and
    returns it.

    Args:
        list (list of str): linescan filenames; each name must contain a
            'frame_<number>_' substring used as the numeric sort key.
            (Parameter name kept for backward compatibility even though it
            shadows the builtin.)

    Returns:
        list of str: the same list, sorted by frame number.

    Raises:
        AttributeError: if a filename does not match 'frame_<number>_'.
    """
    # BUGFIX: 're' is used below but never imported at module level, which
    # made this function raise NameError. Import locally to keep the fix
    # self-contained.
    import re

    def find_key(line):
        # Numeric sort: frame_10 comes after frame_2
        return int(re.search('frame_([0-9]+)_', line).group(1))

    list.sort(key=find_key)
    return list
class Linescan():
    """Linescan object with methods to extract important parameters
    from linescans.
    """
    def __init__(self,x,i):
        """Initializes linescan.
        Args:
            x (list of numbers): the position values
            i (list of numbers): the intensity values
        """
        #populate linescan position/intensity
        self.x = np.array(x,dtype='float') #position list as NumPy array of floats
        self.i = np.array(i,dtype='float') #intensity list as NumPy array of floats
        #detminere a few easy parameters from position/intensity
        self.H = self.x[-1] - self.x[0]  #total linescan length
        self.i_tot = np.trapz(self.i,self.x)  #integrated intensity
        #populate other attributes
        self.dist_to_x_in_out = 1. #specifies how far away x_in is from the peak (in um)
        self.gauss_params = None #parameter list from Gaussian fit to find peak
        self.x_peak = None #linescan peak position
        self.i_peak = None #linescan peak intensity
        self.i_in = None #intracellular intensity
        self.i_out = None #extracellular intensity
        self.max_idx = None #index of point near linescan center with highest intensity
        self.x_fit = None #position list used for peak fitting
        self.i_fit = None #intensity list used for peak fitting
        self.i_in_x_list = None #position list used to determine self.i_in
        self.i_in_i_list = None #intensity list used to determine self.i_in
        self.i_out_x_list = None #position list used to determine self.i_out
        self.i_out_i_list = None #intensity list used to determine self.i_out
        self.x_in_upper_index = None #the index at the upper end of the region where x_in is calculated
        self.x_out_lower_index = None #the index at the lower end of the region where x_out is calculated
        self.fwhm = None #full width at half-max
        #initializes linescans and determines linescan parameters
        self.extract_ls_parameters()
    def convert_px_to_um(self):
        """Multiplies list of coordinates by pixel_size."""
        # NOTE(review): self.px_size is never set in __init__; calling this
        # method raises AttributeError unless a caller assigns px_size
        # beforehand -- confirm intended usage.
        self.x = np.array([a * self.px_size for a in self.x])
    def extract_ls_parameters(self):
        """Extracts intensity and position information from linescan"""
        self.get_peak()
        self.get_i_in_out()
        self.get_fwhm()
    def get_peak(self):
        """Finds the peak position and intensity of a linescan by fitting
        a Gaussian near the peak.
        """
        #restricts fitting to near the center of the linescan
        # (search window: 6 points left to 20 points right of the midpoint;
        # assumes the peak lies near the linescan center)
        self.max_idx = int(np.argmax(self.i[int(len(self.i)/2-6):int(len(self.i)/2+20)]) + len(self.i)/2-6)
        # Fit only the 5 points centered on the maximum
        self.x_fit = self.x[int(self.max_idx-2):int(self.max_idx+3)]
        self.i_fit = self.i[int(self.max_idx-2):int(self.max_idx+3)]
        #picks reasonable starting values for fit
        # (baseline guess: mean intensity up to 14 points left of the max;
        # 2.4 and 0.170 are empirically chosen heuristics)
        self.i_in_guess = np.mean(self.i[:int(self.max_idx-14)])
        a = (self.i[self.max_idx] - self.i_in_guess) / 2.4
        sigma = 0.170
        mu = self.x[self.max_idx]
        b = self.i_in_guess
        #perform fit with starting values
        p0 = [a, sigma, mu, b]
        p1, success = optimize.leastsq(self.residuals_gauss,p0,
                                       args=(self.x_fit, self.i_fit),
                                       maxfev = 1000000)
        self.gauss_params = p1
        self.x_peak = p1[2]  #fitted mean = peak position
        self.i_peak = gauss_func(p1, self.x_peak)  #fitted peak intensity
    def get_i_in_out(self):
        """Gets values for intracellular intensity (self.i_in) and
        extracellular intensity (self.i_out). The left of the linescan
        (nearer zero) is always assumed to be the intracellular side.
        Note: the i_in and i_out values are calculated to be the average value
        of the ten points out from the distance between the peak and position x away
        from the peak, where x is given by self.dist_to_x_in_out (defined in __init__).
        """
        # Intracellular side: 10-point window ending dist_to_x_in_out
        # (in um) to the left of the peak
        x_in_upper = self.x_peak - self.dist_to_x_in_out
        x_in_upper_index = np.argmin(abs(self.x - x_in_upper))
        self.x_in_upper_index = x_in_upper_index #for use in finding total intensity for density calculation
        self.i_in_x_list = self.x[x_in_upper_index-10:x_in_upper_index]
        self.i_in_i_list = self.i[x_in_upper_index-10:x_in_upper_index]
        self.i_in = np.mean(self.i_in_i_list)
        # Extracellular side: 10-point window starting dist_to_x_in_out
        # (in um) to the right of the peak
        x_out_lower = self.x_peak + self.dist_to_x_in_out
        x_out_lower_index = np.argmin(abs(self.x - x_out_lower))
        self.x_out_lower_index = x_out_lower_index #for use in finding total intensity for density calculation
        self.i_out_x_list = self.x[x_out_lower_index:x_out_lower_index+10]
        self.i_out_i_list = self.i[x_out_lower_index:x_out_lower_index+10]
        self.i_out = np.mean(self.i_out_i_list)
    def residuals_gauss(self,p,x,x_data):
        """Returns residuals for Gaussian fit of the intensity peak.
        Possible values for fit parameters are constrained to avoid
        overestimation of peak intensity.
        Args:
            p (list): fit parameters, [a, sigma, mu, c]
            x (list): position values
            x_data (list): intensity values
        Returns:
            residuals (list): residuals for fit
            -or-
            fail_array (list): in place of residuals if the fit fails
        """
        a, sigma, mu, c = p #unpacks p (for readability)
        i_peak_guess = gauss_func(p, mu)
        # Large constant residuals steer leastsq away from invalid regions
        fail_array = np.ones(len(x)) * 99999.
        # Constraints: sigma not unreasonably narrow, and fitted peak within
        # 50% of the observed maximum intensity
        if all([sigma >= 0.1,
                abs(i_peak_guess - self.i[self.max_idx]) < 0.5 * self.i[self.max_idx]]):
            residuals = gauss_func(p,x) - x_data
            return residuals
        else:
            return fail_array
    def get_fwhm(self):
        """Calculates the full-width at half maximum (FWHM) of the linescan peak"""
        #determines half-max
        # (half-max is measured relative to the intracellular baseline i_in,
        # not to zero)
        hm = (self.i_in + self.i_peak) / 2.
        # print(hm)
        # finds points closest to hm to the left of the peak
        search = self.i[:self.max_idx]
        self.left_index = (np.abs(search - hm)).argmin()
        # Bracket hm with the two neighbouring samples for interpolation
        if hm > self.i[self.left_index]:
            self.left_index_left = deepcopy(self.left_index)
            self.left_index_right = self.left_index_left + 1
        else:
            self.left_index_right = deepcopy(self.left_index)
            self.left_index_left = self.left_index_right - 1
        #gets interpolated intensity (linear interpolation between 2 surrounding points
        m_left = (self.i[self.left_index_right] - self.i[self.left_index_left]) / (self.x[self.left_index_right] - self.x[self.left_index_left])
        b_left = self.i[self.left_index_right] - m_left * self.x[self.left_index_right]
        x_fwhm_left = (hm - b_left) / m_left
        self.fwhm_left = [x_fwhm_left,hm]
        #finds point closest to hm to the right of the peak
        search = self.i[self.max_idx:]
        self.right_index = (np.abs(search - hm)).argmin() + self.max_idx
        if hm < self.i[self.right_index]:
            self.right_index_left = deepcopy(self.right_index)
            self.right_index_right = self.right_index_left + 1
        else:
            self.right_index_right = deepcopy(self.right_index)
            self.right_index_left = self.right_index_right - 1
        #gets interpolated intensity (linear interpolation between 2 surrounding points
        m_right = (self.i[self.right_index_right] - self.i[self.right_index_left]) / (self.x[self.right_index_right] - self.x[self.right_index_left])
        b_right = self.i[self.right_index_right] - m_right * self.x[self.right_index_right]
        x_fwhm_right = (hm - b_right) / m_right
        self.fwhm_right = [x_fwhm_right,hm]
        self.fwhm = x_fwhm_right - x_fwhm_left
class Cortex():
    """A Class for a cortex, with actin and membrane linescans and
    methods to determine cortex thickness and density.
    """
    def __init__(self, ch1, ch2, sigma_actin, ch_actin=1):
        """Initializes linescan pairs and remaining attributes.

        Args:
            ch1 (Linescan class): the ch1 linescan
            ch2 (Linescan class): the ch2 linescan
            sigma_actin (float): the sigma of the PSF for the actin channel

        Kwargs:
            ch_actin (int): says which channel is actin (1 or 2; any other
                value leaves self.actin/self.memb as None)
        """
        self.ch1 = ch1
        self.ch2 = ch2
        self.sigma_actin = sigma_actin
        self.ch_actin = ch_actin
        # separation between ch2 and ch1 peaks
        self.delta = self.ch2.x_peak - self.ch1.x_peak
        if self.ch_actin == 1:
            self.actin = self.ch1
            self.memb = self.ch2
        elif self.ch_actin == 2:
            self.actin = self.ch2
            self.memb = self.ch1
        else:
            self.actin = None
            self.memb = None
        self.h_max = 1.  # maximum cortex thickness (for constraining fit)
        self.i_c_max = 500.  # maximum cortex intensity (for constraining fit)
        self.h = None  # cortex thickness (from fit)
        self.i_c = None  # cortical actin intensity (from fit)
        self.density = None  # cortical actin density
        self.X_c = None  # background-independent center position of the cortical actin (from fit)
        self.solution = None  # solution from actin cortex thickness fit

    def get_h_i_c(self):
        """Performs the fit to get cortex thickness, h, and cortex intensity, i_c.

        Note: density is calculated as the difference between fitted cortex
        intensity and intracellular background, normalized by the intensity
        from the beginning of the linescan to the end of the i_out
        calculation region.  On a failed fit all result attributes are
        reset to None.
        """
        delta = abs(self.delta)
        # starting value for the best (smallest) sum of squared residuals
        self.solution = 2e+20
        p1 = None  # best-fit parameters found so far ([h, i_c])
        # only try fitting if the peak is higher than both i_in and i_out
        # (the ratio below is then non-negative)
        if ((self.actin.i_out - self.actin.i_peak) /
                (self.actin.i_in - self.actin.i_peak)) >= 0:
            # loops through several different starting values for i_c and h
            for i_c_factor in np.arange(2., 3.1, 0.2):
                for h_factor in np.arange(0.5, 2.1, 0.2):
                    i_c_start = self.actin.i_peak * i_c_factor
                    # NOTE(review): this evaluates as (sigma**2 / delta) * 2;
                    # if sigma**2 / (2 * delta) was intended, parentheses are
                    # missing.  It only seeds the optimizer, so the fit can
                    # still converge -- confirm against the model.
                    delta_start = ((self.sigma_actin**2 / delta*2) *
                                   np.log((self.actin.i_out - i_c_start) /
                                          (self.actin.i_in - i_c_start)))
                    h_start = 2 * (delta - delta_start) * h_factor
                    # performs the fit from this starting point
                    p0 = [h_start, i_c_start]
                    try:
                        result = optimize.leastsq(self.residuals, p0,
                                                  maxfev=100000, full_output=1)
                        solution_temp = np.sum([x**2 for x in result[2]['fvec']])
                        if solution_temp < self.solution:
                            self.solution = solution_temp
                            p1 = result[0]
                    except TypeError:
                        pass
        # controls for bad fits; p1 is None when no fit attempt succeeded
        # (previously p1 could be referenced unbound here, raising NameError)
        if (p1 is None
                or self.solution > 0.01
                or p1[0] >= self.h_max - 0.001
                or p1[1] >= self.i_c_max - 1.):
            self.h = None
            self.i_c = None
            self.density = None
            self.X_c = None
            self.solution = None
        else:
            self.h, self.i_c = p1
            actin_ls_mean = np.mean(self.actin.i[:self.actin.x_out_lower_index + 10])
            self.density = (self.i_c - self.actin.i_in) / actin_ls_mean
            self.X_c = self.memb.x_peak - self.h / 2.

    def residuals(self, p):
        """Calculates residuals for the cortex linescan fit used to extract
        cortex thickness and intensity values.

        Args:
            p (list of floats): [thickness, cortex_intensity]

        Returns:
            residuals (list of floats): [residual1, residual2]
            -or-
            fail_array (list of floats): [1000000., 1000000.]
            (returned when the parameters fall outside the fit constraints)
        """
        fail_array = [1000000., 1000000.]
        # constrains the fit and ensures the log term is positive; the
        # conditions short-circuit, so p[1] > i_in already guarantees a
        # non-zero denominator before the ratio is evaluated (the previous
        # all([...]) form evaluated every element eagerly and could raise
        # ZeroDivisionError)
        if (self.h_max > p[0] > 0
                and self.i_c_max > p[1] > self.actin.i_in
                and (self.actin.i_out - p[1]) / (self.actin.i_in - p[1]) > 0):
            # X_c is the position of the center of the cortex
            # x_c is the position of the cortex peak
            X_c_try = self.memb.x_peak - p[0] / 2.
            delta_try = (self.sigma_actin**2 / p[0]) * np.log((self.actin.i_out - p[1]) / (self.actin.i_in - p[1]))
            x_c_try = X_c_try - delta_try
            i_peak_try = convolved([self.actin.i_in, p[1], self.actin.i_out, p[0], X_c_try, self.sigma_actin], x_c_try)
            # residuals are the difference between calculated peak
            # position/intensity and the values measured on the data
            return [x_c_try - self.actin.x_peak, i_peak_try - self.actin.i_peak]
        return fail_array

    def plot_lss(self):
        """Plots the raw linescans, the i_in/i_out sample points, the FWHM
        markers and the Gaussian peak fits."""
        fig = pylab.figure()
        ax = fig.add_subplot(1, 1, 1)
        # plots raw data
        pylab.plot(self.ch1.x, self.ch1.i, 'go', label="Ch. 1")
        pylab.plot(self.ch2.x, self.ch2.i, 'ro', label="Ch. 2")
        # plots points used for determining i_in and i_out
        pylab.plot(self.ch1.i_in_x_list, self.ch1.i_in_i_list, 'yo', label=r"$i_{\rm{in}}$, $i_{\rm{out}}$")
        pylab.plot(self.ch2.i_in_x_list, self.ch2.i_in_i_list, 'yo')
        pylab.plot(self.ch1.i_out_x_list, self.ch1.i_out_i_list, 'yo')
        pylab.plot(self.ch2.i_out_x_list, self.ch2.i_out_i_list, 'yo')
        # plots the full width at half maximum for both channels
        x_fwhm1, i_fwhm1 = zip(self.ch1.fwhm_left, self.ch1.fwhm_right)
        x_fwhm2, i_fwhm2 = zip(self.ch2.fwhm_left, self.ch2.fwhm_right)
        pylab.plot(x_fwhm1, i_fwhm1, 'g', ls='-', marker='x', label="fwhm")
        pylab.plot(x_fwhm2, i_fwhm2, 'r', ls='-', marker='x', label='fwhm')
        # plots the Gaussian fit curves
        x_gauss_fit_ch1 = np.linspace(self.ch1.x_fit[0], self.ch1.x_fit[-1], 100)
        i_gauss_fit_ch1 = gauss_func(self.ch1.gauss_params, x_gauss_fit_ch1)
        pylab.plot(x_gauss_fit_ch1, i_gauss_fit_ch1, 'b', label="Peak fit")
        x_gauss_fit_ch2 = np.linspace(self.ch2.x_fit[0], self.ch2.x_fit[-1], 100)
        i_gauss_fit_ch2 = gauss_func(self.ch2.gauss_params, x_gauss_fit_ch2)
        pylab.plot(x_gauss_fit_ch2, i_gauss_fit_ch2, 'b')
        # finish plot; pylab.ylim is a function -- the previous code assigned
        # a tuple to it, which rebinds the name and never sets the limits
        y_min, y_max = ax.get_ylim()
        pylab.ylim(0, y_max)
        pylab.xlabel(r"Position ($\mu$m)")
        pylab.ylabel("Intensity (AU)")
        pylab.legend(loc='upper right')
        pylab.gcf().subplots_adjust(bottom=0.15)

    def plot_fits(self):
        """Plots the linescan pair together with the fitted cortex model
        (both the unconvolved step model and its PSF-convolved version)."""
        fig = pylab.figure()
        ax = fig.add_subplot(1, 1, 1)
        if self.ch_actin == 1 or self.ch_actin == "1":
            color_actin = 'g'
            color_memb = 'r'
        elif self.ch_actin == 2 or self.ch_actin == "2":
            color_actin = 'r'
            color_memb = 'g'
        else:
            raise ValueError("Please specify ch_actin as <<1>>, <<2>> for plotting fit!")
        # plots raw data
        pylab.plot(self.memb.x, self.memb.i, 'o', color=color_memb, label="Memb. (raw)")
        pylab.plot(self.actin.x, self.actin.i, 'o', color=color_actin, label="Actin (raw)")
        # plots unconvolved and extracted actin linescans from fits
        x_actin_hd = np.linspace(self.actin.x[0], self.actin.x[-1], 1000)
        i_actin_unconv = unconvolved([self.actin.i_in, self.i_c,
                                      self.actin.i_out, self.h, self.X_c],
                                     x_actin_hd)
        i_actin_conv = convolved([self.actin.i_in, self.i_c,
                                  self.actin.i_out, self.h, self.X_c, self.sigma_actin],
                                 x_actin_hd)
        pylab.plot(x_actin_hd, i_actin_unconv, ls='-', color=color_actin, label='fit')
        pylab.plot(x_actin_hd, i_actin_conv, ls='--', color=color_actin, label='fit (conv.)')
        pylab.axvline(x=self.memb.x_peak, color=color_memb, ls='--', label="Memb. (peak)")
        # finishes plot (ylim must be called, not assigned -- see plot_lss)
        y_min, y_max = ax.get_ylim()
        pylab.ylim(0, y_max)
        pylab.xlabel(r"Position ($\mu$m)")
        pylab.ylabel("Intensity (AU)")
        pylab.legend(loc='upper right')
        pylab.gcf().subplots_adjust(bottom=0.15)
def write_master_list(parent_dir, version):
    """Writes a master data list in the parent directory for batch mode.

    Concatenates the per-subdirectory ls_data files listed in dir_list.dat,
    keeping the header row only from the first one.

    Args:
        parent_dir (string): path of the parent directory
        version (string): the version of the software (for naming output file)
    """
    dir_list_path = parent_dir + '/dir_list.dat'
    # first column of every row, skipping the header line
    subdir_list = [row[0] for row in uf.read_file(dir_list_path)][1:]
    master_data = []
    for idx, subdir in enumerate(subdir_list):
        data = uf.read_file(parent_dir + '/' + subdir + '/ls_data/ls_data.dat')
        # keep the per-file header only for the first sub-directory
        master_data.extend(data if idx == 0 else data[1:])
    uf.save_data_array(master_data, parent_dir + '/master_list_v%s.dat' % version)
def load_ls(ls_path, px_size=1.):
    """Loads a linescan file.

    Args:
        ls_path (str): path of the average linescan file to be loaded
        px_size (float): pixel size in microns

    Returns:
        x (numpy array): the positions (in microns)
        i (numpy array): the intensities
    """
    ls_data = uf.read_file(ls_path)
    # column 0: position (scaled to microns), column 1: intensity
    positions = np.array([float(row[0]) for row in ls_data]) * px_size
    intensities = np.array([float(row[1]) for row in ls_data])
    return positions, intensities
def analyze_cortex(file_ch1, file_ch2, px_size, ch_actin, sigma_actin):
    """Extracts linescan parameters and cortex thickness/density
    for a pair of linescans.

    Args:
        file_ch1 (str): the filepath for the first linescan
        file_ch2 (str): the filepath for the second linescan
        px_size (float): the pixel size for the linescans (for the whole directory)
        ch_actin (int): the channel of the actin linescan (1 or 2)
        sigma_actin (float): the sigma of the PSF for the actin channel

    Returns:
        cortex (Cortex class): the cortex with associated attributes
    """
    x_ch1, i_ch1 = load_ls(file_ch1, px_size=px_size)
    x_ch2, i_ch2 = load_ls(file_ch2, px_size=px_size)
    # the x values should be the same for both linescans!
    x = deepcopy(x_ch1)
    basename = file_ch1.split('/')[-1][:-4]
    print('Analyzing file pair for:', basename)
    # builds the two linescans and the cortex they define
    actin = Linescan(x, i_ch1)
    memb = Linescan(x, i_ch2)
    cortex = Cortex(actin, memb, sigma_actin, ch_actin=ch_actin)
    if ch_actin in (1, 2):
        cortex.get_h_i_c()
    elif ch_actin == "None":
        # explicit request to skip the thickness fit
        pass
    else:
        raise ValueError("Please specify ch_actin as <<1>> or <<2>> for %s!" % file_ch1)
    print('h =', cortex.h)
    return cortex
def analyze_ls_pair(file_ch1, file_ch2, px_size, ch_actin, sigma_actin, version):
    """Analyzes linescans to extract cortex thickness/density
    for a single linescan pair. Data and plots are generated and saved
    to a new folder with the same name as file_ch1.

    Args:
        file_ch1 (str): the filepath for the first linescan
        file_ch2 (str): the filepath for the second linescan
        px_size (float): the pixel size for the linescans (for the whole directory)
        ch_actin (int): the channel of the actin linescan (1 or 2)
        sigma_actin (float): the sigma of the PSF for the actin channel
        version (str): version number (kept for call symmetry with analyze_dir)
    """
    from operator import attrgetter
    # makes directory in data_dir for saving
    save_dir = file_ch1[:-4] + '_ls_data'
    uf.make_dir(save_dir)
    # list of parameters to extract from the cortex; entries after the first
    # two are dotted attribute paths resolved on the cortex object
    data_to_write = [['basename', 'category',
                      'delta', 'h', 'i_c', 'density', 'X_c', 'solution',
                      'ch1.i_tot', 'ch1.H', 'ch1.x_peak', 'ch1.i_peak', 'ch1.i_in', 'ch1.i_out', 'ch1.fwhm',
                      'ch2.i_tot', 'ch2.H', 'ch2.x_peak', 'ch2.i_peak', 'ch2.i_in', 'ch2.i_out', 'ch2.fwhm'
                      ]]
    basename = file_ch1.split('/')[-1][:-4]
    category = 'pair'
    # gets cortex and linescan data
    cortex = analyze_cortex(file_ch1, file_ch2, px_size, ch_actin, sigma_actin)
    # plots raw linescans
    cortex.plot_lss()
    pylab.savefig(save_dir + "/" + basename + ".png")
    pylab.close()
    # plots linescans with h fits (only when the thickness fit succeeded)
    if cortex.h is not None:
        cortex.plot_fits()
        pylab.savefig(save_dir + "/" + basename + "_fit.png")
        pylab.close()
    # gets extracted linescan data; attrgetter resolves the dotted paths
    # (replaces the previous eval() on formatted strings)
    data_temp = [basename, category]
    for param in data_to_write[0][2:]:
        data_temp.append(attrgetter(param)(cortex))
    data_to_write.append(data_temp)
    uf.save_data_array(data_to_write, save_dir + "/ls_data.dat")
def analyze_dir(data_dir, px_size, category, ch_actin, sigma_actin, version):
    """Analyzes all linescan pairs in a directory full of linescans.

    Args:
        data_dir (str): the directory containing the linescans
        px_size (float): the pixel size for the linescans (for the whole directory)
        category (str): the category for the experiment
        ch_actin (int): the channel of the actin linescan (1 or 2)
        sigma_actin (float): the sigma of the PSF for the actin channel
        version (str): version number (for output filenames)
    """
    from operator import attrgetter
    # makes necessary directories in data_dir for saving
    save_dir = data_dir + '/ls_data'
    uf.make_dir(save_dir)
    # list of parameters to extract from the cortex; entries after the first
    # two are dotted attribute paths resolved on the cortex object
    data_to_write = [['basename', 'category',
                      'delta', 'h', 'i_c', 'density', 'X_c', 'solution',
                      'ch1.i_tot', 'ch1.H', 'ch1.x_peak', 'ch1.i_peak', 'ch1.i_in', 'ch1.i_out', 'ch1.fwhm',
                      'ch2.i_tot', 'ch2.H', 'ch2.x_peak', 'ch2.i_peak', 'ch2.i_in', 'ch2.i_out', 'ch2.fwhm'
                      ]]
    # gets and sorts the list of average linescans
    linescan_list = [x for x in os.listdir(data_dir) if 'average.dat' in x]
    for name in linescan_list:
        print(name)
        print(re.search('frame' + '_([0-9]+)_', name).group(1))
    linescan_list = sort_ls_list(linescan_list)
    # extracts linescan parameters and thickness/density, two files per pair
    for i in range(int(len(linescan_list) / 2)):
        file_ch1 = data_dir + '/' + linescan_list[2 * i]
        file_ch2 = data_dir + '/' + linescan_list[2 * i + 1]
        basename = file_ch1.split('/')[-1][:-4]
        cortex = analyze_cortex(file_ch1, file_ch2, px_size, ch_actin, sigma_actin)
        # plots raw linescans
        cortex.plot_lss()
        pylab.savefig(save_dir + "/" + basename + ".png")
        pylab.close()
        # plots linescans with h fits (only when the thickness fit succeeded)
        if cortex.h is not None:
            cortex.plot_fits()
            pylab.savefig(save_dir + "/" + basename + "_fit.png")
            pylab.close()
        # gets extracted linescan data; attrgetter resolves the dotted paths
        # (replaces the previous eval() on formatted strings)
        data_temp = [basename, category]
        for param in data_to_write[0][2:]:
            data_temp.append(attrgetter(param)(cortex))
        data_to_write.append(data_temp)
    uf.save_data_array(data_to_write, save_dir + "/ls_data.dat")
def main():
    """__main__ function: asks for an analysis mode via Tk dialogs and runs
    either a single-pair analysis or a batch analysis over the directory
    tree described by dir_list.dat."""
    version = '5'
    # hide the root Tk window (created at import time) so only the
    # dialogs are shown
    root.withdraw()
    # chooses analysis mode
    mode = askinteger(title="Analysis Mode Selection",
                      prompt="Please enter:\n1 for pairwise analysis or\n2 for batch analysis",
                      minvalue=1, maxvalue=2)
    if mode == 1:
        ch1_path = askopenfilename(title='Select an average linescan file for channel 1',
                                   filetypes=[("dat", "*.dat")],
                                   initialdir='.',
                                   initialfile="")
        ch2_path = askopenfilename(title='Select an average linescan file for channel 2',
                                   filetypes=[("dat", "*.dat")],
                                   initialdir='/'.join(ch1_path.split('/')[:-1]),
                                   initialfile=ch1_path.split('/')[-1])
        px_size = askfloat(title='Pixel Size', prompt='Please enter your pixel size')
        ch_actin = askinteger(title='Actin Channel', prompt='Please enter the actin channel',
                              minvalue=1, maxvalue=2)
        sigma_actin = askfloat(title='Actin Sigma', prompt='Please enter the sigma value\nfor the PSF for the actin channel\n(in microns)')
        analyze_ls_pair(ch1_path, ch2_path, px_size, ch_actin, sigma_actin, version)
    if mode == 2:
        parent_dir = askdirectory(title='Select the parent directory (be sure it contains dir_list.dat!)',
                                  initialdir=os.path.split(os.path.realpath(__file__))[0])
        dir_list = uf.get_dict_list(uf.read_file(parent_dir + '/dir_list.dat'))
        for entry in dir_list:
            data_dir = parent_dir + '/' + entry['sub_dir']
            print(data_dir)
            analyze_dir(data_dir,
                        float(entry['px_size']),
                        entry['category'],
                        int(entry['ch_actin']),
                        float(entry['sigma_actin']),
                        version)
        write_master_list(parent_dir, version)


if __name__ == '__main__':
    main()
| PaluchLabUCL/CortexThicknessAnalysis | extract_cortex_thickness_v5.py | Python | mit | 31,575 | [
"Gaussian"
] | e1430aaef0efcce851545b21126c4c3efa83574bcd2172f103c56e1f33277266 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
import threading
import time
import json
import hashlib
import base64
import socket
from shinken.log import logger
# For old users python-crypto was not mandatory, don't break their setup
try:
from Crypto.Cipher import AES
except ImportError:
logger.debug('Cannot find python lib crypto: export to kernel.shinken.io isnot available')
AES = None
from shinken.http_client import HTTPClient, HTTPException
BLOCK_SIZE = 16


def pad(data):
    """Pad *data* up to a multiple of BLOCK_SIZE, PKCS#7-style:
    each pad byte encodes the pad length."""
    padding = BLOCK_SIZE - len(data) % BLOCK_SIZE
    return data + chr(padding) * padding


def unpad(padded):
    """Strip the padding added by pad(): the last byte gives the length."""
    length = ord(padded[-1])
    return padded[:-length]
class Stats(object):
    """Aggregates per-daemon timing statistics and periodically ships them
    to kernel.shinken.io and/or a local statsd daemon.

    The two reporting modes are not exclusive:
      * kernel mode: AES-encrypted JSON is PUT to kernel.shinken.io
        (requires api_key/secret and the python-crypto AES import above)
      * statsd mode: raw UDP packets are sent to a statsd server
    """
    def __init__(self):
        self.name = ''
        self.type = ''
        self.app = None
        # per-key aggregates: {key: (min, max, count, sum)}
        self.stats = {}
        # There are two modes that are not exclusive
        # first the kernel mode
        self.api_key = ''
        self.secret = ''
        self.http_proxy = ''
        self.con = HTTPClient(uri='http://kernel.shinken.io')
        # then the statsd one
        self.statsd_sock = None
        self.statsd_addr = None

    def launch_reaper_thread(self):
        """Start the background thread that flushes statistics every minute.

        The thread is a daemon so it never blocks interpreter shutdown.
        """
        self.reaper_thread = threading.Thread(None, target=self.reaper, name='stats-reaper')
        self.reaper_thread.daemon = True
        self.reaper_thread.start()

    def register(self, app, name, _type, api_key='', secret='', http_proxy='',
                 statsd_host='localhost', statsd_port=8125, statsd_prefix='shinken',
                 statsd_enabled=False):
        """Configure statistics reporting for daemon *app*.

        Args:
            app: the daemon object (must provide get_stats_struct())
            name: daemon instance name, used in metric keys
            _type: daemon type (scheduler, broker, ...), used in metric keys
            api_key, secret, http_proxy: kernel.shinken.io credentials/proxy
            statsd_host, statsd_port, statsd_prefix: statsd target
            statsd_enabled: open the statsd UDP socket when True
        """
        self.app = app
        self.name = name
        self.type = _type
        # kernel.io part
        self.api_key = api_key
        self.secret = secret
        self.http_proxy = http_proxy
        # local statsd part
        self.statsd_host = statsd_host
        self.statsd_port = statsd_port
        self.statsd_prefix = statsd_prefix
        self.statsd_enabled = statsd_enabled
        if self.statsd_enabled:
            logger.debug('Loading statsd communication with %s:%s.%s',
                         self.statsd_host, self.statsd_port, self.statsd_prefix)
            self.load_statsd()
        # Also load the proxy if need
        self.con.set_proxy(self.http_proxy)

    # Let be crystal clear about why I don't use the statsd lib in python: it's crappy.
    # how guys did you fuck this up to this point? django by default for the conf?? really?...
    # So raw socket are far better here
    def load_statsd(self):
        """Resolve the statsd host and open the UDP socket.

        Note: the hostname is resolved once, here, not per packet; a later
        DNS change requires calling load_statsd() again.
        """
        try:
            self.statsd_addr = (socket.gethostbyname(self.statsd_host), self.statsd_port)
            self.statsd_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        except (socket.error, socket.gaierror), exp:
            logger.error('Cannot create statsd socket: %s' % exp)
            return

    # Will increment a stat key, if None, start at 0
    def incr(self, k, v):
        """Fold value *v* (seconds) into the (min, max, count, sum) tuple
        for key *k* and, when statsd is active, emit a timing packet."""
        _min, _max, nb, _sum = self.stats.get(k, (None, None, 0, 0))
        nb += 1
        _sum += v
        if _min is None or v < _min:
            _min = v
        if _max is None or v > _max:
            _max = v
        self.stats[k] = (_min, _max, nb, _sum)

        # Manage local statd part
        if self.statsd_sock and self.name:
            # beware, we are sending ms here, v is in s
            packet = '%s.%s.%s:%d|ms' % (self.statsd_prefix, self.name, k, v * 1000)
            try:
                self.statsd_sock.sendto(packet, self.statsd_addr)
            except (socket.error, socket.gaierror), exp:
                pass  # cannot send? ok not a huge problem here and cannot
                      # log because it will be far too verbose :p

    def _encrypt(self, data):
        """Pad and AES-CBC-encrypt *data* with a key/IV derived from the
        shared secret, returning a urlsafe base64 string.

        NOTE(review): key and IV are MD5 digests of the secret -- weak by
        modern standards, but required by the kernel.shinken.io protocol.
        """
        m = hashlib.md5()
        m.update(self.secret)
        key = m.hexdigest()

        m = hashlib.md5()
        m.update(self.secret + key)
        iv = m.hexdigest()

        data = pad(data)

        aes = AES.new(key, AES.MODE_CBC, iv[:16])

        encrypted = aes.encrypt(data)
        return base64.urlsafe_b64encode(encrypted)

    def reaper(self):
        """Loop forever: every ~60s swap out the accumulated stats, format
        them as graphite-style 'key value timestamp' metrics and PUT them
        (encrypted) to kernel.shinken.io when credentials are configured."""
        while True:
            now = int(time.time())
            # swap the accumulator so incr() can keep writing while we flush
            stats = self.stats
            self.stats = {}
            if len(stats) != 0:
                # NOTE(review): this join is never used afterwards -- looks
                # like a debug leftover
                s = ', '.join(['%s:%s' % (k, v) for (k, v) in stats.iteritems()])
            # If we are not in an initializer daemon we skip, we cannot have a real name, it sucks
            # to find the data after this
            if not self.name or not self.api_key or not self.secret:
                time.sleep(60)
                continue
            metrics = []
            for (k, e) in stats.iteritems():
                nk = '%s.%s.%s' % (self.type, self.name, k)
                _min, _max, nb, _sum = e
                _avg = float(_sum) / nb
                # nb can't be 0 here and _min_max can't be None too
                s = '%s.avg %f %d' % (nk, _avg, now)
                metrics.append(s)
                s = '%s.min %f %d' % (nk, _min, now)
                metrics.append(s)
                s = '%s.max %f %d' % (nk, _max, now)
                metrics.append(s)
                s = '%s.count %f %d' % (nk, nb, now)
                metrics.append(s)
            # logger.debug('REAPER metrics to send %s (%d)' % (metrics, len(str(metrics))) )
            # get the inner data for the daemon
            struct = self.app.get_stats_struct()
            struct['metrics'].extend(metrics)
            # logger.debug('REAPER whole struct %s' % struct)
            j = json.dumps(struct)
            if AES is not None and self.secret != '':
                # NOTE(review): this logs the api_key and secret at debug
                # level -- consider whether that should be redacted
                logger.debug('Stats PUT to kernel.shinken.io/api/v1/put/ with %s %s' % (
                    self.api_key, self.secret))
                # assume a %16 length messagexs
                encrypted_text = self._encrypt(j)
                try:
                    # best-effort: the response (r) is intentionally ignored
                    r = self.con.put('/api/v1/put/?api_key=%s' % (self.api_key), encrypted_text)
                except HTTPException, exp:
                    logger.error('Stats REAPER cannot put to the metric server %s' % exp)
            time.sleep(60)
statsmgr = Stats()
| claneys/shinken | shinken/stats.py | Python | agpl-3.0 | 6,954 | [
"CRYSTAL"
] | 25d7a5a0c3095dc662092db06bf573297d37002fe2f97b67e7dc9179aa477147 |
# -*- coding: utf-8 -*-
#
# gaussex.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Create two populations on a 30x30 grid and connect them using a Gaussian probabilistic kernel
----------------------------------------------------------------------------------------------
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
"""
import matplotlib.pyplot as plt
import numpy as np
import nest
nest.ResetKernel()

#####################################################################
# create two test layers: 30x30 grids of iaf_psc_alpha neurons
pos = nest.spatial.grid(shape=[30, 30], extent=[3., 3.])

#####################################################################
# create and connect two populations with a Gaussian distance-dependent
# connection probability inside a circular mask
a = nest.Create('iaf_psc_alpha', positions=pos)
b = nest.Create('iaf_psc_alpha', positions=pos)

cdict = {'rule': 'pairwise_bernoulli',
         'p': nest.spatial_distributions.gaussian(nest.spatial.distance,
                                                  std=0.5),
         'mask': {'circular': {'radius': 3.0}}}

nest.Connect(a, b, cdict)

#####################################################################
# plot targets of neurons in different grid locations
#
# plot targets of two source neurons into separate figures, with mask;
# use different colors per source
for src_index, color, cmap in [(30 * 15 + 15, 'blue', 'Blues'), (0, 'green', 'Greens')]:
    # obtain node id for the chosen source (center neuron, then corner)
    src = a[src_index:src_index + 1]
    fig = plt.figure()
    nest.PlotTargets(src, b, mask=cdict['mask'], probability_parameter=cdict['p'],
                     src_color=color, tgt_color=color, mask_color=color,
                     probability_cmap=cmap, src_size=100,
                     fig=fig)

    # beautify: decorate the Axes PlotTargets drew on via gca();
    # pyplot.axes() with no arguments creates a brand-new empty Axes on
    # top of the plot in matplotlib >= 3.x (the old auto-reuse behavior
    # was deprecated and removed)
    ax = plt.gca()
    ax.set_xticks(np.arange(-1.5, 1.55, 0.5))
    ax.set_yticks(np.arange(-1.5, 1.55, 0.5))
    plt.grid(True)
    plt.axis([-2.0, 2.0, -2.0, 2.0])
    ax.set_aspect('equal', 'box')
    plt.title('Connection targets, Gaussian kernel')

plt.show()
# plt.savefig('gaussex.pdf')
| SepehrMN/nest-simulator | pynest/examples/spatial/gaussex.py | Python | gpl-2.0 | 2,621 | [
"Gaussian"
] | a9f7b552ec091bdb14d61b69ff6a0fd3d4eaf11e434451686dd4460e3fe5edd0 |
from os import getcwd
from os import chmod
from os.path import join
from os.path import abspath
CAPTURE_RETURN_CODE = "return_code=$?"
YIELD_CAPTURED_CODE = 'sh -c "exit $return_code"'
from logging import getLogger
log = getLogger( __name__ )
def build_command(
    runner,
    job_wrapper,
    container=None,
    include_metadata=False,
    include_work_dir_outputs=True,
    remote_command_params={}
):
    """
    Compose the sequence of commands necessary to execute a job. This will
    currently include:

        - environment settings corresponding to any requirement tags
        - preparing input files
        - command line taken from job wrapper
        - commands to set metadata (if include_metadata is True)

    Returns the assembled shell command string, or None when the job has
    no command line at all.

    NOTE(review): remote_command_params has a mutable default ({}); it is
    only read here, so this is safe as long as no callee mutates it.
    """
    base_command_line = job_wrapper.get_command_line()
    # job_id = job_wrapper.job_id
    # log.debug( 'Tool evaluation for job (%s) produced command-line: %s' % ( job_id, base_command_line ) )
    commands_builder = CommandsBuilder(base_command_line)

    # All job runners currently handle this case which should never occur
    if not commands_builder.commands:
        return None

    # version capture and task-split input staging wrap the tool command
    __handle_version_command(commands_builder, job_wrapper)
    __handle_task_splitting(commands_builder, job_wrapper)
    # One could imagine also allowing dependencies inside of the container but
    # that is too sophisticated for a first crack at this - build your
    # containers ready to go!
    if not container:
        __handle_dependency_resolution(commands_builder, job_wrapper, remote_command_params)

    if container:
        # Stop now and build command before handling metadata and copying
        # working directory files back. These should always happen outside
        # of docker container - no security implications when generating
        # metadata and means no need for Galaxy to be available to container
        # and not copying workdir outputs back means on can be more restrictive
        # of where container can write to in some circumstances.
        # (file() and the 0755 octal literal below are Python-2-only syntax.)
        local_container_script = join( job_wrapper.working_directory, "container.sh" )
        fh = file( local_container_script, "w" )
        fh.write( "#!/bin/sh\n%s" % commands_builder.build() )
        fh.close()
        chmod( local_container_script, 0755 )

        # when the job runs on a remote working directory, point the
        # container at the remote copy of the script instead
        compute_container_script = local_container_script
        if 'working_directory' in remote_command_params:
            compute_container_script = "/bin/sh %s" % join(remote_command_params['working_directory'], "container.sh")

        run_in_container_command = container.containerize_command(
            compute_container_script
        )
        # restart command assembly from the containerized invocation
        commands_builder = CommandsBuilder( run_in_container_command )

    if include_work_dir_outputs:
        __handle_work_dir_outputs(commands_builder, job_wrapper, runner, remote_command_params)

    if include_metadata and job_wrapper.requires_setting_metadata:
        __handle_metadata(commands_builder, job_wrapper, runner, remote_command_params)

    return commands_builder.build()
def __handle_version_command(commands_builder, job_wrapper):
    """Prepend the command that records the tool version string, when the
    job wrapper defines one."""
    version_command = job_wrapper.write_version_cmd
    if not version_command:
        return
    commands_builder.prepend_command(version_command)
def __handle_task_splitting(commands_builder, job_wrapper):
    """Prepend the input-staging commands defined by split-task wrappers
    (plain job wrappers do not have the attribute)."""
    input_commands = getattr(job_wrapper, 'prepare_input_files_cmds', None)
    if input_commands:
        commands_builder.prepend_commands(input_commands)
def __handle_dependency_resolution(commands_builder, job_wrapper, remote_command_params):
    """Prepend dependency-injection shell commands, but only when
    dependency resolution happens locally (the default)."""
    resolution = remote_command_params.get("dependency_resolution", "local")
    shell_commands = job_wrapper.dependency_shell_commands
    if shell_commands and resolution == "local":
        commands_builder.prepend_commands(shell_commands)
def __handle_work_dir_outputs(commands_builder, job_wrapper, runner, remote_command_params):
    """Append conditional copy commands for tool outputs written to the
    working directory (from_work_dir outputs), capturing the tool's
    return code first so the copies don't mask it."""
    kwds = {}
    if 'working_directory' in remote_command_params:
        kwds['job_working_directory'] = remote_command_params['working_directory']
    work_dir_outputs = runner.get_work_dir_outputs(job_wrapper, **kwds)
    if work_dir_outputs:
        commands_builder.capture_return_code()
        commands_builder.append_commands([__copy_if_exists_command(output)
                                          for output in work_dir_outputs])
def __handle_metadata(commands_builder, job_wrapper, runner, remote_command_params):
    """Append the external metadata-setting command.

    Metadata copied over in init_meta() is never overwritten, as per
    established behavior (kwds={'overwrite': False}).
    """
    metadata_kwds = remote_command_params.get('metadata_kwds', {})
    # default values used when the remote parameters do not override them
    defaults = {
        'exec_dir': abspath(getcwd()),
        'tmp_dir': job_wrapper.working_directory,
        'dataset_files_path': runner.app.model.Dataset.file_path,
        'output_fnames': job_wrapper.get_output_fnames(),
        'config_root': None,
        'config_file': None,
        'datatypes_config': None,
        'compute_tmp_dir': None,
    }
    options = dict((key, metadata_kwds.get(key, default))
                   for key, default in defaults.items())
    metadata_command = (job_wrapper.setup_external_metadata(
        set_extension=False,
        kwds={'overwrite': False},
        **options
    ) or '').strip()
    if metadata_command:
        commands_builder.capture_return_code()
        commands_builder.append_command("cd %s; %s" % (options['exec_dir'], metadata_command))
def __copy_if_exists_command(work_dir_output):
    """Return a shell snippet that copies source to destination only when
    the source exists (a tool may legitimately not produce the file)."""
    source_file, destination = work_dir_output
    return "if [ -f {0} ] ; then cp {0} {1} ; fi".format(source_file, destination)
class CommandsBuilder(object):
    """Accumulates the shell fragments that make up a job script.

    Fragments are joined with '; '.  When anything is appended after the
    tool command proper (work-dir copies, metadata), the tool's return
    code is captured first so build() can re-raise it as the script's
    exit status.
    """
    def __init__(self, initial_command):
        # Strip trailing separators so we can keep concatenating cleanly.
        # TODO: Refactor to compose a list and join with ';', would be more clean.
        self.commands = initial_command.rstrip("; ")
        # Copying work dir outputs or setting metadata would mask the tool
        # command's return code, so track whether it has been captured.
        self.return_code_captured = False

    def prepend_command(self, command):
        self.commands = "; ".join([command, self.commands])
        return self

    def prepend_commands(self, commands):
        return self.prepend_command("; ".join(commands))

    def append_command(self, command):
        self.commands = "; ".join([self.commands, command])

    def append_commands(self, commands):
        self.append_command("; ".join(commands))

    def capture_return_code(self):
        # idempotent: the capture snippet is appended at most once
        if self.return_code_captured:
            return
        self.return_code_captured = True
        self.append_command(CAPTURE_RETURN_CODE)

    def build(self):
        # if the return code was captured, make it the script exit status
        if self.return_code_captured:
            self.append_command(YIELD_CAPTURED_CODE)
        return self.commands
# BUG FIX: __all__ must be a list of *strings* naming the public API;
# listing the function object itself breaks `from ... import *`.
__all__ = ["build_command"]
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/lib/galaxy/jobs/command_factory.py | Python | gpl-3.0 | 7,582 | [
"Galaxy"
] | d9536dedfc05b347e2ab2ba982b406a7eb1db16a32f7213ef66ba10251e8a022 |
import _cmd
import threading
import pymol
# Event-type codes understood by _cmd.p_glut_event (PyMOL's pseudo-GLUT
# dispatch); values mirror the internal P_GLUT_* enumeration.
P_GLUT_IDLE_EVENT = 0
P_GLUT_DISPLAY_EVENT = 1
P_GLUT_RESHAPE_EVENT = 2
P_GLUT_MOUSE_EVENT = 3
P_GLUT_MOTION_EVENT = 4
P_GLUT_CHAR_EVENT = 5
P_GLUT_SPECIAL_EVENT = 6
P_GLUT_PASSIVE_MOTION_EVENT= 7
# Modifier-key bitmask flags (combined by get_mod_value below).
P_GLUT_ACTIVE_ALT = 4
P_GLUT_ACTIVE_CTRL = 2
P_GLUT_ACTIVE_SHIFT = 1
# Mouse buttons; scroll-wheel ticks are reported as pseudo-buttons 3/4.
P_GLUT_LEFT_BUTTON = 0
P_GLUT_MIDDLE_BUTTON = 1
P_GLUT_RIGHT_BUTTON = 2
P_GLUT_BUTTON_SCROLL_FORWARD= 3
P_GLUT_BUTTON_SCROLL_BACKWARD=4
# Button states.
P_GLUT_DOWN = 0
P_GLUT_UP = 1
# Special (non-printable) key codes.
P_GLUT_KEY_F1 = 1
P_GLUT_KEY_F2 = 2
P_GLUT_KEY_F3 = 3
P_GLUT_KEY_F4 = 4
P_GLUT_KEY_F5 = 5
P_GLUT_KEY_F6 = 6
P_GLUT_KEY_F7 = 7
P_GLUT_KEY_F8 = 8
P_GLUT_KEY_F9 = 9
P_GLUT_KEY_F10 = 10
P_GLUT_KEY_F11 = 11
P_GLUT_KEY_F12 = 12
P_GLUT_KEY_LEFT = 100
P_GLUT_KEY_UP = 101
P_GLUT_KEY_RIGHT = 102
P_GLUT_KEY_DOWN = 103
P_GLUT_KEY_PAGE_UP = 104
P_GLUT_KEY_PAGE_DOWN = 105
P_GLUT_KEY_HOME = 106
P_GLUT_KEY_END = 107
P_GLUT_KEY_INSERT = 108
# Map from host-GUI key names to the special key codes above; used by
# EmbeddedPyMOL.ep_special to translate keyboard events.
special_dict = {
    'F1' : P_GLUT_KEY_F1,
    'F2' : P_GLUT_KEY_F2,
    'F3' : P_GLUT_KEY_F3,
    'F4' : P_GLUT_KEY_F4,
    'F5' : P_GLUT_KEY_F5,
    'F6' : P_GLUT_KEY_F6,
    'F7' : P_GLUT_KEY_F7,
    'F8' : P_GLUT_KEY_F8,
    'F9' : P_GLUT_KEY_F9,
    'F10' : P_GLUT_KEY_F10,
    'F11' : P_GLUT_KEY_F11,
    'F12' : P_GLUT_KEY_F12,
    'LEFT' : P_GLUT_KEY_LEFT,
    'UP' : P_GLUT_KEY_UP,
    'RIGHT' : P_GLUT_KEY_RIGHT,
    'DOWN' : P_GLUT_KEY_DOWN,
    'PAGE_UP' : P_GLUT_KEY_PAGE_UP,
    'PAGE_DOWN' : P_GLUT_KEY_PAGE_DOWN,
    'HOME' : P_GLUT_KEY_HOME,
    'END' : P_GLUT_KEY_END,
    'INSERT' : P_GLUT_KEY_INSERT,
    }
# Cache of (shift, control, meta) -> modifier bitmask, filled lazily by
# get_mod_value().
mod_dict = {}
def get_mod_value(shift, control, meta):
    """Combine key states into a GLUT-style modifier bitmask and cache it.

    The result is memoized in the module-level ``mod_dict`` keyed on the
    (shift, control, meta) triple so later lookups are a plain dict get.
    """
    mod = sum(flag for active, flag in
              ((shift, P_GLUT_ACTIVE_SHIFT),
               (control, P_GLUT_ACTIVE_CTRL),
               (meta, P_GLUT_ACTIVE_ALT))
              if active)
    mod_dict[(shift, control, meta)] = mod
    return mod
class EmbeddedPyMOL:
    """Mixin translating host-GUI events into PyMOL pseudo-GLUT events.

    Each handler first ensures the embedded PyMOL thread is running
    (``_cmd.runwxpymol``) and then forwards the event through
    ``_cmd.p_glut_event``.  Modifier and button state is cached on the
    instance between events (``ep_mod``, ``ep_button``).
    """

    def _ep_update_mod(self, shift, control, meta):
        # Resolve (shift, control, meta) to a modifier bitmask, consulting
        # the module-level cache and computing/caching it on a miss.
        self.ep_mod = mod_dict.get((shift, control, meta))
        if self.ep_mod is None:
            self.ep_mod = get_mod_value(shift, control, meta)

    def _ep_update_button(self, left, middle, right):
        # Remember which mouse button this event concerns (left wins ties).
        if left:
            self.ep_button = P_GLUT_LEFT_BUTTON
        elif middle:
            self.ep_button = P_GLUT_MIDDLE_BUTTON
        elif right:
            self.ep_button = P_GLUT_RIGHT_BUTTON

    def ep_get_pymol(self):
        """Return the embedded pymol module."""
        return pymol

    def ep_swap_dummy(self):
        """No-op buffer swap used until a real callback is registered."""
        pass

    def ep_init(self):
        """Start the embedded PyMOL thread and reset the event state."""
        _cmd.runwxpymol()
        pymol._swap_buffers = lambda s=self: s.ep_swap_dummy()  # dummy swap function
        self.ep_mod = 0
        self.ep_button = None
        self.ep_swap = None

    def ep_reshape(self, width, height):
        _cmd.runwxpymol()
        _cmd.p_glut_event(P_GLUT_RESHAPE_EVENT, width, height, 0, 0, 0)

    def ep_char(self, x, y, code, shift, control, meta):
        self._ep_update_mod(shift, control, meta)
        _cmd.runwxpymol()
        _cmd.p_glut_event(P_GLUT_CHAR_EVENT, x, y, code, 0, self.ep_mod)

    def ep_special(self, x, y, code, shift, control, meta):
        self._ep_update_mod(shift, control, meta)
        # Translate the host key name; silently ignore unknown keys.
        code = special_dict.get(code)
        if code is not None:
            _cmd.runwxpymol()
            _cmd.p_glut_event(P_GLUT_SPECIAL_EVENT, x, y, code, 0, self.ep_mod)

    def ep_mouse_down(self, x, y, left, middle, right, shift, control, meta):
        self._ep_update_mod(shift, control, meta)
        self._ep_update_button(left, middle, right)
        _cmd.runwxpymol()
        _cmd.p_glut_event(P_GLUT_MOUSE_EVENT, x, y, self.ep_button,
                          P_GLUT_DOWN, self.ep_mod)

    def ep_mouse_up(self, x, y):
        # Only meaningful after a matching button-down event.
        if self.ep_button is not None:
            _cmd.runwxpymol()
            _cmd.p_glut_event(P_GLUT_MOUSE_EVENT, x, y, self.ep_button,
                              P_GLUT_UP, self.ep_mod)
            self.ep_button = None

    def ep_motion(self, x, y, left, middle, right, shift, control, meta):
        self._ep_update_mod(shift, control, meta)
        self._ep_update_button(left, middle, right)
        _cmd.runwxpymol()
        _cmd.p_glut_event(P_GLUT_MOTION_EVENT, x, y, self.ep_button,
                          0, self.ep_mod)

    def ep_passive_motion(self, x, y, shift, control, meta):
        self._ep_update_mod(shift, control, meta)
        _cmd.runwxpymol()
        _cmd.p_glut_event(P_GLUT_PASSIVE_MOTION_EVENT, x, y, 0, 0, self.ep_mod)

    def ep_wheel(self, x, y, direction, shift, control, meta):
        # NOTE(review): modifiers are *not* refreshed here in the original;
        # the last cached ep_mod is reused — kept for compatibility.
        _cmd.runwxpymol()
        if direction > 0:
            _cmd.p_glut_event(P_GLUT_MOUSE_EVENT, x, y,
                              P_GLUT_BUTTON_SCROLL_FORWARD, 0, self.ep_mod)
        else:
            _cmd.p_glut_event(P_GLUT_MOUSE_EVENT, x, y,
                              P_GLUT_BUTTON_SCROLL_BACKWARD, 0, self.ep_mod)

    def ep_draw(self):
        _cmd.runwxpymol()
        _cmd.p_glut_event(P_GLUT_DISPLAY_EVENT, 0, 0, 0, 0, 0)  # draw event

    def ep_idle(self):
        _cmd.runwxpymol()
        _cmd.p_glut_event(P_GLUT_IDLE_EVENT, 0, 0, 0, 0, 0)

    def ep_get_redisplay(self):
        """Return whether PyMOL wants the scene redrawn."""
        _cmd.runwxpymol()
        return _cmd.p_glut_get_redisplay()

    def ep_set_swap_callback(self, swap):
        """Register the host-GUI buffer-swap callback.

        BUG FIX: the original installed ``lambda s=self: s.swap()``, but no
        ``swap`` attribute exists anywhere on this class — the stored
        callback is ``self.ep_swap`` (mirrors ep_init's dummy wiring).
        """
        self.ep_swap = swap
        pymol._swap_buffers = lambda s=self: s.ep_swap()
| gratefulfrog/lib | python/pymol/embed/__init__.py | Python | gpl-2.0 | 5,893 | [
"PyMOL"
] | aaa5b8145d2084c1aa3a577595d8fa20daac893142c27a8f784d1bdfb0b8f272 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for computing privacy values for DP-SGD."""
import math
from absl import app
from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp # pylint: disable=g-import-not-at-top
from tensorflow_privacy.privacy.analysis.rdp_accountant import get_privacy_spent
def apply_dp_sgd_analysis(q, sigma, steps, orders, delta):
  """Compute, print and return the DP-SGD privacy guarantee.

  Args:
    q: Sampling probability (batch_size / n).
    sigma: Noise multiplier — the ratio of the Gaussian noise std-dev to the
      l2-sensitivity, as required by compute_rdp; it corresponds to the
      `noise_multiplier` of privacy.optimizers.dp_optimizer.
    steps: Number of SGD steps taken.
    orders: RDP orders over which to optimize.
    delta: Target delta.

  Returns:
    A tuple (epsilon, optimal RDP order).
  """
  rdp = compute_rdp(q, sigma, steps, orders)
  eps, _, opt_order = get_privacy_spent(orders, rdp, target_delta=delta)

  print(
      'DP-SGD with sampling rate = {:.3g}% and noise_multiplier = {} iterated'
      ' over {} steps satisfies'.format(100 * q, sigma, steps),
      end=' ')
  print('differential privacy with eps = {:.3g} and delta = {}.'.format(
      eps, delta))
  print('The optimal RDP order is {}.'.format(opt_order))

  # An optimum at either end of the grid suggests the true optimum lies
  # outside the evaluated orders.
  if opt_order in (max(orders), min(orders)):
    print('The privacy estimate is likely to be improved by expanding '
          'the set of orders.')

  return eps, opt_order
def compute_dp_sgd_privacy(n, batch_size, noise_multiplier, epochs, delta):
  """Compute epsilon based on the given hyperparameters.

  Args:
    n: Number of examples in the training data.
    batch_size: Batch size used in training.
    noise_multiplier: Noise multiplier used in training.
    epochs: Number of epochs in training.
    delta: Value of delta for which to compute epsilon.

  Returns:
    Value of epsilon corresponding to input hyperparameters.
  """
  sampling_probability = batch_size / n
  if sampling_probability > 1:
    raise app.UsageError('n must be larger than the batch size.')

  # Dense low orders plus a sparse tail of high orders.
  rdp_orders = ([1.25, 1.5, 1.75, 2., 2.25, 2.5, 3., 3.5, 4., 4.5]
                + list(range(5, 64)) + [128, 256, 512])
  num_steps = int(math.ceil(epochs * n / batch_size))

  return apply_dp_sgd_analysis(sampling_probability, noise_multiplier,
                               num_steps, rdp_orders, delta)
| tensorflow/privacy | tensorflow_privacy/privacy/analysis/compute_dp_sgd_privacy_lib.py | Python | apache-2.0 | 2,858 | [
"Gaussian"
] | 7f15a610f9fcf09daf30ddf904ac16c0adc42a048bb8909932f6001871341be8 |
# coding: utf-8
from __future__ import unicode_literals
import unittest
import sys
from pymatgen.analysis.defects.point_defects import *
from pymatgen.core.structure import Structure
from pymatgen.core.periodic_table import Element
from pymatgen.analysis.bond_valence import BVAnalyzer
from monty.os.path import which
from pymatgen.io.cifio import CifParser
# Optional dependency: the Zeo++ python bindings are needed for
# interstitial/void analysis; dependent tests are skipped when missing.
try:
    import zeo
except ImportError:
    zeo = None
# Full path to the GULP executable (None when not on PATH); used to skip
# the lattice-energy tests that shell out to GULP.
gulp_present = which('gulp')
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
                        'test_files')
class ValenceIonicRadiusEvaluatorTest(unittest.TestCase):
    """Valence and radius assignment on a simple ionic (MgO) structure."""

    def setUp(self):
        """Build an MgO rocksalt cell and its valence/radius evaluator."""
        lattice = [[4.212, 0, 0], [0, 4.212, 0], [0, 0, 4.212]]
        species = ["Mg"] * 4 + ["O"] * 4
        frac_coords = [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5],
                       [0, 0.5, 0.5], [0.5, 0, 0], [0, 0.5, 0],
                       [0, 0, 0.5], [0.5, 0.5, 0.5]]
        self._mgo_uc = Structure(lattice, species, frac_coords, True, True)
        self._mgo_valrad_evaluator = ValenceIonicRadiusEvaluator(self._mgo_uc)

    def test_valences_ionic_structure(self):
        """Every site valence must be +2 (Mg) or -2 (O)."""
        for valence in self._mgo_valrad_evaluator.valences.values():
            self.assertTrue(valence in {2, -2})

    def test_radii_ionic_structure(self):
        """Every ionic radius must match Mg2+ (0.86 A) or O2- (1.26 A)."""
        for radius in self._mgo_valrad_evaluator.radii.values():
            self.assertTrue(radius in {0.86, 1.26})
class ValenceIonicRadiusEvaluatorMultiOxiTest(unittest.TestCase):
    """Evaluator behavior on Fe3O4, which mixes Fe2+ and Fe3+ oxidation states."""

    def setUp(self):
        """Parse the Fe3O4 CIF fixture and build its evaluator."""
        parser = CifParser(os.path.join(test_dir, "Fe3O4.cif"))
        self._struct = parser.get_structures()[0]
        self._valrad_evaluator = ValenceIonicRadiusEvaluator(self._struct)
        self._length = len(self._struct.sites)

    def test_valences_ionic_structure(self):
        """Fe3O4 must carry exactly the valences {+2, +3, -2}."""
        self.assertEqual(set(self._valrad_evaluator.valences.values()),
                         {2, 3, -2})

    def test_radii_ionic_structure(self):
        """Three distinct radii are expected: Fe2+, Fe3+ and O2-."""
        radii = set(self._valrad_evaluator.radii.values())
        self.assertEqual(len(radii), 3)
        self.assertEqual(radii, {0.72, 0.75, 1.26})
@unittest.skipIf(not zeo, "zeo not present.")
class VacancyTest(unittest.TestCase):
    """Vacancy defect generation and site properties on MgO rocksalt."""
    def setUp(self):
        """
        Setup MgO rocksalt structure for testing Vacancy
        """
        mgo_latt = [[4.212, 0, 0], [0, 4.212, 0], [0, 0, 4.212]]
        mgo_specie = ["Mg"] * 4 + ["O"] * 4
        mgo_frac_cord = [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5],
                         [0.5, 0, 0], [0, 0.5, 0], [0, 0, 0.5], [0.5, 0.5, 0.5]]
        self._mgo_uc = Structure(mgo_latt, mgo_specie, mgo_frac_cord, True,
                                 True)
        # Decorate sites with oxidation states so effective charges exist.
        bv = BVAnalyzer()
        self._mgo_uc = bv.get_oxi_state_decorated_structure(self._mgo_uc)
        self._mgo_val_rad_eval = ValenceIonicRadiusEvaluator(self._mgo_uc)
        self._mgo_val = self._mgo_val_rad_eval.valences
        self._mgo_rad = self._mgo_val_rad_eval.radii
        self._mgo_vac = Vacancy(self._mgo_uc, self._mgo_val, self._mgo_rad)
    def test_defectsite_count(self):
        # Rocksalt MgO has one symmetry-distinct site per species -> 2.
        self.assertTrue(self._mgo_vac.defectsite_count() == 2,
                        "Vacancy count wrong")
    def test_enumerate_defectsites(self):
        """
        The vacancy sites should be the lattice sites.
        And there should be only two unique vacancy sites for MgO.
        """
        # NOTE(review): all assertions below are commented out, so this
        # test currently verifies nothing beyond not raising.
        uniq_sites = []
        uniq_sites.append(self._mgo_uc.sites[3])
        uniq_sites.append(self._mgo_uc.sites[7])
        uniq_def_sites = self._mgo_vac.enumerate_defectsites()
        #Match uniq_sites iwth uniq_def_sites
        #self.assertTrue(len(uniq_def_sites) == 2, "Vacancy init failed")
        #mgo_spg = Spacegroup(int_number=225)
        #self.assertTrue(mgo_spg.are_symmetrically_equivalent(uniq_sites,
        #                uniq_def_sites),  "Vacancy init failed")
    def test_get_defectsite_index(self):
        for i in range(self._mgo_vac.defectsite_count()):
            self.assertTrue(self._mgo_vac.get_defectsite_structure_index(i) <
                            len(self._mgo_uc.sites),
                            "Defect site index beyond range")
    def test_gt_defectsite_coordination_number(self):
        # NOTE(review): name looks like a typo for test_get_...; kept so
        # existing test selection by name keeps working.
        for i in range(self._mgo_vac.defectsite_count()):
            self.assertTrue(
                round(self._mgo_vac.get_defectsite_coordination_number(
                    i)) == 6.0, "Wrong coordination number")
    def test_get_defectsite_coordinated_elements(self):
        # A vacant site's own element must not appear among its coordinating
        # elements (rocksalt: Mg is coordinated only by O and vice versa).
        for i in range(self._mgo_vac.defectsite_count()):
            site_index = self._mgo_vac.get_defectsite_structure_index(i)
            site_el = self._mgo_uc[site_index].species_string
            self.assertTrue(
                site_el not in self._mgo_vac.get_coordinated_elements(
                    i), "Coordinated elements are wrong")
    def test_get_defectsite_effective_charge(self):
        # A vacancy carries the negative of the removed ion's charge.
        for i in range(self._mgo_vac.defectsite_count()):
            site_index = self._mgo_vac.get_defectsite_structure_index(i)
            site_el = self._mgo_uc[site_index].species_and_occu
            eff_charge = self._mgo_vac.get_defectsite_effective_charge(i)
            if site_el["Mg2+"] == 1:
                self.assertEqual(eff_charge, -2)
            if site_el["O2-"] == 1:
                self.assertEqual(eff_charge, 2)
    def test_get_coordinatedsites_min_max_charge(self):
        # All coordinating ions are equivalent, so min charge == max charge.
        for i in range(self._mgo_vac.defectsite_count()):
            min_chrg, max_chrg = self._mgo_vac.get_coordsites_min_max_charge(i)
            self.assertEqual(min_chrg, max_chrg)
    def test_make_supercells_with_defects(self):
        scaling_matrix = [2,2,2]
        vac_specie = ['Mg']
        # Restricting to Mg vacancies: pristine cell + one defect cell.
        vac_scs = self._mgo_vac.make_supercells_with_defects(
            scaling_matrix, vac_specie)
        expected_structure_formulae = ["Mg32 O32", "Mg32 O31", "Mg31 O32"]
        self.assertEqual(len(vac_scs),2)
        for sc in vac_scs:
            self.assertIn(sc.formula, expected_structure_formulae)
        # No species filter: pristine cell + one cell per unique vacancy.
        vac_scs = self._mgo_vac.make_supercells_with_defects(scaling_matrix)
        expected_structure_formulae = ["Mg32 O32", "Mg32 O31", "Mg31 O32"]
        self.assertEqual(len(vac_scs),3)
        for sc in vac_scs:
            self.assertIn(sc.formula, expected_structure_formulae)
        # limit_return_structures caps the number of defect cells returned.
        vac_scs = self._mgo_vac.make_supercells_with_defects(
            scaling_matrix,limit_return_structures=1)
        expected_structure_formulae = ["Mg32 O32", "Mg32 O31", "Mg31 O32"]
        self.assertEqual(len(vac_scs),2)
        for sc in vac_scs:
            self.assertIn(sc.formula, expected_structure_formulae)
    @unittest.skip("deprecated")
    def test_get_volume(self):
        for i in range(self._mgo_vac.defectsite_count()):
            vol = self._mgo_vac.get_volume(i)
            #Once the zeo++ is properly working, make sure vol is +ve
            self.assertIsInstance(vol, float)
    @unittest.skip("deprecated")
    def test_get_surface_area(self):
        for i in range(self._mgo_vac.defectsite_count()):
            sa = self._mgo_vac.get_surface_area(i)
            #Once the zeo++ is properly working, make sure vol is +ve
            self.assertIsInstance(sa, float)
@unittest.skipIf(not gulp_present, "gulp not present.")
class VacancyFormationEnergyTest(unittest.TestCase):
    """Vacancy formation energies computed through the GULP backend."""

    def setUp(self):
        """Build an MgO cell, its vacancies and the formation-energy helper."""
        lattice = [[4.212, 0, 0], [0, 4.212, 0], [0, 0, 4.212]]
        species = ["Mg"] * 4 + ["O"] * 4
        frac_coords = [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5],
                       [0, 0.5, 0.5], [0.5, 0, 0], [0, 0.5, 0],
                       [0, 0, 0.5], [0.5, 0.5, 0.5]]
        self.mgo_uc = Structure(lattice, species, frac_coords, True, True)
        evaluator = ValenceIonicRadiusEvaluator(self.mgo_uc)
        self.mgo_vac = Vacancy(self.mgo_uc, evaluator.valences,
                               evaluator.radii)
        self.mgo_vfe = VacancyFormationEnergy(self.mgo_vac)

    def test_get_energy(self):
        """Formation energy of each unique vacancy must come back as a float."""
        for idx in range(len(self.mgo_vac.enumerate_defectsites())):
            energy = self.mgo_vfe.get_energy(idx)
            print(energy)
            self.assertIsInstance(energy, float)
@unittest.skipIf(not zeo, "zeo not present.")
class InterstitialTest(unittest.TestCase):
    """Interstitial site generation (default Voronoi-vertex mode) on MgO."""
    def setUp(self):
        """
        Setup MgO rocksalt structure for testing Interstitial
        """
        mgo_latt = [[4.212, 0, 0], [0, 4.212, 0], [0, 0, 4.212]]
        mgo_specie = ["Mg"] * 4 + ["O"] * 4
        mgo_frac_cord = [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5],
                         [0.5, 0, 0], [0, 0.5, 0], [0, 0, 0.5], [0.5, 0.5, 0.5]]
        self._mgo_uc = Structure(mgo_latt, mgo_specie, mgo_frac_cord, True,
                                 True)
        mgo_val_rad_eval = ValenceIonicRadiusEvaluator(self._mgo_uc)
        self._mgo_val = mgo_val_rad_eval.valences
        self._mgo_rad = mgo_val_rad_eval.radii
        self._mgo_interstitial = Interstitial(
            self._mgo_uc, self._mgo_val, self._mgo_rad
        )
    def test_enumerate_defectsites(self):
        """
        The interstitial sites should be within the lattice
        """
        uniq_def_sites = self._mgo_interstitial.enumerate_defectsites()
        self.assertTrue(len(uniq_def_sites) == 2, "Interstitial init failed")
    def test_defectsite_count(self):
        print(self._mgo_interstitial.defectsite_count())
        self.assertTrue(self._mgo_interstitial.defectsite_count() == 2,
                        "Vacancy count wrong")
    def test_get_defectsite_coordination_number(self):
        # Smoke test: exercises the call for each candidate site.
        for i in range(self._mgo_interstitial.defectsite_count()):
            print(self._mgo_interstitial.get_defectsite_coordination_number(
                i))
    def test_get_coordinated_sites(self):
        # Smoke test: exercises the call for each candidate site.
        for i in range(self._mgo_interstitial.defectsite_count()):
            print(self._mgo_interstitial.get_coordinated_sites(
                i))
    def test_get_coordsites_charge_sum(self):
        # Smoke test: exercises the call for each candidate site.
        for i in range(self._mgo_interstitial.defectsite_count()):
            print(self._mgo_interstitial.get_coordsites_charge_sum(
                i))
    def test_get_defectsite_coordinated_elements(self):
        # Every coordinating element must belong to the host composition.
        struct_el = self._mgo_uc.composition.elements
        for i in range(self._mgo_interstitial.defectsite_count()):
            for el in self._mgo_interstitial.get_coordinated_elements(i):
                self.assertTrue(
                    Element(el) in struct_el, "Coordinated elements are wrong"
                )
    def test_get_radius(self):
        for i in range(self._mgo_interstitial.defectsite_count()):
            rad = self._mgo_interstitial.get_radius(i)
            print(rad)
            # NOTE(review): assertTrue(rad, float) only checks truthiness of
            # rad; the second argument is the failure message, not a type.
            self.assertTrue(rad, float)
@unittest.skipIf(not zeo, "zeo not present.")
class InterstitialVoronoiFaceCenterTest(unittest.TestCase):
    """Interstitial generation using Voronoi face centers as candidates."""
    def setUp(self):
        """
        Setup MgO rocksalt structure for testing Interstitial
        """
        mgo_latt = [[4.212, 0, 0], [0, 4.212, 0], [0, 0, 4.212]]
        mgo_specie = ["Mg"] * 4 + ["O"] * 4
        mgo_frac_cord = [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5],
                         [0.5, 0, 0], [0, 0.5, 0], [0, 0, 0.5], [0.5, 0.5, 0.5]]
        self._mgo_uc = Structure(mgo_latt, mgo_specie, mgo_frac_cord, True,
                                 True)
        mgo_val_rad_eval = ValenceIonicRadiusEvaluator(self._mgo_uc)
        self._mgo_val = mgo_val_rad_eval.valences
        self._mgo_rad = mgo_val_rad_eval.radii
        self._mgo_interstitial = Interstitial(
            self._mgo_uc, self._mgo_val, self._mgo_rad,
            site_type='voronoi_facecenter'
        )
    def test_enumerate_defectsites(self):
        """
        The interstitial sites should be within the lattice
        """
        uniq_def_sites = self._mgo_interstitial.enumerate_defectsites()
        print("Length of voronoi face centers", len(uniq_def_sites))
        self.assertTrue(len(uniq_def_sites) == 2, "Defect site count wrong")
    def test_defectsite_count(self):
        print(self._mgo_interstitial.defectsite_count())
        self.assertTrue(self._mgo_interstitial.defectsite_count() == 2,
                        "Vacancy count wrong")
    def test_get_defectsite_coordination_number(self):
        for i in range(self._mgo_interstitial.defectsite_count()):
            coord_no=self._mgo_interstitial.get_defectsite_coordination_number(
                i)
            self.assertTrue(isinstance(coord_no, float))
    def test_get_coordinated_sites(self):
        # Smoke test: exercises the call for each candidate site.
        for i in range(self._mgo_interstitial.defectsite_count()):
            print(self._mgo_interstitial.get_coordinated_sites(
                i))
    def test_get_coordsites_charge_sum(self):
        # Smoke test: exercises the call for each candidate site.
        for i in range(self._mgo_interstitial.defectsite_count()):
            print(self._mgo_interstitial.get_coordsites_charge_sum(
                i))
    def test_get_defectsite_coordinated_elements(self):
        struct_el = self._mgo_uc.composition.elements
        for i in range(self._mgo_interstitial.defectsite_count()):
            for el in self._mgo_interstitial.get_coordinated_elements(i):
                self.assertTrue(
                    Element(el) in struct_el, "Coordinated elements are wrong"
                )
    def test_get_radius(self):
        # Face-center candidate sites carry no sphere radius; expect 0.0.
        for i in range(self._mgo_interstitial.defectsite_count()):
            rad = self._mgo_interstitial.get_radius(i)
            self.assertAlmostEqual(rad,0.0)
@unittest.skipIf(not zeo, "zeo not present.")
class InterstitialHighAccuracyTest(unittest.TestCase):
    """Interstitial generation with the high-accuracy Zeo++ setting."""
    def setUp(self):
        """
        Setup MgO rocksalt structure for testing Interstitial
        """
        mgo_latt = [[4.212, 0, 0], [0, 4.212, 0], [0, 0, 4.212]]
        mgo_specie = ["Mg"] * 4 + ["O"] * 4
        mgo_frac_cord = [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5],
                         [0.5, 0, 0], [0, 0.5, 0], [0, 0, 0.5], [0.5, 0.5, 0.5]]
        self._mgo_uc = Structure(mgo_latt, mgo_specie, mgo_frac_cord, True,
                                 True)
        mgo_val_rad_eval = ValenceIonicRadiusEvaluator(self._mgo_uc)
        self._mgo_val = mgo_val_rad_eval.valences
        self._mgo_rad = mgo_val_rad_eval.radii
        self._mgo_interstitial = Interstitial(
            self._mgo_uc, self._mgo_val, self._mgo_rad, accuracy='High'
        )
    def test_enumerate_defectsites(self):
        """
        The interstitial sites should be within the lattice
        """
        # High-accuracy mode may return a variable number of sites, so only
        # the type of each returned object is checked here.
        uniq_def_sites = self._mgo_interstitial.enumerate_defectsites()
        for site in uniq_def_sites:
            self.assertIsInstance(site, PeriodicSite, "Returned objects are not sites")
        #print len(uniq_def_sites)
        #self.assertTrue(len(uniq_def_sites) == 2, "Interstitial init failed")
    def test_defectsite_count(self):
        self.assertIsNotNone(self._mgo_interstitial.defectsite_count(),
                             "Interstitial count wrong")
    def test_get_defectsite_coordination_number(self):
        # Smoke test: exercises the call for each candidate site.
        for i in range(self._mgo_interstitial.defectsite_count()):
            print(self._mgo_interstitial.get_defectsite_coordination_number(
                i))
    def test_get_coordinated_sites(self):
        # Smoke test: exercises the call for each candidate site.
        for i in range(self._mgo_interstitial.defectsite_count()):
            print(self._mgo_interstitial.get_coordinated_sites(
                i))
    def test_get_coordsites_charge_sum(self):
        # Smoke test: exercises the call for each candidate site.
        for i in range(self._mgo_interstitial.defectsite_count()):
            print(self._mgo_interstitial.get_coordsites_charge_sum(
                i))
    def test_get_defectsite_coordinated_elements(self):
        struct_el = self._mgo_uc.composition.elements
        for i in range(self._mgo_interstitial.defectsite_count()):
            for el in self._mgo_interstitial.get_coordinated_elements(i):
                self.assertTrue(
                    Element(el) in struct_el, "Coordinated elements are wrong"
                )
    def test_get_radius(self):
        for i in range(self._mgo_interstitial.defectsite_count()):
            rad = self._mgo_interstitial.get_radius(i)
            print(rad)
            # NOTE(review): assertTrue(rad, float) only checks truthiness.
            self.assertTrue(rad, float)
@unittest.skipIf(not (gulp_present and zeo), "gulp or zeo not present.")
class InterstitialAnalyzerTest(unittest.TestCase):
    """GULP-backed energetic analysis of Mg2+ interstitials in MgO."""
    def setUp(self):
        mgo_latt = [[4.212, 0, 0], [0, 4.212, 0], [0, 0, 4.212]]
        mgo_specie = ["Mg"] * 4 + ["O"] * 4
        mgo_frac_cord = [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5],
                         [0.5, 0, 0], [0, 0.5, 0], [0, 0, 0.5], [0.5, 0.5, 0.5]]
        self.mgo_uc = Structure(mgo_latt, mgo_specie, mgo_frac_cord, True, True)
        mgo_valrad_eval = ValenceIonicRadiusEvaluator(self.mgo_uc)
        val = mgo_valrad_eval.valences
        rad = mgo_valrad_eval.radii
        self.mgo_val = val
        self.mgo_rad = rad
        self.mgo_inter = Interstitial(self.mgo_uc, val, rad)
        # Analyze insertion of a Mg2+ ion at each interstitial site.
        self.mgo_ia = InterstitialAnalyzer(self.mgo_inter, 'Mg', 2)
    def test_get_relaxedenergy(self):
        for i in range(len(self.mgo_inter.enumerate_defectsites())):
            ife = self.mgo_ia.get_energy(i, True)
            site_coords = self.mgo_inter.get_defectsite(i).coords
            site_radius = self.mgo_inter.get_radius(i)
            print(i, site_coords, site_radius, ife)
            self.assertIsInstance(ife, float)
    def test_get_norelaxedenergy(self):
        for i in range(self.mgo_inter.defectsite_count()):
            ife = self.mgo_ia.get_energy(i, False)
            site_coords = self.mgo_inter.get_defectsite(i).coords
            site_radius = self.mgo_inter.get_radius(i)
            print(i, site_coords, site_radius, ife)
            self.assertIsInstance(ife, float)
    def test_get_percentage_volume_change(self):
        # Smoke test: values are printed, not asserted.
        for i in range(self.mgo_inter.defectsite_count()):
            del_vol = self.mgo_ia.get_percentage_volume_change(i)
            print(i, del_vol)
    def test_get_percentage_lattice_parameter_change(self):
        # Smoke test: values are printed, not asserted.
        for i in range(self.mgo_inter.defectsite_count()):
            del_lat = self.mgo_ia.get_percentage_lattice_parameter_change(i)
            print(i, del_lat)
    def test_get_percentage_bond_distance_change(self):
        # Smoke test: values are printed, not asserted.
        for i in range(self.mgo_inter.defectsite_count()):
            del_bd = self.mgo_ia.get_percentage_bond_distance_change(i)
            print(i, del_bd)
    def test_relaxed_structure_match(self):
        for i in range(self.mgo_inter.defectsite_count()):
            for j in range(self.mgo_inter.defectsite_count()):
                match = self.mgo_ia.relaxed_structure_match(i, j)
                print(i, j, match)
                # A relaxed defect structure must at least match itself.
                if i == j:
                    self.assertTrue(match)
@unittest.skipIf(not (gulp_present and zeo), "gulp or zeo not present.")
class InterstitialStructureRelaxerTest(unittest.TestCase):
    """Relaxation of interstitial-containing MgO structures via GULP."""
    def setUp(self):
        mgo_latt = [[4.212, 0, 0], [0, 4.212, 0], [0, 0, 4.212]]
        mgo_specie = ["Mg"] * 4 + ["O"] * 4
        mgo_frac_cord = [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5],
                         [0.5, 0, 0], [0, 0.5, 0], [0, 0, 0.5], [0.5, 0.5, 0.5]]
        self.mgo_uc = Structure(mgo_latt, mgo_specie, mgo_frac_cord, True, True)
        mgo_valrad_eval = ValenceIonicRadiusEvaluator(self.mgo_uc)
        val = mgo_valrad_eval.valences
        rad = mgo_valrad_eval.radii
        self.mgo_val = val
        self.mgo_rad = rad
        self.mgo_inter = Interstitial(self.mgo_uc, val, rad)
        # Relaxer for a Mg2+ ion placed at each interstitial site.
        self.isr = InterstitialStructureRelaxer(self.mgo_inter, 'Mg', 2)
    def test_relaxed_structure_match(self):
        for i in range(self.mgo_inter.defectsite_count()):
            for j in range(self.mgo_inter.defectsite_count()):
                match = self.isr.relaxed_structure_match(i, j)
                #print i, j, match
                # A relaxed structure must at least match itself.
                if i == j:
                    self.assertTrue(match)
    def test_relaxed_energy_match(self):
        for i in range(self.mgo_inter.defectsite_count()):
            for j in range(self.mgo_inter.defectsite_count()):
                match = self.isr.relaxed_energy_match(i, j)
                #print i, j, match
                # A relaxed energy must at least match itself.
                if i == j:
                    self.assertTrue(match)
    def test_get_relaxed_structure(self):
        for i in range(self.mgo_inter.defectsite_count()):
            relax_struct = self.isr.get_relaxed_structure(i)
            self.assertIsInstance(relax_struct, Structure)
    def test_get_relaxed_energy(self):
        for i in range(self.mgo_inter.defectsite_count()):
            energy = self.isr.get_relaxed_energy(i)
            self.assertIsInstance(energy, float)
    def test_get_relaxed_interstitial(self):
        ri = self.isr.get_relaxed_interstitial()
        self.assertIsInstance(ri, RelaxedInterstitial)
@unittest.skipIf(not (gulp_present and zeo), "gulp or zeo not present.")
class RelaxedInsterstitialTest(unittest.TestCase):
    """Properties derived from relaxed interstitial structures.

    NOTE(review): the class name misspells "Interstitial"; it is kept as-is
    because renaming would change test discovery/selection by name.
    """
    def setUp(self):
        mgo_latt = [[4.212, 0, 0], [0, 4.212, 0], [0, 0, 4.212]]
        mgo_specie = ["Mg"] * 4 + ["O"] * 4
        mgo_frac_cord = [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5],
                         [0.5, 0, 0], [0, 0.5, 0], [0, 0, 0.5], [0.5, 0.5, 0.5]]
        self.mgo_uc = Structure(mgo_latt, mgo_specie, mgo_frac_cord, True, True)
        mgo_valrad_eval = ValenceIonicRadiusEvaluator(self.mgo_uc)
        val = mgo_valrad_eval.valences
        rad = mgo_valrad_eval.radii
        self.mgo_val = val
        self.mgo_rad = rad
        self.mgo_inter = Interstitial(self.mgo_uc, val, rad)
        isr = InterstitialStructureRelaxer(self.mgo_inter, 'Mg', 2)
        self.ri = isr.get_relaxed_interstitial()
    def test_formation_energy(self):
        for i in range(self.mgo_inter.defectsite_count()):
            ife = self.ri.formation_energy(i)
            self.assertIsInstance(ife, float)
            print("ife", ife)
    def test_get_percentage_volume_change(self):
        for i in range(self.mgo_inter.defectsite_count()):
            del_vol = self.ri.get_percentage_volume_change(i)
            self.assertIsInstance(del_vol, float)
            print("del_vol", del_vol)
    def test_get_percentage_lattice_parameter_change(self):
        # Inserting an interstitial must perturb all three lattice params.
        for i in range(self.mgo_inter.defectsite_count()):
            del_lat = self.ri.get_percentage_lattice_parameter_change(i)
            self.assertNotEqual(del_lat['a'], 0)
            self.assertNotEqual(del_lat['b'], 0)
            self.assertNotEqual(del_lat['c'], 0)
            print("del_lat", del_lat)
    def test_get_percentage_bond_distance_change(self):
        # Smoke test: the assertions below are currently disabled.
        for i in range(self.mgo_inter.defectsite_count()):
            del_bd = self.ri.get_percentage_bond_distance_change(i)
            #self.assertIsInstance(del_bd, float)
            #print del_bd
if __name__ == "__main__":
    # Run the whole module; uncomment one of the loader lines below (and the
    # TextTestRunner line) to run a single test case instead.
    unittest.main()
    #suite = unittest.TestLoader().loadTestsFromTestCase(ValenceIonicRadiusEvaluatorTest)
    #suite = unittest.TestLoader().loadTestsFromTestCase(InterstitialTest)
    #suite = unittest.TestLoader().loadTestsFromTestCase(VacancyTest)
    #suite = unittest.TestLoader().loadTestsFromTestCase(VacancyFormationEnergyTest)
    #suite = unittest.TestLoader().loadTestsFromTestCase(InterstitialAnalyzerTest)
    #unittest.TextTestRunner(verbosity=3).run(suite)
| yanikou19/pymatgen | pymatgen/analysis/defects/tests/test_point_defects.py | Python | mit | 23,697 | [
"GULP",
"pymatgen"
] | 19239d1ef9162bca49b61c6324fb56d069340e5e2890cdcfd73209ff78b0ffbc |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, 2015, 2016 Adam.Dybbroe
# Author(s):
# Adam.Dybbroe <adam.dybbroe@smhi.se>
# Panu Lahtinen <panu.lahtinen@fmi.fi>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""PPS netcdf cloud product reader
"""
import os.path
from ConfigParser import ConfigParser
from ConfigParser import NoOptionError
from glob import glob
from datetime import datetime
import numpy as np
import h5py
from trollsift import Parser
import mpop.channel
from mpop import CONFIG_PATH
from mpop.plugin_base import Reader
import logging
LOG = logging.getLogger(__name__)
class InconsistentDataDimensions(Exception):
    """Raised when granule data arrays disagree on their dimensions."""
def unzip_file(filename):
    """Uncompress *filename* if it is bzipped (i.e. ends with 'bz2').

    Returns the path of a temporary file holding the uncompressed data, or
    None when the file is not bzipped or could not be read.  The caller is
    responsible for removing the returned temporary file.
    """
    import tempfile
    import bz2
    if not filename.endswith('bz2'):
        return None
    # SECURITY/ROBUSTNESS FIX: NamedTemporaryFile(delete=False) replaces the
    # race-prone, deprecated tempfile.mktemp(); the bz2 handle is also
    # closed explicitly now instead of being leaked.
    tmpfile = tempfile.NamedTemporaryFile(delete=False)
    tmpfilename = tmpfile.name
    try:
        bz2file = bz2.BZ2File(filename)
        try:
            tmpfile.write(bz2file.read())
        finally:
            bz2file.close()
            tmpfile.close()
    except IOError:
        import traceback
        traceback.print_exc()
        LOG.info("Failed to read bzipped file %s", str(filename))
        os.remove(tmpfilename)
        return None
    return tmpfilename
class GeolocationFlyweight(object):
    """Class decorator caching instances by their geolocation filenames.

    http://yloiseau.net/articles/DesignPatterns/flyweight/
    Assumed to wrap only the geolocation class, whose *second* positional
    argument is the list of filenames; that list (as a tuple) keys the cache.
    """

    def __init__(self, cls):
        self._cls = cls
        self._instances = {}

    def __call__(self, *args, **kargs):
        # args[1] is the filenames list; identical lists share one instance.
        key = tuple(args[1])
        return self._instances.setdefault(key, self._cls(*args, **kargs))

    def clear_cache(self):
        """Drop every cached instance."""
        self._instances = {}
#@GeolocationFlyweight
class PpsGeolocationData(object):
    """Holder assembling a PPS swath's geolocation from granule files."""

    def __init__(self, shape, granule_lengths, filenames):
        # shape: (total scanlines, pixels per line) of the assembled swath.
        # granule_lengths: scanlines contributed by each file, in order.
        self.filenames = filenames
        self.shape = shape
        self.granule_lengths = granule_lengths
        self.longitudes = None
        self.latitudes = None
        self.row_indices = None
        self.col_indices = None
        self.mask = None

    def read(self):
        """Read and concatenate longitudes/latitudes from the geo files.

        No-op when already read.  Returns self for chaining.
        """
        if self.longitudes is not None:
            return self
        self.longitudes = np.empty(self.shape, dtype=np.float32)
        self.latitudes = np.empty(self.shape, dtype=np.float32)
        # COMPATIBILITY FIX: use the builtin `bool` dtype instead of the
        # deprecated `np.bool` alias, which is removed in NumPy >= 1.24.
        self.mask = np.zeros(self.shape, dtype=bool)
        swath_index = 0
        for idx, filename in enumerate(self.filenames):
            # Each granule fills its own contiguous band of scanlines.
            y0_ = swath_index
            y1_ = swath_index + self.granule_lengths[idx]
            swath_index = y1_
            get_lonlat_into(filename,
                            self.longitudes[y0_:y1_, :],
                            self.latitudes[y0_:y1_, :],
                            self.mask[y0_:y1_, :])
        self.longitudes = np.ma.array(self.longitudes, mask=self.mask,
                                      copy=False)
        self.latitudes = np.ma.array(self.latitudes, mask=self.mask,
                                     copy=False)
        LOG.debug("Geolocation read in for %s", str(self))
        return self
class HDF5MetaData(object):
    """
    Small class for inspecting a HDF5 file and retrieving its
    metadata/header data.  Developed for JPSS/NPP data but generic enough
    to work on most other hdf5 files.
    """

    def __init__(self, filename):
        self.metadata = {}
        self.filename = filename
        if not os.path.exists(filename):
            raise IOError("File %s does not exist!" % filename)

    def read(self):
        """Read the metadata, transparently handling bzipped files."""
        filename = self.filename
        unzipped = unzip_file(filename)
        if unzipped:
            filename = unzipped
        with h5py.File(filename, 'r') as h5f:
            h5f.visititems(self.collect_metadata)
            self._collect_attrs('/', h5f.attrs)
        if unzipped:
            os.remove(unzipped)
        return self

    def _collect_attrs(self, name, attrs):
        """Store the attributes of HDF5 object *name* under '<name>/attr/<key>'."""
        for key in attrs.keys():
            # Accessing the DIMENSION_LIST value throws a TypeError with
            # some hdf5 versions (observed at FMI) - skip it.
            # Should preferably be handled elsewhere, not in this generic
            # class.  FIXME!
            if key in ['DIMENSION_LIST']:
                continue
            value = np.squeeze(attrs[key])
            if issubclass(value.dtype.type, str):
                self.metadata["%s/attr/%s" % (name, key)] = str(value)
            else:
                self.metadata["%s/attr/%s" % (name, key)] = value

    def collect_metadata(self, name, obj):
        """h5py visititems callback storing dataset shapes and attributes."""
        if isinstance(obj, h5py.Dataset):
            self.metadata["%s/shape" % name] = obj.shape
        self._collect_attrs(name, obj.attrs)

    def __getitem__(self, key):
        """Return the metadata value whose full path ends with *key*.

        Raises KeyError when *key* is ambiguous (matches several paths) or
        matches none at all.
        """
        long_key = None
        for mkey in self.metadata.keys():
            if mkey.endswith(key):
                # BUG FIX: the original broke out of the loop on the first
                # match, making this ambiguity check unreachable; scanning
                # all keys restores the intended "Multiple keys" error.
                if long_key is not None:
                    raise KeyError("Multiple keys called %s" % key)
                long_key = mkey
        return self.metadata[long_key]

    def keys(self):
        """Return metadata dictionary keys"""
        return self.metadata.keys()

    def get_data_keys(self):
        """Return the metadata paths of all datasets."""
        return [key.split("/shape")[0]
                for key in self.metadata.keys() if key.endswith("/shape")]

    def get_data_keys_and_shapes(self):
        """Return a dict mapping dataset paths to their array shapes."""
        data_keys = {}
        for key in self.metadata.keys():
            if key.endswith("/shape"):
                data_keys[key.split("/shape")[0]] = self.metadata[key]
        return data_keys
class PPSMetaData(HDF5MetaData):
    """HDF5 metadata holder specialised for NWCSAF/PPS product files."""

    def get_shape(self):
        """Return the (nx, ny) swath dimensions recorded in the metadata."""
        dims = {'nx': 0, 'ny': 0}
        for key in self.metadata:
            for axis in ('nx', 'ny'):
                if key.endswith(axis + '/shape'):
                    dims[axis] = self.metadata[key][0]
        return dims['nx'], dims['ny']

    def get_header_info(self):
        """Return platform name, orbit number and time slot as a dict."""
        info = {}
        for key in self.metadata:
            value = self.metadata[key]
            if key.endswith('platform'):
                info['platform_name'] = value
            elif key.endswith('orbit_number'):
                info['orbit'] = value
            elif key.endswith('time_coverage_start'):
                # Drop the two trailing characters (fraction marker + 'Z')
                # before parsing the timestamp.
                info['time_slot'] = datetime.strptime(value[:-2],
                                                      "%Y%m%dT%H%M%S")
        return info

    def get_dataset_attributes(self, var_name):
        """Return all attributes stored for dataset *var_name*."""
        attributes = {}
        for key in self.metadata:
            parts = key.split('/')
            if parts[0] == var_name and key.find('attr') > 0:
                attr_name = parts[-1]
                if attr_name == 'DIMENSION_LIST':
                    continue
                attributes[attr_name] = self.metadata[key]
        return attributes

    def get_root_attributes(self):
        """Return the attributes attached to the file root ('//attr/...')."""
        return dict((key.split('/')[-1], self.metadata[key])
                    for key in self.metadata if key.startswith('//attr'))
def get_filenames(scene, products, conf, time_interval, area_name):
    """Get the list of PPS product filenames within *time_interval*.

    Parameters
    ----------
    scene : satellite scene object (instrument name, orbit, time slot)
    products : iterable of PPS product names (e.g. 'CT', 'CMA')
    conf : ConfigParser with the '<instrument>-level3' section
    time_interval : (starttime, endtime) tuple; endtime may be None
    area_name : area name used to fill the filename pattern

    Returns the concatenated file list over all requested products.
    """
    filename = conf.get(scene.instrument_name + "-level3",
                        "cloud_product_filename",
                        raw=True,
                        vars=os.environ)
    directory = conf.get(scene.instrument_name + "-level3",
                         "cloud_product_dir",
                         vars=os.environ)
    pathname_tmpl = os.path.join(directory, filename)
    starttime, endtime = time_interval
    if not scene.orbit:
        orbit = ""
    else:
        orbit = scene.orbit
    flist_allproducts = []
    for product in products:
        values = {"area": area_name,
                  "satellite": scene.satname + scene.number,
                  "product": product}
        if endtime:
            # Okay, we need to check for more than one granule/swath!
            # First get all files with all times matching in directory:
            values["orbit"] = '?????'
            filename_tmpl = os.path.join(directory,
                                         globify_date(filename) % values)
        else:
            # BUGFIX: str(orbit).zfill(5) is never falsy (''.zfill(5) is
            # '00000'), so the original `or "*"` fallback was dead code and
            # an unknown orbit matched nothing. Glob for any orbit instead.
            values["orbit"] = str(orbit).zfill(5) if orbit else "*"
            filename_tmpl = scene.time_slot.strftime(
                pathname_tmpl) % values
        LOG.debug("File path = %s", str(filename_tmpl))
        file_list = glob(filename_tmpl)
        if len(file_list) == 0:
            LOG.warning("No %s product matching", str(product))
        elif len(file_list) > 1 and not endtime:
            # A single time slot must match exactly one file.
            LOG.warning("More than 1 file matching for %s: %s",
                        str(product), str(file_list))
            file_list = []
        elif len(file_list) > 1:
            file_list = extract_filenames_in_time_window(
                file_list, starttime, endtime)
        if len(file_list) == 0:
            LOG.warning("No files found matching time window for product %s",
                        product)
        flist_allproducts = flist_allproducts + file_list
    return flist_allproducts
def extract_filenames_in_time_window(file_list, starttime, endtime):
    """Extract the filenames with time inside the time interval specified.

    NB! Only tested for EARS-NWC granules. This does not support assembling
    several locally received full swaths."""
    # Filename conventions tried in order:
    # 1) New EARS-NWC, e.g.
    #    W_XX-EUMETSAT-Darmstadt,SING+LEV+SAT,NOAA19+CT_C_EUMS_20150819124700_\
    #    33643.nc.bz2
    # 2) Old EARS-NWC, e.g. ctth_20130910_205300_metopb.h5.bz2
    # 3) Local PPS naming.
    parsers = [Parser(EARS_PPS_FILE_MASK),
               Parser("{product:s}_{starttime:%Y%m%d_%H%M}00_{platform_name:s}.h5"
                      "{compression:s}"),
               Parser(LOCAL_PPS_FILE_MASK)]
    valid_filenames = []
    valid_times = []
    LOG.debug("Time window: (%s, %s)", str(starttime), str(endtime))
    for fname in file_list:
        basename = os.path.basename(fname)
        data = None
        for num, parser in enumerate(parsers):
            try:
                data = parser.parse(basename)
                break
            except ValueError:
                # Like the original nested try/except: the last parser's
                # ValueError propagates to the caller.
                if num == len(parsers) - 1:
                    raise
        if starttime <= data['starttime'] < endtime:
            valid_filenames.append(fname)
            valid_times.append(data['starttime'])
            LOG.debug("Start time %s inside window", str(data['starttime']))
    # Can we rely on the files being sorted according to time?
    # Sort the filenames according to time:
    order = np.argsort(np.array(valid_times))
    return np.take(np.array(valid_filenames), order).tolist()
class InfoObject(object):
    """Simple container pairing a data array with its info dictionary."""

    def __init__(self):
        # Attribute metadata describing ``data`` (as read from file).
        self.info = dict()
        self.data = None
class NwcSafPpsChannel(mpop.channel.GenericChannel):
    """Class for NWC-SAF PPS channel data.

    Wraps the data read by :class:`PPSProductData` as an mpop channel so it
    can be projected and saved by the mpop machinery.
    """

    def __init__(self):
        mpop.channel.GenericChannel.__init__(self)
        # Global (first-granule) metadata dictionary.
        self.mda = {}
        # Names of the full-swath 2D fields that can be re-projected.
        self._projectables = []
        self.shape = None
        self.granule_lengths = None
        self.filenames = None
        self.platform_name = None
        self.begin_time = None
        self.end_time = None
        self.orbit_begin = None
        self.orbit_end = None

    def read(self, pps_product):
        """Copy data and metadata from an already-read PPS v2014 product.

        *pps_product* is expected to be a :class:`PPSProductData` on which
        ``read()`` has been called.
        """
        LOG.debug("Read the PPS product data...")
        self._projectables = pps_product.projectables
        self.granule_lengths = pps_product.granule_lengths
        self.shape = pps_product.shape
        self.filenames = pps_product.filenames
        self.orbit_begin = pps_product.orbit_begin
        self.orbit_end = pps_product.orbit_end
        self.platform_name = pps_product.platform_name
        self.begin_time = pps_product.begin_time
        self.end_time = pps_product.end_time
        # Take the metadata of the first granule and store as global
        #self.mda = pps_product.metadata[0].metadata
        mda = pps_product.metadata[0]
        self.mda = mda.metadata
        self.mda.update(mda.get_root_attributes())
        # Expose each non-projectable metadata field as an InfoObject
        # attribute on the channel.
        for var_name in pps_product.mda.keys():
            setattr(self, var_name, InfoObject())
            # Fill the info dict...
            getattr(self, var_name).info = mda.get_dataset_attributes(var_name)
            try:
                getattr(self, var_name).data = self.mda[var_name]
            except KeyError:
                continue
        # Expose each projectable field as a masked-array InfoObject.
        for var_name in self._projectables:
            setattr(self, var_name, InfoObject())
            # Fill the info dict...
            getattr(self, var_name).info = mda.get_dataset_attributes(var_name)
            getattr(self, var_name).data = \
                np.ma.masked_array(pps_product.raw_data[var_name],
                                   mask=pps_product.mask[var_name],
                                   fill_value=pps_product.fill_value[var_name])
        return

    def project(self, coverage):
        """Return a shallow copy with all projectable fields re-projected
        onto ``coverage.out_area``."""
        LOG.debug("Projecting channel %s...", self.name)
        import copy
        res = copy.copy(self)
        # Project the data
        for var in self._projectables:
            LOG.info("Projecting %s", str(var))
            res.__dict__[var] = copy.copy(self.__dict__[var])
            res.__dict__[var].data = coverage.project_array(
                self.__dict__[var].data)
        res.name = self.name
        res.resolution = self.resolution
        res.filled = True
        res.area = coverage.out_area
        return res

    def is_loaded(self):
        """Tells if the channel contains loaded data.
        """
        return len(self._projectables) > 0

    def save(self, filename, old=True, **kwargs):
        """Save to the old (pre-v2014) PPS hdf5 format.

        Raises NotImplementedError when *old* is False; re-raises
        ImportError when the converter package is unavailable.
        """
        del kwargs
        if old:
            try:
                from nwcsaf_formats.ppsv2014_to_oldformat import write_product
                write_product(self, filename)
            except ImportError:
                LOG.error("Could not save to old format")
                raise
        else:
            raise NotImplementedError("Can't save to new pps format yet.")
class PPSProductData(object):
    """Placeholder for the PPS netCDF product data. Reads the
    netCDF files using h5py. One file for each product and granule/swath.

    ``read()`` first scans the per-granule metadata to size the assembled
    swath arrays, then reads the granules one by one into those arrays.
    """

    def __init__(self, filenames):
        self.filenames = filenames
        self.mda = {}               # non-projectable metadata fields
        self.projectables = []      # names of full-swath 2D datasets
        self._keys = []
        self._refs = {}
        self.shape = None           # (swath_length, swath_width)
        self.product_name = 'unknown'
        self.platform_name = None
        self.begin_time = None
        self.end_time = None
        self.orbit_begin = None
        self.orbit_end = None
        self.metadata = []          # one PPSMetaData instance per granule
        self.raw_data = {}          # field name -> assembled ndarray
        self.mask = {}              # field name -> assembled bool mask
        self.fill_value = {}        # field name -> fill value
        self.granule_lengths = []

    def read(self):
        """Read the PPS v2014 formatted data."""
        self._read_metadata()
        # Log the assembled array shape (all fields share it).
        for key in self.raw_data:
            LOG.debug("Shape of data: %s", str(self.raw_data[key].shape))
            break
        self._read_data()
        return self

    def _set_members(self, hdd):
        '''Set platform_name, time_slot and orbit class members.

        Called once per granule: the *begin* members keep the first value
        seen, the *end* members are overwritten by every granule.
        '''
        if not self.platform_name and 'platform_name' in hdd:
            self.platform_name = hdd['platform_name']
        if not self.begin_time and 'time_slot' in hdd:
            self.begin_time = hdd['time_slot']
        if 'time_slot' in hdd:
            self.end_time = hdd['time_slot']
        if not self.orbit_begin and 'orbit' in hdd:
            self.orbit_begin = int(hdd['orbit'])
        if 'orbit' in hdd:
            self.orbit_end = int(hdd['orbit'])

    def _read_metadata(self):
        """Read metadata from all the files and size the data arrays."""
        LOG.debug("Filenames: %s", str(self.filenames))
        swath_length = 0
        swath_width = None
        for fname in self.filenames:
            LOG.debug("Get and append metadata from file: %s", str(fname))
            mda = PPSMetaData(fname).read()
            # Set the product_name variable:
            try:
                self.product_name = mda['product_name']
            except KeyError:
                LOG.warning("No product_name in file!")
            width, granule_length = mda.get_shape()
            hdd = mda.get_header_info()
            self._set_members(hdd)
            self.metadata.append(mda)
            self.granule_lengths.append(granule_length)
            if swath_width:
                if swath_width != width:
                    # BUGFIX: the message is now actually %-formatted; the
                    # original passed the values as extra (never formatted)
                    # constructor arguments.
                    raise InconsistentDataDimensions(
                        'swath_width not the same between granules: %d %d'
                        % (swath_width, width))
            swath_width = width
            swath_length = swath_length + granule_length
        # Take the first granule, and find what data fields it contains
        # and assume all granules have those same data fields:
        mda = self.metadata[0]
        dks = mda.get_data_keys_and_shapes()
        geolocation_fields = ['lon', 'lat', 'lat_reduced', 'lon_reduced']
        coordinate_fields = ['nx', 'nx_reduced', 'ny', 'ny_reduced']
        for key in dks:
            if key in geolocation_fields + coordinate_fields:
                LOG.debug("Key = %s", str(key))
                continue
            shape = dks[key]
            # A leading singleton (time) dimension is squeezed away.
            if len(shape) == 3 and shape[0] == 1:
                shape = shape[1], shape[2]
            if shape == (self.granule_lengths[0], swath_width):
                # Full-swath 2D field: can be assembled and projected.
                self.projectables.append(key)
            else:
                self.mda.update({key: dks[key]})
        # initiate data arrays
        self.shape = swath_length, swath_width
        for field in self.projectables:
            dtype = mda[field + '/attr/valid_range'].dtype
            try:
                # If scaling/offset is non-trivial the scaled data becomes
                # floating point regardless of the stored dtype.
                if not (np.equal(1.0 + mda[field + '/attr/add_offset'], 1.0) and
                        np.equal(1.0 * mda[field + '/attr/scale_factor'], 1.0)):
                    dtype = np.float32
            except KeyError:
                pass
            self.raw_data[str(field)] = np.zeros(self.shape, dtype=dtype)
            self.mask[field] = np.zeros(self.shape, dtype=np.bool)

    def _read_data(self):
        """Loop over all granules and read one granule at a time and
        fill the data arrays"""
        LOG.debug("Read all %s product files...", self.product_name)
        swath_index = 0
        for idx, mda in enumerate(self.metadata):
            del mda
            filename = self.filenames[idx]
            unzipped = unzip_file(filename)
            if unzipped:
                filename = unzipped
            h5f = h5py.File(filename, 'r')
            variables = {}
            for key, item in h5f.items():
                if item.attrs.get("CLASS") != 'DIMENSION_SCALE':
                    variables[key] = item
            # processed variables
            processed = set()
            non_processed = set(variables.keys()) - processed
            fields = {}
            for var_name in non_processed:
                if var_name in ['lon', 'lat', 'lon_reduced', 'lat_reduced']:
                    continue
                var = variables[var_name]
                if ("standard_name" not in var.attrs.keys() and
                        "long_name" not in var.attrs.keys()):
                    LOG.warning("Data field %s is lacking both "
                                "standard_name and long_name",
                                var_name)
                    continue
                if var_name not in self.projectables:
                    # Keep non-projectable data on the granule metadata.
                    self.metadata[idx].metadata[var_name] = var[:]
                    continue
                data = var[:]
                if len(data.shape) == 3 and data.shape[0] == 1:
                    data = data[0]
                if 'valid_range' in var.attrs.keys():
                    data = np.ma.masked_outside(
                        data, *var.attrs['valid_range'])
                elif '_FillValue' in var.attrs.keys():
                    # BUGFIX: np.ma.masked_where takes (condition, array);
                    # the original passed (data, fill_value), masking the
                    # fill value itself rather than the data.
                    data = np.ma.masked_equal(data, var.attrs['_FillValue'])
                if "scale_factor" in var.attrs.keys() and \
                        "add_offset" in var.attrs.keys():
                    dataset = (data * var.attrs.get("scale_factor", 1)
                               + var.attrs.get("add_offset", 0))
                else:
                    dataset = data.copy()
                if '_FillValue' in var.attrs.keys():
                    dataset.fill_value = var.attrs['_FillValue'][0]
                fields[var_name] = dataset
                # BUGFIX: use .get() — only one of standard_name/long_name
                # is guaranteed present (checked above), and a plain index
                # raised KeyError for standard_name-only fields.
                LOG.debug("long_name: %s", str(var.attrs.get('long_name')))
                LOG.debug("Var = %s, shape = %s",
                          str(var_name), str(dataset.shape))
                processed |= set([var_name])
            non_processed = set(variables.keys()) - processed
            if len(non_processed) > 0:
                LOG.warning("Remaining non-processed variables: %s",
                            str(non_processed))
            h5f.close()
            if unzipped:
                os.remove(unzipped)
            # This granule fills rows [y0_, y1_) of the assembled swath.
            y0_ = swath_index
            y1_ = swath_index + self.granule_lengths[idx]
            swath_index = swath_index + self.granule_lengths[idx]
            for key in self.raw_data.keys():
                if key not in self.projectables:
                    continue
                try:
                    self.raw_data[key][y0_:y1_, :] = fields[key].data
                    self.mask[key][y0_:y1_, :] = fields[key].mask
                    self.fill_value[key] = fields[key].fill_value
                except ValueError:
                    LOG.exception('Mismatch in dimensions: y0_, y1_, '
                                  'fields[key].data.shape: %s %s %s',
                                  str(y0_), str(y1_),
                                  str(fields[key].data.shape))
                    raise
        return
# Default product whose files provide geolocation for locally produced data:
GEO_PRODUCT_NAME_DEFAULT = 'CMA'
# The set of PPS cloud products supported by this reader:
PPS_PRODUCTS = set(['CMA', 'CT', 'CTTH', 'PC', 'CPP'])
# Filename pattern of locally produced NWCSAF/PPS v2014 netCDF files.
# NOTE(review): both time fields parse into 'starttime' — presumably the two
# stamps are identical for a granule; confirm against real filenames.
LOCAL_PPS_FILE_MASK = ('S_NWC_{product:s}_{platform_name:s}_{orbit:5d}_' +
                       '{starttime:%Y%m%dT%H%M%S}{dummy:1d}Z_' +
                       '{starttime:%Y%m%dT%H%M%S}{dummy2:1d}Z.nc{compression:s}')
# Filename pattern of EARS-NWC (EUMETSAT retransmission) netCDF files:
EARS_PPS_FILE_MASK = ("W_XX-EUMETSAT-Darmstadt,SING+LEV+SAT,{platform_name:s}+"
                      "{product:s}_C_EUMS_{starttime:%Y%m%d%H%M}00_{orbit:05d}.nc"
                      "{compression:s}")
# Old EARS-NWC filenames:
# Ex.:
# ctth_20130910_205300_metopb.h5.bz2
EARS_OLD_PPS_FILE_MASK = ("{product:s}_{starttime:%Y%m%d_%H%M}00_" +
                          "{platform_name:s}.h5{compression:s}")
class PPSReader(Reader):
    """Reader class for NWCSAF/PPS v2014 netCDF files (local and EARS)."""
    pformat = "nc_pps_l2"

    def __init__(self, *args, **kwargs):
        Reader.__init__(self, *args, **kwargs)
        # Source of the data, 'local' or 'ears'
        self._source = None
        # Parser for getting info from the file names
        self._parser = None
        # Satellite config
        self._config = None
        # Location of geolocation files, required for 'local' products
        self._cloud_product_geodir = None
        # Name of the product having geolocation for 'local' products
        self._geolocation_product_name = None

    def _read_config(self, sat_name, instrument_name):
        '''Read config for the satellite (cached after the first call).'''
        if self._config:
            return
        self._config = ConfigParser()
        configfile = os.path.join(CONFIG_PATH, sat_name + ".cfg")
        LOG.debug("Read configfile %s", configfile)
        self._config.read(configfile)
        try:
            self._cloud_product_geodir = \
                self._config.get(instrument_name + "-level3",
                                 "cloud_product_geodir",
                                 raw=True,
                                 vars=os.environ)
        except NoOptionError:
            pass
        LOG.debug("cloud_product_geodir = %s", self._cloud_product_geodir)
        try:
            self._geolocation_product_name = \
                self._config.get(instrument_name + "-level3",
                                 "geolocation_product_name",
                                 raw=True,
                                 vars=os.environ)
        except NoOptionError:
            # Only warn for locally produced data; EARS files carry their
            # own geolocation so the default is irrelevant there.
            if self._source != 'ears':
                LOG.warning("No geolocation product name given in config, "
                            "using default: %s", GEO_PRODUCT_NAME_DEFAULT)
            self._geolocation_product_name = GEO_PRODUCT_NAME_DEFAULT

    def _determine_prod_and_geo_files(self, prodfilenames):
        """From the list of product files and the products to load determine the
        product files and the geolocation files that will be considered when
        reading the data

        Returns (prodfiles4product, geofiles4product), two dicts keyed by
        product name.
        """
        # geofiles4product is a dict listing all geo-locations files applicable
        # for each product.
        # prodfiles4product is a dict listing all product files for a given
        # product name
        prodfiles4product = {}
        geofiles4product = {}
        if prodfilenames:
            if not isinstance(prodfilenames, (list, set, tuple)):
                prodfilenames = [prodfilenames]
            for fname in prodfilenames:
                # Only standard NWCSAF/PPS and EARS-NWC naming accepted!
                # No support for old file names (< PPSv2014)
                if (os.path.basename(fname).startswith("S_NWC") or
                        os.path.basename(fname).startswith("W_XX-EUMETSAT")):
                    if not self._parser:
                        # The first recognised file decides source + parser.
                        if os.path.basename(fname).startswith("S_NWC"):
                            self._source = 'local'
                            self._parser = Parser(LOCAL_PPS_FILE_MASK)
                        else:
                            self._source = 'ears'
                            self._parser = Parser(EARS_PPS_FILE_MASK)
                else:
                    LOG.info("Unrecognized NWCSAF/PPS file: %s", fname)
                    continue
                parse_info = self._parser.parse(os.path.basename(fname))
                prodname = parse_info['product']
                if prodname not in prodfiles4product:
                    prodfiles4product[prodname] = []
                prodfiles4product[prodname].append(fname)
            # Assemble geolocation information
            if self._source == 'ears':
                # For EARS data, the files have geolocation in themselves
                # BUGFIX: iteritems() is Python-2 only and inconsistent with
                # the .keys() usage elsewhere in this class; items() behaves
                # the same on both Python versions.
                for prodname, fnames in prodfiles4product.items():
                    geofiles4product[prodname] = fnames
            else:
                # For locally processed data, use the geolocation from
                # the product defined in config
                if self._geolocation_product_name in prodfiles4product:
                    for prodname in prodfiles4product.keys():
                        geofiles4product[prodname] = \
                            prodfiles4product[self._geolocation_product_name]
                else:
                    # If the product files with geolocation are not used,
                    # assume that they are still available on the disk.
                    if self._cloud_product_geodir is None:
                        LOG.warning("Config option 'cloud_product_geodir' is not "
                                    "available! Assuming same directory as "
                                    "products.")
                    for prodname in prodfiles4product.keys():
                        geofiles4product[prodname] = []
                        for fname in prodfiles4product[prodname]:
                            directory = self._cloud_product_geodir or \
                                os.path.abspath(fname)
                            parse_info = \
                                self._parser.parse(os.path.basename(fname))
                            # Derive the geolocation filename by swapping the
                            # product name in the product filename.
                            fname = fname.replace(parse_info['product'],
                                                  self._geolocation_product_name)
                            fname = os.path.join(directory, fname)
                            geofiles4product[prodname].append(fname)
        return prodfiles4product, geofiles4product

    def load(self, satscene, **kwargs):
        """Read data from file and load it into *satscene*.
        """
        prodfilenames = kwargs.get('filename')
        time_interval = kwargs.get('time_interval')
        if prodfilenames and time_interval:
            LOG.warning("You have specified both a list of files " +
                        "and a time interval")
            LOG.warning("Specifying a time interval will only take effect " +
                        "if no files are specified")
            time_interval = None
        products = satscene.channels_to_load & set(PPS_PRODUCTS)
        if len(products) == 0:
            LOG.debug("No PPS cloud products to load, abort")
            return
        self._read_config(satscene.fullname, satscene.instrument_name)
        LOG.info("Products to load: %s", str(products))
        # If a list of files are provided to the load call, we disregard the
        # directory and filename specifications/definitions in the config file.
        if not prodfilenames:
            try:
                area_name = satscene.area_id or satscene.area.area_id
            except AttributeError:
                area_name = "satproj_?????_?????"
            # Make the list of files for the requested products:
            if isinstance(time_interval, (tuple, set, list)) and \
                    len(time_interval) == 2:
                time_start, time_end = time_interval
            else:
                time_start, time_end = satscene.time_slot, None
            LOG.debug(
                "Start and end times: %s %s", str(time_start), str(time_end))
            prodfilenames = get_filenames(satscene, products, self._config,
                                          (time_start, time_end), area_name)
        LOG.debug("Product files: %s", str(prodfilenames))
        retv = self._determine_prod_and_geo_files(prodfilenames)
        prodfiles4product, geofiles4product = retv
        # Reading the products
        classes = {"CTTH": CloudTopTemperatureHeight,
                   "CT": CloudType,
                   "CMA": CloudMask,
                   "PC": PrecipitationClouds,
                   "CPP": CloudPhysicalProperties
                   }
        read_external_geo = {}
        for product in products:
            LOG.debug("Loading %s", product)
            if product not in prodfiles4product:
                LOG.warning("No files found for product: %s", product)
                continue
            pps_band = PPSProductData(prodfiles4product[product]).read()
            chn = classes[product]()
            chn.read(pps_band)
            if chn.name not in satscene:
                LOG.info("Adding new channel %s", chn.name)
                satscene.channels.append(chn)
            # Check if geolocation is loaded:
            if not chn.area:
                read_external_geo[product] = satscene.channels[-1].name
        # Check if some 'channel'/product needs geolocation. If some
        # product does not have geolocation, get it from the
        # geofilename:
        from pyresample import geometry
        # Load geolocation
        for chn_name in read_external_geo.values():
            LOG.debug("ch_name = %s", str(chn_name))
            chn = satscene[chn_name]
            geofilenames = geofiles4product[chn_name]
            LOG.debug("Geo-files = %s", str(geofilenames))
            geoloc = PpsGeolocationData(chn.shape,
                                        chn.granule_lengths,
                                        geofilenames).read()
            try:
                satscene[chn.name].area = geometry.SwathDefinition(
                    lons=geoloc.longitudes, lats=geoloc.latitudes)
                area_name = ("swath_" + satscene.fullname + "_" +
                             str(satscene.time_slot) + "_"
                             + str(chn.shape) + "_" +
                             chn.name)
                satscene[chn.name].area.area_id = area_name
                satscene[chn.name].area_id = area_name
            except ValueError:
                # BUGFIX: removed a stray '"' from the log format string.
                LOG.exception('Failed making a SwathDefinition: ' +
                              'min,max lons,lats = (%f,%f) (%f,%f)',
                              geoloc.longitudes.data.min(),
                              geoloc.longitudes.data.max(),
                              geoloc.latitudes.data.min(),
                              geoloc.latitudes.data.max())
                LOG.warning("No geolocation loaded for %s", str(chn_name))
        # PpsGeolocationData.clear_cache()
        return
class CloudType(NwcSafPpsChannel):
    """PPS cloud type (CT) channel."""

    def __init__(self):
        super(CloudType, self).__init__()
        self.name = "CT"
class CloudTopTemperatureHeight(NwcSafPpsChannel):
    """PPS cloud top temperature and height (CTTH) channel."""

    def __init__(self):
        super(CloudTopTemperatureHeight, self).__init__()
        self.name = "CTTH"
class CloudMask(NwcSafPpsChannel):
    """PPS cloud mask (CMA) channel."""

    def __init__(self):
        super(CloudMask, self).__init__()
        self.name = "CMA"
class PrecipitationClouds(NwcSafPpsChannel):
    """PPS precipitating clouds (PC) channel."""

    def __init__(self):
        super(PrecipitationClouds, self).__init__()
        self.name = "PC"
class CloudPhysicalProperties(NwcSafPpsChannel):
    """PPS cloud physical properties (CPP) channel."""

    def __init__(self):
        super(CloudPhysicalProperties, self).__init__()
        self.name = "CPP"
def get_lonlat_into(filename, out_lons, out_lats, out_mask):
    """Read lon,lat from hdf5 file into the pre-allocated output arrays.

    Parameters
    ----------
    filename : geolocation file (possibly compressed)
    out_lons, out_lats : 2D float arrays, filled in place
    out_mask : bool array, set in place (True where data is invalid)
    """
    LOG.debug("Geo File = %s", filename)
    shape = out_lons.shape
    unzipped = unzip_file(filename)
    if unzipped:
        filename = unzipped
    mda = HDF5MetaData(filename).read()
    reduced_grid = False
    h5f = h5py.File(filename, 'r')
    # Tie-point indices for files holding a reduced geolocation grid.
    if "column_indices" in h5f.keys():
        col_indices = h5f["column_indices"][:]
    if "row_indices" in h5f.keys():
        row_indices = h5f["row_indices"][:]
    if "nx_reduced" in h5f:
        col_indices = h5f["nx_reduced"][:]
    if "ny_reduced" in h5f:
        row_indices = h5f["ny_reduced"][:]
    for key in mda.get_data_keys():
        if ((key.endswith("lat") or key.endswith("lon")) or
                (key.endswith("lat_reduced") or key.endswith("lon_reduced"))):
            lonlat = h5f[key]
            fillvalue = lonlat.attrs["_FillValue"]
        else:
            continue
        if key.endswith("lat"):
            lonlat.read_direct(out_lats)
        elif key.endswith("lon"):
            lonlat.read_direct(out_lons)
        elif key.endswith("lat_reduced"):
            lat_reduced = lonlat[:]
            reduced_grid = True
        elif key.endswith("lon_reduced"):
            lon_reduced = lonlat[:]
    if reduced_grid:
        # Interpolate the reduced tie-point grid up to the full swath.
        from geotiepoints import SatelliteInterpolator
        cols_full = np.arange(shape[1])
        rows_full = np.arange(shape[0])
        satint = SatelliteInterpolator((lon_reduced, lat_reduced),
                                       (row_indices,
                                        col_indices),
                                       (rows_full, cols_full))
        out_lons[:], out_lats[:] = satint.interpolate()
    new_mask = False
    # FIXME: this is to mask out the npp bowtie deleted pixels...
    if "NPP" in h5f.attrs['platform']:
        if shape[1] == 3200:  # M-bands:
            new_mask = np.zeros((16, 3200), dtype=bool)
            new_mask[0, :1008] = True
            new_mask[1, :640] = True
            new_mask[14, :640] = True
            new_mask[15, :1008] = True
            new_mask[14, 2560:] = True
            new_mask[1, 2560:] = True
            new_mask[0, 2192:] = True
            new_mask[15, 2192:] = True
            # BUGFIX: floor division — plain '/' yields a float under
            # Python 3 and np.tile requires integer repetitions ('//' is
            # identical for Python-2 integer maths).
            new_mask = np.tile(new_mask, (out_lons.shape[0] // 16, 1))
        elif shape[1] == 6400:  # I-bands:
            LOG.info(
                "PPS on I-band resolution. Mask out bow-tie deletion pixels")
            LOG.warning("Not yet supported...")
            new_mask = np.zeros((32, 6400), dtype=bool)
            new_mask[0:2, :2016] = True
            new_mask[0:2, 4384:] = True
            new_mask[2:4, :1280] = True
            new_mask[2:4, 5120:] = True
            new_mask[28:30, :1280] = True
            new_mask[28:30, 5120:] = True
            new_mask[30:32, :2016] = True
            new_mask[30:32, 4384:] = True
            new_mask = np.tile(new_mask, (out_lons.shape[0] // 32, 1))
        else:
            # BUGFIX: the shape is now part of the format string; previously
            # it was passed as an extra lazy argument with no %s placeholder
            # and was never shown.
            LOG.error("VIIRS shape not supported. "
                      "No handling of bow-tie deletion pixels: shape = %s",
                      str(shape))
    # NOTE(review): fillvalue is unbound if the file contains no lon/lat
    # datasets — presumably such files never occur in practice; confirm.
    out_mask[:] = np.logical_or(
        new_mask, np.logical_and(out_lats == fillvalue, out_lons == fillvalue))
    h5f.close()
    if unzipped:
        os.remove(unzipped)
def globify_date(filename):
    """Replace strftime date/time directives with glob '?' wildcards."""
    replacements = (("%Y", "????"), ("%m", "??"), ("%d", "??"),
                    ("%H", "??"), ("%M", "??"), ("%S", "??"))
    for directive, wildcard in replacements:
        filename = filename.replace(directive, wildcard)
    return filename
| mraspaud/mpop | mpop/satin/nc_pps_l2.py | Python | gpl-3.0 | 41,179 | [
"Bowtie",
"NetCDF"
] | f4075d03c36598fe0762ca2082bd8edeb26fb92bf0fe71ab30e59a461a1f4d5f |
import json
import subprocess
import os
import logging
import numpy as np
import pandas as pd
import requests
import time
logger = logging.getLogger(__name__)
# Cached server handle used by get_host(); populated lazily on first use.
_host = None #: PlotHost
# Fallback server URL when no running jupyter server is detected.
DEFAULT_SERVER = 'http://localhost:2908'
def plotar(data, col=None, size=None, *, xyz=None, type='p', lines=None, label=None,
           axis_names=None, col_labels=None,
           name=None, description=None, speed=None, auto_scale=True,
           digits=5, host=None, return_data=False, push_data=True):
    """Send a 3D point dataset to the PlotAR server.

    Parameters
    ----------
    data : array-like or pandas.DataFrame
        Point coordinates; the first three columns (or the *xyz* columns
        of a DataFrame) are used as x, y, z.
    col, size, lines, label :
        Per-point attributes: a DataFrame column name, a float (constant),
        or a sequence of length ``len(data)``.
    xyz : sequence of 3 column names, optional (DataFrame input only)
    auto_scale : bool
        Rescale coordinates (and sizes) to [-1, 1] before sending.
    push_data / return_data : bool
        POST the payload to the server and/or return it as a dict.
    """
    # TODO assert compatibility checks
    n = data.shape[0]
    df = None
    if isinstance(data, pd.DataFrame):
        df = data
        if xyz is not None:
            assert len(xyz) == 3
            data = df[xyz].values
            axis_names = axis_names or xyz
        else:
            data = df.iloc[:, 0:3].values
            # BUGFIX: removed a stray `val = locals().get(i)` line (NameError,
            # `i` is undefined at this point) and derive default axis names
            # from the first three columns instead of indexing df.columns
            # with None.
            axis_names = axis_names or df.columns[:3].tolist()

    def _mk_val(df, val):
        # Resolve a per-point attribute: column name, scalar or sequence.
        if val is None:
            return None
        elif val is not None and isinstance(val, str) and val in df.columns:
            return df[val].values
        elif isinstance(val, float):
            return np.zeros((n,)) + val
        else:
            return np.array(val)
    col = _mk_val(df, col)
    size = _mk_val(df, size)
    lines = _mk_val(df, lines)
    label = _mk_val(df, label)
    for i in [col, size, lines, label]:
        assert i is None or i.shape == (n,), f"Parameters need to have same length: {i} has shape {i.shape} but would need {(n,)}"
    if auto_scale:
        # have all variables scaled to [-1,1]
        data = scale(data)
        if size is not None:
            # map sizes into [0.5, 2.5] (scale() yields [-1, 1], then +1.5)
            size = scale(size.reshape((-1, 1)))[:, 0] + 1.5
    if col is not None and col.dtype == np.dtype('O'):
        # Categorical colours: send integer codes plus the category labels.
        x = pd.Series(col, dtype='category')
        col = x.cat.codes.values
        col_labels = x.cat.categories.values.tolist()
    if col is None:
        payload = data[:, :3]
    else:
        payload = np.hstack((data[:, :3], col.reshape((-1, 1))))
    # todo: remove NAs, center and scale...
    body = {'data': payload.tolist(), 'speed': 0, 'protocolVersion': '0.3.0'}
    if col is not None: body['col'] = col.tolist()
    if size is not None: body['size'] = size.tolist()
    if type is not None: body['type'] = type
    if label is not None: body['label'] = label.tolist()
    if speed is not None: body['speed'] = speed
    if axis_names is not None: body['axis_names'] = axis_names
    if col_labels is not None: body['col_labels'] = col_labels
    metadata = {'n': n, 'created': time.ctime()}
    metadata['name'] = name or "Dataset"
    if description is not None: metadata['description'] = description
    body['metadata'] = metadata
    if push_data:
        plot_host = get_host(host)
        plot_host.post(json=body)
    if return_data:
        return body
def linear(*args, group=None, width=1, push_data=True, return_data=False, **kwargs):
    """Plot the dataset as connected lines, one polyline per (col, group)."""
    body = plotar(*args, **kwargs, push_data=False, return_data=True)
    points = body.get('data', [])
    colors = body.get('col', [0] * len(points))
    grouping = group or colors
    frame = pd.DataFrame(dict(col=colors, group=grouping))
    polylines = []
    for (color, _), members in frame.groupby(['col', 'group']):
        polylines.append(dict(col=int(color), width=width,
                              points=members.index.to_list()))
    body['lines'] = polylines
    if push_data:
        get_host(kwargs.get('host')).post(json=body)
    if return_data:
        return body
def surfacevr(data, col=None, x=None, y=None,
              name=None, description=None, speed=None, auto_scale=True,
              digits=5, host=None, return_data=False, push_data=True):
    """Send a 2D height-field (surface) dataset to the PlotAR server.

    Parameters
    ----------
    data : 2D array of heights, shape (n, m)
    col : optional per-cell colour values, same shape as *data*
    x, y : optional coordinate vectors for the two axes
    auto_scale : rescale heights (and x/y) to [-1, 1], keeping 0 fixed
        when the data spans zero.
    """
    # BUGFIX: removed `global _host; _host = host` — it clobbered the cached
    # PlotHost with the raw host string (or None), breaking later get_host()
    # calls; get_host(host) below already handles an explicit host.
    # TODO assert compatibility checks
    n, m = data.shape
    for i in [col]:
        assert i is None or i.shape == data.shape, f"Parameters need to have same shape: {i} has shape {i.shape} but would need {data.shape}"
    if auto_scale:
        # have the data scaled to [-1,1]
        a, b = data.min(), data.max()
        if a <= 0 <= b:
            # keep the 0 at 0 and scale around that
            mx = max(-a, b)
            mx = mx or 1  # set to 1 if 0
            data = data / mx
        else:
            data = scale(data, axis=(0, 1))
        x = scale(x)
        y = scale(y)
    # TODO: remove NAs
    # NOTE(review): col is stored as-is — presumably callers pass a
    # JSON-serializable value (not an ndarray); confirm.
    body = {'surface': {'data': data.tolist(), 'col': col, 'shape': (n, m)}, 'speed': 0, 'protocolVersion': '0.3.0'}
    if x is not None:
        body['surface']['x'] = np.array(x).tolist()
    if y is not None:
        body['surface']['y'] = np.array(y).tolist()
    if speed is not None: body['speed'] = speed
    metadata = {'n': n, 'm': m, 'created': time.ctime()}
    metadata['name'] = name or f"Dataset {n}x{m}"
    if description is not None: metadata['description'] = description
    body['metadata'] = metadata
    if push_data:
        plot_host = get_host(host)
        plot_host.post(json=body)
    if return_data:
        return body
def scale(data, axis=(0,)):
    """Rescale *data* linearly to [-1, 1] along *axis*.

    Constant slices (zero range) map to -1; ``None`` and empty arrays are
    passed through untouched.
    """
    if data is None:
        return None
    if min(data.shape) == 0:
        return data
    lo = data.min(axis)
    span = np.array(data.max(axis) - lo)
    span[span == 0] = 1
    return (data - lo) / span * 2 - 1
def controller(width="100%", height="200px"):
    """Return an IFrame with the keyboard controller (in Jupyter) or its URL."""
    url = get_host().external_url("keyboard.html")
    if not is_in_jupyter():
        return url
    try:
        from IPython.display import IFrame
    except ImportError:
        return url
    return IFrame(url, width=width, height=height)
def viewer(width="100%", height="400px"):
    """Return an IFrame with the PlotAR viewer, or its URL without IPython."""
    url = get_host().external_url("index.html")
    try:
        from IPython.display import IFrame
    except ImportError:
        return url
    return IFrame(url, width=width, height=height)
def get_host(host=None):
    """Return the PlotHost to talk to.

    An explicit *host* always wins (and is not cached); otherwise the module
    cache is filled once, preferring a detected jupyter server proxy and
    falling back to DEFAULT_SERVER.
    """
    global _host
    if host is not None:
        return PlotHost(host)
    if _host is not None:
        return _host
    jpy = my_jupyter_server()
    if jpy is None:
        _host = PlotHost(DEFAULT_SERVER)
    else:
        base = jpy['url'] + "plotar/"
        hub_prefix = os.getenv("JUPYTERHUB_SERVICE_PREFIX")
        # On jupyter-/binderhub the external hostname is unknown, so an
        # absolute path under the hub prefix is used instead.
        external = base if hub_prefix is None else hub_prefix + "plotar/"
        _host = PlotHost(base, external_url=external,
                         params=jpy['params'], headers=jpy['headers'])
    return _host
class PlotHost:
    """Connection to a PlotAR server.

    Parameters
    ----------
    url : str
        Base URL used for requests from this process; a trailing '/' is
        appended if missing.
    external_url : str, optional
        Base URL reachable from the browser/headset; defaults to *url*.
    params : str
        Query string (without the leading '?') appended to external URLs.
    headers : dict, optional
        Extra HTTP headers (e.g. a jupyter auth token) sent with each POST.
    """

    def __init__(self, url: str, external_url: str = None, params='', headers=None):
        # BUGFIX: check the type first so a non-string url raises the
        # intended ValueError instead of TypeError from len(); validate
        # before assigning anything.
        if not isinstance(url, str) or not url:
            raise ValueError("URL must be not None and a non-empty string.")
        self.url = url
        if self.url[-1] != '/':
            self.url += '/'
        if external_url is None:
            external_url = self.url
        self._external_url = external_url
        self.params = "?" + params
        # BUGFIX: avoid the shared mutable default argument `headers={}`.
        self.headers = {} if headers is None else headers

    def internal_url(self, path):
        '''Return the URL used for requests from this process (no params).'''
        return self.url + path  # + self.params

    def external_url(self, path):
        '''Return the URL (incl. auth params) for a browser or device.'''
        return self._external_url + path + self.params

    def post(self, json):
        """POST the payload to the server, raising on HTTP errors."""
        response = requests.post(self.internal_url(""), json=json, headers=self.headers)
        response.raise_for_status()

    def __repr__(self):
        return f"PlotHost({self.url})"

    def _repr_html_(self):
        return f"PlotAR at <a href='{self.url}'>{self.url}</a>"
def my_jupyter_server(verbose=False, jupyter_parent_pid=None):
    """Locate the running Jupyter server this process belongs to.

    Tries both the new `jupyter_server` and the legacy `notebook` packages.
    Returns a dict with `url`, `params` (token query string) and `headers`
    (token Authorization header), or None when no server is found.
    The owning server is matched by pid via JPY_PARENT_PID (or the
    `jupyter_parent_pid` argument); without a match the first server wins.
    """
    servers = []
    imported_notebookapp = imported_serverapp = False
    try:
        from jupyter_server import serverapp
        servers += serverapp.list_running_servers()
        imported_serverapp = True
    except ImportError:
        pass
    try:
        # legacy notebook (<7) API
        from notebook import notebookapp
        imported_notebookapp = True
        servers += notebookapp.list_running_servers()
    except ImportError:
        pass
    if not len(servers):
        if verbose:
            import warnings
            warnings.warn(f"no running jupyter server found - imported jupyter_server: {imported_serverapp} notebook: {imported_notebookapp}")
        return None
    server_pid = os.getenv('JPY_PARENT_PID', jupyter_parent_pid)
    if server_pid is None:
        if len(servers) > 1:
            # NOTE(review): dead branch — presumably a warning about the
            # ambiguity was intended here; currently silently picks the first.
            pass
        jpy = servers[0]
    else:
        for s in servers:
            if str(s['pid']) == server_pid:
                jpy = s
                break
        else:
            # no matching pid found...
            if verbose:
                print('no matching jupyter server found!')
            jpy = servers[0]
    # defensive: jpy is always assigned above since servers is non-empty
    if jpy is None:
        return None
    return dict(url=jpy['url'],
                params="token="+jpy['token'],
                headers={'Authorization': 'token ' + jpy['token']},
                )
def start_server_process(port: int = 2908, showServerURL=True):
    """Start Server in another process.

    Parameters
    ----------
    port
        The port on which to run the server (default is 2908).
    showServerURL
        When True, print/display the URL (plus a QR code if `pyqrcode`
        and IPython are available) under which the server is reachable.

    Returns
    -------
    The `subprocess.Popen` handle of the started server process.
    """
    import sys
    python = sys.executable
    # or os.__file__.split("lib/")[0],"bin","python") ?
    proc = subprocess.Popen([python, '-m', 'plotar.server', str(port)])
    if showServerURL:
        # BUGFIX(review): the old code concatenated the module global `_host`
        # (a PlotHost object, or None before first use) with a string, which
        # raised TypeError.  Resolve the host properly via get_host().
        host = get_host()
        url = host.internal_url('index.html')
        try:
            # the server publishes its externally visible URL under qr.json
            response = requests.get(host.internal_url("qr.json"))
            response.raise_for_status()
            url = response.json()['url']
        except Exception as ex:
            # best effort only: fall back to the internal URL
            print("Problem getting external IP: ", ex)
        try:
            from IPython.display import display, SVG, HTML
            import pyqrcode
            from io import BytesIO
            io = BytesIO()
            pyqrcode.QRCode(url).svg(io, scale=4)
            img = io.getvalue().decode('utf-8')
            display(HTML(f'Visit: <a href="{url}">{url}</a>'))
            display(SVG(img))
        except ImportError:
            print(f"Visit: {url}")
    return proc
def is_in_jupyter() -> bool:
    """Return True when running inside a Jupyter notebook/qtconsole kernel.

    Detection is by IPython shell class name; a missing IPython or a plain
    interpreter yields False.
    """
    # https://stackoverflow.com/a/39662359/6400719
    try:
        from IPython import get_ipython
        shell = get_ipython().__class__.__name__
    # Targeted handling instead of a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit.  AttributeError is included defensively.
    except (ImportError, AttributeError):
        return False  # Probably standard Python interpreter
    # ZMQInteractiveShell => Jupyter notebook or qtconsole
    return shell == "ZMQInteractiveShell"
| thomann/plotVR | plotAR-py/plotar/client.py | Python | agpl-3.0 | 10,543 | [
"VisIt"
] | 0b2bb40b14ad5fdb69e7da042883254f2ba9cc288082ab6cb5d5ab23e72eb8b2 |
import pytest
import torch
from mmdet.core.post_processing import mask_matrix_nms
def _create_mask(N, h, w):
masks = torch.rand((N, h, w)) > 0.5
labels = torch.rand(N)
scores = torch.rand(N)
return masks, labels, scores
def test_nms_input_errors():
    """Exercise mask_matrix_nms error paths and nms_pre/max_num/update_thr behaviour."""
    # mismatched number of masks vs. labels/scores
    with pytest.raises(AssertionError):
        mask_matrix_nms(
            torch.rand((10, 28, 28)), torch.rand(11), torch.rand(11))
    # mask_area length does not match the number of masks
    with pytest.raises(AssertionError):
        masks = torch.rand((10, 28, 28))
        mask_matrix_nms(
            masks,
            torch.rand(11),
            torch.rand(11),
            mask_area=masks.sum((1, 2)).float()[:8])
    # unknown decay kernel
    with pytest.raises(NotImplementedError):
        mask_matrix_nms(
            torch.rand((10, 28, 28)),
            torch.rand(10),
            torch.rand(10),
            kernel='None')
    # empty input yields empty output
    masks, labels, scores = _create_mask(0, 28, 28)
    score, label, mask, keep_ind = mask_matrix_nms(masks, labels, scores)
    assert len(score) == len(label) == len(mask) == len(keep_ind) == 0
    # without update_thr, nms_pre and max_num everything is kept
    masks, labels, scores = _create_mask(1000, 28, 28)
    score, label, mask, keep_ind = mask_matrix_nms(masks, labels, scores)
    assert len(score) == len(label) == len(mask) == len(keep_ind) == 1000
    # nms_pre caps the number of candidates
    score, label, mask, keep_ind = mask_matrix_nms(
        masks, labels, scores, nms_pre=500)
    assert len(score) == len(label) == len(mask) == len(keep_ind) == 500
    # max_num caps the number of returned results
    score, label, mask, keep_ind = mask_matrix_nms(
        masks, labels, scores, nms_pre=500, max_num=100)
    assert len(score) == len(label) == len(mask) == len(keep_ind) == 100
    # 1000 copies of the same mask/label, all with score 1: the first keeps
    # its score, the rest are decayed below filter_thr and dropped.
    masks, labels, _ = _create_mask(1, 28, 28)
    scores = torch.Tensor([1.0])
    masks = masks.expand(1000, 28, 28)
    labels = labels.expand(1000)
    scores = scores.expand(1000)
    score, label, mask, keep_ind = mask_matrix_nms(
        masks,
        labels,
        scores,
        nms_pre=500,
        max_num=100,
        kernel='gaussian',
        sigma=2.0,
        filter_thr=0.5)
    assert len(score) == 1
    assert score[0] == 1
| open-mmlab/mmdetection | tests/test_utils/test_nms.py | Python | apache-2.0 | 2,528 | [
"Gaussian"
] | 5ac537f7b227034e9f9e8dbe42a562cc5bd99f936781998e9a4d732d8cd25a45 |
import numpy as np
from ase.units import Ha
from ase.dft.kpoints import monkhorst_pack
from ase.parallel import paropen
from ase.lattice import bulk
from gpaw import GPAW, FermiDirac
from gpaw.wavefunctions.pw import PW
from gpaw.mpi import size
# Shifted 10x10x10 Monkhorst-Pack k-point grid
kpts = monkhorst_pack((10, 10, 10))
kpts += np.array([1 / 20., 1 / 20., 1 / 20.])

# BUGFIX(review): the Atoms object previously shadowed the imported `bulk`
# factory function; renamed to `atoms`.  The always-true `if 1:` guards
# were removed.
atoms = bulk('Na', 'bcc', a=4.23)
tag = 'Nabulk'

# Step 1: ground-state calculation with only a few (occupied) bands
ecut = 350
calc = GPAW(mode=PW(ecut), dtype=complex, basis='dzp', kpts=kpts, xc='PBE',
            eigensolver='rmm-diis',
            parallel={'band': size}, txt='gs_occ_%s.txt' % (tag), nbands=4,
            occupations=FermiDirac(0.01), setups={'Na': '1'},
            )
atoms.set_calculator(calc)
atoms.get_potential_energy()
calc.write('gs_occ_%s.gpw' % (tag))

# Step 2: reload and diagonalize the full Hamiltonian for many empty bands
calc = GPAW('gs_occ_%s.gpw' % (tag), txt='gs_%s.txt' % (tag),
            parallel={'band': 1, 'domain': 1})
calc.diagonalize_full_hamiltonian(nbands=520)
calc.write('gs_%s.gpw' % (tag), 'all')
| robwarm/gpaw-symm | gpaw/test/big/rpa/Na_bulk_gs.py | Python | gpl-3.0 | 972 | [
"ASE",
"GPAW"
] | 10a688f025265c4fa7f09500c02cee8aca348a1467280ac8492dfc21c2fe1300 |
"""HTTPS module based on the GFAL2_StorageBase class."""
# from DIRAC
from DIRAC.Resources.Storage.GFAL2_StorageBase import GFAL2_StorageBase
from DIRAC import gLogger
class GFAL2_HTTPSStorage(GFAL2_StorageBase):
    """.. class:: GFAL2_HTTPSStorage

    HTTP(S)/WebDAV interface to a StorageElement, implemented on top of
    the gfal2-based GFAL2_StorageBase.
    """

    # Accepted protocols; davs is https with direct access + third-party copy.
    _INPUT_PROTOCOLS = ["file", "http", "https", "dav", "davs"]
    _OUTPUT_PROTOCOLS = ["http", "https", "dav", "davs"]

    def __init__(self, storageName, parameters):
        """Delegate construction to the base class, then apply HTTPS specifics."""
        super(GFAL2_HTTPSStorage, self).__init__(storageName, parameters)
        self.pluginName = "GFAL2_HTTPS"
        self.log = gLogger.getSubLogger("GFAL2_HTTPSStorage")
        # HTTP endpoints need no SRM-specific URL parsing.
        self.srmSpecificParse = False
        # Metadata queries work without gfal2 extended attributes here.
        self._defaultExtendedAttributes = None
| DIRACGrid/DIRAC | src/DIRAC/Resources/Storage/GFAL2_HTTPSStorage.py | Python | gpl-3.0 | 909 | [
"DIRAC"
] | 94f0575bbda4cbcced87f01ee5dd9df4c2b13ffd20a7edde482bdf1afe31367e |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
from collections.abc import Iterable
import json
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from requests import Response
from requests import Request, PreparedRequest
from requests.sessions import Session
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.compute_v1.services.region_ssl_certificates import (
RegionSslCertificatesClient,
)
from google.cloud.compute_v1.services.region_ssl_certificates import pagers
from google.cloud.compute_v1.services.region_ssl_certificates import transports
from google.cloud.compute_v1.types import compute
from google.oauth2 import service_account
import google.auth
def client_cert_source_callback():
    """Dummy mTLS client-certificate callback returning static cert/key bytes."""
    cert, key = b"cert bytes", b"key bytes"
    return cert, key
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a distinct fake endpoint when the client's default endpoint is localhost."""
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps googleapis hosts to their mtls variants."""
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"
    convert = RegionSslCertificatesClient._get_default_mtls_endpoint
    # None passes through untouched
    assert convert(None) is None
    # plain endpoints are converted; mtls endpoints are idempotent
    assert convert(api_endpoint) == api_mtls_endpoint
    assert convert(api_mtls_endpoint) == api_mtls_endpoint
    assert convert(sandbox_endpoint) == sandbox_mtls_endpoint
    assert convert(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
    # non-googleapis hosts are left alone
    assert convert(non_googleapi) == non_googleapi
@pytest.mark.parametrize(
    "client_class,transport_name", [(RegionSslCertificatesClient, "rest"),]
)
def test_region_ssl_certificates_client_from_service_account_info(
    client_class, transport_name
):
    """from_service_account_info wires the factory credentials and REST host into the client."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info, transport=transport_name)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        # REST transports get an https:// prefix instead of a :443 suffix
        assert client.transport._host == (
            "compute.googleapis.com{}".format(":443")
            if transport_name in ["grpc", "grpc_asyncio"]
            else "https://{}".format("compute.googleapis.com")
        )
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [(transports.RegionSslCertificatesRestTransport, "rest"),],
)
def test_region_ssl_certificates_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """Self-signed JWT access is requested on the credentials iff always_use_jwt_access=True."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize(
    "client_class,transport_name", [(RegionSslCertificatesClient, "rest"),]
)
def test_region_ssl_certificates_client_from_service_account_file(
    client_class, transport_name
):
    """Both from_service_account_file and its _json alias build a working client."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file(
            "dummy/file/path.json", transport=transport_name
        )
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        # from_service_account_json is an alias of from_service_account_file
        client = client_class.from_service_account_json(
            "dummy/file/path.json", transport=transport_name
        )
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == (
            "compute.googleapis.com{}".format(":443")
            if transport_name in ["grpc", "grpc_asyncio"]
            else "https://{}".format("compute.googleapis.com")
        )
def test_region_ssl_certificates_client_get_transport_class():
    """get_transport_class resolves to the REST transport, by default and by name."""
    # default resolution
    transport = RegionSslCertificatesClient.get_transport_class()
    available_transports = [
        transports.RegionSslCertificatesRestTransport,
    ]
    assert transport in available_transports
    # explicit resolution by name
    transport = RegionSslCertificatesClient.get_transport_class("rest")
    assert transport == transports.RegionSslCertificatesRestTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            RegionSslCertificatesClient,
            transports.RegionSslCertificatesRestTransport,
            "rest",
        ),
    ],
)
@mock.patch.object(
    RegionSslCertificatesClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(RegionSslCertificatesClient),
)
def test_region_ssl_certificates_client_client_options(
    client_class, transport_class, transport_name
):
    """Client honors api_endpoint/quota_project_id options and GOOGLE_API_USE_MTLS_ENDPOINT."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(RegionSslCertificatesClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(RegionSslCertificatesClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,use_client_cert_env",
    [
        (
            RegionSslCertificatesClient,
            transports.RegionSslCertificatesRestTransport,
            "rest",
            "true",
        ),
        (
            RegionSslCertificatesClient,
            transports.RegionSslCertificatesRestTransport,
            "rest",
            "false",
        ),
    ],
)
@mock.patch.object(
    RegionSslCertificatesClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(RegionSslCertificatesClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_region_ssl_certificates_client_mtls_env_auto(
    client_class, transport_class, transport_name, use_client_cert_env
):
    """With MTLS_ENDPOINT=auto, the endpoint switches to mtls only when a client cert is usable."""
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        options = client_options.ClientOptions(
            client_cert_source=client_cert_source_callback
        )
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options, transport=transport_name)
            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=True,
            ):
                with mock.patch(
                    "google.auth.transport.mtls.default_client_cert_source",
                    return_value=client_cert_source_callback,
                ):
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback
                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )
    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
    ):
        with mock.patch.object(transport_class, "__init__") as patched:
            with mock.patch(
                "google.auth.transport.mtls.has_default_client_cert_source",
                return_value=False,
            ):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )
@pytest.mark.parametrize("client_class", [RegionSslCertificatesClient])
@mock.patch.object(
    RegionSslCertificatesClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(RegionSslCertificatesClient),
)
def test_region_ssl_certificates_client_get_mtls_endpoint_and_cert_source(client_class):
    """get_mtls_endpoint_and_cert_source respects both env vars and explicit client options."""
    mock_client_cert_source = mock.Mock()
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source == mock_client_cert_source
    # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
        mock_client_cert_source = mock.Mock()
        mock_api_endpoint = "foo"
        options = client_options.ClientOptions(
            client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
        )
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
            options
        )
        assert api_endpoint == mock_api_endpoint
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
        assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
        assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=False,
        ):
            api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
            assert api_endpoint == client_class.DEFAULT_ENDPOINT
            assert cert_source is None
    # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
        with mock.patch(
            "google.auth.transport.mtls.has_default_client_cert_source",
            return_value=True,
        ):
            with mock.patch(
                "google.auth.transport.mtls.default_client_cert_source",
                return_value=mock_client_cert_source,
            ):
                (
                    api_endpoint,
                    cert_source,
                ) = client_class.get_mtls_endpoint_and_cert_source()
                assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
                assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (
            RegionSslCertificatesClient,
            transports.RegionSslCertificatesRestTransport,
            "rest",
        ),
    ],
)
def test_region_ssl_certificates_client_client_options_scopes(
    client_class, transport_class, transport_name
):
    """Scopes from client_options are forwarded verbatim to the transport."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(scopes=["1", "2"],)
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name,grpc_helpers",
    [
        (
            RegionSslCertificatesClient,
            transports.RegionSslCertificatesRestTransport,
            "rest",
            None,
        ),
    ],
)
def test_region_ssl_certificates_client_client_options_credentials_file(
    client_class, transport_class, transport_name, grpc_helpers
):
    """A credentials_file from client_options is forwarded to the transport."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(credentials_file="credentials.json")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )
@pytest.mark.parametrize(
    "request_type", [compute.DeleteRegionSslCertificateRequest, dict,]
)
def test_delete_unary_rest(request_type):
    """delete_unary round-trips a mocked REST response into a compute.Operation."""
    client = RegionSslCertificatesClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    # send a request that will satisfy transcoding
    request_init = {
        "project": "sample1",
        "region": "sample2",
        "ssl_certificate": "sample3",
    }
    request = request_type(request_init)
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(type(client.transport._session), "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation(
            client_operation_id="client_operation_id_value",
            creation_timestamp="creation_timestamp_value",
            description="description_value",
            end_time="end_time_value",
            http_error_message="http_error_message_value",
            http_error_status_code=2374,
            id=205,
            insert_time="insert_time_value",
            kind="kind_value",
            name="name_value",
            operation_group_id="operation_group_id_value",
            operation_type="operation_type_value",
            progress=885,
            region="region_value",
            self_link="self_link_value",
            start_time="start_time_value",
            status=compute.Operation.Status.DONE,
            status_message="status_message_value",
            target_id=947,
            target_link="target_link_value",
            user="user_value",
            zone="zone_value",
        )
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        response = client.delete_unary(request)
    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == "client_operation_id_value"
    assert response.creation_timestamp == "creation_timestamp_value"
    assert response.description == "description_value"
    assert response.end_time == "end_time_value"
    assert response.http_error_message == "http_error_message_value"
    assert response.http_error_status_code == 2374
    assert response.id == 205
    assert response.insert_time == "insert_time_value"
    assert response.kind == "kind_value"
    assert response.name == "name_value"
    assert response.operation_group_id == "operation_group_id_value"
    assert response.operation_type == "operation_type_value"
    assert response.progress == 885
    assert response.region == "region_value"
    assert response.self_link == "self_link_value"
    assert response.start_time == "start_time_value"
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == "status_message_value"
    assert response.target_id == 947
    assert response.target_link == "target_link_value"
    assert response.user == "user_value"
    assert response.zone == "zone_value"
def test_delete_unary_rest_required_fields(
    request_type=compute.DeleteRegionSslCertificateRequest,
):
    """Required fields (project/region/sslCertificate) survive transcoding; defaults are dropped."""
    transport_class = transports.RegionSslCertificatesRestTransport
    request_init = {}
    request_init["project"] = ""
    request_init["region"] = ""
    request_init["ssl_certificate"] = ""
    request = request_type(request_init)
    jsonified_request = json.loads(
        request_type.to_json(
            request, including_default_value_fields=False, use_integers_for_enums=False
        )
    )
    # verify fields with default values are dropped
    unset_fields = transport_class(
        credentials=ga_credentials.AnonymousCredentials()
    ).delete._get_unset_required_fields(jsonified_request)
    jsonified_request.update(unset_fields)
    # verify required fields with default values are now present
    jsonified_request["project"] = "project_value"
    jsonified_request["region"] = "region_value"
    jsonified_request["sslCertificate"] = "ssl_certificate_value"
    unset_fields = transport_class(
        credentials=ga_credentials.AnonymousCredentials()
    ).delete._get_unset_required_fields(jsonified_request)
    # Check that path parameters and body parameters are not mixing in.
    assert not set(unset_fields) - set(("request_id",))
    jsonified_request.update(unset_fields)
    # verify required fields with non-default values are left alone
    assert "project" in jsonified_request
    assert jsonified_request["project"] == "project_value"
    assert "region" in jsonified_request
    assert jsonified_request["region"] == "region_value"
    assert "sslCertificate" in jsonified_request
    assert jsonified_request["sslCertificate"] == "ssl_certificate_value"
    client = RegionSslCertificatesClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    request = request_type(request_init)
    # Designate an appropriate value for the returned response.
    return_value = compute.Operation()
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # We need to mock transcode() because providing default values
        # for required fields will fail the real version if the http_options
        # expect actual values for those fields.
        with mock.patch.object(path_template, "transcode") as transcode:
            # A uri without fields and an empty body will force all the
            # request fields to show up in the query_params.
            transcode_result = {
                "uri": "v1/sample_method",
                "method": "delete",
                "query_params": request_init,
            }
            transcode.return_value = transcode_result
            response_value = Response()
            response_value.status_code = 200
            json_return_value = compute.Operation.to_json(return_value)
            response_value._content = json_return_value.encode("UTF-8")
            req.return_value = response_value
            response = client.delete_unary(request)
            # no extra query parameters expected beyond the transcoded request
            expected_params = []
            actual_params = req.call_args.kwargs["params"]
            assert expected_params == actual_params
def test_delete_unary_rest_unset_required_fields():
    """Optional query params intersected with required fields of delete are empty.

    BUGFIX(review): `AnonymousCredentials` was passed as a class instead of an
    instance (missing `()`), inconsistent with every sibling test in this file.
    """
    transport = transports.RegionSslCertificatesRestTransport(
        credentials=ga_credentials.AnonymousCredentials()
    )
    unset_fields = transport.delete._get_unset_required_fields({})
    # requestId is the only optional query param; it is not a required field,
    # so the intersection is the empty set.
    assert set(unset_fields) == (
        set(("requestId",)) & set(("project", "region", "sslCertificate",))
    )
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_delete_unary_rest_interceptors(null_interceptor):
    """pre_delete/post_delete interceptor hooks fire exactly once around a delete call."""
    transport = transports.RegionSslCertificatesRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
        interceptor=None
        if null_interceptor
        else transports.RegionSslCertificatesRestInterceptor(),
    )
    client = RegionSslCertificatesClient(transport=transport)
    with mock.patch.object(
        type(client.transport._session), "request"
    ) as req, mock.patch.object(
        path_template, "transcode"
    ) as transcode, mock.patch.object(
        transports.RegionSslCertificatesRestInterceptor, "post_delete"
    ) as post, mock.patch.object(
        transports.RegionSslCertificatesRestInterceptor, "pre_delete"
    ) as pre:
        pre.assert_not_called()
        post.assert_not_called()
        transcode.return_value = {
            "method": "post",
            "uri": "my_uri",
            "body": None,
            "query_params": {},
        }
        req.return_value = Response()
        req.return_value.status_code = 200
        req.return_value.request = PreparedRequest()
        req.return_value._content = compute.Operation.to_json(compute.Operation())
        request = compute.DeleteRegionSslCertificateRequest()
        metadata = [
            ("key", "val"),
            ("cephalopod", "squid"),
        ]
        pre.return_value = request, metadata
        post.return_value = compute.Operation
        client.delete_unary(
            request, metadata=[("key", "val"), ("cephalopod", "squid"),]
        )
        pre.assert_called_once()
        post.assert_called_once()
def test_delete_unary_rest_bad_request(
    transport: str = "rest", request_type=compute.DeleteRegionSslCertificateRequest
):
    """An HTTP 400 from the server must surface as core_exceptions.BadRequest."""
    client = RegionSslCertificatesClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # send a request that will satisfy transcoding
    request_init = {
        "project": "sample1",
        "region": "sample2",
        "ssl_certificate": "sample3",
    }
    request = request_type(request_init)
    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, "request") as req, pytest.raises(
        core_exceptions.BadRequest
    ):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.delete_unary(request)
def test_delete_unary_rest_flattened():
    """Flattened kwargs must be transcoded into the expected request URL."""
    client = RegionSslCertificatesClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(type(client.transport._session), "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()
        # get arguments that satisfy an http rule for this method
        sample_request = {
            "project": "sample1",
            "region": "sample2",
            "ssl_certificate": "sample3",
        }
        # get truthy value for each flattened field
        mock_args = dict(
            project="project_value",
            region="region_value",
            ssl_certificate="ssl_certificate_value",
        )
        mock_args.update(sample_request)
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        client.delete_unary(**mock_args)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        # args[1] is the transcoded URI; validate it against the http rule.
        assert path_template.validate(
            "%s/compute/v1/projects/{project}/regions/{region}/sslCertificates/{ssl_certificate}"
            % client.transport._host,
            args[1],
        )
def test_delete_unary_rest_flattened_error(transport: str = "rest"):
    """Supplying both a request object and flattened fields must raise."""
    client = RegionSslCertificatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Mixing a populated request message with flattened keyword arguments
    # is ambiguous, so the client rejects it.
    flattened = dict(
        project="project_value",
        region="region_value",
        ssl_certificate="ssl_certificate_value",
    )
    with pytest.raises(ValueError):
        client.delete_unary(
            compute.DeleteRegionSslCertificateRequest(), **flattened
        )
def test_delete_unary_rest_error():
    """Smoke test: constructing a REST client for delete must not raise."""
    RegionSslCertificatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="rest",
    )
@pytest.mark.parametrize(
    "request_type", [compute.GetRegionSslCertificateRequest, dict,]
)
def test_get_rest(request_type):
    """get() must deserialize a mocked HTTP response into SslCertificate."""
    client = RegionSslCertificatesClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    # send a request that will satisfy transcoding
    request_init = {
        "project": "sample1",
        "region": "sample2",
        "ssl_certificate": "sample3",
    }
    request = request_type(request_init)
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(type(client.transport._session), "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.SslCertificate(
            certificate="certificate_value",
            creation_timestamp="creation_timestamp_value",
            description="description_value",
            expire_time="expire_time_value",
            id=205,
            kind="kind_value",
            name="name_value",
            private_key="private_key_value",
            region="region_value",
            self_link="self_link_value",
            subject_alternative_names=["subject_alternative_names_value"],
            type_="type__value",
        )
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.SslCertificate.to_json(return_value)
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        response = client.get(request)
    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.SslCertificate)
    assert response.certificate == "certificate_value"
    assert response.creation_timestamp == "creation_timestamp_value"
    assert response.description == "description_value"
    assert response.expire_time == "expire_time_value"
    assert response.id == 205
    assert response.kind == "kind_value"
    assert response.name == "name_value"
    assert response.private_key == "private_key_value"
    assert response.region == "region_value"
    assert response.self_link == "self_link_value"
    assert response.subject_alternative_names == ["subject_alternative_names_value"]
    assert response.type_ == "type__value"
def test_get_rest_required_fields(request_type=compute.GetRegionSslCertificateRequest):
    """Exercise required-field handling for get() end to end.

    Checks that default (empty) required fields are dropped from the JSON
    form, that explicitly set values survive, and that a call with mocked
    transcoding sends no unexpected query params.
    """
    transport_class = transports.RegionSslCertificatesRestTransport
    request_init = {}
    request_init["project"] = ""
    request_init["region"] = ""
    request_init["ssl_certificate"] = ""
    request = request_type(request_init)
    jsonified_request = json.loads(
        request_type.to_json(
            request, including_default_value_fields=False, use_integers_for_enums=False
        )
    )
    # verify fields with default values are dropped
    unset_fields = transport_class(
        credentials=ga_credentials.AnonymousCredentials()
    ).get._get_unset_required_fields(jsonified_request)
    jsonified_request.update(unset_fields)
    # verify required fields with default values are now present
    jsonified_request["project"] = "project_value"
    jsonified_request["region"] = "region_value"
    jsonified_request["sslCertificate"] = "ssl_certificate_value"
    unset_fields = transport_class(
        credentials=ga_credentials.AnonymousCredentials()
    ).get._get_unset_required_fields(jsonified_request)
    jsonified_request.update(unset_fields)
    # verify required fields with non-default values are left alone
    assert "project" in jsonified_request
    assert jsonified_request["project"] == "project_value"
    assert "region" in jsonified_request
    assert jsonified_request["region"] == "region_value"
    assert "sslCertificate" in jsonified_request
    assert jsonified_request["sslCertificate"] == "ssl_certificate_value"
    client = RegionSslCertificatesClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    request = request_type(request_init)
    # Designate an appropriate value for the returned response.
    return_value = compute.SslCertificate()
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # We need to mock transcode() because providing default values
        # for required fields will fail the real version if the http_options
        # expect actual values for those fields.
        with mock.patch.object(path_template, "transcode") as transcode:
            # A uri without fields and an empty body will force all the
            # request fields to show up in the query_params.
            transcode_result = {
                "uri": "v1/sample_method",
                "method": "get",
                "query_params": request_init,
            }
            transcode.return_value = transcode_result
            response_value = Response()
            response_value.status_code = 200
            json_return_value = compute.SslCertificate.to_json(return_value)
            response_value._content = json_return_value.encode("UTF-8")
            req.return_value = response_value
            response = client.get(request)
            expected_params = []
            actual_params = req.call_args.kwargs["params"]
            assert expected_params == actual_params
def test_get_rest_unset_required_fields():
    """Verify get() has no optional query params; all fields are required.

    ``set(())`` intersected with the required path fields is empty, so
    ``unset_fields`` must be empty for a blank request.
    """
    transport = transports.RegionSslCertificatesRestTransport(
        # Fix: instantiate the credentials; the original passed the class
        # object itself, inconsistent with every other use in this file.
        credentials=ga_credentials.AnonymousCredentials()
    )
    unset_fields = transport.get._get_unset_required_fields({})
    assert set(unset_fields) == (
        set(()) & set(("project", "region", "sslCertificate",))
    )
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_get_rest_interceptors(null_interceptor):
    """Verify pre_get/post_get interceptor hooks fire exactly once.

    Runs both with a real interceptor instance and with ``interceptor=None``.
    """
    transport = transports.RegionSslCertificatesRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
        interceptor=None
        if null_interceptor
        else transports.RegionSslCertificatesRestInterceptor(),
    )
    client = RegionSslCertificatesClient(transport=transport)
    # Patch the HTTP session, transcoding, and both interceptor hooks.
    with mock.patch.object(
        type(client.transport._session), "request"
    ) as req, mock.patch.object(
        path_template, "transcode"
    ) as transcode, mock.patch.object(
        transports.RegionSslCertificatesRestInterceptor, "post_get"
    ) as post, mock.patch.object(
        transports.RegionSslCertificatesRestInterceptor, "pre_get"
    ) as pre:
        pre.assert_not_called()
        post.assert_not_called()
        transcode.return_value = {
            "method": "post",
            "uri": "my_uri",
            "body": None,
            "query_params": {},
        }
        # Fake a successful HTTP response carrying a serialized SslCertificate.
        req.return_value = Response()
        req.return_value.status_code = 200
        req.return_value.request = PreparedRequest()
        req.return_value._content = compute.SslCertificate.to_json(
            compute.SslCertificate()
        )
        request = compute.GetRegionSslCertificateRequest()
        metadata = [
            ("key", "val"),
            ("cephalopod", "squid"),
        ]
        # pre must return (request, metadata); post returns the response type.
        pre.return_value = request, metadata
        post.return_value = compute.SslCertificate
        client.get(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
        pre.assert_called_once()
        post.assert_called_once()
def test_get_rest_bad_request(
    transport: str = "rest", request_type=compute.GetRegionSslCertificateRequest
):
    """An HTTP 400 from the server must surface as core_exceptions.BadRequest."""
    client = RegionSslCertificatesClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # send a request that will satisfy transcoding
    request_init = {
        "project": "sample1",
        "region": "sample2",
        "ssl_certificate": "sample3",
    }
    request = request_type(request_init)
    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, "request") as req, pytest.raises(
        core_exceptions.BadRequest
    ):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.get(request)
def test_get_rest_flattened():
    """Flattened kwargs must be transcoded into the expected request URL."""
    client = RegionSslCertificatesClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(type(client.transport._session), "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.SslCertificate()
        # get arguments that satisfy an http rule for this method
        sample_request = {
            "project": "sample1",
            "region": "sample2",
            "ssl_certificate": "sample3",
        }
        # get truthy value for each flattened field
        mock_args = dict(
            project="project_value",
            region="region_value",
            ssl_certificate="ssl_certificate_value",
        )
        mock_args.update(sample_request)
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.SslCertificate.to_json(return_value)
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        client.get(**mock_args)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        # args[1] is the transcoded URI; validate it against the http rule.
        assert path_template.validate(
            "%s/compute/v1/projects/{project}/regions/{region}/sslCertificates/{ssl_certificate}"
            % client.transport._host,
            args[1],
        )
def test_get_rest_flattened_error(transport: str = "rest"):
    """Supplying both a request object and flattened fields must raise."""
    client = RegionSslCertificatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Mixing a populated request message with flattened keyword arguments
    # is ambiguous, so the client rejects it.
    flattened = dict(
        project="project_value",
        region="region_value",
        ssl_certificate="ssl_certificate_value",
    )
    with pytest.raises(ValueError):
        client.get(compute.GetRegionSslCertificateRequest(), **flattened)
def test_get_rest_error():
    """Smoke test: constructing a REST client for get must not raise."""
    RegionSslCertificatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="rest",
    )
@pytest.mark.parametrize(
    "request_type", [compute.InsertRegionSslCertificateRequest, dict,]
)
def test_insert_unary_rest(request_type):
    """insert_unary() must deserialize a mocked HTTP response into Operation."""
    client = RegionSslCertificatesClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2"}
    # Fully-populated body resource exercising every SslCertificate field.
    request_init["ssl_certificate_resource"] = {
        "certificate": "certificate_value",
        "creation_timestamp": "creation_timestamp_value",
        "description": "description_value",
        "expire_time": "expire_time_value",
        "id": 205,
        "kind": "kind_value",
        "managed": {
            "domain_status": {},
            "domains": ["domains_value_1", "domains_value_2"],
            "status": "status_value",
        },
        "name": "name_value",
        "private_key": "private_key_value",
        "region": "region_value",
        "self_link": "self_link_value",
        "self_managed": {
            "certificate": "certificate_value",
            "private_key": "private_key_value",
        },
        "subject_alternative_names": [
            "subject_alternative_names_value_1",
            "subject_alternative_names_value_2",
        ],
        "type_": "type__value",
    }
    request = request_type(request_init)
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(type(client.transport._session), "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation(
            client_operation_id="client_operation_id_value",
            creation_timestamp="creation_timestamp_value",
            description="description_value",
            end_time="end_time_value",
            http_error_message="http_error_message_value",
            http_error_status_code=2374,
            id=205,
            insert_time="insert_time_value",
            kind="kind_value",
            name="name_value",
            operation_group_id="operation_group_id_value",
            operation_type="operation_type_value",
            progress=885,
            region="region_value",
            self_link="self_link_value",
            start_time="start_time_value",
            status=compute.Operation.Status.DONE,
            status_message="status_message_value",
            target_id=947,
            target_link="target_link_value",
            user="user_value",
            zone="zone_value",
        )
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        response = client.insert_unary(request)
    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == "client_operation_id_value"
    assert response.creation_timestamp == "creation_timestamp_value"
    assert response.description == "description_value"
    assert response.end_time == "end_time_value"
    assert response.http_error_message == "http_error_message_value"
    assert response.http_error_status_code == 2374
    assert response.id == 205
    assert response.insert_time == "insert_time_value"
    assert response.kind == "kind_value"
    assert response.name == "name_value"
    assert response.operation_group_id == "operation_group_id_value"
    assert response.operation_type == "operation_type_value"
    assert response.progress == 885
    assert response.region == "region_value"
    assert response.self_link == "self_link_value"
    assert response.start_time == "start_time_value"
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == "status_message_value"
    assert response.target_id == 947
    assert response.target_link == "target_link_value"
    assert response.user == "user_value"
    assert response.zone == "zone_value"
def test_insert_unary_rest_required_fields(
    request_type=compute.InsertRegionSslCertificateRequest,
):
    """Exercise required-field handling for insert_unary() end to end.

    Checks that default (empty) required fields are dropped, that explicit
    values survive, that only body-independent params ("request_id") may be
    unset, and that a call with mocked transcoding sends no query params.
    """
    transport_class = transports.RegionSslCertificatesRestTransport
    request_init = {}
    request_init["project"] = ""
    request_init["region"] = ""
    request = request_type(request_init)
    jsonified_request = json.loads(
        request_type.to_json(
            request, including_default_value_fields=False, use_integers_for_enums=False
        )
    )
    # verify fields with default values are dropped
    unset_fields = transport_class(
        credentials=ga_credentials.AnonymousCredentials()
    ).insert._get_unset_required_fields(jsonified_request)
    jsonified_request.update(unset_fields)
    # verify required fields with default values are now present
    jsonified_request["project"] = "project_value"
    jsonified_request["region"] = "region_value"
    unset_fields = transport_class(
        credentials=ga_credentials.AnonymousCredentials()
    ).insert._get_unset_required_fields(jsonified_request)
    # Check that path parameters and body parameters are not mixing in.
    assert not set(unset_fields) - set(("request_id",))
    jsonified_request.update(unset_fields)
    # verify required fields with non-default values are left alone
    assert "project" in jsonified_request
    assert jsonified_request["project"] == "project_value"
    assert "region" in jsonified_request
    assert jsonified_request["region"] == "region_value"
    client = RegionSslCertificatesClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    request = request_type(request_init)
    # Designate an appropriate value for the returned response.
    return_value = compute.Operation()
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # We need to mock transcode() because providing default values
        # for required fields will fail the real version if the http_options
        # expect actual values for those fields.
        with mock.patch.object(path_template, "transcode") as transcode:
            # A uri without fields and an empty body will force all the
            # request fields to show up in the query_params.
            transcode_result = {
                "uri": "v1/sample_method",
                "method": "post",
                "query_params": request_init,
            }
            transcode_result["body"] = {}
            transcode.return_value = transcode_result
            response_value = Response()
            response_value.status_code = 200
            json_return_value = compute.Operation.to_json(return_value)
            response_value._content = json_return_value.encode("UTF-8")
            req.return_value = response_value
            response = client.insert_unary(request)
            expected_params = []
            actual_params = req.call_args.kwargs["params"]
            assert expected_params == actual_params
def test_insert_unary_rest_unset_required_fields():
    """Verify which insert-request fields are runtime-settable vs. required.

    Only the optional "requestId" query parameter should be reported as
    unset for a blank request; the required fields never appear.
    """
    transport = transports.RegionSslCertificatesRestTransport(
        # Fix: instantiate the credentials; the original passed the class
        # object itself, inconsistent with every other use in this file.
        credentials=ga_credentials.AnonymousCredentials()
    )
    unset_fields = transport.insert._get_unset_required_fields({})
    assert set(unset_fields) == (
        set(("requestId",)) & set(("project", "region", "sslCertificateResource",))
    )
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_insert_unary_rest_interceptors(null_interceptor):
    """Verify pre_insert/post_insert interceptor hooks fire exactly once.

    Runs both with a real interceptor instance and with ``interceptor=None``.
    """
    transport = transports.RegionSslCertificatesRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
        interceptor=None
        if null_interceptor
        else transports.RegionSslCertificatesRestInterceptor(),
    )
    client = RegionSslCertificatesClient(transport=transport)
    # Patch the HTTP session, transcoding, and both interceptor hooks.
    with mock.patch.object(
        type(client.transport._session), "request"
    ) as req, mock.patch.object(
        path_template, "transcode"
    ) as transcode, mock.patch.object(
        transports.RegionSslCertificatesRestInterceptor, "post_insert"
    ) as post, mock.patch.object(
        transports.RegionSslCertificatesRestInterceptor, "pre_insert"
    ) as pre:
        pre.assert_not_called()
        post.assert_not_called()
        transcode.return_value = {
            "method": "post",
            "uri": "my_uri",
            "body": None,
            "query_params": {},
        }
        # Fake a successful HTTP response carrying a serialized Operation.
        req.return_value = Response()
        req.return_value.status_code = 200
        req.return_value.request = PreparedRequest()
        req.return_value._content = compute.Operation.to_json(compute.Operation())
        request = compute.InsertRegionSslCertificateRequest()
        metadata = [
            ("key", "val"),
            ("cephalopod", "squid"),
        ]
        # pre must return (request, metadata); post returns the response type.
        pre.return_value = request, metadata
        post.return_value = compute.Operation
        client.insert_unary(
            request, metadata=[("key", "val"), ("cephalopod", "squid"),]
        )
        pre.assert_called_once()
        post.assert_called_once()
def test_insert_unary_rest_bad_request(
    transport: str = "rest", request_type=compute.InsertRegionSslCertificateRequest
):
    """An HTTP 400 from the server must surface as core_exceptions.BadRequest."""
    client = RegionSslCertificatesClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2"}
    # Fully-populated body resource exercising every SslCertificate field.
    request_init["ssl_certificate_resource"] = {
        "certificate": "certificate_value",
        "creation_timestamp": "creation_timestamp_value",
        "description": "description_value",
        "expire_time": "expire_time_value",
        "id": 205,
        "kind": "kind_value",
        "managed": {
            "domain_status": {},
            "domains": ["domains_value_1", "domains_value_2"],
            "status": "status_value",
        },
        "name": "name_value",
        "private_key": "private_key_value",
        "region": "region_value",
        "self_link": "self_link_value",
        "self_managed": {
            "certificate": "certificate_value",
            "private_key": "private_key_value",
        },
        "subject_alternative_names": [
            "subject_alternative_names_value_1",
            "subject_alternative_names_value_2",
        ],
        "type_": "type__value",
    }
    request = request_type(request_init)
    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, "request") as req, pytest.raises(
        core_exceptions.BadRequest
    ):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.insert_unary(request)
def test_insert_unary_rest_flattened():
    """Flattened kwargs must be transcoded into the expected request URL."""
    client = RegionSslCertificatesClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(type(client.transport._session), "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()
        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "region": "sample2"}
        # get truthy value for each flattened field
        mock_args = dict(
            project="project_value",
            region="region_value",
            ssl_certificate_resource=compute.SslCertificate(
                certificate="certificate_value"
            ),
        )
        mock_args.update(sample_request)
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        client.insert_unary(**mock_args)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        # args[1] is the transcoded URI; validate it against the http rule.
        assert path_template.validate(
            "%s/compute/v1/projects/{project}/regions/{region}/sslCertificates"
            % client.transport._host,
            args[1],
        )
def test_insert_unary_rest_flattened_error(transport: str = "rest"):
    """Supplying both a request object and flattened fields must raise."""
    client = RegionSslCertificatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Mixing a populated request message with flattened keyword arguments
    # is ambiguous, so the client rejects it.
    flattened = dict(
        project="project_value",
        region="region_value",
        ssl_certificate_resource=compute.SslCertificate(
            certificate="certificate_value"
        ),
    )
    with pytest.raises(ValueError):
        client.insert_unary(
            compute.InsertRegionSslCertificateRequest(), **flattened
        )
def test_insert_unary_rest_error():
    """Smoke test: constructing a REST client for insert must not raise."""
    RegionSslCertificatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="rest",
    )
@pytest.mark.parametrize(
    "request_type", [compute.ListRegionSslCertificatesRequest, dict,]
)
def test_list_rest(request_type):
    """list() must wrap a mocked SslCertificateList response in a ListPager."""
    client = RegionSslCertificatesClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2"}
    request = request_type(request_init)
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(type(client.transport._session), "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.SslCertificateList(
            id="id_value",
            kind="kind_value",
            next_page_token="next_page_token_value",
            self_link="self_link_value",
        )
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.SslCertificateList.to_json(return_value)
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        response = client.list(request)
    # Establish that the response is the type that we expect.
    # The pager proxies the underlying list-response attributes.
    assert isinstance(response, pagers.ListPager)
    assert response.id == "id_value"
    assert response.kind == "kind_value"
    assert response.next_page_token == "next_page_token_value"
    assert response.self_link == "self_link_value"
def test_list_rest_required_fields(
    request_type=compute.ListRegionSslCertificatesRequest,
):
    """Exercise required-field handling for list() end to end.

    Checks that default (empty) required fields are dropped, that explicit
    values survive, that only paging/filter params may be unset, and that a
    call with mocked transcoding sends no unexpected query params.
    """
    transport_class = transports.RegionSslCertificatesRestTransport
    request_init = {}
    request_init["project"] = ""
    request_init["region"] = ""
    request = request_type(request_init)
    jsonified_request = json.loads(
        request_type.to_json(
            request, including_default_value_fields=False, use_integers_for_enums=False
        )
    )
    # verify fields with default values are dropped
    unset_fields = transport_class(
        credentials=ga_credentials.AnonymousCredentials()
    ).list._get_unset_required_fields(jsonified_request)
    jsonified_request.update(unset_fields)
    # verify required fields with default values are now present
    jsonified_request["project"] = "project_value"
    jsonified_request["region"] = "region_value"
    unset_fields = transport_class(
        credentials=ga_credentials.AnonymousCredentials()
    ).list._get_unset_required_fields(jsonified_request)
    # Check that path parameters and body parameters are not mixing in.
    assert not set(unset_fields) - set(
        ("filter", "max_results", "order_by", "page_token", "return_partial_success",)
    )
    jsonified_request.update(unset_fields)
    # verify required fields with non-default values are left alone
    assert "project" in jsonified_request
    assert jsonified_request["project"] == "project_value"
    assert "region" in jsonified_request
    assert jsonified_request["region"] == "region_value"
    client = RegionSslCertificatesClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    request = request_type(request_init)
    # Designate an appropriate value for the returned response.
    return_value = compute.SslCertificateList()
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # We need to mock transcode() because providing default values
        # for required fields will fail the real version if the http_options
        # expect actual values for those fields.
        with mock.patch.object(path_template, "transcode") as transcode:
            # A uri without fields and an empty body will force all the
            # request fields to show up in the query_params.
            transcode_result = {
                "uri": "v1/sample_method",
                "method": "get",
                "query_params": request_init,
            }
            transcode.return_value = transcode_result
            response_value = Response()
            response_value.status_code = 200
            json_return_value = compute.SslCertificateList.to_json(return_value)
            response_value._content = json_return_value.encode("UTF-8")
            req.return_value = response_value
            response = client.list(request)
            expected_params = []
            actual_params = req.call_args.kwargs["params"]
            assert expected_params == actual_params
def test_list_rest_unset_required_fields():
    """Verify which list-request fields are runtime-settable vs. required.

    Only the optional paging/filter query parameters should be reported as
    unset for a blank request; the required path fields never appear.
    """
    transport = transports.RegionSslCertificatesRestTransport(
        # Fix: instantiate the credentials; the original passed the class
        # object itself, inconsistent with every other use in this file.
        credentials=ga_credentials.AnonymousCredentials()
    )
    unset_fields = transport.list._get_unset_required_fields({})
    assert set(unset_fields) == (
        set(("filter", "maxResults", "orderBy", "pageToken", "returnPartialSuccess",))
        & set(("project", "region",))
    )
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_list_rest_interceptors(null_interceptor):
    """Verify pre_list/post_list interceptor hooks fire exactly once.

    Runs both with a real interceptor instance and with ``interceptor=None``.
    """
    transport = transports.RegionSslCertificatesRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
        interceptor=None
        if null_interceptor
        else transports.RegionSslCertificatesRestInterceptor(),
    )
    client = RegionSslCertificatesClient(transport=transport)
    # Patch the HTTP session, transcoding, and both interceptor hooks.
    with mock.patch.object(
        type(client.transport._session), "request"
    ) as req, mock.patch.object(
        path_template, "transcode"
    ) as transcode, mock.patch.object(
        transports.RegionSslCertificatesRestInterceptor, "post_list"
    ) as post, mock.patch.object(
        transports.RegionSslCertificatesRestInterceptor, "pre_list"
    ) as pre:
        pre.assert_not_called()
        post.assert_not_called()
        transcode.return_value = {
            "method": "post",
            "uri": "my_uri",
            "body": None,
            "query_params": {},
        }
        # Fake a successful HTTP response with a serialized SslCertificateList.
        req.return_value = Response()
        req.return_value.status_code = 200
        req.return_value.request = PreparedRequest()
        req.return_value._content = compute.SslCertificateList.to_json(
            compute.SslCertificateList()
        )
        request = compute.ListRegionSslCertificatesRequest()
        metadata = [
            ("key", "val"),
            ("cephalopod", "squid"),
        ]
        # pre must return (request, metadata); post returns the response type.
        pre.return_value = request, metadata
        post.return_value = compute.SslCertificateList
        client.list(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
        pre.assert_called_once()
        post.assert_called_once()
def test_list_rest_bad_request(
    transport: str = "rest", request_type=compute.ListRegionSslCertificatesRequest
):
    """An HTTP 400 from the server must surface as core_exceptions.BadRequest."""
    client = RegionSslCertificatesClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2"}
    request = request_type(request_init)
    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, "request") as req, pytest.raises(
        core_exceptions.BadRequest
    ):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.list(request)
def test_list_rest_flattened():
    """Flattened kwargs must be transcoded into the expected request URL."""
    client = RegionSslCertificatesClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="rest",
    )
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(type(client.transport._session), "request") as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.SslCertificateList()
        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "region": "sample2"}
        # get truthy value for each flattened field
        mock_args = dict(project="project_value", region="region_value",)
        mock_args.update(sample_request)
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.SslCertificateList.to_json(return_value)
        response_value._content = json_return_value.encode("UTF-8")
        req.return_value = response_value
        client.list(**mock_args)
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        # args[1] is the transcoded URI; validate it against the http rule.
        assert path_template.validate(
            "%s/compute/v1/projects/{project}/regions/{region}/sslCertificates"
            % client.transport._host,
            args[1],
        )
def test_list_rest_flattened_error(transport: str = "rest"):
    """Supplying both a request object and flattened fields must raise."""
    client = RegionSslCertificatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Mixing a populated request message with flattened keyword arguments
    # is ambiguous, so the client rejects it.
    flattened = dict(project="project_value", region="region_value")
    with pytest.raises(ValueError):
        client.list(compute.ListRegionSslCertificatesRequest(), **flattened)
def test_list_rest_pager(transport: str = "rest"):
    """The ListPager must iterate items across pages and expose page tokens.

    Feeds four fake pages (3 + 0 + 1 + 2 items) twice — once consumed as a
    flat item iterator, once via ``.pages``.
    """
    client = RegionSslCertificatesClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport,
    )
    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, "request") as req:
        # TODO(kbandes): remove this mock unless there's a good reason for it.
        # with mock.patch.object(path_template, 'transcode') as transcode:
        # Set the response as a series of pages
        response = (
            compute.SslCertificateList(
                items=[
                    compute.SslCertificate(),
                    compute.SslCertificate(),
                    compute.SslCertificate(),
                ],
                next_page_token="abc",
            ),
            compute.SslCertificateList(items=[], next_page_token="def",),
            compute.SslCertificateList(
                items=[compute.SslCertificate(),], next_page_token="ghi",
            ),
            compute.SslCertificateList(
                items=[compute.SslCertificate(), compute.SslCertificate(),],
            ),
        )
        # Two responses for two calls
        response = response + response
        # Wrap the values into proper Response objs
        response = tuple(compute.SslCertificateList.to_json(x) for x in response)
        return_values = tuple(Response() for i in response)
        for return_val, response_val in zip(return_values, response):
            return_val._content = response_val.encode("UTF-8")
            return_val.status_code = 200
        req.side_effect = return_values
        sample_request = {"project": "sample1", "region": "sample2"}
        pager = client.list(request=sample_request)
        # Flat iteration yields every item from every page.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, compute.SslCertificate) for i in results)
        # Page-wise iteration exposes each raw page's next_page_token.
        pages = list(client.list(request=sample_request).pages)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
def test_credentials_transport_error():
    """Every credential-related option conflicts with an explicit transport."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.RegionSslCertificatesRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = RegionSslCertificatesClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.RegionSslCertificatesRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = RegionSslCertificatesClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide an api_key and a transport instance.
    transport = transports.RegionSslCertificatesRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = RegionSslCertificatesClient(
            client_options=options, transport=transport,
        )
    # It is an error to provide an api_key and a credential.
    options = mock.Mock()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = RegionSslCertificatesClient(
            client_options=options, credentials=ga_credentials.AnonymousCredentials()
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.RegionSslCertificatesRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = RegionSslCertificatesClient(
            client_options={"scopes": ["1", "2"]}, transport=transport,
        )
def test_transport_instance():
    """A caller-supplied transport instance is used verbatim by the client."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.RegionSslCertificatesRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = RegionSslCertificatesClient(transport=transport)
    assert client.transport is transport
@pytest.mark.parametrize(
    "transport_class", [transports.RegionSslCertificatesRestTransport,]
)
def test_transport_adc(transport_class):
    """Transports fall back to Application Default Credentials when none given."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
def test_region_ssl_certificates_base_transport_error():
    """Supplying both credentials and a credentials file is rejected."""
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transport = transports.RegionSslCertificatesTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_region_ssl_certificates_base_transport():
    """The abstract base transport raises NotImplementedError everywhere."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.compute_v1.services.region_ssl_certificates.transports.RegionSslCertificatesTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.RegionSslCertificatesTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "delete",
        "get",
        "insert",
        "list",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    with pytest.raises(NotImplementedError):
        transport.close()
def test_region_ssl_certificates_base_transport_with_credentials_file():
    """A credentials file is loaded with the expected scopes and quota project."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.compute_v1.services.region_ssl_certificates.transports.RegionSslCertificatesTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.RegionSslCertificatesTransport(
            credentials_file="credentials.json", quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/compute",
                "https://www.googleapis.com/auth/cloud-platform",
            ),
            quota_project_id="octopus",
        )
def test_region_ssl_certificates_base_transport_with_adc():
    """Base transport consults ADC when neither credentials nor file is given."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.compute_v1.services.region_ssl_certificates.transports.RegionSslCertificatesTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.RegionSslCertificatesTransport()
        adc.assert_called_once()
def test_region_ssl_certificates_auth_adc():
    """Client-level ADC lookup passes the compute default scopes."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        RegionSslCertificatesClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=(
                "https://www.googleapis.com/auth/compute",
                "https://www.googleapis.com/auth/cloud-platform",
            ),
            quota_project_id=None,
        )
def test_region_ssl_certificates_http_transport_client_cert_source_for_mtls():
    """An mTLS client-cert callback is wired into the authorized session."""
    cred = ga_credentials.AnonymousCredentials()
    with mock.patch(
        "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
    ) as mock_configure_mtls_channel:
        transports.RegionSslCertificatesRestTransport(
            credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
        )
        mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
@pytest.mark.parametrize("transport_name", ["rest",])
def test_region_ssl_certificates_host_no_port(transport_name):
    """REST transports prefix the scheme; gRPC ones append the default port."""
    client = RegionSslCertificatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="compute.googleapis.com"
        ),
        transport=transport_name,
    )
    assert client.transport._host == (
        "compute.googleapis.com:443"
        if transport_name in ["grpc", "grpc_asyncio"]
        else "https://compute.googleapis.com"
    )
@pytest.mark.parametrize("transport_name", ["rest",])
def test_region_ssl_certificates_host_with_port(transport_name):
    """An explicit port in the endpoint is preserved for both transport kinds."""
    client = RegionSslCertificatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(
            api_endpoint="compute.googleapis.com:8000"
        ),
        transport=transport_name,
    )
    assert client.transport._host == (
        "compute.googleapis.com:8000"
        if transport_name in ["grpc", "grpc_asyncio"]
        else "https://compute.googleapis.com:8000"
    )
def test_common_billing_account_path():
    """The billing-account helper renders the canonical resource path."""
    account = "squid"
    want = f"billingAccounts/{account}"
    got = RegionSslCertificatesClient.common_billing_account_path(account)
    assert got == want
def test_parse_common_billing_account_path():
    """Parsing a rendered billing-account path recovers its components."""
    want = {"billing_account": "clam"}
    rendered = RegionSslCertificatesClient.common_billing_account_path(**want)
    # Render then parse must round-trip.
    got = RegionSslCertificatesClient.parse_common_billing_account_path(rendered)
    assert got == want
def test_common_folder_path():
    """The folder helper renders the canonical resource path."""
    name = "whelk"
    want = f"folders/{name}"
    assert RegionSslCertificatesClient.common_folder_path(name) == want
def test_parse_common_folder_path():
    """Parsing a rendered folder path recovers its components."""
    want = {"folder": "octopus"}
    rendered = RegionSslCertificatesClient.common_folder_path(**want)
    # Render then parse must round-trip.
    assert RegionSslCertificatesClient.parse_common_folder_path(rendered) == want
def test_common_organization_path():
    """The organization helper renders the canonical resource path."""
    name = "oyster"
    want = f"organizations/{name}"
    assert RegionSslCertificatesClient.common_organization_path(name) == want
def test_parse_common_organization_path():
    """Parsing a rendered organization path recovers its components."""
    want = {"organization": "nudibranch"}
    rendered = RegionSslCertificatesClient.common_organization_path(**want)
    # Render then parse must round-trip.
    assert RegionSslCertificatesClient.parse_common_organization_path(rendered) == want
def test_common_project_path():
    """The project helper renders the canonical resource path."""
    name = "cuttlefish"
    want = f"projects/{name}"
    assert RegionSslCertificatesClient.common_project_path(name) == want
def test_parse_common_project_path():
    """Parsing a rendered project path recovers its components."""
    want = {"project": "mussel"}
    rendered = RegionSslCertificatesClient.common_project_path(**want)
    # Render then parse must round-trip.
    assert RegionSslCertificatesClient.parse_common_project_path(rendered) == want
def test_common_location_path():
    """The location helper renders the canonical two-segment resource path."""
    proj, loc = "winkle", "nautilus"
    want = f"projects/{proj}/locations/{loc}"
    assert RegionSslCertificatesClient.common_location_path(proj, loc) == want
def test_parse_common_location_path():
    """Parsing a rendered location path recovers its components."""
    want = {"project": "scallop", "location": "abalone"}
    rendered = RegionSslCertificatesClient.common_location_path(**want)
    # Render then parse must round-trip.
    assert RegionSslCertificatesClient.parse_common_location_path(rendered) == want
def test_client_with_default_client_info():
    """The supplied client_info reaches _prep_wrapped_messages on both
    the client path and the bare transport-class path."""
    client_info = gapic_v1.client_info.ClientInfo()
    with mock.patch.object(
        transports.RegionSslCertificatesTransport, "_prep_wrapped_messages"
    ) as prep:
        client = RegionSslCertificatesClient(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    with mock.patch.object(
        transports.RegionSslCertificatesTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = RegionSslCertificatesClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
def test_transport_close():
    """Exiting the client context manager closes the underlying session."""
    # Maps transport name -> attribute holding the closable resource.
    transports = {
        "rest": "_session",
    }
    for transport, close_name in transports.items():
        client = RegionSslCertificatesClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """The client context manager delegates close() to its transport."""
    transports = [
        "rest",
    ]
    for transport in transports:
        client = RegionSslCertificatesClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
@pytest.mark.parametrize(
    "client_class,transport_class",
    [(RegionSslCertificatesClient, transports.RegionSslCertificatesRestTransport),],
)
def test_api_key_credentials(client_class, transport_class):
    """An api_key client option is exchanged for API-key credentials that are
    then handed to the transport constructor."""
    with mock.patch.object(
        google.auth._default, "get_api_key_credentials", create=True
    ) as get_api_key_credentials:
        mock_cred = mock.Mock()
        get_api_key_credentials.return_value = mock_cred
        options = client_options.ClientOptions()
        options.api_key = "api_key"
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            patched.assert_called_once_with(
                credentials=mock_cred,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )
| googleapis/python-compute | tests/unit/gapic/compute_v1/test_region_ssl_certificates.py | Python | apache-2.0 | 82,189 | [
"Octopus"
] | a37d8e7559f444225b4d41284ea7f13a07deab7d6c3838ee62513fd73a1a26a3 |
#!/usr/bin/env python
from gnuradio import gr
from gnuradio import gr, blocks, analog, filter
# import ofdm
import numpy
from transmitter_hier_bc import transmitter_hier_bc
from receiver_hier_cb import receiver_hier_cb
from channel_hier_cc import channel_hier_cc
from numpy import sqrt
import math
# from fbmc_swig
class test_demapper_fbmc_multiuser:
    """BER-over-SNR simulation harness for an FBMC transmit/receive chain.

    Python 2 / GNU Radio script: builds a flowgraph (transmitter -> AWGN ->
    receiver), XORs transmitted and received symbols, counts bit errors,
    and prints MATLAB-ready BER curves for several QAM orders.
    """
    def __init__ ( self ):
        pass
    # def test_symbol_src ( self, arity ):
    # vlen = 1
    # N = int( 1e7 )
    # demapper = ofdm.generic_demapper_vcb( vlen )
    # const = demapper.get_constellation( arity )
    # assert( len( const ) == 2**arity )
    # symsrc = ofdm.symbol_random_src( const, vlen )
    # acc = ofdm.accumulator_cc()
    # skiphead = blocks.skiphead( gr.sizeof_gr_complex, N-1 )
    # limit = blocks.head( gr.sizeof_gr_complex, 1 )
    # dst = blocks.vector_sink_c()
    # c2mag = blocks.complex_to_mag_squared()
    # acc_c2m = ofdm.accumulator_ff()
    # skiphead_c2m = blocks.skiphead( gr.sizeof_float, N-1 )
    # limit_c2m = blocks.head( gr.sizeof_float, 1 )
    # dst_c2m = blocks.vector_sink_f()
    # tb = gr.top_block ( "test__block" )
    # tb.connect( symsrc, acc, skiphead, limit, dst )
    # tb.connect( symsrc, c2mag, acc_c2m, skiphead_c2m, limit_c2m, dst_c2m )
    # tb.run()
    # data = numpy.array( dst.data() )
    # data_c2m = numpy.array( dst_c2m.data() )
    # m = data / N
    # av_pow = data_c2m / N
    # assert( abs( m ) < 0.01 )
    # assert( abs( 1.0 - av_pow ) < 0.5 )
    # print "Uniform distributed random symbol source has"
    # print "\tno offset for N=%d, relative error: %f" % (arity, abs( m ) )
    # print "\tAverage signal power equal 1.0, relative error: %f\t\tOK" \
    # % ( abs( 1.0 - av_pow ) )
    def sim ( self, arity, snr_db, N ):
        """Run one BER simulation and return the bit-error rate.

        arity  -- bits per symbol (QAM order is 2**arity)
        snr_db -- signal-to-noise ratio in dB used to scale the noise source
        N      -- number of samples to push through the chain
        """
        M = 1024
        theta_sel = 0
        syms_per_frame = 10
        zero_pads = 1
        center_preamble = [1, -1j, -1, 1j] # assumed to be normalized to 1
        qam_size = 2**arity
        preamble = [0]*M*zero_pads+center_preamble*((int)(M/len(center_preamble)))+[0]*M*zero_pads
        # num_symbols = 2**12
        exclude_preamble = 0
        exclude_multipath =0
        sel_taps = 0 # epa=0, eva = 1, etu=3
        freq_offset=0
        exclude_noise = 0
        sel_noise_type =0 # gaussian
        eq_select = 3
        # SNR = 20
        K = 4
        N = int( N ) # num of !samples!
        num_bits = N*arity
        # amp = math.sqrt(M/(10**(float(snr_db)/10)))/math.sqrt(2)
        # amp = math.sqrt((10**(float(-1*snr_db)/20))*(2*K*M+(2*syms_per_frame-1)*M)/(4*syms_per_frame))/math.sqrt(2)
        # Noise amplitude: converts the requested SNR into a per-sample
        # Gaussian amplitude; the formula differs with/without preamble
        # because the preamble changes the average frame power.
        if exclude_preamble:
            amp = math.sqrt((10**(float(-1*snr_db)/10))*(2*K*M+(2*syms_per_frame-1)*M)/(4*syms_per_frame))/math.sqrt(2)
        else:
            amp = math.sqrt((10**(float(-1*snr_db)/10))*(M*(syms_per_frame+1)/(syms_per_frame+1+2*zero_pads))*((K*M+(2*syms_per_frame-1)*M/2)/(M*syms_per_frame)))/math.sqrt(2)
        # print amp
        # print amp2
        tx = transmitter_hier_bc(M, K, qam_size, syms_per_frame, theta_sel, exclude_preamble, center_preamble,1)
        rx = receiver_hier_cb(M, K, qam_size, syms_per_frame, theta_sel, eq_select, exclude_preamble, center_preamble,1)
        ch = channel_hier_cc(M, K, syms_per_frame, exclude_multipath, sel_taps, freq_offset, exclude_noise, sel_noise_type, snr_db, exclude_preamble, zero_pads)
        # # src = blocks.vector_source_b(src_data, vlen=1)
        xor_block = blocks.xor_bb()
        head1 = blocks.head(gr.sizeof_char*1, N)
        head0 = blocks.head(gr.sizeof_char*1, N)
        add_block = blocks.add_vcc(1)
        src = blocks.vector_source_b(map(int, numpy.random.randint(0, qam_size, 100000)), True)
        noise = analog.fastnoise_source_c(analog.GR_GAUSSIAN, amp, 0, 8192)
        dst = blocks.vector_sink_b(vlen=1)
        tb = gr.top_block ( "test_block" )
        # Flowgraph: src feeds both the XOR reference branch (head1) and the
        # transmitter; tx output plus AWGN goes into the receiver, whose
        # output (head0) is XORed against the reference to expose bit errors.
        tb.connect((src, 0), (head1, 0)) #esas
        tb.connect((head1, 0), (xor_block, 0)) #esas
        tb.connect((src, 0), (tx, 0)) #esas
        tb.connect((tx, 0), (add_block, 0)) #esas
        tb.connect((noise, 0), (add_block, 1)) #esas
        # tb.connect((head0, 0), (add_block, 1)) #esas
        tb.connect((add_block, 0), (rx, 0)) #esas
        tb.connect((rx, 0),(head0, 0)) #esas
        tb.connect((head0, 0), (xor_block, 1)) #esas
        tb.connect((xor_block, 0), (dst, 0)) #esas
        tb.run()
        # what we record in dst.data will be output of xor_block. now we have to process those data
        # so as to find bit errors.
        result_data = dst.data()
        bit_errors = 0
        for i in range(len(result_data)):
            # print bin(result_data[i])
            # Each set bit in the XOR output is one bit error.
            bit_errors = bit_errors + (bin(result_data[i]).count('1'))
        # print len(result_data)
        # return 1
        return float(bit_errors) / num_bits
    def start ( self ):
        """Sweep SNR for each QAM order and print MATLAB plotting commands."""
        # for i in range(1,9):
        # test_symbol_src( i )
        N = 2**20 #!! we take this as number of samples, not bits.
        ber_curves = dict()
        narity_range = [2, 4, 6, 8]
        for arity in narity_range:
            # min_ber = 0
            # Stop a curve once fewer than ~100 bit errors would be observed.
            min_ber = 100. / (N*arity)
            ber_arr = []
            snr_range = range(0, 30, 1)
            for snr_db in snr_range:
                ber = self.sim( arity, snr_db, N )
                ber_arr.append( ber )
                print "For n-arity %d and SNR = %.1f dB, BER is ~%g" \
                    % ( arity, snr_db , ber )
                if ber <= min_ber:
                    break
            ber_curves[arity] = ber_arr
        # Emit the measured curves as MATLAB matrices, padded with zeros so
        # every row has the same length.
        print "snr = [",
        for snr_db in snr_range:
            print "%.1f," % ( snr_db ),
        print "]"
        print "ber = [",
        for arity in narity_range:
            curve = ber_curves[arity]
            for x in curve:
                print "%7g," % (x),
            for i in range( len( snr_range ) - len( curve ) ):
                print " 0.0,",
            print ";"
        print "]"
        print "ber_ref = [",
        for arity in narity_range:
            curve = ber_curves[arity]
            # Pick the matching berawgn() modulation mode for the reference.
            if arity == 1:
                mode = 'pam'
            elif arity == 2 or arity == 3:
                mode = 'psk'
            else:
                mode = 'qam'
            print "berawgn(snr(1:%d)-10*log10(%d), '%s', %d " \
                % (len(curve),arity, mode, 2**arity ) ,
            if arity == 2 or arity == 3:
                print ", 'nondiff'",
            print "), ",
            for i in range( len( snr_range ) - len( curve ) ):
                print " 0.0,",
            print ";"
        print "]"
        print "semilogy(snr,ber,'--x')"
        print "hold on"
        print "semilogy(snr,ber_ref,'--o')"
        print "legend('QPSK','16QAM','64QAM','256QAM')"
        print "grid on"
        print "xlabel 'SNR (dB)'"
        print "ylabel 'approximate BER'"
        print "title 'BER over SNR for FBMC system, N=%d window size'" % ( N )
if __name__ == '__main__':
    # Bug fix: the script previously instantiated test_demapper_fbmc(), a
    # name that does not exist in this module, so running it raised
    # NameError. The class defined above is test_demapper_fbmc_multiuser.
    t = test_demapper_fbmc_multiuser()
    t.start()
| rwth-ti/gr-ofdm | python/test_demapper_fbmc_multiuser.py | Python | gpl-3.0 | 6,163 | [
"Gaussian"
] | d4813ba219ea27c6908cfed010d79851e5e3629d3bb23ce70ff3de9a27ecacde |
import ast
import linecache
from .namespace import (VariableVisitor, ComplexAssignment,
NamespaceStep)
from .tools import (copy_lineno, _new_constant, pretty_dump,
ReplaceVariable, FindStrVisitor, get_literal,
RestrictToFunctionDefMixin, UNSET)
from .specialized import BuiltinGuard, SpecializedFunction
from .base_optimizer import BaseOptimizer
from .const_propagate import ConstantPropagation
from .const_fold import ConstantFolding
from .call_pure import CallPureBuiltin
from .unroll import UnrollStep, UnrollListComp
from .copy_bltin_to_const import CopyBuiltinToConstantStep
from .bltin_const import ReplaceBuiltinConstant
from .convert_const import ConvertConstant
from .dead_code import RemoveDeadCode, remove_dead_code
from .iterable import SimplifyIterable, SimplifyIterableSpecialize
from .call_method import CallPureMethods
from .inline import InlineSubstitution
def add_import(tree, name, asname):
    """Insert ``import name as asname`` near the top of module *tree*.

    The import is placed after the module docstring (if any) and after any
    ``from __future__ import ...`` statements, which must remain first; if
    the body contains nothing else, the import is appended at the end.
    """
    node = ast.Import(names=[ast.alias(name=name, asname=asname)],
                      lineno=1, col_offset=1)
    for position, stmt in enumerate(tree.body):
        is_docstring = (position == 0
                        and isinstance(stmt, ast.Expr)
                        and isinstance(stmt.value, ast.Constant)
                        and isinstance(stmt.value.value, str))
        if is_docstring:
            continue
        is_future_import = (isinstance(stmt, ast.ImportFrom)
                            and stmt.module == '__future__')
        if is_future_import:
            continue
        # First statement that is neither docstring nor __future__ import:
        # the new import goes right before it.
        tree.body.insert(position, node)
        break
    else:
        # Body is empty or only contains a docstring / __future__ imports.
        tree.body.append(node)
class NakedOptimizer(BaseOptimizer):
    """Optimizer without any optimization.

    Holds the shared scaffolding used by all concrete optimizers: the
    parent/module chain, per-scope variable sets (global, nonlocal, local),
    logging, and dispatch of nested scopes (functions, comprehensions) to
    sub-optimizers.
    """
    def __init__(self, config, filename, parent=None):
        BaseOptimizer.__init__(self, filename)
        self.config = config
        if parent is not None:
            # Nested scope: share the module-level optimizer and track depth.
            self.parent = parent
            # module is a ModuleOptimizer instance
            self.module = parent.module
            self.funcdef_depth = parent.funcdef_depth
        else:
            # Top-level scope: this optimizer is its own module root.
            self.parent = None
            self.module = self
            self.funcdef_depth = 0
        # attributes set in optimize()
        self.root = None
        self._global_variables = set()
        self.nonlocal_variables = set()
        self.local_variables = set()
        # used by FunctionOptimizer.new_str_constant()
        self._new_str_constants = set()
    def optimize_node_list(self, node_list):
        """Apply dead-code removal to a statement list if enabled in config."""
        if not self.config.remove_dead_code:
            return node_list
        return remove_dead_code(self, node_list)
    @classmethod
    def from_parent(cls, parent):
        """Build a child optimizer inheriting config/filename from *parent*."""
        return cls(parent.config, parent.filename, parent=parent)
    def new_constant(self, node, value):
        """Return an AST constant node for *value*, or None if the config
        rejects the value (e.g. too large to inline)."""
        if not self.config.check_result(value):
            return
        return _new_constant(node, value)
    def log(self, node, message, *args, add_line=False):
        """Log an optimizer message for *node*; optionally echo the source line."""
        logger = self.config.logger
        if not logger:
            return
        message = message % args
        message = "%s: fatoptimizer: %s" % (self.error_where(node), message)
        print(message, file=logger)
        if add_line:
            line = linecache.getline(self.filename, node.lineno)
            if line:
                line = line.strip()
                if line:
                    print(" %s" % line, file=logger)
        logger.flush()
    def _is_global_variable(self, name):
        """True if *name* is a global in this scope or in the module scope."""
        if name in self._global_variables:
            return True
        module = self.module
        if module is not self:
            if name in module.local_variables:
                return True
            if name in module._global_variables:
                return True
        return False
    def is_builtin_variable(self, name):
        """True if *name* is not bound in any visible scope and is therefore
        (likely) the builtin of the same name."""
        # local variable?
        if name in self.local_variables:
            return False
        # global variable?
        if self._is_global_variable(name):
            return False
        # non local variable?
        if name in self.nonlocal_variables:
            return False
        # free variable? (local variable of a parent function)
        parent = self.parent
        while parent is not None:
            if name in parent.local_variables:
                return False
            parent = parent.parent
        # variable not defined anywhere: it is likely
        # the expected builtin function
        return True
    def new_local_variable(self, name):
        """Return *name*, suffixed with a counter if needed, so that it does
        not collide with an existing local variable."""
        if name in self.local_variables:
            index = 2
            while True:
                name2 = "%s%s" % (name, index)
                if name2 not in self.local_variables:
                    break
                index += 1
            name = name2
        return name
    def _run_new_optimizer(self, node):
        """Optimize *node* with a fresh child Optimizer."""
        optimizer = Optimizer.from_parent(self)
        return optimizer.optimize(node)
    def _run_sub_optimizer(self, optimizer, node):
        """Run *optimizer* on *node* and absorb any new local variables it
        introduced (e.g. from a specialized function)."""
        new_node = optimizer.optimize(node)
        if isinstance(new_node, list):
            # The function was optimized
            # find new local variables
            visitor = VariableVisitor.from_node_list(self.filename, new_node)
            self.local_variables |= visitor.local_variables
        return new_node
    def fullvisit_FunctionDef(self, node):
        """Dispatch nested function definitions to a FunctionOptimizer."""
        optimizer = FunctionOptimizer.from_parent(self)
        return self._run_sub_optimizer(optimizer, node)
    def fullvisit_ListComp(self, node):
        """List comprehensions form their own scope: use a sub-optimizer."""
        optimizer = ComprehensionOptimizer.from_parent(self)
        return self._run_sub_optimizer(optimizer, node)
    def fullvisit_SetComp(self, node):
        """Set comprehensions form their own scope: use a sub-optimizer."""
        optimizer = ComprehensionOptimizer.from_parent(self)
        return self._run_sub_optimizer(optimizer, node)
    def fullvisit_DictComp(self, node):
        """Dict comprehensions form their own scope: use a sub-optimizer."""
        optimizer = ComprehensionOptimizer.from_parent(self)
        return self._run_sub_optimizer(optimizer, node)
    def _optimize(self, tree):
        """Hook overridden by subclasses; the base just walks the tree."""
        return self.generic_visit(tree)
    def optimize(self, tree):
        """Collect scope variables for *tree*, then run the optimization pass.

        Gives up (returns *tree* unchanged) if variable analysis fails,
        e.g. when globals() is used to store a variable.
        """
        self.root = tree
        # Find variables
        visitor = VariableVisitor(self.filename)
        try:
            visitor.find_variables(tree)
        except ComplexAssignment as exc:
            # globals() is used to store a variable:
            # give up, don't optimize the function
            self.log(exc.node, "skip optimisation: %s", exc)
            return tree
        self._global_variables |= visitor.global_variables
        self.nonlocal_variables |= visitor.nonlocal_variables
        self.local_variables |= visitor.local_variables
        # Optimize nodes
        return self._optimize(tree)
# NOTE(review): the mixin order below determines the MRO and therefore the
# order in which the optimization steps cooperate — do not reorder casually.
class Optimizer(NakedOptimizer,
                NamespaceStep,
                ReplaceBuiltinConstant,
                CallPureMethods,
                UnrollStep,
                ConstantPropagation,
                SimplifyIterable,
                ConstantFolding,
                RemoveDeadCode):
    """Optimizer for AST nodes other than Module and FunctionDef."""
# RestrictToFunctionDefMixin keeps this optimizer from recursing into
# nested function definitions (they get their own FunctionOptimizer).
class FunctionOptimizerStage1(RestrictToFunctionDefMixin, Optimizer):
    """Stage 1 optimizer for ast.FunctionDef nodes."""
class ComprehensionOptimizer(RestrictToFunctionDefMixin,
                             UnrollListComp,
                             Optimizer):
    """Optimizer for ast.ListComp and ast.SetComp nodes."""
    def _optimize(self, tree):
        """Optimize the comprehension, then try to unroll it; unrolling can
        expose further stage-1 optimizations, so re-visit on success."""
        tree = self.generic_visit(tree)
        new_tree = self.unroll_comprehension(tree)
        if new_tree is not None:
            # run again stage1
            tree = self.generic_visit(new_tree)
        return tree
class FunctionOptimizer(NakedOptimizer,
                        CallPureBuiltin,
                        SimplifyIterableSpecialize,
                        InlineSubstitution,
                        CopyBuiltinToConstantStep):
    """Optimizer for ast.FunctionDef nodes.
    First, run FunctionOptimizerStage1 and then run optimizations which may
    create a specialized function.
    """
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        if self.parent is None:
            raise ValueError("parent is not set")
        self.funcdef_depth += 1
        # Guards that the specialized variant of the function will depend on.
        self._guards = []
        # FIXME: move this to the optimizer step?
        # global name => CopyBuiltinToConstant
        self.copy_builtin_to_constants = {}
    def add_guard(self, new_guard):
        """Register a guard for the specialized function.

        All guards are merged into a single BuiltinGuard; only BuiltinGuard
        instances are supported.
        """
        if not isinstance(new_guard, BuiltinGuard):
            raise ValueError("unsupported guard")
        if self._guards:
            guard = self._guards[0]
            guard.add(new_guard)
        else:
            self._guards.append(new_guard)
    def new_str_constant(self, value):
        """Return a string close to *value* that does not collide with any
        string constant already present in this function or its parent."""
        str_constants = self._new_str_constants
        str_constants |= self.parent._new_str_constants
        # FIXME: self.root is an old version of the tree, the new tree can
        # contain new strings
        visitor = FindStrVisitor.from_node(self.filename, self.root)
        str_constants |= visitor.str_constants
        visitor = FindStrVisitor.from_node(self.filename, self.parent.root)
        str_constants |= visitor.str_constants
        if value in str_constants:
            index = 2
            while True:
                new_value = "%s#%s" % (value, index)
                if new_value not in str_constants:
                    break
                index += 1
            value = new_value
        self._new_str_constants.add(value)
        self.parent._new_str_constants.add(value)
        return value
    def _patch_constants(self, node):
        """Rewrite builtin-name references to placeholder constants and build
        the constant->Name mapping used when specializing the function."""
        copy_builtin_to_constants = self.copy_builtin_to_constants.values()
        patch_constants = {}
        for copy_global in copy_builtin_to_constants:
            builtin_name = copy_global.global_name
            value = ast.Name(id=builtin_name, ctx=ast.Load())
            patch_constants[copy_global.unique_constant] = value
            self.add_guard(BuiltinGuard(builtin_name, reason='patch constant'))
        names = dict((copy_global.global_name, copy_global.unique_constant)
                     for copy_global in copy_builtin_to_constants)
        replace = ReplaceVariable(self.filename, names)
        new_node = replace.replace_func_def(node)
        return (new_node, patch_constants)
    def _specialize(self, func_node, new_node):
        """Emit the original function plus the fat.specialize() boilerplate
        registering *new_node* as a guarded specialized variant."""
        if self.copy_builtin_to_constants:
            new_node, patch_constants = self._patch_constants(new_node)
        else:
            patch_constants = None
        self.log(func_node, "specialize function %s, guards: %s",
                 func_node.name, self._guards)
        new_body = [func_node]
        tmp_name = self.parent.new_local_variable('_ast_optimized')
        func = SpecializedFunction(new_node.body, self._guards, patch_constants)
        modname = self.module.get_fat_module_name()
        for node in func.to_ast(modname, func_node, tmp_name):
            copy_lineno(func_node, node)
            new_body.append(node)
        return new_body
    def _stage1(self, tree):
        """Run the plain (non-specializing) function optimizations."""
        optimizer = FunctionOptimizerStage1.from_parent(self)
        return optimizer.optimize(tree)
    def optimize(self, func_node):
        """Optimize a function: stage 1 first, then specializing steps.

        Bails out on decorated functions and on nested functions (depth > 1),
        which are not supported for specialization.
        """
        func_node = self._stage1(func_node)
        if func_node.decorator_list:
            # FIXME: support decorators
            self.log(func_node, "skip optimisation: don't support decorators")
            return func_node
        # FIXME: specialize somehow nested functions?
        if self.funcdef_depth > 1:
            self.log(func_node,
                     "skip optimisation requiring specialization "
                     "on nested function")
            return func_node
        new_node = super().optimize(func_node)
        if self._guards:
            # calling pure functions, replacing range(n) with a tuple, etc.
            # can allow new optimizations with the stage 1
            new_node = self._stage1(new_node)
        if self.copy_builtin_to_constants or self._guards:
            new_node = self._specialize(func_node, new_node)
        return new_node
class ModuleOptimizer(Optimizer):
    """Optimizer for ast.Module nodes."""
    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        # Name under which the fat module is imported; lazily chosen.
        self._fat_module = None
    def get_fat_module_name(self):
        """Return (choosing it on first use) the alias for the fat import."""
        if not self._fat_module:
            # FIXME: ensure that the name is unique...
            self._fat_module = '__fat__'
        return self._fat_module
    def _replace_config(self, node):
        """Merge a literal __fatoptimizer__ dict into the configuration.

        NOTE(review): the return value (True on invalid config, implicitly
        None otherwise) is never used by the caller.
        """
        config = get_literal(node, types=dict, constant_items=True)
        if config is UNSET:
            # ignore invalid config
            return True
        # Replace the configuration
        # Note: unknown configuration options are ignored
        self.config = self.config.replace(config)
    def _find_config(self, body):
        """Scan module statements for a ``__fatoptimizer__ = {...}`` assignment."""
        # FIXME: only search in the N first statements?
        # Example: skip docstring, but stop at the first import?
        for node in body:
            if (isinstance(node, ast.Assign)
                    and len(node.targets) == 1
                    and isinstance(node.targets[0], ast.Name)
                    and node.targets[0].id == '__fatoptimizer__'):
                self._replace_config(node.value)
    def optimize(self, tree):
        """Optimize a whole module.

        Converts legacy constant nodes first, honours an in-module
        __fatoptimizer__ configuration (which may disable optimization
        entirely), and finally injects the fat import if any function was
        specialized.
        """
        orig_tree = tree
        tree = ConvertConstant(self.filename).visit(tree)
        if isinstance(tree, ast.Module):
            self._find_config(tree.body)
        if not self.config.enabled:
            self.log(tree,
                     "skip optimisation: disabled in __fatoptimizer__")
            return orig_tree
        tree = super().optimize(tree)
        if self._fat_module:
            add_import(tree, 'fat', self._fat_module)
        return tree
| haypo/fatoptimizer | fatoptimizer/optimizer.py | Python | mit | 13,703 | [
"VisIt"
] | d3b81c8da5b57bea5facbc5e5dbafe72a183e321054d2fcdd1d8cc7b29d00a1d |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with functions that interface with Grimme's GCP code."""
from __future__ import absolute_import, print_function
import os
import re
import uuid
import socket
import subprocess
try:
from psi4.driver.p4util.exceptions import *
from psi4 import core
isP4regime = True
except ImportError:
from .exceptions import *
isP4regime = False
from .p4regex import *
from .molecule import Molecule
def run_gcp(self, func=None, dertype=None, verbose=False):  # dashlvl=None, dashparam=None
    """Call Grimme's gCP program to compute the geometrical counterpoise
    correction for the method/basis combination *func*.

    Returns the correction energy if *dertype* is 0, the gradient if
    *dertype* is 1, or an (energy, gradient) tuple if *dertype* is
    unspecified. The `gcp` executable must be independently compiled and
    found in :envvar:`PATH` or :envvar:`PSIPATH`.

    *self* may be either a qcdb.Molecule (sensibly) or a psi4.Molecule
    (works b/c psi4.Molecule has been extended by this method py-side and
    only public interface fns used) or a string that can be instantiated
    into a qcdb.Molecule.

    Raises ValidationError for bad arguments or a missing executable and
    Dftd3Error for an unknown *func* or an unsuccessful gcp run.
    """
    # Python 2/3 compatible string type for the isinstance() check below
    # (`basestring` does not exist on Python 3).
    try:
        _string_types = basestring
    except NameError:
        _string_types = str

    # Create (if necessary) and update qcdb.Molecule
    if isinstance(self, Molecule):
        # called on a qcdb.Molecule
        pass
    elif isinstance(self, core.Molecule):
        # called on a python export of a psi4.core.Molecule (py-side through Psi4's driver)
        self.create_psi4_string_from_molecule()
    elif isinstance(self, _string_types):
        # called on a string representation of a psi4.Molecule (c-side through psi4.Dispersion)
        self = Molecule(self)
    else:
        raise ValidationError("""Argument mol must be psi4string or qcdb.Molecule""")
    self.update_geometry()

    # Validate the derivative level; -1 means "return energy and gradient".
    if dertype is None:
        dertype = -1
    elif der0th.match(str(dertype)):
        dertype = 0
    elif der1st.match(str(dertype)):
        dertype = 1
    else:
        raise ValidationError('Requested derivative level \'dertype\' %s not valid for run_gcp.' % (dertype))

    # TODO temp until figure out paramfile
    allowed_funcs = ['HF/MINIS', 'DFT/MINIS', 'HF/MINIX', 'DFT/MINIX',
        'HF/SV', 'DFT/SV', 'HF/def2-SV(P)', 'DFT/def2-SV(P)', 'HF/def2-SVP',
        'DFT/def2-SVP', 'HF/DZP', 'DFT/DZP', 'HF/def-TZVP', 'DFT/def-TZVP',
        'HF/def2-TZVP', 'DFT/def2-TZVP', 'HF/631Gd', 'DFT/631Gd',
        'HF/def2-TZVP', 'DFT/def2-TZVP', 'HF/cc-pVDZ', 'DFT/cc-pVDZ',
        'HF/aug-cc-pVDZ', 'DFT/aug-cc-pVDZ', 'DFT/SV(P/h,c)', 'DFT/LANL',
        'DFT/pobTZVP', 'TPSS/def2-SVP', 'PW6B95/def2-SVP',
        # specials
        'hf3c', 'pbeh3c']
    allowed_funcs = [f.lower() for f in allowed_funcs]
    # Guard func=None explicitly so the failure is a Dftd3Error rather than
    # an AttributeError from None.lower().
    if func is None or func.lower() not in allowed_funcs:
        raise Dftd3Error("""bad gCP func: %s. need one of: %r""" % (func, allowed_funcs))

    # Move ~/.dftd3par.<hostname> out of the way so it won't interfere.
    # NOTE(review): the file is never moved back (restoration code was
    # commented out upstream), so `defmoved` is currently write-only.
    defaultfile = os.path.expanduser('~') + '/.dftd3par.' + socket.gethostname()
    defmoved = False
    if os.path.isfile(defaultfile):
        os.rename(defaultfile, defaultfile + '_hide')
        defmoved = True

    # Find environment by merging PSIPATH and PATH environment variables
    lenv = {
        'PATH': ':'.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(':') if x != '']) + \
                ':' + os.environ.get('PATH'),
        'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH')
        }
    # Filter out None values as subprocess will fault on them
    lenv = {k: v for k, v in lenv.items() if v is not None}

    # Find out if running from Psi4 for scratch details and such.
    # (Deliberately shadows the module-level flag: re-check at call time.)
    try:
        import psi4
    except ImportError:
        isP4regime = False
    else:
        isP4regime = True

    # Setup unique scratch directory and move in
    current_directory = os.getcwd()
    if isP4regime:
        psioh = core.IOManager.shared_object()
        psio = core.IO.shared_object()
        os.chdir(psioh.get_default_path())
        gcp_tmpdir = 'psi.' + str(os.getpid()) + '.' + psio.get_default_namespace() + \
                     '.gcp.' + str(uuid.uuid4())[:8]
    else:
        gcp_tmpdir = os.environ['HOME'] + os.sep + 'gcp_' + str(uuid.uuid4())[:8]
    if os.path.exists(gcp_tmpdir) is False:
        os.mkdir(gcp_tmpdir)
    os.chdir(gcp_tmpdir)

    # gcp reads its parameters from $HOME/.gcppar.$HOSTNAME (or ./.gcppar
    # when run with -local); no parameter file is written here.

    # Write gcp_geometry.xyz that supplies the real-atom geometry to gcp;
    # ghost atoms ('Gh') are stripped since gcp knows nothing about them.
    numAtoms = self.natom()
    geom = self.save_string_xyz()
    reals = []
    for line in geom.splitlines():
        lline = line.split()
        if len(lline) != 4:
            continue
        if lline[0] == 'Gh':
            numAtoms -= 1
        else:
            reals.append(line)
    geomtext = str(numAtoms) + '\n\n'
    for line in reals:
        geomtext += line.strip() + '\n'
    geomfile = './gcp_geometry.xyz'
    with open(geomfile, 'w') as handle:
        handle.write(geomtext)
    # TODO somehow the variations on save_string_xyz and
    #   whether natom and chgmult does or doesn't get written
    #   have gotten all tangled. I fear this doesn't work
    #   the same btwn libmints and qcdb or for ghosts

    # Call gcp program
    command = ['gcp', geomfile]
    command.extend(['-level', func])
    if dertype != 0:
        command.append('-grad')
    try:
        dashout = subprocess.Popen(command, stdout=subprocess.PIPE, env=lenv)
    except OSError as e:
        raise ValidationError('Program gcp not found in path. %s' % e)
    out, err = dashout.communicate()

    # Parse output for the correction energy and the success marker.
    success = False
    dashd = None
    for line in out.splitlines():
        line = line.decode('utf-8')
        if re.match(' Egcp:', line):
            sline = line.split()
            dashd = float(sline[1])
        if re.match(' normal termination of gCP', line):
            success = True
    if not success or dashd is None:
        os.chdir(current_directory)
        raise Dftd3Error("""Unsuccessful gCP run.""")

    # Parse grad output. Gradient rows appear in real-atom order; ghost
    # atoms get zero rows so the array matches the full molecule.
    if dertype != 0:
        derivfile = './gcp_gradient'
        dashdderiv = []
        with open(derivfile, 'r') as dfile:
            for line in geom.splitlines():
                lline = line.split()
                if len(lline) != 4:
                    continue
                if lline[0] == 'Gh':
                    dashdderiv.append([0.0, 0.0, 0.0])
                else:
                    # Fortran-style 'D' exponents must become 'E' for float().
                    dashdderiv.append([float(x.replace('D', 'E')) for x in dfile.readline().split()])
        if len(dashdderiv) != self.natom():
            raise ValidationError('Program gcp gradient file has %d atoms- %d expected.' % \
                (len(dashdderiv), self.natom()))

    # Prepare results for Psi4
    psi_dashdderiv = None
    if isP4regime and dertype != 0:
        core.set_variable('GCP CORRECTION ENERGY', dashd)
        psi_dashdderiv = core.Matrix.from_list(dashdderiv)

    # Print program output to file if verbose
    if not verbose and isP4regime:
        verbose = True if core.get_option('SCF', 'PRINT') >= 3 else False
    if verbose:
        text = '\n  ==> GCP Output <==\n'
        text += out.decode('utf-8')
        if dertype != 0:
            with open(derivfile, 'r') as handle:
                text += handle.read().replace('D', 'E')
            text += '\n'
        if isP4regime:
            core.print_out(text)
        else:
            print(text)

    # Return to the original directory. NOTE(review): scratch files are
    # deliberately left behind (removal code was commented out upstream).
    os.chdir(current_directory)

    # return correction energy and/or gradient
    if dertype == -1:
        return dashd, dashdderiv
    elif dertype == 0:
        return dashd
    elif dertype == 1:
        # Outside Psi4 there is no core.Matrix; fall back to the plain
        # list-of-lists gradient (previously this path raised NameError).
        return psi_dashdderiv if psi_dashdderiv is not None else dashdderiv
# Bind run_gcp onto the C-side molecule class when running inside Psi4.
try:
    # Attach method to libmints psi4.Molecule class
    core.Molecule.run_gcp = run_gcp
except (NameError, AttributeError):
    # `core` was never imported (standalone qcdb regime); the function
    # will instead be attached to the qcdb.Molecule class elsewhere.
    pass
| jH0ward/psi4 | psi4/driver/qcdb/interface_gcp.py | Python | lgpl-3.0 | 11,855 | [
"Psi4"
] | c2692c7912f401fa1d86bb6280b8dab04c627a1789dc0516dbefe779b15d0e66 |
import os
try:
from hashlib import md5
except ImportError:
from md5 import md5
from docutils import nodes
from docutils.writers.html4css1 import HTMLTranslator
from sphinx.latexwriter import LaTeXTranslator
# Define LaTeX math node:
class latex_math(nodes.General, nodes.Element):
    """Docutils node carrying raw LaTeX source in self['latex']."""
    pass
def math_role(role, rawtext, text, lineno, inliner,
              options={}, content=[]):
    """Docutils role handler: wrap backquoted LaTeX in a latex_math node.

    The LaTeX source is everything between the first backquote and the
    closing backquote of *rawtext*.
    """
    backquote = rawtext.find('`')
    node = latex_math(rawtext)
    node['latex'] = rawtext[backquote + 1:-1]
    return [node], []
# Register the "math" directive. The docutils directive API changed over
# time, so fall back to pre-Directive-class registration when needed.
try:
    from docutils.parsers.rst import Directive
except ImportError:
    # Register directive the old way:
    from docutils.parsers.rst.directives import _directives
    def math_directive(name, arguments, options, content, lineno,
                       content_offset, block_text, state, state_machine):
        # Old-style (function) directive: join the body into one LaTeX string.
        latex = ' '.join(content)
        node = latex_math(block_text)
        node['latex'] = latex
        return [node]
    # Old-style directive metadata: no arguments, no options, has content.
    math_directive.arguments = None
    math_directive.options = {}
    math_directive.content = 1
    _directives['math'] = math_directive
else:
    class math_directive(Directive):
        # New-style (class) directive: body only, no arguments or options.
        has_content = True
        def run(self):
            latex = ' '.join(self.content)
            node = latex_math(self.block_text)
            node['latex'] = latex
            return [node]
    from docutils.parsers.rst import directives
    directives.register_directive('math', math_directive)
def setup(app):
    """Sphinx extension entry point: register the math node and role."""
    app.add_node(latex_math)
    app.add_role('math', math_role)
# Add visit/depart methods to HTML-Translator:
def visit_latex_math_html(self, node):
    # Render the LaTeX to a cached PNG and emit an <img> tag for it.
    source = self.document.attributes['source']
    self.body.append(latex2html(node, source))
def depart_latex_math_html(self, node):
    # Nothing to close; the <img> emitted in visit is self-contained.
    pass
# Monkey-patch the translator so latex_math nodes render in HTML output.
HTMLTranslator.visit_latex_math = visit_latex_math_html
HTMLTranslator.depart_latex_math = depart_latex_math_html
# Add visit/depart methods to LaTeX-Translator:
def visit_latex_math_latex(self, node):
    # Inline math becomes $...$; display math an unnumbered split equation.
    inline = isinstance(node.parent, nodes.TextElement)
    if inline:
        self.body.append('$%s$' % node['latex'])
    else:
        self.body.extend(['\\begin{equation*}\\begin{split}',
                          node['latex'],
                          '\\end{split}\\end{equation*}'])
def depart_latex_math_latex(self, node):
    pass
# Sub/superscript nodes are emitted as inline math fragments.
def visit_subscript(self, node):
    self.body.append('$_{')
def depart_subscript(self, node):
    self.body.append('}$')
def visit_superscript(self, node):
    self.body.append('$^{')
def depart_superscript(self, node):
    self.body.append('}$')
# Monkey-patch the translator so these handlers are used document-wide.
LaTeXTranslator.visit_latex_math = visit_latex_math_latex
LaTeXTranslator.depart_latex_math = depart_latex_math_latex
LaTeXTranslator.visit_subscript = visit_subscript
LaTeXTranslator.depart_subscript = depart_subscript
LaTeXTranslator.visit_superscript = visit_superscript
LaTeXTranslator.depart_superscript = depart_superscript
from os.path import isfile
# LaTeX to HTML translation stuff:
def latex2html(node, source):
    """Render *node*'s LaTeX to a cached PNG and return an <img> tag for it.

    The PNG and its baseline depth (stored in a sibling .txt file) are
    cached under _static/ keyed on the md5 of the LaTeX source.
    """
    inline = isinstance(node.parent, nodes.TextElement)
    latex = node['latex']
    # NOTE(review): md5() requires bytes on Python 3; this module is
    # Python 2 (print statements elsewhere), where str is bytes.
    name = 'math-' + md5(latex).hexdigest()[-10:]
    pngname = '_static/%s.png' % name
    txtname = '_static/%s.txt' % name
    if not isfile(pngname):
        depth = make_png(latex, pngname, inline)
        # Persist the depth so later builds can skip latex/dvipng entirely.
        txtfile = open(txtname, 'w')
        txtfile.write(str(depth) + '\n')
        txtfile.close()
    else:
        depth = int(open(txtname).read().strip())
    # Relative path from the document back up to _static.
    path = source.split('/doc/')[-1].count('/') * '../' + '_static'
    if inline:
        cls = ''
        # Shift the image down so its baseline lines up with the text.
        align = 'style="vertical-align: -%dpx" ' % depth
        term = ''
    else:
        cls = 'class="center" '
        align = ''
        term = '<br>'
    # BUGFIX: the raw LaTeX was interpolated unescaped into a double-quoted
    # alt attribute; '"', '<' and '&' would break the markup. Escape it.
    from xml.sax.saxutils import escape
    alt = escape(latex, {'"': '&quot;'})
    return '<img src="%s/%s.png" alt="%s" %s%s />%s' % (path, name, alt,
                                                        align, cls, term)
def make_png(latex, name, inline):
    """Make png file and return the depth relative to baseline."""
    # Unfortunately we have to store depth info
    # NOTE: Python 2 module -- the statement below uses py2 print syntax.
    print latex,
    # Build a minimal LaTeX document; the 'preview' package crops the
    # output tightly around the equation.
    f = open('math.tex', 'w')
    f.write(r"""\documentclass{article}
\usepackage[active]{preview}
\usepackage{amsmath,amssymb}
\begin{document}
\begin{preview}""")
    if inline:
        f.write(r'$%s$' % latex)
    else:
        f.write(r'\[ %s \]' % latex)
    f.write(r'\end{preview}\end{document}')
    f.close()
    # Compile in the current working directory; math.tex/math.dvi are
    # scratch files left behind afterwards.
    status = os.system('latex --interaction=nonstopmode math.tex > /dev/null')
    if status != 0:
        raise RuntimeError('mathpng failed on equation: ' + latex)
    cmd = ('dvipng -bgTransparent -Ttight --noghostscript -l10 ' +
           '--depth -D 136 -o %s math.dvi' % name)
    dvipng = os.popen(cmd, 'r')
    output = dvipng.read()
    # Presumably dvipng's report ends like '... depth=3]'; take the text
    # after the last '=' and drop the trailing character -- TODO confirm
    # against the dvipng version in use.
    depth = int(output.split('=')[-1].strip()[:-1])
    return depth
def main():
    """Manual smoke test: render one sample equation to pngtestfile.png."""
    sample = r'\sum_{\mu\nu} T_{\mu \nu} \rho_{\nu\mu}'
    make_png(sample, 'pngtestfile.png', True)
if __name__ == '__main__':
    main()
| qsnake/gpaw | doc/mathpng.py | Python | gpl-3.0 | 5,150 | [
"VisIt"
] | 0059f771d0e3829e0079a996008bd3b77e9c5f96c0180cfba64b74f50d0bb4e4 |
#
# This source file is part of appleseed.
# Visit https://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2019 Jonathan Dent, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import bpy
from ..utils import util
class ASCAMERA_PT_lens(bpy.types.Panel):
    """Camera 'Lens' properties panel for the appleseed render engine."""
    bl_label = "Lens"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    COMPAT_ENGINES = {'APPLESEED_RENDER'}
    bl_context = "data"

    @classmethod
    def poll(cls, context):
        # Visible only for camera objects while appleseed is the engine.
        engine_ok = context.scene.render.engine == 'APPLESEED_RENDER'
        return engine_ok and context.active_object.type == 'CAMERA'

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        camera = context.camera
        as_props = camera.appleseed
        layout.row().prop(camera, "type")
        column = layout.column()
        if camera.type == 'PERSP':
            # Perspective: focal length or FOV depending on the unit mode.
            if camera.lens_unit == 'MILLIMETERS':
                column.prop(camera, "lens")
            elif camera.lens_unit == 'FOV':
                column.prop(camera, "angle")
            column.prop(camera, "lens_unit")
        elif camera.type == 'ORTHO':
            column.prop(camera, "ortho_scale")
        elif camera.type == "PANO":
            # Panoramic: fisheye projection selector plus lens controls.
            projection_row = column.row()
            projection_row.prop(as_props, "fisheye_projection_type", text="Fisheye Projection")
            lens_row = column.row()
            if camera.lens_unit == 'MILLIMETERS':
                lens_row.prop(camera, "lens")
            elif camera.lens_unit == 'FOV':
                lens_row.prop(camera, "angle")
            lens_row.prop(camera, "lens_unit", text="")
class ASCAMERA_PT_lens_shift(bpy.types.Panel):
    """Sub-panel of the Lens panel exposing X/Y lens shift."""
    bl_parent_id = "ASCAMERA_PT_lens"
    bl_label = "Shift"
    bl_options = {'DEFAULT_CLOSED'}
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    COMPAT_ENGINES = {'APPLESEED_RENDER'}
    bl_context = "data"

    @classmethod
    def poll(cls, context):
        engine_ok = context.scene.render.engine == 'APPLESEED_RENDER'
        return engine_ok and context.active_object.type == 'CAMERA'

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        shift_column = layout.column(align=True)
        shift_column.prop(context.camera, "shift_x", text="X")
        shift_column.prop(context.camera, "shift_y", text="Y")
class ASCAMERA_PT_lens_clip(bpy.types.Panel):
    """Sub-panel of the Lens panel exposing near/far clip distances."""
    bl_parent_id = "ASCAMERA_PT_lens"
    bl_label = "Clip"
    bl_options = {'DEFAULT_CLOSED'}
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    COMPAT_ENGINES = {'APPLESEED_RENDER'}
    bl_context = "data"

    @classmethod
    def poll(cls, context):
        engine_ok = context.scene.render.engine == 'APPLESEED_RENDER'
        return engine_ok and context.active_object.type == 'CAMERA'

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        # Near clip is appleseed's own property; far clip comes from Blender.
        as_cam_props = context.scene.camera.data.appleseed
        clip_column = layout.column(align=True)
        clip_column.prop(as_cam_props, "near_z", text="Clip Start")
        clip_column.prop(context.camera, "clip_end", text="End")
class ASCAMERA_PT_dof(bpy.types.Panel):
    """Camera data panel exposing appleseed's depth-of-field controls."""
    bl_label = "Depth of Field"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    COMPAT_ENGINES = {'APPLESEED_RENDER'}
    bl_context = "data"

    @classmethod
    def poll(cls, context):
        # Show only for cameras under appleseed, and only for camera models
        # that support DoF: perspective, or panoramic with a fisheye
        # projection enabled.
        renderer = context.scene.render
        is_context = renderer.engine == 'APPLESEED_RENDER' and context.active_object.type == 'CAMERA'
        if hasattr(context.active_object.data, "type"):
            cam_data = context.active_object.data
            # BUGFIX: was `is not 'none'` -- identity comparison against a
            # str literal, which is implementation-defined; use `!=`.
            is_model = cam_data.type == 'PERSP' or (cam_data.type == 'PANO'
                       and cam_data.appleseed.fisheye_projection_type != 'none')
        else:
            is_model = False
        return is_context and is_model

    def draw_header(self, context):
        # The panel header doubles as the enable/disable toggle.
        header = self.layout
        scene = context.scene
        asr_cam_props = scene.camera.data.appleseed
        header.prop(asr_cam_props, "enable_dof", text="")

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        scene = context.scene
        cam = scene.camera
        asr_cam_props = scene.camera.data.appleseed
        # Grey out everything unless DoF is enabled in the header.
        layout.active = asr_cam_props.enable_dof
        layout.prop(asr_cam_props, "enable_autofocus", text="Enable Autofocus")
        col = layout.column()
        # Manual focus controls are inactive while autofocus is on.
        col.active = not asr_cam_props.enable_autofocus
        row = col.row()
        # Focal distance only applies when no focus object is set.
        row.active = cam.data.dof.focus_object is None
        row.prop(cam.data.dof, "focus_distance", text="Focal Distance")
        row = col.row()
        row.prop(cam.data.dof, "focus_object", text='Focus on Object')
        layout.prop(asr_cam_props, "f_number", text="F-Number")
        layout.prop(asr_cam_props, "diaphragm_blades", text="Blades")
        layout.prop(asr_cam_props, "diaphragm_angle", text="Tilt Angle")
        layout.template_ID(asr_cam_props, "diaphragm_map", open="image.open")
        if asr_cam_props.diaphragm_map is not None:  # idiom fix: was `!= None`
            as_diaphragm_map = asr_cam_props.diaphragm_map
            layout.prop(as_diaphragm_map.appleseed, "as_color_space", text="Color Space")
            layout.prop(as_diaphragm_map.appleseed, "as_wrap_mode", text="Wrap Mode")
            layout.prop(as_diaphragm_map.appleseed, "as_alpha_mode", text="Alpha Mode")
# Panels registered by this module (order matters for unregister).
classes = (
    ASCAMERA_PT_dof,
    ASCAMERA_PT_lens,
    ASCAMERA_PT_lens_shift,
    ASCAMERA_PT_lens_clip
)


def register():
    """Register all camera UI panels with Blender."""
    for panel in classes:
        util.safe_register_class(panel)


def unregister():
    """Unregister the panels in reverse registration order."""
    for panel in reversed(classes):
        util.safe_unregister_class(panel)
| dictoon/blenderseed | ui/camera.py | Python | mit | 6,748 | [
"VisIt"
] | 73a5e30a3bde0188b6f7ee461b594cbcae15795e6498c90e8a80fc9461ebbcdd |
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Code is originally adapted from MILK: Machine Learning Toolkit
# Copyright (C) 2008-2011, Luis Pedro Coelho <luis@luispedro.org>
# License: MIT. See COPYING.MIT file in the milk distribution
# Authors: Brian Holt, Peter Prettenhofer, Satrajit Ghosh, Gilles Louppe
# License: BSD3
from __future__ import division
import numpy as np
from abc import ABCMeta, abstractmethod
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..feature_selection.selector_mixin import SelectorMixin
from ..utils import array2d, check_random_state
from ..utils.validation import check_arrays
from . import _tree
__all__ = ["DecisionTreeClassifier",
           "DecisionTreeRegressor",
           "ExtraTreeClassifier",
           "ExtraTreeRegressor"]
# Aliases for the dtypes the Cython tree implementation expects
# (float32 inputs, float64 targets -- see usages in fit below).
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
# Split-criterion factories keyed by the user-facing `criterion` string.
CLASSIFICATION = {
    "gini": _tree.Gini,
    "entropy": _tree.Entropy,
}
REGRESSION = {
    "mse": _tree.MSE,
}
def export_graphviz(decision_tree, out_file=None, feature_names=None):
    """Export a decision tree in DOT format.

    This function generates a GraphViz representation of the decision tree,
    which is then written into `out_file`. Once exported, graphical renderings
    can be generated using, for example::

        $ dot -Tps tree.dot -o tree.ps      (PostScript format)
        $ dot -Tpng tree.dot -o tree.png    (PNG format)

    Parameters
    ----------
    decision_tree : decision tree classifier
        The decision tree to be exported to graphviz.

    out_file : file object or string, optional (default=None)
        Handle or name of the output file. If None, ``tree.dot`` is
        created in the current working directory.

    feature_names : list of strings, optional (default=None)
        Names of each of the features.

    Returns
    -------
    out_file : file object
        The file object to which the tree was exported. The user is
        expected to `close()` this object when done with it.

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn import tree
    >>> clf = tree.DecisionTreeClassifier()
    >>> iris = load_iris()
    >>> clf = clf.fit(iris.data, iris.target)
    >>> import tempfile
    >>> out_file = tree.export_graphviz(clf, out_file=tempfile.TemporaryFile())
    >>> out_file.close()
    """
    def node_to_str(tree, node_id):
        # Build the DOT label: leaves show error/samples/value, internal
        # nodes additionally show the split feature and threshold.
        value = tree.value[node_id]
        if tree.n_outputs == 1:
            value = value[0, :]
        if tree.children_left[node_id] == _tree.TREE_LEAF:
            return "error = %.4f\\nsamples = %s\\nvalue = %s" \
                   % (tree.init_error[node_id],
                      tree.n_samples[node_id],
                      value)
        else:
            if feature_names is not None:
                feature = feature_names[tree.feature[node_id]]
            else:
                feature = "X[%s]" % tree.feature[node_id]
            return "%s <= %.4f\\nerror = %s\\nsamples = %s\\nvalue = %s" \
                   % (feature,
                      tree.threshold[node_id],
                      tree.init_error[node_id],
                      tree.n_samples[node_id],
                      value)
    def recurse(tree, node_id, parent=None):
        # Depth-first emission of one DOT node statement per tree node,
        # plus an edge from its parent (when it has one).
        if node_id == _tree.TREE_LEAF:
            raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
        left_child = tree.children_left[node_id]
        right_child = tree.children_right[node_id]
        # Add node with description
        out_file.write('%d [label="%s", shape="box"] ;\n' %
                (node_id, node_to_str(tree, node_id)))
        if parent is not None:
            # Add edge to parent
            out_file.write('%d -> %d ;\n' % (parent, node_id))
        # Children are both present or both absent, so testing the left
        # child suffices before recursing into both subtrees.
        if left_child != _tree.TREE_LEAF:  # and right_child != _tree.TREE_LEAF
            recurse(tree, left_child, node_id)
            recurse(tree, right_child, node_id)
    if out_file is None:
        out_file = open("tree.dot", "w")
    elif isinstance(out_file, basestring):
        # NOTE: `basestring` makes this function Python 2 only.
        out_file = open(out_file, "w")
    out_file.write("digraph Tree {\n")
    # Accept either the raw Cython tree or a fitted estimator wrapping one.
    if isinstance(decision_tree, _tree.Tree):
        recurse(decision_tree, 0)
    else:
        recurse(decision_tree.tree_, 0)
    out_file.write("}")
    return out_file
class BaseDecisionTree(BaseEstimator, SelectorMixin):
    """Base class for decision trees.

    Warning: This class should not be used directly.
    Use derived classes instead.
    """
    __metaclass__ = ABCMeta
    @abstractmethod
    def __init__(self,
                 criterion,
                 max_depth,
                 min_samples_split,
                 min_samples_leaf,
                 min_density,
                 max_features,
                 compute_importances,
                 random_state):
        # Hyper-parameters; validated lazily in fit(), not here.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_density = min_density
        self.max_features = max_features
        self.compute_importances = compute_importances
        self.random_state = random_state
        # Fitted state; populated by fit().
        self.n_features_ = None
        self.n_outputs_ = None
        self.classes_ = None
        self.n_classes_ = None
        # Split strategy; subclasses (e.g. extra-trees) may override this.
        self.find_split_ = _tree.TREE_SPLIT_BEST
        self.tree_ = None
        self.feature_importances_ = None
    def fit(self, X, y, sample_mask=None, X_argsorted=None, check_input=True):
        """Build a decision tree from the training set (X, y).

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            The training input samples. Use ``dtype=np.float32``
            and ``order='F'`` for maximum efficiency.

        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The target values (integers that correspond to classes in
            classification, real numbers in regression).
            Use ``dtype=np.float64`` and ``order='C'`` for maximum
            efficiency.

        sample_mask : array-like, shape = [n_samples], dtype = bool or None
            A bit mask that encodes the rows of ``X`` that should be
            used to build the decision tree. It can be used for bagging
            without the need to create of copy of ``X``.
            If None a mask will be created that includes all samples.

        X_argsorted : array-like, shape = [n_samples, n_features] or None
            Each column of ``X_argsorted`` holds the row indices of ``X``
            sorted according to the value of the corresponding feature
            in ascending order.
            I.e. ``X[X_argsorted[i, k], k] <= X[X_argsorted[j, k], k]``
            for each j > i.
            If None, ``X_argsorted`` is computed internally.
            The argument is supported to enable multiple decision trees
            to share the data structure and to avoid re-computation in
            tree ensembles. For maximum efficiency use dtype np.int32.

        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.

        Returns
        -------
        self : object
            Returns self.
        """
        if check_input:
            X, y = check_arrays(X, y)
        # NOTE(review): both statements below overwrite the user-supplied
        # constructor parameters in place (RandomState instance, adjusted
        # split threshold) rather than using local copies.
        self.random_state = check_random_state(self.random_state)
        # set min_samples_split sensibly
        self.min_samples_split = max(self.min_samples_split,
                                     2 * self.min_samples_leaf)
        # Convert data
        # The Cython tree code requires float32, Fortran-ordered input.
        if (getattr(X, "dtype", None) != DTYPE or X.ndim != 2 or not
                X.flags.fortran):
            X = array2d(X, dtype=DTYPE, order="F")
        n_samples, self.n_features_ = X.shape
        is_classification = isinstance(self, ClassifierMixin)
        y = np.atleast_1d(y)
        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))
        self.n_outputs_ = y.shape[1]
        if is_classification:
            # Encode class labels as consecutive integers per output.
            y = np.copy(y)
            self.classes_ = []
            self.n_classes_ = []
            for k in xrange(self.n_outputs_):
                unique = np.unique(y[:, k])
                self.classes_.append(unique)
                self.n_classes_.append(unique.shape[0])
                y[:, k] = np.searchsorted(unique, y[:, k])
        else:
            self.classes_ = [None] * self.n_outputs_
            self.n_classes_ = [1] * self.n_outputs_
        # Targets must be float64, C-contiguous for the Cython tree code.
        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)
        if is_classification:
            criterion = CLASSIFICATION[self.criterion](self.n_outputs_,
                                                       self.n_classes_)
        else:
            criterion = REGRESSION[self.criterion](self.n_outputs_)
        # Check parameters
        max_depth = np.inf if self.max_depth is None else self.max_depth
        if isinstance(self.max_features, basestring):
            if self.max_features == "auto":
                # "auto" means sqrt for classification, all features for
                # regression.
                if is_classification:
                    max_features = max(1, int(np.sqrt(self.n_features_)))
                else:
                    max_features = self.n_features_
            elif self.max_features == "sqrt":
                max_features = max(1, int(np.sqrt(self.n_features_)))
            elif self.max_features == "log2":
                max_features = max(1, int(np.log2(self.n_features_)))
            else:
                raise ValueError(
                    'Invalid value for max_features. Allowed string '
                    'values are "auto", "sqrt" or "log2".')
        elif self.max_features is None:
            max_features = self.n_features_
        else:
            max_features = self.max_features
        if len(y) != n_samples:
            raise ValueError("Number of labels=%d does not match "
                             "number of samples=%d" % (len(y), n_samples))
        if self.min_samples_split <= 0:
            raise ValueError("min_samples_split must be greater than zero.")
        if self.min_samples_leaf <= 0:
            raise ValueError("min_samples_leaf must be greater than zero.")
        if max_depth <= 0:
            raise ValueError("max_depth must be greater than zero. ")
        if self.min_density < 0.0 or self.min_density > 1.0:
            raise ValueError("min_density must be in [0, 1]")
        if not (0 < max_features <= self.n_features_):
            raise ValueError("max_features must be in (0, n_features]")
        if sample_mask is not None:
            sample_mask = np.asarray(sample_mask, dtype=np.bool)
            if sample_mask.shape[0] != n_samples:
                raise ValueError("Length of sample_mask=%d does not match "
                                 "number of samples=%d"
                                 % (sample_mask.shape[0], n_samples))
        if X_argsorted is not None:
            X_argsorted = np.asarray(X_argsorted, dtype=np.int32,
                                     order='F')
            if X_argsorted.shape != X.shape:
                raise ValueError("Shape of X_argsorted does not match "
                                 "the shape of X")
        # Build tree
        self.tree_ = _tree.Tree(self.n_features_, self.n_classes_,
                                self.n_outputs_, criterion, max_depth,
                                self.min_samples_split, self.min_samples_leaf,
                                self.min_density, max_features,
                                self.find_split_, self.random_state)
        self.tree_.build(X, y, sample_mask=sample_mask,
                         X_argsorted=X_argsorted)
        # Unwrap single-output metadata from its per-output lists.
        if self.n_outputs_ == 1:
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]
        if self.compute_importances:
            self.feature_importances_ = \
                self.tree_.compute_feature_importances()
        return self
    def predict(self, X):
        """Predict class or regression target for X.

        For a classification model, the predicted class for each sample in X is
        returned. For a regression model, the predicted value based on X is
        returned.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted classes, or the predict values.
        """
        if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
            X = array2d(X, dtype=DTYPE, order="F")
        n_samples, n_features = X.shape
        if self.tree_ is None:
            raise Exception("Tree not initialized. Perform a fit first")
        if self.n_features_ != n_features:
            raise ValueError("Number of features of the model must "
                             " match the input. Model n_features is %s and "
                             " input n_features is %s "
                             % (self.n_features_, n_features))
        # The Cython tree returns per-output class probabilities / values.
        proba = self.tree_.predict(X)
        # Classification
        if isinstance(self, ClassifierMixin):
            if self.n_outputs_ == 1:
                # Map argmax class indices back to the original labels.
                return np.array(self.classes_.take(
                    np.argmax(proba[:, 0], axis=1), axis=0))
            else:
                predictions = np.zeros((n_samples, self.n_outputs_))
                for k in xrange(self.n_outputs_):
                    predictions[:, k] = self.classes_[k].take(
                        np.argmax(proba[:, k], axis=1), axis=0)
                return predictions
        # Regression
        else:
            if self.n_outputs_ == 1:
                return proba[:, 0, 0]
            else:
                return proba[:, :, 0]
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
    """A decision tree classifier.

    Parameters
    ----------
    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.
    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
    min_samples_split : integer, optional (default=1)
        The minimum number of samples required to split an internal node.
    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples required to be at a leaf node.
    min_density : float, optional (default=0.1)
        This parameter controls a trade-off in an optimization heuristic. It
        controls the minimum density of the `sample_mask` (i.e. the
        fraction of samples in the mask). If the density falls below this
        threshold the mask is recomputed and the input data is packed
        which results in data copying. If `min_density` equals to one,
        the partitions are always represented as copies of the original
        data. Otherwise, partitions are represented as bit masks (aka
        sample masks).
    max_features : int, string or None, optional (default=None)
        The number of features to consider when looking for the best split.
        If "auto", then `max_features=sqrt(n_features)` on classification
        tasks and `max_features=n_features` on regression problems. If "sqrt",
        then `max_features=sqrt(n_features)`. If "log2", then
        `max_features=log2(n_features)`. If None, then
        `max_features=n_features`.
    compute_importances : boolean, optional (default=False)
        Whether feature importances are computed and stored into the
        ``feature_importances_`` attribute when calling fit.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    `tree_` : Tree object
        The underlying Tree object.
    `feature_importances_` : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).
        The importance I(f) of a feature f is computed as the (normalized)
        total reduction of error brought by that feature. It is also known as
        the Gini importance [4]_.

    See also
    --------
    DecisionTreeRegressor

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.
    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.
    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.cross_validation import cross_val_score
    >>> from sklearn.tree import DecisionTreeClassifier
    >>> clf = DecisionTreeClassifier(random_state=0)
    >>> iris = load_iris()
    >>> cross_val_score(clf, iris.data, iris.target, cv=10)
    ... # doctest: +SKIP
    ...
    array([ 1.     ,  0.93...,  0.86...,  0.93...,  0.93...,
            0.93...,  0.93...,  1.     ,  0.93...,  1.      ])
    """
    def __init__(self,
                 criterion="gini",
                 max_depth=None,
                 min_samples_split=1,
                 min_samples_leaf=1,
                 min_density=0.1,
                 max_features=None,
                 compute_importances=False,
                 random_state=None):
        # All parameter handling is delegated to BaseDecisionTree.
        super(DecisionTreeClassifier, self).__init__(criterion,
                                                     max_depth,
                                                     min_samples_split,
                                                     min_samples_leaf,
                                                     min_density,
                                                     max_features,
                                                     compute_importances,
                                                     random_state)

    def predict_proba(self, X):
        """Predict class probabilities of the input samples X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. Classes are ordered
            by arithmetical order.
        """
        # Convert only when needed: the Cython tree expects a 2-d,
        # Fortran-ordered array of dtype DTYPE.
        if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
            X = array2d(X, dtype=DTYPE, order="F")
        n_samples, n_features = X.shape
        if self.tree_ is None:
            raise Exception("Tree not initialized. Perform a fit first.")
        if self.n_features_ != n_features:
            # Fixed: the concatenated literals previously produced doubled
            # and trailing spaces in the message ("must  match ... %s ").
            raise ValueError("Number of features of the model must "
                             "match the input. Model n_features is %s and "
                             "input n_features is %s"
                             % (self.n_features_, n_features))
        proba = self.tree_.predict(X)
        if self.n_outputs_ == 1:
            proba = proba[:, 0, :self.n_classes_]
            # Guard against all-zero rows before normalising to probabilities.
            normalizer = proba.sum(axis=1)[:, np.newaxis]
            normalizer[normalizer == 0.0] = 1.0
            proba /= normalizer
            return proba
        else:
            all_proba = []
            for k in xrange(self.n_outputs_):
                proba_k = proba[:, k, :self.n_classes_[k]]
                normalizer = proba_k.sum(axis=1)[:, np.newaxis]
                normalizer[normalizer == 0.0] = 1.0
                proba_k /= normalizer
                all_proba.append(proba_k)
            return all_proba

    def predict_log_proba(self, X):
        """Predict class log-probabilities of the input samples X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class log-probabilities of the input samples. Classes are
            ordered by arithmetical order.
        """
        proba = self.predict_proba(X)
        if self.n_outputs_ == 1:
            return np.log(proba)
        else:
            # Transform each per-output probability array in place.
            for k in xrange(self.n_outputs_):
                proba[k] = np.log(proba[k])
            return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
    """A tree regressor.
    Parameters
    ----------
    criterion : string, optional (default="mse")
        The function to measure the quality of a split. The only supported
        criterion is "mse" for the mean squared error.
    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
    min_samples_split : integer, optional (default=1)
        The minimum number of samples required to split an internal node.
    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples required to be at a leaf node.
    min_density : float, optional (default=0.1)
        This parameter controls a trade-off in an optimization heuristic. It
        controls the minimum density of the `sample_mask` (i.e. the
        fraction of samples in the mask). If the density falls below this
        threshold the mask is recomputed and the input data is packed
        which results in data copying. If `min_density` equals to one,
        the partitions are always represented as copies of the original
        data. Otherwise, partitions are represented as bit masks (aka
        sample masks).
    max_features : int, string or None, optional (default=None)
        The number of features to consider when looking for the best split.
        If "auto", then `max_features=sqrt(n_features)` on classification
        tasks and `max_features=n_features` on regression problems. If "sqrt",
        then `max_features=sqrt(n_features)`. If "log2", then
        `max_features=log2(n_features)`. If None, then
        `max_features=n_features`.
    compute_importances : boolean, optional (default=False)
        Whether feature importances are computed and stored into the
        ``feature_importances_`` attribute when calling fit.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Attributes
    ----------
    `tree_` : Tree object
        The underlying Tree object.
    `feature_importances_` : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).
        The importance I(f) of a feature f is computed as the (normalized)
        total reduction of error brought by that feature. It is also known as
        the Gini importance [4]_.
    See also
    --------
    DecisionTreeClassifier
    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.
    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.
    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
    Examples
    --------
    >>> from sklearn.datasets import load_boston
    >>> from sklearn.cross_validation import cross_val_score
    >>> from sklearn.tree import DecisionTreeRegressor
    >>> boston = load_boston()
    >>> regressor = DecisionTreeRegressor(random_state=0)
    R2 scores (a.k.a. coefficient of determination) over 10-folds CV:
    >>> cross_val_score(regressor, boston.data, boston.target, cv=10)
    ... # doctest: +SKIP
    ...
    array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
            0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
    """
    def __init__(self,
                 criterion="mse",
                 max_depth=None,
                 min_samples_split=1,
                 min_samples_leaf=1,
                 min_density=0.1,
                 max_features=None,
                 compute_importances=False,
                 random_state=None):
        # All parameter handling is delegated to BaseDecisionTree.
        super(DecisionTreeRegressor, self).__init__(criterion,
                                                    max_depth,
                                                    min_samples_split,
                                                    min_samples_leaf,
                                                    min_density,
                                                    max_features,
                                                    compute_importances,
                                                    random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
    """An extremely randomized tree classifier.

    Extra-trees are built differently from classic decision trees: when
    splitting a node, one random split is drawn for each of the
    `max_features` randomly selected features, and the best of those random
    splits is kept. With `max_features` set to 1 this builds a totally
    random decision tree.

    Warning: Extra-trees should only be used within ensemble methods.

    See also
    --------
    ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """
    def __init__(self,
                 criterion="gini",
                 max_depth=None,
                 min_samples_split=1,
                 min_samples_leaf=1,
                 min_density=0.1,
                 max_features="auto",
                 compute_importances=False,
                 random_state=None):
        # Parameter storage is handled entirely by the parent class.
        super(ExtraTreeClassifier, self).__init__(
            criterion, max_depth, min_samples_split, min_samples_leaf,
            min_density, max_features, compute_importances, random_state)
        # Use random split thresholds instead of exhaustive best-split search.
        self.find_split_ = _tree.TREE_SPLIT_RANDOM
class ExtraTreeRegressor(DecisionTreeRegressor):
    """An extremely randomized tree regressor.

    Extra-trees are built differently from classic decision trees: when
    splitting a node, one random split is drawn for each of the
    `max_features` randomly selected features, and the best of those random
    splits is kept. With `max_features` set to 1 this builds a totally
    random decision tree.

    Warning: Extra-trees should only be used within ensemble methods.

    See also
    --------
    ExtraTreeClassifier : A classifier based on extremely randomized trees
    sklearn.ensemble.ExtraTreesClassifier : An ensemble of extra-trees for
        classification
    sklearn.ensemble.ExtraTreesRegressor : An ensemble of extra-trees for
        regression

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """
    def __init__(self,
                 criterion="mse",
                 max_depth=None,
                 min_samples_split=1,
                 min_samples_leaf=1,
                 min_density=0.1,
                 max_features="auto",
                 compute_importances=False,
                 random_state=None):
        # Parameter storage is handled entirely by the parent class.
        super(ExtraTreeRegressor, self).__init__(
            criterion, max_depth, min_samples_split, min_samples_leaf,
            min_density, max_features, compute_importances, random_state)
        # Use random split thresholds instead of exhaustive best-split search.
        self.find_split_ = _tree.TREE_SPLIT_RANDOM
| mrshu/scikit-learn | sklearn/tree/tree.py | Python | bsd-3-clause | 29,584 | [
"Brian"
] | 76fe85ecaaafb7bfef3a7be69c0c9e7526e33781c8fbafc0b45629418ca3e08f |
#
# Handle the special case of the first scenario
#
self.notebook.switchScenario(0,scenarioType="Powder")
#
# Main tab: calculation program and output-file selection
#
tab = self.notebook.mainTab
tab.settings['Program'] = 'vasp'
tab.settings['Output file name'] = 'OUTCAR'
tab.settings['Excel file name'] = ''
tab.settings['Script file name'] = 'atr_fitted_settings.py'
tab.settings['QM program'] = 'vasp'
#
# Settings tab: physics options and per-mode Lorentzian widths (cm-1)
#
tab = self.notebook.settingsTab
tab.settings['Eckart flag'] = True
tab.settings['Neutral Born charges'] = False
tab.settings['Sigma value'] = 5
tab.settings['Mass definition'] = 'average'
tab.settings['Optical permittivity edited'] = False
tab.sigmas_cm1 = [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 10.0, 200.0, 5, 10.0, 5, 10.0, 5, 100.0, 5, 5, 35.0, 5, 130.0, 5, 100.0, 5]
#
# Scenario 1: powder in air, effective-medium and ATR geometry options
#
tab = self.notebook.scenarios[0]
tab.settings['Legend'] = 'Scenario 1'
tab.settings['Scenario type'] = 'Powder'
tab.settings['Matrix'] = 'air'
tab.settings['Matrix density'] = 0.001225
tab.settings['Matrix permittivity'] = 1.0
tab.settings['Bubble radius'] = 30.0
tab.settings['Bubble volume fraction'] = 0.0
tab.settings['Mass fraction'] = 0.999882372589573
tab.settings['Volume fraction'] = 0.7999999999999295
tab.settings['Particle size(mu)'] = 0.0001
tab.settings['Particle size distribution sigma(mu)'] = 0.0
tab.settings['Ellipsoid a/b'] = 1.0
tab.settings['Unique direction - h'] = 0
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 1
tab.settings['Mass or volume fraction'] = 'volume'
tab.settings['ATR material refractive index'] = 4.0
tab.settings['ATR theta'] = 45.0
tab.settings['ATR S polarisation fraction'] = 0.5
tab.settings['Effective medium method'] = 'Maxwell-Garnett'
tab.settings['Particle shape'] = 'Sphere'
#
# Analysis tab: frequency window and bonding analysis parameters
#
tab = self.notebook.analysisTab
tab.settings['Minimum frequency'] = -1
tab.settings['Maximum frequency'] = 400
tab.settings['title'] = 'Analysis'
tab.settings['Covalent radius scaling'] = 1.1
tab.settings['Bonding tolerance'] = 0.1
tab.settings['Bar width'] = 0.5
#
# Viewer tab: 3D display colours and geometry
#
tab = self.notebook.viewerTab
tab.settings['Atom scaling'] = 0.5
tab.settings['Maximum displacement'] = 1.0
tab.settings['Bond colour'] = [80, 80, 80, 255]
tab.settings['Bond radius'] = 0.1
tab.settings['Cell colour'] = [255, 0, 0, 255]
tab.settings['Cell radius'] = 0.1
tab.settings['Background colour'] = [120, 120, 120, 255]
tab.settings['Arrow colour'] = [0, 255, 0, 255]
tab.settings['Arrow radius'] = 0.07
tab.settings['Number of phase steps'] = 41
tab.settings['Super Cell'] = [1, 1, 1]
#
# Fitter tab: experimental spectrum and fitting options
#
tab = self.notebook.fitterTab
tab.settings['Excel file name'] = 'ATR_Spectra_for_fitting.xlsx'
tab.settings['Plot title'] = 'Experimental and Calculated Spectral Comparison'
tab.settings['Fitting type'] = 'Minimise x-correlation'
tab.settings['Number of iterations'] = 20
tab.settings['Frequency scaling factor'] = 1.0
tab.settings['Optimise frequency scaling'] = True
tab.settings['Spectrum scaling'] = False
tab.settings['Spectrum scaling factor'] = 1.0
tab.settings['Independent y-axes'] = False
tab.settings['Spectral difference threshold'] = 0.05
tab.settings['HPFilter lambda'] = 7.0
tab.settings['Baseline removal'] = False
tab.settings['Scenario index'] = 0
#
# Plotting tab: spectral range and plot type
#
tab = self.notebook.plottingTab
tab.settings['Minimum frequency'] = 300
tab.settings['Maximum frequency'] = 1500
tab.settings['Frequency increment'] = 0.2
tab.settings['Molar definition'] = 'Unit cells'
tab.settings['Number of atoms'] = 1
tab.settings['Plot type'] = 'Powder ATR'
tab.settings['concentration'] = 10.933228754034278
tab.settings['cell concentration'] = 10.933228754034278
| JohnKendrick/PDielec | Examples/ATR/Na2SO42_fit/script.py | Python | mit | 3,591 | [
"VASP"
] | c9af12dd6fcc4f1d388e9f934eb8c5e2c1ec1392769d86663618b9ba3456df2b |
#
# Parse tree nodes for expressions
#
import cython
# Pre-declare frequently used module-level names for the case where this
# module is itself compiled by Cython; in plain-Python mode cython.declare
# has no effect.
cython.declare(error=object, warning=object, warn_once=object, InternalError=object,
               CompileError=object, UtilityCode=object, TempitaUtilityCode=object,
               StringEncoding=object, operator=object,
               Naming=object, Nodes=object, PyrexTypes=object, py_object_type=object,
               list_type=object, tuple_type=object, set_type=object, dict_type=object,
               unicode_type=object, str_type=object, bytes_type=object, type_type=object,
               Builtin=object, Symtab=object, Utils=object, find_coercion_error=object,
               debug_disposal_code=object, debug_temp_alloc=object, debug_coercion=object,
               bytearray_type=object, slice_type=object)
import sys
import copy
import operator
from Errors import error, warning, warn_once, InternalError, CompileError
from Errors import hold_errors, release_errors, held_errors, report_error
from Code import UtilityCode, TempitaUtilityCode
import StringEncoding
import Naming
import Nodes
from Nodes import Node
import PyrexTypes
from PyrexTypes import py_object_type, c_long_type, typecast, error_type, \
unspecified_type
import TypeSlots
from Builtin import list_type, tuple_type, set_type, dict_type, type_type, \
unicode_type, str_type, bytes_type, bytearray_type, basestring_type, slice_type
import Builtin
import Symtab
from Cython import Utils
from Annotate import AnnotationItem
from Cython.Compiler import Future
from Cython.Debugging import print_call_chain
from DebugFlags import debug_disposal_code, debug_temp_alloc, \
debug_coercion
# Python 2/3 compatibility: make sure 'basestring' and 'bytes' exist with a
# usable meaning under both major versions.
try:
    from __builtin__ import basestring
except ImportError:
    basestring = str # Python 3
try:
    from builtins import bytes
except ImportError:
    bytes = str # Python 2
class NotConstant(object):
    """Singleton sentinel marking an expression whose value is not a
    compile-time constant.
    """
    # The single shared instance; created lazily on first construction.
    _obj = None

    def __new__(cls):
        instance = NotConstant._obj
        if instance is None:
            instance = super(NotConstant, cls).__new__(cls)
            NotConstant._obj = instance
        return instance

    def __repr__(self):
        return "<NOT CONSTANT>"
# Shared sentinel instances: 'not_a_constant' marks a computed-but-non-constant
# result, 'constant_value_not_set' marks a result that was never computed.
not_a_constant = NotConstant()
constant_value_not_set = object()
# Error messages for known-invalid implicit coercions, keyed by
# (source type, target type); mostly string/bytes/unicode conversions that
# require an explicit encode/decode step.
coercion_error_dict = {
    # string related errors
    (Builtin.unicode_type, Builtin.bytes_type) : "Cannot convert Unicode string to 'bytes' implicitly, encoding required.",
    (Builtin.unicode_type, Builtin.str_type) : "Cannot convert Unicode string to 'str' implicitly. This is not portable and requires explicit encoding.",
    (Builtin.unicode_type, PyrexTypes.c_char_ptr_type) : "Unicode objects only support coercion to Py_UNICODE*.",
    (Builtin.unicode_type, PyrexTypes.c_uchar_ptr_type) : "Unicode objects only support coercion to Py_UNICODE*.",
    (Builtin.bytes_type, Builtin.unicode_type) : "Cannot convert 'bytes' object to unicode implicitly, decoding required",
    (Builtin.bytes_type, Builtin.str_type) : "Cannot convert 'bytes' object to str implicitly. This is not portable to Py3.",
    (Builtin.bytes_type, Builtin.basestring_type) : "Cannot convert 'bytes' object to basestring implicitly. This is not portable to Py3.",
    (Builtin.bytes_type, PyrexTypes.c_py_unicode_ptr_type) : "Cannot convert 'bytes' object to Py_UNICODE*, use 'unicode'.",
    (Builtin.basestring_type, Builtin.bytes_type) : "Cannot convert 'basestring' object to bytes implicitly. This is not portable.",
    (Builtin.str_type, Builtin.unicode_type) : "str objects do not support coercion to unicode, use a unicode string literal instead (u'')",
    (Builtin.str_type, Builtin.bytes_type) : "Cannot convert 'str' to 'bytes' implicitly. This is not portable.",
    (Builtin.str_type, PyrexTypes.c_char_ptr_type) : "'str' objects do not support coercion to C types (use 'bytes'?).",
    (Builtin.str_type, PyrexTypes.c_uchar_ptr_type) : "'str' objects do not support coercion to C types (use 'bytes'?).",
    (Builtin.str_type, PyrexTypes.c_py_unicode_ptr_type) : "'str' objects do not support coercion to C types (use 'unicode'?).",
    (PyrexTypes.c_char_ptr_type, Builtin.unicode_type) : "Cannot convert 'char*' to unicode implicitly, decoding required",
    (PyrexTypes.c_uchar_ptr_type, Builtin.unicode_type) : "Cannot convert 'char*' to unicode implicitly, decoding required",
}
def find_coercion_error(type_tuple, default, env):
    """Return the error message for an invalid coercion from type_tuple[0]
    to type_tuple[1], or *default* when the coercion is acceptable or not
    a known problematic case.
    """
    message = coercion_error_dict.get(type_tuple)
    if message is None:
        return default
    involves_char_ptr = (PyrexTypes.c_char_ptr_type in type_tuple
                         or PyrexTypes.c_uchar_ptr_type in type_tuple)
    if involves_char_ptr and env.directives['c_string_encoding']:
        # An explicit c_string_encoding directive allows char* coercions to
        # Python object targets and for ascii/default encodings.
        if type_tuple[1].is_pyobject:
            return default
        if env.directives['c_string_encoding'] in ('ascii', 'default'):
            return default
        return "'%s' objects do not support coercion to C types with non-ascii or non-default c_string_encoding" % type_tuple[0].name
    return message
def default_str_type(env):
    """Map the 'c_string_type' compiler directive to the corresponding
    builtin string type object (None for an unknown directive value).
    """
    type_by_directive = {
        'bytes': bytes_type,
        'bytearray': bytearray_type,
        'str': str_type,
        'unicode': unicode_type,
    }
    return type_by_directive.get(env.directives['c_string_type'])
def check_negative_indices(*nodes):
    """
    Raise a warning on nodes that are known to have negative numeric values.
    Used to find (potential) bugs inside of "wraparound=False" sections.
    """
    for node in nodes:
        if node is None:
            continue
        value = node.constant_result
        if not isinstance(value, (int, float, long)):
            continue
        if value < 0:
            warning(node.pos,
                    "the result of using negative indices inside of "
                    "code sections marked as 'wraparound=False' is "
                    "undefined", level=1)
def infer_sequence_item_type(env, seq_node, index_node=None, seq_type=None):
    # Infer the type of an item read from a sequence, or None if no single
    # type can be determined.  For tuples, assignments can be followed
    # safely because tuples are immutable.
    if not seq_node.is_sequence_constructor:
        if seq_type is None:
            seq_type = seq_node.infer_type(env)
        if seq_type is tuple_type:
            # tuples are immutable => we can safely follow assignments
            if seq_node.cf_state and len(seq_node.cf_state) == 1:
                try:
                    seq_node = seq_node.cf_state[0].rhs
                except AttributeError:
                    pass
    if seq_node is not None and seq_node.is_sequence_constructor:
        if index_node is not None and index_node.has_constant_result():
            # A constant index selects exactly one item; use its type.
            try:
                item = seq_node.args[index_node.constant_result]
            except (ValueError, TypeError, IndexError):
                pass
            else:
                return item.infer_type(env)
        # if we're lucky, all items have the same type
        item_types = set([item.infer_type(env) for item in seq_node.args])
        if len(item_types) == 1:
            return item_types.pop()
    return None
class ExprNode(Node):
# subexprs [string] Class var holding names of subexpr node attrs
# type PyrexType Type of the result
# result_code string Code fragment
# result_ctype string C type of result_code if different from type
# is_temp boolean Result is in a temporary variable
# is_sequence_constructor
# boolean Is a list or tuple constructor expression
# is_starred boolean Is a starred expression (e.g. '*a')
# saved_subexpr_nodes
# [ExprNode or [ExprNode or None] or None]
# Cached result of subexpr_nodes()
# use_managed_ref boolean use ref-counted temps/assignments/etc.
# result_is_used boolean indicates that the result will be dropped and the
# result_code/temp_result can safely be set to None
result_ctype = None
type = None
temp_code = None
old_temp = None # error checker for multiple frees etc.
use_managed_ref = True # can be set by optimisation transforms
result_is_used = True
# The Analyse Expressions phase for expressions is split
# into two sub-phases:
#
# Analyse Types
# Determines the result type of the expression based
# on the types of its sub-expressions, and inserts
# coercion nodes into the expression tree where needed.
# Marks nodes which will need to have temporary variables
# allocated.
#
# Allocate Temps
# Allocates temporary variables where needed, and fills
# in the result_code field of each node.
#
# ExprNode provides some convenience routines which
# perform both of the above phases. These should only
# be called from statement nodes, and only when no
# coercion nodes need to be added around the expression
# being analysed. In that case, the above two phases
# should be invoked separately.
#
# Framework code in ExprNode provides much of the common
# processing for the various phases. It makes use of the
# 'subexprs' class attribute of ExprNodes, which should
# contain a list of the names of attributes which can
# hold sub-nodes or sequences of sub-nodes.
#
# The framework makes use of a number of abstract methods.
# Their responsibilities are as follows.
#
# Declaration Analysis phase
#
# analyse_target_declaration
# Called during the Analyse Declarations phase to analyse
# the LHS of an assignment or argument of a del statement.
# Nodes which cannot be the LHS of an assignment need not
# implement it.
#
# Expression Analysis phase
#
# analyse_types
# - Call analyse_types on all sub-expressions.
# - Check operand types, and wrap coercion nodes around
# sub-expressions where needed.
# - Set the type of this node.
# - If a temporary variable will be required for the
# result, set the is_temp flag of this node.
#
# analyse_target_types
# Called during the Analyse Types phase to analyse
# the LHS of an assignment or argument of a del
# statement. Similar responsibilities to analyse_types.
#
# target_code
# Called by the default implementation of allocate_target_temps.
# Should return a C lvalue for assigning to the node. The default
# implementation calls calculate_result_code.
#
# check_const
# - Check that this node and its subnodes form a
# legal constant expression. If so, do nothing,
# otherwise call not_const.
#
# The default implementation of check_const
# assumes that the expression is not constant.
#
# check_const_addr
# - Same as check_const, except check that the
# expression is a C lvalue whose address is
# constant. Otherwise, call addr_not_const.
#
# The default implementation of calc_const_addr
# assumes that the expression is not a constant
# lvalue.
#
# Code Generation phase
#
# generate_evaluation_code
# - Call generate_evaluation_code for sub-expressions.
# - Perform the functions of generate_result_code
# (see below).
# - If result is temporary, call generate_disposal_code
# on all sub-expressions.
#
# A default implementation of generate_evaluation_code
# is provided which uses the following abstract methods:
#
# generate_result_code
# - Generate any C statements necessary to calculate
# the result of this node from the results of its
# sub-expressions.
#
# calculate_result_code
# - Should return a C code fragment evaluating to the
# result. This is only called when the result is not
# a temporary.
#
# generate_assignment_code
# Called on the LHS of an assignment.
# - Call generate_evaluation_code for sub-expressions.
# - Generate code to perform the assignment.
# - If the assignment absorbed a reference, call
# generate_post_assignment_code on the RHS,
# otherwise call generate_disposal_code on it.
#
# generate_deletion_code
# Called on an argument of a del statement.
# - Call generate_evaluation_code for sub-expressions.
# - Generate code to perform the deletion.
# - Call generate_disposal_code on all sub-expressions.
#
#
is_sequence_constructor = 0
is_string_literal = 0
is_attribute = 0
is_subscript = 0
saved_subexpr_nodes = None
is_temp = 0
is_target = 0
is_starred = 0
constant_result = constant_value_not_set
# whether this node with a memoryview type should be broadcast
memslice_broadcast = False
child_attrs = property(fget=operator.attrgetter('subexprs'))
    def not_implemented(self, method_name):
        # Report an abstract method that a concrete subclass failed to
        # override, then abort compilation with an internal error.
        print_call_chain(method_name, "not implemented")
        raise InternalError(
            "%s.%s not implemented" %
            (self.__class__.__name__, method_name))

    def is_lvalue(self):
        # Whether this expression may be used as an assignment target;
        # overridden by assignable node classes.
        return 0

    def is_addressable(self):
        # Whether the C address of the result may be taken.  Memoryview
        # slices are excluded because their result is a struct value.
        return self.is_lvalue() and not self.type.is_memoryviewslice

    def is_ephemeral(self):
        # An ephemeral node is one whose result is in
        # a Python temporary and we suspect there are no
        # other references to it. Certain operations are
        # disallowed on such values, since they are
        # likely to result in a dangling pointer.
        return self.type.is_pyobject and self.is_temp
def subexpr_nodes(self):
# Extract a list of subexpression nodes based
# on the contents of the subexprs class attribute.
nodes = []
for name in self.subexprs:
item = getattr(self, name)
if item is not None:
if type(item) is list:
nodes.extend(item)
else:
nodes.append(item)
return nodes
    def result(self):
        # Return the C code fragment that yields this node's value: the
        # allocated temporary when one is used, otherwise a freshly
        # calculated expression.
        if self.is_temp:
            return self.temp_code
        else:
            return self.calculate_result_code()

    def result_as(self, type = None):
        # Return the result code cast to the specified C type.
        if (self.is_temp and self.type.is_pyobject and
            type != py_object_type):
            # Allocated temporaries are always PyObject *, which may not
            # reflect the actual type (e.g. an extension type)
            return typecast(type, py_object_type, self.result())
        return typecast(type, self.ctype(), self.result())

    def py_result(self):
        # Return the result code cast to PyObject *.
        return self.result_as(py_object_type)

    def ctype(self):
        # Return the native C type of the result (i.e. the
        # C type of the result_code expression).
        return self.result_ctype or self.type

    def get_constant_c_result_code(self):
        # Return the constant value of this node as a result code
        # string, or None if the node is not constant. This method
        # can be called when the constant result code is required
        # before the code generation phase.
        #
        # The return value is a string that can represent a simple C
        # value, a constant C name or a constant C expression. If the
        # node type depends on Python code, this must return None.
        return None
    def calculate_constant_result(self):
        # Calculate the constant compile time result value of this
        # expression and store it in ``self.constant_result``. Does
        # nothing by default, thus leaving ``self.constant_result``
        # unknown. If valid, the result can be an arbitrary Python
        # value.
        #
        # This must only be called when it is assured that all
        # sub-expressions have a valid constant_result value. The
        # ConstantFolding transform will do this.
        pass

    def has_constant_result(self):
        # True once a real compile-time value has been computed for this
        # node (i.e. it is neither unset nor the not-a-constant sentinel).
        return self.constant_result is not constant_value_not_set and \
               self.constant_result is not not_a_constant

    def compile_time_value(self, denv):
        # Return value of compile-time expression, or report error.
        error(self.pos, "Invalid compile-time expression")

    def compile_time_value_error(self, e):
        # Report a Python exception raised while evaluating this node
        # at compile time.
        error(self.pos, "Error in compile-time expression: %s: %s" % (
            e.__class__.__name__, e))
    # ------------- Declaration Analysis ----------------

    def analyse_target_declaration(self, env):
        # Default: most expression kinds cannot appear as an assignment
        # or del target; assignable subclasses override this.
        error(self.pos, "Cannot assign to or delete this")

    # ------------- Expression Analysis ----------------

    def analyse_const_expression(self, env):
        # Called during the analyse_declarations phase of a
        # constant expression. Analyses the expression's type,
        # checks whether it is a legal const expression,
        # and determines its value.
        node = self.analyse_types(env)
        node.check_const()
        return node

    def analyse_expressions(self, env):
        # Convenience routine performing both the Type
        # Analysis and Temp Allocation phases for a whole
        # expression.
        return self.analyse_types(env)

    def analyse_target_expression(self, env, rhs):
        # Convenience routine performing both the Type
        # Analysis and Temp Allocation phases for the LHS of
        # an assignment.  The rhs argument is unused here but part of
        # the signature for subclasses that need it.
        return self.analyse_target_types(env)
def analyse_boolean_expression(self, env):
# Analyse expression and coerce to a boolean.
node = self.analyse_types(env)
bool = node.coerce_to_boolean(env)
return bool
    def analyse_temp_boolean_expression(self, env):
        # Analyse boolean expression and coerce result into
        # a temporary. This is used when a branch is to be
        # performed on the result and we won't have an
        # opportunity to ensure disposal code is executed
        # afterwards. By forcing the result into a temporary,
        # we ensure that all disposal has been done by the
        # time we get the result.
        node = self.analyse_types(env)
        return node.coerce_to_boolean(env).coerce_to_simple(env)
    # --------------- Type Inference -----------------

    def type_dependencies(self, env):
        # Returns the list of entries whose types must be determined
        # before the type of self can be inferred.  A node whose type
        # is already known has no dependencies.
        if hasattr(self, 'type') and self.type is not None:
            return ()
        return sum([node.type_dependencies(env) for node in self.subexpr_nodes()], ())

    def infer_type(self, env):
        # Attempt to deduce the type of self.
        # Differs from analyse_types as it avoids unnecessary
        # analysis of subexpressions, but can assume everything
        # in self.type_dependencies() has been resolved.
        if hasattr(self, 'type') and self.type is not None:
            return self.type
        elif hasattr(self, 'entry') and self.entry is not None:
            return self.entry.type
        else:
            self.not_implemented("infer_type")

    def nonlocally_immutable(self):
        # Returns whether this variable is a safe reference, i.e.
        # can't be modified as part of globals or closures.
        return self.is_literal or self.is_temp or self.type.is_array or self.type.is_cfunction
# --------------- Type Analysis ------------------

def analyse_as_module(self, env):
    # If this node can be interpreted as a reference to a
    # cimported module, return its scope, else None.
    return None

def analyse_as_type(self, env):
    # If this node can be interpreted as a reference to a
    # type, return that type, else None.
    return None

def analyse_as_extension_type(self, env):
    # If this node can be interpreted as a reference to an
    # extension type or builtin type, return its type, else None.
    return None

def analyse_types(self, env):
    # Concrete expression node classes must override this.
    self.not_implemented("analyse_types")

def analyse_target_types(self, env):
    # By default, analysing an assignment target is the same
    # as analysing the expression itself.
    return self.analyse_types(env)
def nogil_check(self, env):
    # By default, any expression based on Python objects is
    # prevented in nogil environments. Subtypes must override
    # this if they can work without the GIL.
    if self.type and self.type.is_pyobject:
        self.gil_error()

def gil_assignment_check(self, env):
    # Assigning a Python object requires the GIL.
    if env.nogil and self.type.is_pyobject:
        error(self.pos, "Assignment of Python object not allowed without gil")

def check_const(self):
    # Report that this node is not usable in a constant
    # expression; returns False.  Constant-capable subclasses
    # override this.
    self.not_const()
    return False

def not_const(self):
    error(self.pos, "Not allowed in a constant expression")

def check_const_addr(self):
    # Like check_const, but for taking a constant address.
    self.addr_not_const()
    return False

def addr_not_const(self):
    error(self.pos, "Address is not constant")
# ----------------- Result Allocation -----------------

def result_in_temp(self):
    # Return true if result is in a temporary owned by
    # this node or one of its subexpressions. Overridden
    # by certain nodes which can share the result of
    # a subnode.
    return self.is_temp

def target_code(self):
    # Return code fragment for use as LHS of a C assignment.
    return self.calculate_result_code()

def calculate_result_code(self):
    # Concrete expression node classes must override this.
    self.not_implemented("calculate_result_code")

# def release_target_temp(self, env):
#    # Release temporaries used by LHS of an assignment.
#    self.release_subexpr_temps(env)

def allocate_temp_result(self, code):
    # Allocate a temp from the function state for this node's
    # result.  Double allocation indicates a codegen bug.
    if self.temp_code:
        raise RuntimeError("Temp allocated multiple times in %r: %r" % (self.__class__.__name__, self.pos))
    type = self.type
    if not type.is_void:
        if type.is_pyobject:
            # all Python objects share one temp type
            type = PyrexTypes.py_object_type
        self.temp_code = code.funcstate.allocate_temp(
            type, manage_ref=self.use_managed_ref)
    else:
        # void results need no temp
        self.temp_code = None

def release_temp_result(self, code):
    # Return this node's temp to the function state and remember
    # it in old_temp so that a double release can be diagnosed.
    if not self.temp_code:
        if not self.result_is_used:
            # not used anyway, so ignore if not set up
            return
        if self.old_temp:
            raise RuntimeError("temp %s released multiple times in %s" % (
                self.old_temp, self.__class__.__name__))
        else:
            raise RuntimeError("no temp, but release requested in %s" % (
                self.__class__.__name__))
    code.funcstate.release_temp(self.temp_code)
    self.old_temp = self.temp_code
    self.temp_code = None
# ---------------- Code Generation -----------------

def make_owned_reference(self, code):
    """
    If result is a pyobject, make sure we own a reference to it.
    If the result is in a temp, it is already a new reference.
    """
    if self.type.is_pyobject and not self.result_in_temp():
        code.put_incref(self.result(), self.ctype())

def make_owned_memoryviewslice(self, code):
    """
    Make sure we own the reference to this memoryview slice.
    """
    if not self.result_in_temp():
        code.put_incref_memoryviewslice(self.result(),
                                        have_gil=self.in_nogil_context)

def generate_evaluation_code(self, code):
    # Generate code to evaluate this node and
    # its sub-expressions, and dispose of any
    # temporary results of its sub-expressions.
    self.generate_subexpr_evaluation_code(code)

    code.mark_pos(self.pos)
    if self.is_temp:
        self.allocate_temp_result(code)

    self.generate_result_code(code)
    if self.is_temp:
        # If we are temp we do not need to wait until this node is disposed
        # before disposing children.
        self.generate_subexpr_disposal_code(code)
        self.free_subexpr_temps(code)
def generate_subexpr_evaluation_code(self, code):
    # Evaluate all sub-expressions of this node.
    for node in self.subexpr_nodes():
        node.generate_evaluation_code(code)

def generate_result_code(self, code):
    # Concrete expression node classes must override this.
    self.not_implemented("generate_result_code")

def generate_disposal_code(self, code):
    # Release this node's result; for non-temp results,
    # delegate to the sub-expressions instead.
    if self.is_temp:
        if self.result():
            if self.type.is_pyobject:
                code.put_decref_clear(self.result(), self.ctype())
            elif self.type.is_memoryviewslice:
                code.put_xdecref_memoryviewslice(
                    self.result(), have_gil=not self.in_nogil_context)
    else:
        # Already done if self.is_temp
        self.generate_subexpr_disposal_code(code)

def generate_subexpr_disposal_code(self, code):
    # Generate code to dispose of temporary results
    # of all sub-expressions.
    for node in self.subexpr_nodes():
        node.generate_disposal_code(code)

def generate_post_assignment_code(self, code):
    # After a temp result has been assigned away, null out the
    # stored pointer(s) so disposal does not release it again.
    if self.is_temp:
        if self.type.is_pyobject:
            code.putln("%s = 0;" % self.result())
        elif self.type.is_memoryviewslice:
            code.putln("%s.memview = NULL;" % self.result())
            code.putln("%s.data = NULL;" % self.result())
    else:
        self.generate_subexpr_disposal_code(code)

def generate_assignment_code(self, rhs, code):
    # Stub method for nodes which are not legal as
    # the LHS of an assignment. An error will have
    # been reported earlier.
    pass

def generate_deletion_code(self, code, ignore_nonexisting=False):
    # Stub method for nodes that are not legal as
    # the argument of a del statement. An error
    # will have been reported earlier.
    pass

def free_temps(self, code):
    # Release this node's temp if it owns one, otherwise
    # recurse into the sub-expressions.
    if self.is_temp:
        if not self.type.is_void:
            self.release_temp_result(code)
    else:
        self.free_subexpr_temps(code)

def free_subexpr_temps(self, code):
    for sub in self.subexpr_nodes():
        sub.free_temps(code)

def generate_function_definitions(self, env, code):
    # Most expressions define no functions; generator and lambda
    # style nodes override this.
    pass

# ---------------- Annotation ---------------------

def annotate(self, code):
    # Recursively annotate the sub-expressions for the HTML
    # annotation output.
    for node in self.subexpr_nodes():
        node.annotate(code)
# ----------------- Coercion ----------------------

def coerce_to(self, dst_type, env):
    #   Coerce the result so that it can be assigned to
    #   something of type dst_type. If processing is necessary,
    #   wraps this node in a coercion node and returns that.
    #   Otherwise, returns this node unchanged.
    #
    #   This method is called during the analyse_expressions
    #   phase of the src_node's processing.
    #
    #   Note that subclasses that override this (especially
    #   ConstNodes) must not (re-)set their own .type attribute
    #   here.  Since expression nodes may turn up in different
    #   places in the tree (e.g. inside of CloneNodes in cascaded
    #   assignments), this method must return a new node instance
    #   if it changes the type.
    #
    src = self
    src_type = self.type

    if self.check_for_coercion_error(dst_type, env):
        return self

    # Coercing to a non-reference from a reference unwraps the
    # reference; const-ness is likewise stripped from the source.
    if dst_type.is_reference and not src_type.is_reference:
        dst_type = dst_type.ref_base_type

    if src_type.is_const:
        src_type = src_type.const_base_type

    if src_type.is_fused or dst_type.is_fused:
        # See if we are coercing a fused function to a pointer to a
        # specialized function
        if (src_type.is_cfunction and not dst_type.is_fused and
                dst_type.is_ptr and dst_type.base_type.is_cfunction):
            dst_type = dst_type.base_type
            for signature in src_type.get_all_specialized_function_types():
                if signature.same_as(dst_type):
                    # matching specialization found: rebind this node
                    src.type = signature
                    src.entry = src.type.entry
                    src.entry.used = True
                    return self
        if src_type.is_fused:
            error(self.pos, "Type is not specialized")
        else:
            error(self.pos, "Cannot coerce to a type that is not specialized")
        self.type = error_type
        return self

    if self.coercion_type is not None:
        # This is purely for error checking purposes!
        node = NameNode(self.pos, name='', type=self.coercion_type)
        node.coerce_to(dst_type, env)

    if dst_type.is_memoryviewslice:
        import MemoryView
        if not src.type.is_memoryviewslice:
            if src.type.is_pyobject:
                src = CoerceToMemViewSliceNode(src, dst_type, env)
            elif src.type.is_array:
                src = CythonArrayNode.from_carray(src, env).coerce_to(
                    dst_type, env)
            elif not src_type.is_error:
                error(self.pos,
                      "Cannot convert '%s' to memoryviewslice" %
                      (src_type,))
        elif not MemoryView.src_conforms_to_dst(
                src.type, dst_type, broadcast=self.memslice_broadcast):
            # slices are compatible in dtype but not in layout, or
            # differ in dtype altogether -- report accordingly
            if src.type.dtype.same_as(dst_type.dtype):
                msg = "Memoryview '%s' not conformable to memoryview '%s'."
                tup = src.type, dst_type
            else:
                msg = "Different base types for memoryviews (%s, %s)"
                tup = src.type.dtype, dst_type.dtype
            error(self.pos, msg % tup)

    elif dst_type.is_pyobject:
        if not src.type.is_pyobject:
            if dst_type is bytes_type and src.type.is_int:
                src = CoerceIntToBytesNode(src, env)
            else:
                src = CoerceToPyTypeNode(src, env, type=dst_type)
        if not src.type.subtype_of(dst_type):
            # NOTE(review): constant_result None presumably marks a
            # known-None value that needs no type test -- confirm.
            if src.constant_result is not None:
                src = PyTypeTestNode(src, dst_type, env)
    elif src.type.is_pyobject:
        src = CoerceFromPyTypeNode(dst_type, src, env)
    elif (dst_type.is_complex
          and src_type != dst_type
          and dst_type.assignable_from(src_type)):
        src = CoerceToComplexNode(src, dst_type, env)
    else:  # neither src nor dst are py types
        # Added the string comparison, since for c types that
        # is enough, but Cython gets confused when the types are
        # in different pxi files.
        if not (str(src.type) == str(dst_type) or dst_type.assignable_from(src_type)):
            self.fail_assignment(dst_type)
    return src
def fail_assignment(self, dst_type):
    # Report a generic assignment type error at this node.
    error(self.pos, "Cannot assign type '%s' to '%s'" % (self.type, dst_type))

def check_for_coercion_error(self, dst_type, env, fail=False, default=None):
    # Look up a (src_type, dst_type) specific error message and
    # report it if one exists.  With fail=True an error is always
    # reported (falling back to the generic message).  Returns
    # True if an error was reported, False otherwise.
    if fail and not default:
        default = "Cannot assign type '%(FROM)s' to '%(TO)s'"
    message = find_coercion_error((self.type, dst_type), default, env)
    if message is not None:
        error(self.pos, message % {'FROM': self.type, 'TO': dst_type})
        return True
    if fail:
        self.fail_assignment(dst_type)
        return True
    return False
def coerce_to_pyobject(self, env):
    # Coerce the result to a generic Python object.
    return self.coerce_to(PyrexTypes.py_object_type, env)

def coerce_to_boolean(self, env):
    #  Coerce result to something acceptable as
    #  a boolean value.

    # if it's constant, calculate the result now
    if self.has_constant_result():
        bool_value = bool(self.constant_result)
        return BoolNode(self.pos, value=bool_value,
                        constant_result=bool_value)

    type = self.type
    if type.is_enum or type.is_error:
        # enums and error types are used as-is
        return self
    elif type.is_pyobject or type.is_int or type.is_ptr or type.is_float:
        return CoerceToBooleanNode(self, env)
    else:
        error(self.pos, "Type '%s' not acceptable as a boolean" % type)
        return self

def coerce_to_integer(self, env):
    # If not already some C integer type, coerce to longint.
    if self.type.is_int:
        return self
    else:
        return self.coerce_to(PyrexTypes.c_long_type, env)

def coerce_to_temp(self, env):
    # Ensure that the result is in a temporary.
    if self.result_in_temp():
        return self
    else:
        return CoerceToTempNode(self, env)

def coerce_to_simple(self, env):
    # Ensure that the result is simple (see is_simple).
    if self.is_simple():
        return self
    else:
        return self.coerce_to_temp(env)

def is_simple(self):
    #  A node is simple if its result is something that can
    #  be referred to without performing any operations, e.g.
    #  a constant, local var, C global var, struct member
    #  reference, or temporary.
    return self.result_in_temp()

def may_be_none(self):
    # Only Python objects and memoryview slices can be None;
    # anything else is statically known not to be.
    if self.type and not (self.type.is_pyobject or
                          self.type.is_memoryviewslice):
        return False
    if self.has_constant_result():
        return self.constant_result is not None
    return True

def as_cython_attribute(self):
    # Name of the Cython attribute this node refers to, if any.
    return None

def as_none_safe_node(self, message, error="PyExc_TypeError", format_args=()):
    # Wraps the node in a NoneCheckNode if it is not known to be
    # not-None (e.g. because it is a Python literal).
    if self.may_be_none():
        return NoneCheckNode(self, error, message, format_args)
    else:
        return self
@classmethod
def from_node(cls, node, **kwargs):
    """Instantiate this node class from another node, properly
    copying over all attributes that one would forget otherwise.

    Attributes already given in *kwargs* take precedence over
    those found on *node*; attributes missing on *node* are
    silently skipped.
    """
    copied_attrs = ("cf_state", "cf_maybe_null", "cf_is_null", "constant_result")
    for name in copied_attrs:
        if name in kwargs:
            continue
        try:
            kwargs[name] = getattr(node, name)
        except AttributeError:
            pass  # node does not carry this attribute
    return cls(node.pos, **kwargs)
class AtomicExprNode(ExprNode):
    """Abstract base class for expression nodes that have no
    sub-expressions.
    """
    subexprs = []

    # With no children, sub-expression traversal is a no-op;
    # override the generic walkers to skip it entirely.
    def generate_subexpr_evaluation_code(self, code):
        pass

    def generate_subexpr_disposal_code(self, code):
        pass
class PyConstNode(AtomicExprNode):
    """Abstract base class for constant Python singleton values."""

    is_literal = 1
    type = py_object_type

    def analyse_types(self, env):
        # Nothing to analyse: value and type are fixed.
        return self

    def is_simple(self):
        return 1

    def may_be_none(self):
        return False

    def calculate_result_code(self):
        # The stored value is already a C expression.
        return self.value

    def generate_result_code(self, code):
        pass
class NoneNode(PyConstNode):
    """The constant value None."""

    is_none = 1
    value = "Py_None"
    constant_result = None
    # None may be used without the GIL.
    nogil_check = None

    def compile_time_value(self, denv):
        return None

    def may_be_none(self):
        # This node *is* None.
        return True
class EllipsisNode(PyConstNode):
    """The literal '...' appearing in a subscript list."""

    constant_result = Ellipsis
    value = "Py_Ellipsis"

    def compile_time_value(self, denv):
        return Ellipsis
class ConstNode(AtomicExprNode):
    """Abstract base type for literal constant nodes.

    value   string   C code fragment
    """

    is_literal = 1
    nogil_check = None

    def analyse_types(self, env):
        # Types are held in class variables, so nothing to do.
        return self

    def is_simple(self):
        return 1

    def nonlocally_immutable(self):
        return 1

    def may_be_none(self):
        return False

    def check_const(self):
        # Literals are always valid in constant expressions.
        return True

    def calculate_result_code(self):
        return str(self.value)

    def get_constant_c_result_code(self):
        return self.calculate_result_code()

    def generate_result_code(self, code):
        pass
class BoolNode(ConstNode):
    """The constant value True or False."""

    type = PyrexTypes.c_bint_type

    def calculate_constant_result(self):
        self.constant_result = self.value

    def compile_time_value(self, denv):
        return self.value

    def calculate_result_code(self):
        # Emit the Python bool singleton when this node carries a
        # Python object type, otherwise a C 0/1 literal.
        if self.type.is_pyobject:
            # Idiom fix: conditional expression instead of the
            # fragile 'cond and a or b' pattern.
            return 'Py_True' if self.value else 'Py_False'
        else:
            return str(int(self.value))

    def coerce_to(self, dst_type, env):
        # C int <-> Python bool coercions keep the literal value
        # and only switch the node's type; everything else falls
        # back to the generic ConstNode coercion.
        if dst_type.is_pyobject and self.type.is_int:
            return BoolNode(
                self.pos, value=self.value,
                constant_result=self.constant_result,
                type=Builtin.bool_type)
        if dst_type.is_int and self.type.is_pyobject:
            return BoolNode(
                self.pos, value=self.value,
                constant_result=self.constant_result,
                type=PyrexTypes.c_bint_type)
        return ConstNode.coerce_to(self, dst_type, env)
class NullNode(ConstNode):
    """The C NULL pointer constant."""

    constant_result = 0
    value = "NULL"
    type = PyrexTypes.c_null_ptr_type

    def get_constant_c_result_code(self):
        # The stored literal is already valid constant C code.
        return self.value
class CharNode(ConstNode):
    """A single C character literal."""

    type = PyrexTypes.c_char_type

    def compile_time_value(self, denv):
        return ord(self.value)

    def calculate_constant_result(self):
        self.constant_result = ord(self.value)

    def calculate_result_code(self):
        # Escape the character for use in C source.
        return "'%s'" % StringEncoding.escape_char(self.value)
class IntNode(ConstNode):
    # Integer literal.
    #
    # unsigned     ""                  or "U"
    # longness     ""                  or "L" or "LL"
    # is_c_literal True/False/None     creator considers this a C integer literal

    unsigned = ""
    longness = ""
    is_c_literal = None  # unknown

    def __init__(self, pos, **kwds):
        ExprNode.__init__(self, pos, **kwds)
        # Pick a default type from the literal's value unless the
        # caller supplied one explicitly.
        if 'type' not in kwds:
            self.type = self.find_suitable_type_for_value()

    def find_suitable_type_for_value(self):
        # Decide the C/Python type that best represents this literal.
        if self.constant_result is constant_value_not_set:
            try:
                self.calculate_constant_result()
            except ValueError:
                pass
        # we ignore 'is_c_literal = True' and instead map signed 32bit
        # integers as C long values
        if self.is_c_literal or \
               self.constant_result in (constant_value_not_set, not_a_constant) or \
               self.unsigned or self.longness == 'LL':
            # clearly a C literal
            rank = (self.longness == 'LL') and 2 or 1
            suitable_type = PyrexTypes.modifiers_and_name_to_type[not self.unsigned, rank, "int"]
            if self.type:
                suitable_type = PyrexTypes.widest_numeric_type(suitable_type, self.type)
        else:
            # C literal or Python literal - split at 32bit boundary
            if -2**31 <= self.constant_result < 2**31:
                if self.type and self.type.is_int:
                    suitable_type = self.type
                else:
                    suitable_type = PyrexTypes.c_long_type
            else:
                # too large for a portable C long: keep it a Python int
                suitable_type = PyrexTypes.py_object_type
        return suitable_type

    def coerce_to(self, dst_type, env):
        # Integer literals coerce by constructing a fresh node of
        # the requested type rather than wrapping in a coercion node.
        if self.type is dst_type:
            return self
        elif dst_type.is_float:
            if self.has_constant_result():
                return FloatNode(self.pos, value='%d.0' % int(self.constant_result), type=dst_type,
                                 constant_result=float(self.constant_result))
            else:
                return FloatNode(self.pos, value=self.value, type=dst_type,
                                 constant_result=not_a_constant)
        if dst_type.is_numeric and not dst_type.is_complex:
            node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
                           type = dst_type, is_c_literal = True,
                           unsigned=self.unsigned, longness=self.longness)
            return node
        elif dst_type.is_pyobject:
            node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
                           type = PyrexTypes.py_object_type, is_c_literal = False,
                           unsigned=self.unsigned, longness=self.longness)
        else:
            # FIXME: not setting the type here to keep it working with
            # complex numbers. Should they be special cased?
            node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
                           unsigned=self.unsigned, longness=self.longness)
        # We still need to perform normal coerce_to processing on the
        # result, because we might be coercing to an extension type,
        # in which case a type test node will be needed.
        return ConstNode.coerce_to(node, dst_type, env)

    def coerce_to_boolean(self, env):
        # An int literal can be reinterpreted as a C bint directly.
        return IntNode(
            self.pos, value=self.value,
            constant_result=self.constant_result,
            type=PyrexTypes.c_bint_type,
            unsigned=self.unsigned, longness=self.longness)

    def generate_evaluation_code(self, code):
        if self.type.is_pyobject:
            # pre-allocate a Python version of the number
            plain_integer_string = str(Utils.str_to_number(self.value))
            self.result_code = code.get_py_int(plain_integer_string, self.longness)
        else:
            self.result_code = self.get_constant_c_result_code()

    def get_constant_c_result_code(self):
        # Append the U/L/LL suffixes to the C literal text.
        return self.value_as_c_integer_string() + self.unsigned + self.longness

    def value_as_c_integer_string(self):
        value = self.value
        if len(value) > 2:
            # convert C-incompatible Py3 oct/bin notations
            if value[1] in 'oO':
                value = value[0] + value[2:]  # '0o123' => '0123'
            elif value[1] in 'bB':
                # binary has no C spelling: emit the decimal value
                value = int(value[2:], 2)
        return str(value)

    def calculate_result_code(self):
        return self.result_code

    def calculate_constant_result(self):
        self.constant_result = Utils.str_to_number(self.value)

    def compile_time_value(self, denv):
        return Utils.str_to_number(self.value)
class FloatNode(ConstNode):
    # Float literal; defaults to a C double.

    type = PyrexTypes.c_double_type

    def calculate_constant_result(self):
        self.constant_result = float(self.value)

    def compile_time_value(self, denv):
        return float(self.value)

    def coerce_to(self, dst_type, env):
        # Switch between the C double and Python float
        # representations while keeping the literal text.
        if dst_type.is_pyobject and self.type.is_float:
            return FloatNode(
                self.pos, value=self.value,
                constant_result=self.constant_result,
                type=Builtin.float_type)
        if dst_type.is_float and self.type.is_pyobject:
            return FloatNode(
                self.pos, value=self.value,
                constant_result=self.constant_result,
                type=dst_type)
        return ConstNode.coerce_to(self, dst_type, env)

    def calculate_result_code(self):
        return self.result_code

    def get_constant_c_result_code(self):
        strval = self.value
        # NOTE: 'unicode' limits this to Python 2 interpreters.
        assert isinstance(strval, (str, unicode))
        # Special floats have no portable C literal spelling, so
        # detect them via repr() and emit Py_HUGE_VAL expressions.
        cmpval = repr(float(strval))
        if cmpval == 'nan':
            return "(Py_HUGE_VAL * 0)"
        elif cmpval == 'inf':
            return "Py_HUGE_VAL"
        elif cmpval == '-inf':
            return "(-Py_HUGE_VAL)"
        else:
            return strval

    def generate_evaluation_code(self, code):
        c_value = self.get_constant_c_result_code()
        if self.type.is_pyobject:
            # pre-allocate a Python float object for the literal
            self.result_code = code.get_py_float(self.value, c_value)
        else:
            self.result_code = c_value
class BytesNode(ConstNode):
    # A char* or bytes literal
    #
    # value      BytesLiteral

    is_string_literal = True
    # start off as Python 'bytes' to support len() in O(1)
    type = bytes_type

    def calculate_constant_result(self):
        self.constant_result = self.value

    def as_sliced_node(self, start, stop, step=None):
        # Build a new BytesNode for a constant slice of this literal,
        # preserving the source encoding.
        value = StringEncoding.BytesLiteral(self.value[start:stop:step])
        value.encoding = self.value.encoding
        return BytesNode(
            self.pos, value=value, constant_result=value)

    def compile_time_value(self, denv):
        return self.value

    def analyse_as_type(self, env):
        # A bytes literal in a type position may spell a C type,
        # e.g. in a cast.  First try parsing it as a basic type,
        # then fall back to compiling "sizeof(<value>)" and reading
        # the type off the resulting node.
        type = PyrexTypes.parse_basic_type(self.value)
        if type is not None:
            return type
        from TreeFragment import TreeFragment
        # shift the position left by len("sizeof(") for error reporting
        pos = (self.pos[0], self.pos[1], self.pos[2]-7)
        declaration = TreeFragment(u"sizeof(%s)" % self.value, name=pos[0].filename, initial_pos=pos)
        sizeof_node = declaration.root.stats[0].expr
        sizeof_node = sizeof_node.analyse_types(env)
        if isinstance(sizeof_node, SizeofTypeNode):
            return sizeof_node.arg_type

    def can_coerce_to_char_literal(self):
        return len(self.value) == 1

    def coerce_to_boolean(self, env):
        # This is special because testing a C char* for truth directly
        # would yield the wrong result.
        bool_value = bool(self.value)
        return BoolNode(self.pos, value=bool_value, constant_result=bool_value)

    def coerce_to(self, dst_type, env):
        if self.type == dst_type:
            return self

        if dst_type.is_int:
            # single-character literal -> C char
            if not self.can_coerce_to_char_literal():
                error(self.pos, "Only single-character string literals can be coerced into ints.")
                return self
            if dst_type.is_unicode_char:
                error(self.pos, "Bytes literals cannot coerce to Py_UNICODE/Py_UCS4, use a unicode literal instead.")
                return self
            return CharNode(self.pos, value=self.value,
                            constant_result=ord(self.value))

        node = BytesNode(self.pos, value=self.value,
                         constant_result=self.constant_result)
        if dst_type.is_pyobject:
            if dst_type in (py_object_type, Builtin.bytes_type):
                node.type = Builtin.bytes_type
            else:
                self.check_for_coercion_error(dst_type, env, fail=True)
            return node
        elif dst_type == PyrexTypes.c_char_ptr_type:
            node.type = dst_type
            return node
        elif dst_type == PyrexTypes.c_uchar_ptr_type:
            # reuse the char* constant and cast it to unsigned char*
            node.type = PyrexTypes.c_char_ptr_type
            return CastNode(node, PyrexTypes.c_uchar_ptr_type)
        elif dst_type.assignable_from(PyrexTypes.c_char_ptr_type):
            node.type = dst_type
            return node

        # We still need to perform normal coerce_to processing on the
        # result, because we might be coercing to an extension type,
        # in which case a type test node will be needed.
        return ConstNode.coerce_to(node, dst_type, env)

    def generate_evaluation_code(self, code):
        if self.type.is_pyobject:
            self.result_code = code.get_py_string_const(self.value)
        else:
            self.result_code = code.get_string_const(self.value)

    def get_constant_c_result_code(self):
        return None  # FIXME

    def calculate_result_code(self):
        return self.result_code
class UnicodeNode(ConstNode):
    # A Py_UNICODE* or unicode literal
    #
    # value        EncodedString
    # bytes_value  BytesLiteral    the literal parsed as bytes string
    #                              ('-3' unicode literals only)

    is_string_literal = True
    bytes_value = None
    type = unicode_type

    def calculate_constant_result(self):
        self.constant_result = self.value

    def as_sliced_node(self, start, stop, step=None):
        # Build a new UnicodeNode for a constant slice of this
        # literal; refuse if the cut region contains surrogates.
        if StringEncoding.string_contains_surrogates(self.value[:stop]):
            # this is unsafe as it may give different results
            # in different runtimes
            return None
        value = StringEncoding.EncodedString(self.value[start:stop:step])
        value.encoding = self.value.encoding
        if self.bytes_value is not None:
            bytes_value = StringEncoding.BytesLiteral(
                self.bytes_value[start:stop:step])
            bytes_value.encoding = self.bytes_value.encoding
        else:
            bytes_value = None
        return UnicodeNode(
            self.pos, value=value, bytes_value=bytes_value,
            constant_result=value)

    def coerce_to(self, dst_type, env):
        if dst_type is self.type:
            pass
        elif dst_type.is_unicode_char:
            # single-character literal -> Py_UCS4/Py_UNICODE code point
            if not self.can_coerce_to_char_literal():
                error(self.pos,
                      "Only single-character Unicode string literals or "
                      "surrogate pairs can be coerced into Py_UCS4/Py_UNICODE.")
                return self
            int_value = ord(self.value)
            return IntNode(self.pos, type=dst_type, value=str(int_value),
                           constant_result=int_value)
        elif not dst_type.is_pyobject:
            if dst_type.is_string and self.bytes_value is not None:
                # special case: '-3' enforced unicode literal used in a
                # C char* context
                return BytesNode(self.pos, value=self.bytes_value
                                 ).coerce_to(dst_type, env)
            if dst_type.is_pyunicode_ptr:
                node = UnicodeNode(self.pos, value=self.value)
                node.type = dst_type
                return node
            error(self.pos,
                  "Unicode literals do not support coercion to C types other "
                  "than Py_UNICODE/Py_UCS4 (for characters) or Py_UNICODE* "
                  "(for strings).")
        elif dst_type not in (py_object_type, Builtin.basestring_type):
            self.check_for_coercion_error(dst_type, env, fail=True)
        return self

    def can_coerce_to_char_literal(self):
        return len(self.value) == 1
        ## or (len(self.value) == 2
        ##     and (0xD800 <= self.value[0] <= 0xDBFF)
        ##     and (0xDC00 <= self.value[1] <= 0xDFFF))

    def coerce_to_boolean(self, env):
        # Evaluate the truth value at compile time.
        bool_value = bool(self.value)
        return BoolNode(self.pos, value=bool_value, constant_result=bool_value)

    def contains_surrogates(self):
        return StringEncoding.string_contains_surrogates(self.value)

    def generate_evaluation_code(self, code):
        if self.type.is_pyobject:
            if self.contains_surrogates():
                # surrogates are not really portable and cannot be
                # decoded by the UTF-8 codec in Py3.3
                self.result_code = code.get_py_const(py_object_type, 'ustring')
                data_cname = code.get_pyunicode_ptr_const(self.value)
                # build the object once in the cached-constants section
                code = code.get_cached_constants_writer()
                code.mark_pos(self.pos)
                code.putln(
                    "%s = PyUnicode_FromUnicode(%s, (sizeof(%s) / sizeof(Py_UNICODE))-1); %s" % (
                        self.result_code,
                        data_cname,
                        data_cname,
                        code.error_goto_if_null(self.result_code, self.pos)))
                code.putln("#if CYTHON_PEP393_ENABLED")
                code.put_error_if_neg(
                    self.pos, "PyUnicode_READY(%s)" % self.result_code)
                code.putln("#endif")
            else:
                self.result_code = code.get_py_string_const(self.value)
        else:
            self.result_code = code.get_pyunicode_ptr_const(self.value)

    def calculate_result_code(self):
        return self.result_code

    def compile_time_value(self, env):
        return self.value
class StringNode(PyConstNode):
    # A Python str object, i.e. a byte string in Python 2.x and a
    # unicode string in Python 3.x
    #
    # value          BytesLiteral (or EncodedString with ASCII content)
    # unicode_value  EncodedString or None
    # is_identifier  boolean

    type = str_type
    is_string_literal = True
    is_identifier = None
    unicode_value = None

    def calculate_constant_result(self):
        if self.unicode_value is not None:
            # only the Unicode value is portable across Py2/3
            self.constant_result = self.unicode_value

    def as_sliced_node(self, start, stop, step=None):
        # Build a new StringNode for a constant slice of this
        # literal, slicing both representations consistently.
        value = type(self.value)(self.value[start:stop:step])
        value.encoding = self.value.encoding
        if self.unicode_value is not None:
            if StringEncoding.string_contains_surrogates(self.unicode_value[:stop]):
                # this is unsafe as it may give different results in different runtimes
                return None
            unicode_value = StringEncoding.EncodedString(
                self.unicode_value[start:stop:step])
        else:
            unicode_value = None
        return StringNode(
            self.pos, value=value, unicode_value=unicode_value,
            constant_result=value, is_identifier=self.is_identifier)

    def coerce_to(self, dst_type, env):
        if dst_type is not py_object_type and not str_type.subtype_of(dst_type):
#            if dst_type is Builtin.bytes_type:
#                # special case: bytes = 'str literal'
#                return BytesNode(self.pos, value=self.value)
            if not dst_type.is_pyobject:
                # C target: reinterpret as a bytes literal first
                return BytesNode(self.pos, value=self.value).coerce_to(dst_type, env)
            if dst_type is not Builtin.basestring_type:
                self.check_for_coercion_error(dst_type, env, fail=True)
        return self

    def can_coerce_to_char_literal(self):
        # identifiers never become char literals
        return not self.is_identifier and len(self.value) == 1

    def generate_evaluation_code(self, code):
        self.result_code = code.get_py_string_const(
            self.value, identifier=self.is_identifier, is_str=True,
            unicode_value=self.unicode_value)

    def get_constant_c_result_code(self):
        return None

    def calculate_result_code(self):
        return self.result_code

    def compile_time_value(self, env):
        return self.value
class IdentifierStringNode(StringNode):
    """A special str value that represents an identifier
    (bytes in Py2, unicode in Py3).
    """
    is_identifier = True
class ImagNode(AtomicExprNode):
    # Imaginary number literal
    #
    # value   float   imaginary part

    type = PyrexTypes.c_double_complex_type

    def calculate_constant_result(self):
        self.constant_result = complex(0.0, self.value)

    def compile_time_value(self, denv):
        return complex(0.0, self.value)

    def analyse_types(self, env):
        # Make sure the C complex type's declaration utility code
        # gets emitted.
        self.type.create_declaration_utility_code(env)
        return self

    def may_be_none(self):
        return False

    def coerce_to(self, dst_type, env):
        if self.type is dst_type:
            return self
        node = ImagNode(self.pos, value=self.value)
        if dst_type.is_pyobject:
            # construct a Python complex object in a temp instead
            node.is_temp = 1
            node.type = PyrexTypes.py_object_type
        # We still need to perform normal coerce_to processing on the
        # result, because we might be coercing to an extension type,
        # in which case a type test node will be needed.
        return AtomicExprNode.coerce_to(node, dst_type, env)

    gil_message = "Constructing complex number"

    def calculate_result_code(self):
        if self.type.is_pyobject:
            return self.result()
        else:
            # construct the C complex value from (0, imag)
            return "%s(0, %r)" % (self.type.from_parts, float(self.value))

    def generate_result_code(self, code):
        if self.type.is_pyobject:
            code.putln(
                "%s = PyComplex_FromDoubles(0.0, %r); %s" % (
                    self.result(),
                    float(self.value),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
class NewExprNode(AtomicExprNode):
    # C++ new statement
    #
    # cppclass   node   c++ class to create

    type = None

    def infer_type(self, env):
        type = self.cppclass.analyse_as_type(env)
        if type is None or not type.is_cpp_class:
            error(self.pos, "new operator can only be applied to a C++ class")
            self.type = error_type
            return
        self.cpp_check(env)
        constructor = type.scope.lookup(u'<init>')
        if constructor is None:
            # no declared constructor: synthesise a default one
            # that may propagate C++ exceptions ('+')
            func_type = PyrexTypes.CFuncType(type, [], exception_check='+')
            type.scope.declare_cfunction(u'<init>', func_type, self.pos)
            constructor = type.scope.lookup(u'<init>')
        self.class_type = type
        self.entry = constructor
        self.type = constructor.type
        return self.type

    def analyse_types(self, env):
        if self.type is None:
            self.infer_type(env)
        return self

    def may_be_none(self):
        return False

    def generate_result_code(self, code):
        pass

    def calculate_result_code(self):
        return "new " + self.class_type.declaration_code("")
class NameNode(AtomicExprNode):
    # Reference to a local or global variable name.
    #
    # name            string    Python name of the variable
    # entry           Entry     Symbol table entry
    # type_entry      Entry     For extension type names, the original type entry
    # cf_is_null      boolean   Is uninitialized before this node
    # cf_maybe_null   boolean   Maybe uninitialized before this node
    # allow_null      boolean   Don't raise UnboundLocalError
    # nogil           boolean   Whether it is used in a nogil context

    is_name = True
    is_cython_module = False
    cython_attribute = None
    lhs_of_first_assignment = False  # TODO: remove me
    is_used_as_rvalue = 0
    entry = None
    type_entry = None
    # Control-flow defaults: conservatively assume the name may be
    # unbound until flow analysis proves otherwise.
    cf_maybe_null = True
    cf_is_null = False
    allow_null = False
    nogil = False
    # Type deduced by type inference; consulted by infer_type()
    # when the entry's type is unspecified or a Python object.
    inferred_type = None
def as_cython_attribute(self):
    # Name of the Cython attribute this node refers to, if any.
    return self.cython_attribute

def type_dependencies(self, env):
    # A NameNode's type depends on its own entry for as long as
    # that entry's type is still unspecified.
    if self.entry is None:
        self.entry = env.lookup(self.name)
    if self.entry is not None and self.entry.type.is_unspecified:
        return (self,)
    else:
        return ()
def infer_type(self, env):
    # Deduce this name's type from its symbol table entry,
    # falling back to the flow-inferred type or object type.
    if self.entry is None:
        self.entry = env.lookup(self.name)
    if self.entry is None or self.entry.type is unspecified_type:
        if self.inferred_type is not None:
            return self.inferred_type
        return py_object_type
    elif (self.entry.type.is_extension_type or self.entry.type.is_builtin_type) and \
            self.name == self.entry.type.name:
        # Unfortunately the type attribute of type objects
        # is used for the pointer to the type they represent.
        return type_type
    elif self.entry.type.is_cfunction:
        if self.entry.scope.is_builtin_scope:
            # special case: optimised builtin functions must be treated as Python objects
            return py_object_type
        else:
            # special case: referring to a C function must return its pointer
            return PyrexTypes.CPtrType(self.entry.type)
    else:
        # If entry is inferred as pyobject it's safe to use local
        # NameNode's inferred_type.
        if self.entry.type.is_pyobject and self.inferred_type:
            # Overflow may happen if integer
            if not (self.inferred_type.is_int and self.entry.might_overflow):
                return self.inferred_type
        return self.entry.type
def compile_time_value(self, denv):
    # Look the name up in the compile-time environment.
    try:
        return denv.lookup(self.name)
    except KeyError:
        error(self.pos, "Compile-time name '%s' not defined" % self.name)

def get_constant_c_result_code(self):
    # Only non-Python entries have a usable constant C name.
    if not self.entry or self.entry.type.is_pyobject:
        return None
    return self.entry.cname
def coerce_to(self, dst_type, env):
    # If coercing to a generic pyobject and this is a builtin
    # C function with a Python equivalent, manufacture a NameNode
    # referring to the Python builtin.
    #print "NameNode.coerce_to:", self.name, dst_type ###
    if dst_type is py_object_type:
        entry = self.entry
        if entry and entry.is_cfunction:
            var_entry = entry.as_variable
            if var_entry:
                if var_entry.is_builtin and var_entry.is_const:
                    var_entry = env.declare_builtin(var_entry.name, self.pos)
                node = NameNode(self.pos, name = self.name)
                node.entry = var_entry
                node.analyse_rvalue_entry(env)
                return node
    # otherwise fall back to the generic coercion machinery
    return super(NameNode, self).coerce_to(dst_type, env)
def analyse_as_module(self, env):
    # Try to interpret this as a reference to a cimported module.
    # Returns the module scope on success, None otherwise.
    entry = self.entry or env.lookup(self.name)
    if entry:
        module_scope = entry.as_module
        if module_scope:
            return module_scope
    return None
def analyse_as_type(self, env):
    # Try to interpret this name as a type: first as the spelling of
    # a basic C type, then as a declared type entry.
    # (Avoids shadowing the builtin 'type' with a local name.)
    basic_name = self.cython_attribute or self.name
    parsed_type = PyrexTypes.parse_basic_type(basic_name)
    if parsed_type:
        return parsed_type
    entry = self.entry or env.lookup(self.name)
    if entry and entry.is_type:
        return entry.type
    return None
def analyse_as_extension_type(self, env):
    # Try to interpret this as a reference to an extension type.
    # Returns the extension (or builtin) type, or None.
    entry = self.entry or env.lookup(self.name)
    if entry and entry.is_type:
        entry_type = entry.type
        if entry_type.is_extension_type or entry_type.is_builtin_type:
            return entry_type
    return None
def analyse_target_declaration(self, env):
    # Declare this name as an assignment target in the current scope,
    # creating an implicit declaration if it is not known yet.
    if not self.entry:
        self.entry = env.lookup_here(self.name)
    if not self.entry:
        if env.directives['warn.undeclared']:
            warning(self.pos, "implicit declaration of '%s'" % self.name, 1)
        # Identity comparison against the False singleton (PEP 8):
        # the directive value may be True, False or None ("auto"),
        # and only an explicit False disables inference.
        if env.directives['infer_types'] is not False:
            type = unspecified_type
        else:
            type = py_object_type
        self.entry = env.declare_var(self.name, type, self.pos)
    if self.entry.is_declared_generic:
        # Generic declarations always hold Python objects.
        self.result_ctype = py_object_type
def analyse_types(self, env):
    # Resolve this name to an entry (declaring builtins on demand)
    # and analyse it for use as an rvalue.
    self.initialized_check = env.directives['initializedcheck']
    if self.entry is None:
        self.entry = env.lookup(self.name)
    if not self.entry:
        self.entry = env.declare_builtin(self.name, self.pos)
    if not self.entry:
        # Unknown name and not a builtin: an error has been reported,
        # mark the node as broken and bail out.
        self.type = PyrexTypes.error_type
        return self
    entry = self.entry
    if entry:
        entry.used = 1
        if entry.type.is_buffer:
            # buffer variables need their auxiliary vars marked as used
            import Buffer
            Buffer.used_buffer_aux_vars(entry)
    self.analyse_rvalue_entry(env)
    return self
def analyse_target_types(self, env):
    # Analyse this name as an assignment target and validate that it
    # is actually assignable (not const, not a reference, an lvalue).
    self.analyse_entry(env, is_target=True)
    if (not self.is_lvalue() and self.entry.is_cfunction and
            self.entry.fused_cfunction and self.entry.as_variable):
        # We need this for the fused 'def' TreeFragment
        self.entry = self.entry.as_variable
        self.type = self.entry.type
    if self.type.is_const:
        error(self.pos, "Assignment to const '%s'" % self.name)
    if self.type.is_reference:
        error(self.pos, "Assignment to reference '%s'" % self.name)
    if not self.is_lvalue():
        error(self.pos, "Assignment to non-lvalue '%s'"
            % self.name)
        self.type = PyrexTypes.error_type
    self.entry.used = 1
    if self.entry.type.is_buffer:
        # buffer variables need their auxiliary vars marked as used
        import Buffer
        Buffer.used_buffer_aux_vars(self.entry)
    return self
def analyse_rvalue_entry(self, env):
    # Finish analysing this name for use as an rvalue: decide result
    # ctype, temp-ness and reference management based on the entry.
    #print "NameNode.analyse_rvalue_entry:", self.name ###
    #print "Entry:", self.entry.__dict__ ###
    self.analyse_entry(env)
    entry = self.entry
    if entry.is_declared_generic:
        self.result_ctype = py_object_type
    if entry.is_pyglobal or entry.is_builtin:
        if entry.is_builtin and entry.is_const:
            # cached builtins are looked up once, no temp needed
            self.is_temp = 0
        else:
            self.is_temp = 1
        self.is_used_as_rvalue = 1
    elif entry.type.is_memoryviewslice:
        self.is_temp = False
        self.is_used_as_rvalue = True
        self.use_managed_ref = True
    return self
def nogil_check(self, env):
    # Called when this node appears inside a nogil section: reading
    # Python globals or non-cached builtins requires the GIL.
    self.nogil = True
    if self.is_used_as_rvalue:
        entry = self.entry
        if entry.is_builtin:
            if not entry.is_const: # cached builtins are ok
                self.gil_error()
        elif entry.is_pyglobal:
            self.gil_error()
        elif self.entry.type.is_memoryviewslice:
            # uninitialised memoryview slices would need the GIL to
            # raise an error, so flag that case here
            if self.cf_is_null or self.cf_maybe_null:
                import MemoryView
                MemoryView.err_if_nogil_initialized_check(self.pos, env)
# message used by gil_error() above
gil_message = "Accessing Python global or builtin"
def analyse_entry(self, env, is_target=False):
    # Determine self.type from the entry, preferring the flow-analysis
    # inferred type for Python objects when reading (not assigning).
    #print "NameNode.analyse_entry:", self.name ###
    self.check_identifier_kind()
    entry = self.entry
    type = entry.type
    if (not is_target and type.is_pyobject and self.inferred_type and
            self.inferred_type.is_builtin_type):
        # assume that type inference is smarter than the static entry
        type = self.inferred_type
    self.type = type
def check_identifier_kind(self):
    # Check that this is an appropriate kind of name for use in an
    # expression. Also finds the variable entry associated with
    # an extension type.
    entry = self.entry
    if entry.is_type and entry.type.is_extension_type:
        self.type_entry = entry
    if not (entry.is_const or entry.is_variable
            or entry.is_builtin or entry.is_cfunction
            or entry.is_cpp_class):
        if self.entry.as_variable:
            # e.g. an optimised function used as a value: fall back
            # to its associated variable entry
            self.entry = self.entry.as_variable
        else:
            error(self.pos,
                "'%s' is not a constant, variable or function identifier" % self.name)
def is_simple(self):
    """Name references are always simple: if the name is not a plain
    C variable, its value lives in a temp anyway."""
    return 1
def may_be_none(self):
    # Use control-flow analysis to decide whether this name can hold
    # None here; otherwise defer to the generic implementation.
    if self.cf_state and self.type and (self.type.is_pyobject or
                                        self.type.is_memoryviewslice):
        # guard against infinite recursion on self-dependencies
        if getattr(self, '_none_checking', False):
            # self-dependency - either this node receives a None
            # value from *another* node, or it can not reference
            # None at this point => safe to assume "not None"
            return False
        self._none_checking = True
        # evaluate control flow state to see if there were any
        # potential None values assigned to the node so far
        may_be_none = False
        for assignment in self.cf_state:
            if assignment.rhs.may_be_none():
                may_be_none = True
                break
        del self._none_checking
        return may_be_none
    return super(NameNode, self).may_be_none()
def nonlocally_immutable(self):
    # A name cannot be modified behind our back if the generic check
    # passes, or if it is a local/arg/builtin/readonly entry that
    # does not live in a closure.
    if ExprNode.nonlocally_immutable(self):
        return True
    entry = self.entry
    if entry and not entry.in_closure:
        return (entry.is_local or entry.is_arg
                or entry.is_builtin or entry.is_readonly)
    return False
def calculate_target_results(self, env):
    # Nothing to precompute for a plain name assignment target.
    pass
def check_const(self):
    # A name is a compile-time constant only if it refers to a
    # constant, a C function or a builtin (or has no entry at all,
    # meaning an error was already reported).
    entry = self.entry
    if entry is None:
        return True
    if entry.is_const or entry.is_cfunction or entry.is_builtin:
        return True
    self.not_const()
    return False
def check_const_addr(self):
    # The address of a name is constant only for C globals,
    # C functions and builtins.
    entry = self.entry
    if entry.is_cglobal or entry.is_cfunction or entry.is_builtin:
        return True
    self.addr_not_const()
    return False
def is_lvalue(self):
    # Only writable variables can be assigned to: not arrays, not
    # read-only entries.
    entry = self.entry
    return (entry.is_variable
            and not entry.type.is_array
            and not entry.is_readonly)
def is_addressable(self):
    # Memory view slices have no single address; plain variables do.
    entry = self.entry
    return entry.is_variable and not self.type.is_memoryviewslice
def is_ephemeral(self):
    """Name nodes are never ephemeral, even if the result is held
    in a temporary."""
    return 0
def calculate_result_code(self):
    # The C result of a name is simply the entry's cname; a missing
    # entry means an error was already reported upstream.
    entry = self.entry
    if entry:
        return entry.cname
    return "<error>"
def generate_result_code(self, code):
    # Emit the C code that loads this name's value, dispatching on
    # where the entry lives: class namespace attribute, builtin,
    # module global / class scope, or local/closure variable.
    assert hasattr(self, 'entry')
    entry = self.entry
    if entry is None:
        return # There was an error earlier
    if entry.is_builtin and entry.is_const:
        return # Lookup already cached
    elif entry.is_pyclass_attr:
        assert entry.type.is_pyobject, "Python global or builtin not a Python object"
        interned_cname = code.intern_identifier(self.entry.name)
        if entry.is_builtin:
            namespace = Naming.builtins_cname
        else: # entry.is_pyglobal
            namespace = entry.scope.namespace_cname
        if not self.cf_is_null:
            # try the class namespace dict first ...
            code.putln(
                '%s = PyObject_GetItem(%s, %s);' % (
                    self.result(),
                    namespace,
                    interned_cname))
            code.putln('if (unlikely(!%s)) {' % self.result())
            code.putln('PyErr_Clear();')
        # ... and fall back to a module-global lookup on failure
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
        code.putln(
            '%s = __Pyx_GetModuleGlobalName(%s);' % (
                self.result(),
                interned_cname))
        if not self.cf_is_null:
            code.putln("}")
        code.putln(code.error_goto_if_null(self.result(), self.pos))
        code.put_gotref(self.py_result())
    elif entry.is_builtin:
        assert entry.type.is_pyobject, "Python global or builtin not a Python object"
        interned_cname = code.intern_identifier(self.entry.name)
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("GetBuiltinName", "ObjectHandling.c"))
        code.putln(
            '%s = __Pyx_GetBuiltinName(%s); %s' % (
                self.result(),
                interned_cname,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
    elif entry.is_pyglobal:
        assert entry.type.is_pyobject, "Python global or builtin not a Python object"
        interned_cname = code.intern_identifier(self.entry.name)
        if entry.scope.is_module_scope:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
            code.putln(
                '%s = __Pyx_GetModuleGlobalName(%s); %s' % (
                    self.result(),
                    interned_cname,
                    code.error_goto_if_null(self.result(), self.pos)))
        else:
            # FIXME: is_pyglobal is also used for class namespace
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("GetNameInClass", "ObjectHandling.c"))
            code.putln(
                '%s = __Pyx_GetNameInClass(%s, %s); %s' % (
                    self.result(),
                    entry.scope.namespace_cname,
                    interned_cname,
                    code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
    elif entry.is_local or entry.in_closure or entry.from_closure or entry.type.is_memoryviewslice:
        # Raise UnboundLocalError for objects and memoryviewslices
        raise_unbound = (
            (self.cf_maybe_null or self.cf_is_null) and not self.allow_null)
        null_code = entry.type.check_for_null_code(entry.cname)
        memslice_check = entry.type.is_memoryviewslice and self.initialized_check
        if null_code and raise_unbound and (entry.type.is_pyobject or memslice_check):
            code.put_error_if_unbound(self.pos, entry, self.in_nogil_context)
def generate_assignment_code(self, rhs, code):
    # Emit the C code assigning 'rhs' to this name, handling Python
    # namespace stores, buffer/memoryview acquisition and reference
    # counting for C-level variables.
    #print "NameNode.generate_assignment_code:", self.name ###
    entry = self.entry
    if entry is None:
        return # There was an error earlier
    if (self.entry.type.is_ptr and isinstance(rhs, ListNode)
            and not self.lhs_of_first_assignment and not rhs.in_module_scope):
        error(self.pos, "Literal list must be assigned to pointer at time of declaration")
    # is_pyglobal seems to be True for module level-globals only.
    # We use this to access class->tp_dict if necessary.
    if entry.is_pyglobal:
        assert entry.type.is_pyobject, "Python global or builtin not a Python object"
        interned_cname = code.intern_identifier(self.entry.name)
        namespace = self.entry.scope.namespace_cname
        if entry.is_member:
            # if the entry is a member we have to cheat: SetAttr does not work
            # on types, so we create a descriptor which is then added to tp_dict
            setter = 'PyDict_SetItem'
            namespace = '%s->tp_dict' % namespace
        elif entry.scope.is_module_scope:
            setter = 'PyDict_SetItem'
            namespace = Naming.moddict_cname
        elif entry.is_pyclass_attr:
            setter = 'PyObject_SetItem'
        else:
            assert False, repr(entry)
        code.put_error_if_neg(
            self.pos,
            '%s(%s, %s, %s)' % (
                setter,
                namespace,
                interned_cname,
                rhs.py_result()))
        if debug_disposal_code:
            print("NameNode.generate_assignment_code:")
            print("...generating disposal code for %s" % rhs)
        rhs.generate_disposal_code(code)
        rhs.free_temps(code)
        if entry.is_member:
            # in Py2.6+, we need to invalidate the method cache
            code.putln("PyType_Modified(%s);" %
                       entry.scope.parent_type.typeptr_cname)
    else:
        if self.type.is_memoryviewslice:
            self.generate_acquire_memoryviewslice(rhs, code)
        elif self.type.is_buffer:
            # Generate code for doing the buffer release/acquisition.
            # This might raise an exception in which case the assignment (done
            # below) will not happen.
            #
            # The reason this is not in a typetest-like node is because the
            # variables that the acquired buffer info is stored to is allocated
            # per entry and coupled with it.
            self.generate_acquire_buffer(rhs, code)
        assigned = False
        if self.type.is_pyobject:
            #print "NameNode.generate_assignment_code: to", self.name ###
            #print "...from", rhs ###
            #print "...LHS type", self.type, "ctype", self.ctype() ###
            #print "...RHS type", rhs.type, "ctype", rhs.ctype() ###
            if self.use_managed_ref:
                rhs.make_owned_reference(code)
                # external refs (globals/closure vars) need gotref/giveref
                is_external_ref = entry.is_cglobal or self.entry.in_closure or self.entry.from_closure
                if is_external_ref:
                    if not self.cf_is_null:
                        if self.cf_maybe_null:
                            code.put_xgotref(self.py_result())
                        else:
                            code.put_gotref(self.py_result())
                assigned = True
                if entry.is_cglobal:
                    code.put_decref_set(
                        self.result(), rhs.result_as(self.ctype()))
                else:
                    if not self.cf_is_null:
                        if self.cf_maybe_null:
                            code.put_xdecref_set(
                                self.result(), rhs.result_as(self.ctype()))
                        else:
                            code.put_decref_set(
                                self.result(), rhs.result_as(self.ctype()))
                    else:
                        # first assignment: no old value to decref,
                        # plain C assignment below will do
                        assigned = False
                if is_external_ref:
                    code.put_giveref(rhs.py_result())
        if not self.type.is_memoryviewslice:
            if not assigned:
                code.putln('%s = %s;' % (
                    self.result(), rhs.result_as(self.ctype())))
            if debug_disposal_code:
                print("NameNode.generate_assignment_code:")
                print("...generating post-assignment code for %s" % rhs)
            rhs.generate_post_assignment_code(code)
        elif rhs.result_in_temp():
            rhs.generate_post_assignment_code(code)
        rhs.free_temps(code)
def generate_acquire_memoryviewslice(self, rhs, code):
    """
    Slices, coercions from objects, return values etc are new references.
    We have a borrowed reference in case of dst = src
    """
    import MemoryView
    MemoryView.put_acquire_memoryviewslice(
        lhs_cname=self.result(),
        lhs_type=self.type,
        lhs_pos=self.pos,
        rhs=rhs,
        code=code,
        have_gil=not self.in_nogil_context,
        first_assignment=self.cf_is_null)
def generate_acquire_buffer(self, rhs, code):
    # Emit the buffer release/re-acquisition code for assigning 'rhs'
    # to this buffer-typed name.
    # rhstmp is only used in case the rhs is a complicated expression leading to
    # the object, to avoid repeating the same C expression for every reference
    # to the rhs. It does NOT hold a reference.
    pretty_rhs = isinstance(rhs, NameNode) or rhs.is_temp
    if pretty_rhs:
        rhstmp = rhs.result_as(self.ctype())
    else:
        rhstmp = code.funcstate.allocate_temp(self.entry.type, manage_ref=False)
        code.putln('%s = %s;' % (rhstmp, rhs.result_as(self.ctype())))
    import Buffer
    Buffer.put_assign_to_buffer(self.result(), rhstmp, self.entry,
                                is_initialized=not self.lhs_of_first_assignment,
                                pos=self.pos, code=code)
    if not pretty_rhs:
        code.putln("%s = 0;" % rhstmp)
        code.funcstate.release_temp(rhstmp)
def generate_deletion_code(self, code, ignore_nonexisting=False):
    # Emit the C code for 'del name', dispatching on the entry kind:
    # class namespace item, module global attribute, or local
    # object/memoryview variable. C names cannot be deleted.
    if self.entry is None:
        return # There was an error earlier
    elif self.entry.is_pyclass_attr:
        namespace = self.entry.scope.namespace_cname
        interned_cname = code.intern_identifier(self.entry.name)
        if ignore_nonexisting:
            key_error_code = 'PyErr_Clear(); else'
        else:
            # minor hack: fake a NameError on KeyError
            key_error_code = (
                '{ PyErr_Clear(); PyErr_Format(PyExc_NameError, "name \'%%s\' is not defined", "%s"); }' %
                self.entry.name)
        code.putln(
            'if (unlikely(PyObject_DelItem(%s, %s) < 0)) {'
            ' if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) %s'
            ' %s '
            '}' % (namespace, interned_cname,
                   key_error_code,
                   code.error_goto(self.pos)))
    elif self.entry.is_pyglobal:
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
        interned_cname = code.intern_identifier(self.entry.name)
        del_code = '__Pyx_PyObject_DelAttrStr(%s, %s)' % (
            Naming.module_cname, interned_cname)
        if ignore_nonexisting:
            code.putln('if (unlikely(%s < 0)) { if (likely(PyErr_ExceptionMatches(PyExc_AttributeError))) PyErr_Clear(); else %s }' % (
                del_code,
                code.error_goto(self.pos)))
        else:
            code.put_error_if_neg(self.pos, del_code)
    elif self.entry.type.is_pyobject or self.entry.type.is_memoryviewslice:
        if not self.cf_is_null:
            if self.cf_maybe_null and not ignore_nonexisting:
                code.put_error_if_unbound(self.pos, self.entry)
            if self.entry.type.is_pyobject:
                if self.entry.in_closure:
                    # generator
                    if ignore_nonexisting and self.cf_maybe_null:
                        code.put_xgotref(self.result())
                    else:
                        code.put_gotref(self.result())
                if ignore_nonexisting and self.cf_maybe_null:
                    code.put_xdecref(self.result(), self.ctype())
                else:
                    code.put_decref(self.result(), self.ctype())
                code.putln('%s = NULL;' % self.result())
            else:
                code.put_xdecref_memoryviewslice(self.entry.cname,
                                                 have_gil=not self.nogil)
    else:
        error(self.pos, "Deletion of C names not supported")
def annotate(self, code):
    # Emit annotation info for the HTML output when this name is the
    # callee of a call: mark it as a Python-level or C-level call.
    if not getattr(self, 'is_called', False):
        return
    name_len = len(self.name)
    pos = (self.pos[0], self.pos[1], self.pos[2] - name_len - 1)
    if self.type.is_pyobject:
        style, text = 'py_call', 'python function (%s)'
    else:
        style, text = 'c_call', 'c function (%s)'
    code.annotate(pos, AnnotationItem(style, text % self.type, size=name_len))
class BackquoteNode(ExprNode):
    # `expr`  (Python 2 backquote repr() syntax)
    #
    # arg     ExprNode    the expression whose repr() is taken
    type = py_object_type
    subexprs = ['arg']
    def analyse_types(self, env):
        # repr() accepts any object, so coerce the argument to a
        # Python object; the result is always a new reference.
        self.arg = self.arg.analyse_types(env)
        self.arg = self.arg.coerce_to_pyobject(env)
        self.is_temp = 1
        return self
    gil_message = "Backquote expression"
    def calculate_constant_result(self):
        # Constant-fold via Python's own repr().
        self.constant_result = repr(self.arg.constant_result)
    def generate_result_code(self, code):
        code.putln(
            "%s = PyObject_Repr(%s); %s" % (
                self.result(),
                self.arg.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class ImportNode(ExprNode):
    # Used as part of import statement implementation.
    # Implements result =
    # __import__(module_name, globals(), None, name_list, level)
    #
    # module_name   StringNode            dotted name of module. Empty module
    #                       name means importing the parent package according
    #                       to level
    # name_list     ListNode or None      list of names to be imported
    # level         int                   relative import level:
    #               -1: attempt both relative import and absolute import;
    #                0: absolute import;
    #               >0: the number of parent directories to search
    #                   relative to the current module.
    #             None: decide the level according to language level and
    #                   directives
    type = py_object_type
    subexprs = ['module_name', 'name_list']
    def analyse_types(self, env):
        if self.level is None:
            # Pick the default import level from the language mode:
            # Py2 semantics try relative then absolute (-1), while
            # 'absolute_import' forces absolute imports (0).
            if (env.directives['py2_import'] or
                Future.absolute_import not in env.global_scope().context.future_directives):
                self.level = -1
            else:
                self.level = 0
        module_name = self.module_name.analyse_types(env)
        self.module_name = module_name.coerce_to_pyobject(env)
        if self.name_list:
            name_list = self.name_list.analyse_types(env)
            self.name_list = name_list.coerce_to_pyobject(env)
        self.is_temp = 1
        env.use_utility_code(UtilityCode.load_cached("Import", "ImportExport.c"))
        return self
    gil_message = "Python import"
    def generate_result_code(self, code):
        # Call the __Pyx_Import() helper from the utility code above.
        if self.name_list:
            name_list_code = self.name_list.py_result()
        else:
            name_list_code = "0"
        code.putln(
            "%s = __Pyx_Import(%s, %s, %d); %s" % (
                self.result(),
                self.module_name.py_result(),
                name_list_code,
                self.level,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class IteratorNode(ExprNode):
    # Used as part of for statement implementation.
    #
    # Implements result = iter(sequence)
    #
    # sequence   ExprNode
    type = py_object_type
    iter_func_ptr = None        # temp holding tp_iternext, for generic iteration
    counter_cname = None        # temp holding the index, for list/tuple fast path
    cpp_iterator_cname = None   # temp holding a pointer to the C++ container
    reversed = False            # currently only used for list/tuple types (see Optimize.py)
    subexprs = ['sequence']
    def analyse_types(self, env):
        # Decide the iteration strategy from the sequence type:
        # C array/pointer (transformed later), C++ container, or a
        # generic Python iterable.
        self.sequence = self.sequence.analyse_types(env)
        if (self.sequence.type.is_array or self.sequence.type.is_ptr) and \
                not self.sequence.type.is_string:
            # C array iteration will be transformed later on
            self.type = self.sequence.type
        elif self.sequence.type.is_cpp_class:
            self.analyse_cpp_types(env)
        else:
            self.sequence = self.sequence.coerce_to_pyobject(env)
            if self.sequence.type is list_type or \
                    self.sequence.type is tuple_type:
                self.sequence = self.sequence.as_none_safe_node("'NoneType' object is not iterable")
        self.is_temp = 1
        return self
    gil_message = "Iterating over Python object"
    # signature of a tp_iternext slot function
    _func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
        PyrexTypes.py_object_type, [
            PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None),
            ]))
    def type_dependencies(self, env):
        return self.sequence.type_dependencies(env)
    def infer_type(self, env):
        # The iterator's inferred type mirrors the sequence for C
        # arrays/pointers and C++ containers; otherwise it is a
        # generic Python object.
        sequence_type = self.sequence.infer_type(env)
        if sequence_type.is_array or sequence_type.is_ptr:
            return sequence_type
        elif sequence_type.is_cpp_class:
            begin = sequence_type.scope.lookup("begin")
            if begin is not None:
                return begin.type.return_type
        elif sequence_type.is_pyobject:
            return sequence_type
        return py_object_type
    def analyse_cpp_types(self, env):
        # Validate the C++ iteration protocol on the container:
        # begin()/end() methods, and operator!=, operator++ and
        # operator* on the iterator type they return.
        sequence_type = self.sequence.type
        if sequence_type.is_ptr:
            sequence_type = sequence_type.base_type
        begin = sequence_type.scope.lookup("begin")
        end = sequence_type.scope.lookup("end")
        if (begin is None
            or not begin.type.is_cfunction
            or begin.type.args):
            error(self.pos, "missing begin() on %s" % self.sequence.type)
            self.type = error_type
            return
        if (end is None
            or not end.type.is_cfunction
            or end.type.args):
            error(self.pos, "missing end() on %s" % self.sequence.type)
            self.type = error_type
            return
        iter_type = begin.type.return_type
        if iter_type.is_cpp_class:
            if env.lookup_operator_for_types(
                    self.pos,
                    "!=",
                    [iter_type, end.type.return_type]) is None:
                error(self.pos, "missing operator!= on result of begin() on %s" % self.sequence.type)
                self.type = error_type
                return
            if env.lookup_operator_for_types(self.pos, '++', [iter_type]) is None:
                error(self.pos, "missing operator++ on result of begin() on %s" % self.sequence.type)
                self.type = error_type
                return
            if env.lookup_operator_for_types(self.pos, '*', [iter_type]) is None:
                error(self.pos, "missing operator* on result of begin() on %s" % self.sequence.type)
                self.type = error_type
                return
            self.type = iter_type
        elif iter_type.is_ptr:
            if not (iter_type == end.type.return_type):
                error(self.pos, "incompatible types for begin() and end()")
            self.type = iter_type
        else:
            error(self.pos, "result type of begin() on %s must be a C++ class or pointer" % self.sequence.type)
            self.type = error_type
            return
    def generate_result_code(self, code):
        # Emit the iterator setup: begin() for C++ containers, an
        # index counter for (possible) list/tuple fast paths, and
        # PyObject_GetIter() + tp_iternext for the generic case.
        sequence_type = self.sequence.type
        if sequence_type.is_cpp_class:
            if self.sequence.is_name:
                # safe: C++ won't allow you to reassign to class references
                begin_func = "%s.begin" % self.sequence.result()
            else:
                sequence_type = PyrexTypes.c_ptr_type(sequence_type)
                self.cpp_iterator_cname = code.funcstate.allocate_temp(sequence_type, manage_ref=False)
                code.putln("%s = &%s;" % (self.cpp_iterator_cname, self.sequence.result()))
                begin_func = "%s->begin" % self.cpp_iterator_cname
            # TODO: Limit scope.
            code.putln("%s = %s();" % (self.result(), begin_func))
            return
        if sequence_type.is_array or sequence_type.is_ptr:
            raise InternalError("for in carray slice not transformed")
        is_builtin_sequence = sequence_type is list_type or \
                              sequence_type is tuple_type
        if not is_builtin_sequence:
            # reversed() not currently optimised (see Optimize.py)
            assert not self.reversed, "internal error: reversed() only implemented for list/tuple objects"
        self.may_be_a_sequence = not sequence_type.is_builtin_type
        if self.may_be_a_sequence:
            # runtime check: take the fast path for exact list/tuple
            code.putln(
                "if (PyList_CheckExact(%s) || PyTuple_CheckExact(%s)) {" % (
                    self.sequence.py_result(),
                    self.sequence.py_result()))
        if is_builtin_sequence or self.may_be_a_sequence:
            self.counter_cname = code.funcstate.allocate_temp(
                PyrexTypes.c_py_ssize_t_type, manage_ref=False)
            if self.reversed:
                if sequence_type is list_type:
                    init_value = 'PyList_GET_SIZE(%s) - 1' % self.result()
                else:
                    init_value = 'PyTuple_GET_SIZE(%s) - 1' % self.result()
            else:
                init_value = '0'
            code.putln(
                "%s = %s; __Pyx_INCREF(%s); %s = %s;" % (
                    self.result(),
                    self.sequence.py_result(),
                    self.result(),
                    self.counter_cname,
                    init_value
                    ))
        if not is_builtin_sequence:
            self.iter_func_ptr = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False)
            if self.may_be_a_sequence:
                # NULL iter_func_ptr marks "iterating a sequence by index"
                code.putln("%s = NULL;" % self.iter_func_ptr)
                code.putln("} else {")
                code.put("%s = -1; " % self.counter_cname)
            code.putln("%s = PyObject_GetIter(%s); %s" % (
                self.result(),
                self.sequence.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
            code.putln("%s = Py_TYPE(%s)->tp_iternext;" % (self.iter_func_ptr, self.py_result()))
        if self.may_be_a_sequence:
            code.putln("}")
    def generate_next_sequence_item(self, test_name, result_name, code):
        # Emit the fast-path item fetch for an exact list/tuple
        # (test_name is 'List' or 'Tuple').
        assert self.counter_cname, "internal error: counter_cname temp not prepared"
        final_size = 'Py%s_GET_SIZE(%s)' % (test_name, self.py_result())
        if self.sequence.is_sequence_constructor:
            # size is statically known for sequence literals
            item_count = len(self.sequence.args)
            if self.sequence.mult_factor is None:
                final_size = item_count
            elif isinstance(self.sequence.mult_factor.constant_result, (int, long)):
                final_size = item_count * self.sequence.mult_factor.constant_result
        code.putln("if (%s >= %s) break;" % (self.counter_cname, final_size))
        if self.reversed:
            inc_dec = '--'
        else:
            inc_dec = '++'
        code.putln("#if CYTHON_COMPILING_IN_CPYTHON")
        code.putln(
            "%s = Py%s_GET_ITEM(%s, %s); __Pyx_INCREF(%s); %s%s; %s" % (
                result_name,
                test_name,
                self.py_result(),
                self.counter_cname,
                result_name,
                self.counter_cname,
                inc_dec,
                # use the error label to avoid C compiler warnings if we only use it below
                code.error_goto_if_neg('0', self.pos)
                ))
        code.putln("#else")
        code.putln(
            "%s = PySequence_ITEM(%s, %s); %s%s; %s" % (
                result_name,
                self.py_result(),
                self.counter_cname,
                self.counter_cname,
                inc_dec,
                code.error_goto_if_null(result_name, self.pos)))
        code.putln("#endif")
    def generate_iter_next_result_code(self, result_name, code):
        # Emit the per-iteration "fetch next item" code for whichever
        # strategy generate_result_code() set up.
        sequence_type = self.sequence.type
        if self.reversed:
            code.putln("if (%s < 0) break;" % self.counter_cname)
        if sequence_type.is_cpp_class:
            if self.cpp_iterator_cname:
                end_func = "%s->end" % self.cpp_iterator_cname
            else:
                end_func = "%s.end" % self.sequence.result()
            # TODO: Cache end() call?
            code.putln("if (!(%s != %s())) break;" % (
                self.result(),
                end_func))
            code.putln("%s = *%s;" % (
                result_name,
                self.result()))
            code.putln("++%s;" % self.result())
            return
        elif sequence_type is list_type:
            self.generate_next_sequence_item('List', result_name, code)
            return
        elif sequence_type is tuple_type:
            self.generate_next_sequence_item('Tuple', result_name, code)
            return
        if self.may_be_a_sequence:
            for test_name in ('List', 'Tuple'):
                code.putln("if (!%s && Py%s_CheckExact(%s)) {" % (
                    self.iter_func_ptr, test_name, self.py_result()))
                self.generate_next_sequence_item(test_name, result_name, code)
                code.put("} else ")
            code.putln("{")
        # generic path: call tp_iternext and translate StopIteration
        # into loop termination
        code.putln(
            "%s = %s(%s);" % (
                result_name,
                self.iter_func_ptr,
                self.py_result()))
        code.putln("if (unlikely(!%s)) {" % result_name)
        code.putln("PyObject* exc_type = PyErr_Occurred();")
        code.putln("if (exc_type) {")
        code.putln("if (likely(exc_type == PyExc_StopIteration ||"
                   " PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();")
        code.putln("else %s" % code.error_goto(self.pos))
        code.putln("}")
        code.putln("break;")
        code.putln("}")
        code.put_gotref(result_name)
        if self.may_be_a_sequence:
            code.putln("}")
    def free_temps(self, code):
        # Release the extra temps allocated by generate_result_code().
        if self.counter_cname:
            code.funcstate.release_temp(self.counter_cname)
        if self.iter_func_ptr:
            code.funcstate.release_temp(self.iter_func_ptr)
            self.iter_func_ptr = None
        if self.cpp_iterator_cname:
            code.funcstate.release_temp(self.cpp_iterator_cname)
        ExprNode.free_temps(self, code)
class NextNode(AtomicExprNode):
    # Used as part of for statement implementation.
    # Implements result = iterator.next()
    # Created during analyse_types phase.
    # The iterator is not owned by this node.
    #
    # iterator   IteratorNode
    def __init__(self, iterator):
        AtomicExprNode.__init__(self, iterator.pos)
        self.iterator = iterator
    def type_dependencies(self, env):
        return self.iterator.type_dependencies(env)
    def infer_type(self, env, iterator_type = None):
        # Infer the item type produced by one iteration step from the
        # iterator's type (C pointer/array, C++ iterator, or generic).
        if iterator_type is None:
            iterator_type = self.iterator.infer_type(env)
        if iterator_type.is_ptr or iterator_type.is_array:
            return iterator_type.base_type
        elif iterator_type.is_cpp_class:
            item_type = env.lookup_operator_for_types(self.pos, "*", [iterator_type]).type.return_type
            if item_type.is_reference:
                item_type = item_type.ref_base_type
            if item_type.is_const:
                item_type = item_type.const_base_type
            return item_type
        else:
            # Avoid duplication of complicated logic.
            fake_index_node = IndexNode(
                self.pos,
                base=self.iterator.sequence,
                index=IntNode(self.pos, value='PY_SSIZE_T_MAX',
                              type=PyrexTypes.c_py_ssize_t_type))
            return fake_index_node.infer_type(env)
    def analyse_types(self, env):
        self.type = self.infer_type(env, self.iterator.type)
        self.is_temp = 1
        return self
    def generate_result_code(self, code):
        # The iterator node knows how to produce the next item.
        self.iterator.generate_iter_next_result_code(self.result(), code)
class WithExitCallNode(ExprNode):
    # The __exit__() call of a 'with' statement.  Used in both the
    # except and finally clauses.
    # with_stat  WithStatNode                the surrounding 'with' statement
    # args       TupleNode or ResultStatNode the exception info tuple
    subexprs = ['args']
    test_if_run = True  # guard the call with "if (exit_var)" when True
    def analyse_types(self, env):
        self.args = self.args.analyse_types(env)
        self.type = PyrexTypes.c_bint_type  # truth value of __exit__()'s result
        self.is_temp = True
        return self
    def generate_evaluation_code(self, code):
        # Call __exit__(*args), clear the cached bound method, and
        # (if the result is used) convert the return value to a C int
        # indicating whether the exception should be suppressed.
        if self.test_if_run:
            # call only if it was not already called (and decref-cleared)
            code.putln("if (%s) {" % self.with_stat.exit_var)
        self.args.generate_evaluation_code(code)
        result_var = code.funcstate.allocate_temp(py_object_type, manage_ref=False)
        code.mark_pos(self.pos)
        code.globalstate.use_utility_code(UtilityCode.load_cached(
            "PyObjectCall", "ObjectHandling.c"))
        code.putln("%s = __Pyx_PyObject_Call(%s, %s, NULL);" % (
            result_var,
            self.with_stat.exit_var,
            self.args.result()))
        code.put_decref_clear(self.with_stat.exit_var, type=py_object_type)
        self.args.generate_disposal_code(code)
        self.args.free_temps(code)
        code.putln(code.error_goto_if_null(result_var, self.pos))
        code.put_gotref(result_var)
        if self.result_is_used:
            self.allocate_temp_result(code)
            code.putln("%s = __Pyx_PyObject_IsTrue(%s);" % (self.result(), result_var))
        code.put_decref_clear(result_var, type=py_object_type)
        if self.result_is_used:
            code.put_error_if_neg(self.pos, self.result())
        code.funcstate.release_temp(result_var)
        if self.test_if_run:
            code.putln("}")
class ExcValueNode(AtomicExprNode):
    """Fetches the current exception value.

    Created during the analyse_types phase of an ExceptClauseNode;
    the result is an externally managed C variable installed via
    set_var().
    """
    type = py_object_type

    def __init__(self, pos):
        ExprNode.__init__(self, pos)

    def set_var(self, var):
        # Remember the C name that holds the exception value.
        self.var = var

    def calculate_result_code(self):
        # The result is simply the externally assigned variable.
        return self.var

    def generate_result_code(self, code):
        # Nothing to emit: the except-clause machinery produces the
        # value, not this node.
        pass

    def analyse_types(self, env):
        # The type is fixed to py_object_type; nothing to analyse.
        return self
class TempNode(ExprNode):
    # Node created during analyse_types phase
    # of some nodes to hold a temporary value.
    #
    # Note: One must call "allocate" and "release" on
    # the node during code generation to get/release the temp.
    # This is because the temp result is often used outside of
    # the regular cycle.
    subexprs = []
    def __init__(self, pos, type, env=None):
        ExprNode.__init__(self, pos)
        self.type = type
        if type.is_pyobject:
            self.result_ctype = py_object_type
        self.is_temp = 1
    def analyse_types(self, env):
        # Type was fixed at construction time; nothing to analyse.
        return self
    def analyse_target_declaration(self, env):
        pass
    def generate_result_code(self, code):
        pass
    def allocate(self, code):
        # Acquire the backing temp; must be paired with release().
        self.temp_cname = code.funcstate.allocate_temp(self.type, manage_ref=True)
    def release(self, code):
        code.funcstate.release_temp(self.temp_cname)
        self.temp_cname = None
    def result(self):
        try:
            return self.temp_cname
        except AttributeError:
            # Only a missing temp_cname signals "allocate() was never
            # called"; a bare except here would also swallow
            # KeyboardInterrupt/SystemExit.
            assert False, "Remember to call allocate/release on TempNode"
            raise
    # Do not participate in normal temp alloc/dealloc:
    def allocate_temp_result(self, code):
        pass
    def release_temp_result(self, code):
        pass
class PyTempNode(TempNode):
    # TempNode holding a Python value.
    def __init__(self, pos, env):
        # Fixes the temp's type to a generic Python object.
        TempNode.__init__(self, pos, PyrexTypes.py_object_type, env)
class RawCNameExprNode(ExprNode):
    """Expression node standing for a raw, externally supplied C name.

    The result code is exactly the stored cname; no code is generated
    for it.
    """
    subexprs = []

    def __init__(self, pos, type=None, cname=None):
        ExprNode.__init__(self, pos, type=type)
        if cname is not None:
            self.cname = cname

    def analyse_types(self, env):
        # Type and cname are supplied externally; nothing to analyse.
        return self

    def set_cname(self, cname):
        # Install (or replace) the C name used as the result.
        self.cname = cname

    def result(self):
        return self.cname

    def generate_result_code(self, code):
        # The named C entity already exists in the generated code.
        pass
#-------------------------------------------------------------------
#
# Parallel nodes (cython.parallel.thread(savailable|id))
#
#-------------------------------------------------------------------
class ParallelThreadsAvailableNode(AtomicExprNode):
    """
    Note: this is disabled and not a valid directive at this moment
    Implements cython.parallel.threadsavailable(). If we are called from the
    sequential part of the application, we need to call omp_get_max_threads(),
    and in the parallel part we can just call omp_get_num_threads()
    """
    type = PyrexTypes.c_int_type
    def analyse_types(self, env):
        self.is_temp = True
        # env.add_include_file("omp.h")
        return self
    def generate_result_code(self, code):
        # Guarded by _OPENMP so non-OpenMP builds compile; without
        # OpenMP there is exactly one thread available.
        code.putln("#ifdef _OPENMP")
        code.putln("if (omp_in_parallel()) %s = omp_get_max_threads();" %
                                                    self.temp_code)
        code.putln("else %s = omp_get_num_threads();" % self.temp_code)
        code.putln("#else")
        code.putln("%s = 1;" % self.temp_code)
        code.putln("#endif")
    def result(self):
        return self.temp_code
class ParallelThreadIdNode(AtomicExprNode): #, Nodes.ParallelNode):
    """
    Implements cython.parallel.threadid()
    """

    type = PyrexTypes.c_int_type

    def analyse_types(self, env):
        self.is_temp = True
        # env.add_include_file("omp.h")
        return self

    def generate_result_code(self, code):
        # omp_get_thread_num() under OpenMP; otherwise the single thread
        # is always thread 0.
        code.putln("#ifdef _OPENMP")
        code.putln("%s = omp_get_thread_num();" % self.temp_code)
        code.putln("#else")
        code.putln("%s = 0;" % self.temp_code)
        code.putln("#endif")

    def result(self):
        return self.temp_code
#-------------------------------------------------------------------
#
# Trailer nodes
#
#-------------------------------------------------------------------
class IndexNode(ExprNode):
    # Sequence indexing.
    #
    # base ExprNode
    # index ExprNode
    # indices [ExprNode]
    # type_indices [PyrexType]
    # is_buffer_access boolean Whether this is a buffer access.
    #
    # indices is used on buffer access, index on non-buffer access.
    # The former contains a clean list of index parameters, the
    # latter whatever Python object is needed for index access.
    #
    # is_fused_index boolean Whether the index is used to specialize a
    # c(p)def function

    subexprs = ['base', 'index', 'indices']
    indices = None
    type_indices = None

    is_subscript = True
    is_fused_index = False

    # Whether we're assigning to a buffer (in that case it needs to be
    # writable)
    writable_needed = False

    # Whether we are indexing or slicing a memoryviewslice
    memslice_index = False
    memslice_slice = False
    is_memslice_copy = False
    memslice_ellipsis_noop = False
    warned_untyped_idx = False
    # set by SingleAssignmentNode after analyse_types()
    is_memslice_scalar_assignment = False
def __init__(self, pos, index, **kw):
    ExprNode.__init__(self, pos, index=index, **kw)
    # Keep the raw index expression around as well — presumably for
    # later introspection; not obviously read in this chunk. TODO confirm.
    self._index = index
def calculate_constant_result(self):
    # Constant-fold base[index] when both children folded to constants.
    self.constant_result = \
        self.base.constant_result[self.index.constant_result]
def compile_time_value(self, denv):
    # Evaluate base[index] at compile time (DEF/IF contexts); report
    # any failure of the subscript itself as a compile-time error.
    base = self.base.compile_time_value(denv)
    index = self.index.compile_time_value(denv)
    try:
        return base[index]
    except Exception, e:
        self.compile_time_value_error(e)
def is_ephemeral(self):
    # An index expression is ephemeral iff its base is.
    return self.base.is_ephemeral()
def is_simple(self):
    # "Simple" means evaluation has no side effects and is cheap to repeat.
    if self.is_buffer_access or self.memslice_index:
        return False
    elif self.memslice_slice:
        return True

    base = self.base
    # Only plain C pointer/array indexing of simple operands is simple.
    return (base.is_simple() and self.index.is_simple()
            and base.type and (base.type.is_ptr or base.type.is_array))
def may_be_none(self):
    # Indexing a C string, or slicing a builtin sequence type, can
    # never produce None; otherwise fall back to the generic answer.
    base_type = self.base.type
    if base_type:
        if base_type.is_string:
            return False
        if isinstance(self.index, SliceNode):
            # slicing!
            if base_type in (bytes_type, str_type, unicode_type,
                             basestring_type, list_type, tuple_type):
                return False
    return ExprNode.may_be_none(self)
def analyse_target_declaration(self, env):
    # Subscripts may be assignment targets; nothing to declare.
    pass
def analyse_as_type(self, env):
    # Try to interpret this subscript as a type: either a templated
    # C++ class (Base[T1, T2]) or a C array type (Base[N]).
    base_type = self.base.analyse_as_type(env)
    if base_type and not base_type.is_pyobject:
        if base_type.is_cpp_class:
            # Template arguments may be a tuple or a single value.
            if isinstance(self.index, TupleNode):
                template_values = self.index.args
            else:
                template_values = [self.index]
            import Nodes
            type_node = Nodes.TemplatedTypeNode(
                pos = self.pos,
                positional_args = template_values,
                keyword_args = None)
            return type_node.analyse(env, base_type = base_type)
        else:
            # C array declaration: the index must fold to a constant size.
            index = self.index.compile_time_value(env)
            if index is not None:
                return PyrexTypes.CArrayType(base_type, int(index))
            error(self.pos, "Array size must be a compile time constant")
    return None
def type_dependencies(self, env):
    # Type inference depends on whatever base and index depend on.
    return self.base.type_dependencies(env) + self.index.type_dependencies(env)
def infer_type(self, env):
    # Infer the result type of base[index] without full analysis,
    # distinguishing slicing from integer indexing.
    base_type = self.base.infer_type(env)
    if isinstance(self.index, SliceNode):
        # slicing!
        if base_type.is_string:
            # sliced C strings must coerce to Python
            return bytes_type
        elif base_type.is_pyunicode_ptr:
            # sliced Py_UNICODE* strings must coerce to Python
            return unicode_type
        elif base_type in (unicode_type, bytes_type, str_type,
                           bytearray_type, list_type, tuple_type):
            # slicing these returns the same type
            return base_type
        else:
            # TODO: Handle buffers (hopefully without too much redundancy).
            return py_object_type

    index_type = self.index.infer_type(env)
    if index_type and index_type.is_int or isinstance(self.index, IntNode):
        # indexing!
        if base_type is unicode_type:
            # Py_UCS4 will automatically coerce to a unicode string
            # if required, so this is safe. We only infer Py_UCS4
            # when the index is a C integer type. Otherwise, we may
            # need to use normal Python item access, in which case
            # it's faster to return the one-char unicode string than
            # to receive it, throw it away, and potentially rebuild it
            # on a subsequent PyObject coercion.
            return PyrexTypes.c_py_ucs4_type
        elif base_type is str_type:
            # always returns str - Py2: bytes, Py3: unicode
            return base_type
        elif base_type is bytearray_type:
            return PyrexTypes.c_uchar_type
        elif isinstance(self.base, BytesNode):
            #if env.global_scope().context.language_level >= 3:
            #    # inferring 'char' can be made to work in Python 3 mode
            #    return PyrexTypes.c_char_type
            # Py2/3 return different types on indexing bytes objects
            return py_object_type
        elif base_type in (tuple_type, list_type):
            # if base is a literal, take a look at its values
            item_type = infer_sequence_item_type(
                env, self.base, self.index, seq_type=base_type)
            if item_type is not None:
                return item_type
        elif base_type.is_ptr or base_type.is_array:
            return base_type.base_type

    if base_type.is_cpp_class:
        # Defer to the class's operator[] overload, probed with
        # lightweight stand-in operands.
        class FakeOperand:
            def __init__(self, **kwds):
                self.__dict__.update(kwds)
        operands = [
            FakeOperand(pos=self.pos, type=base_type),
            FakeOperand(pos=self.pos, type=index_type),
        ]
        index_func = env.lookup_operator('[]', operands)
        if index_func is not None:
            return index_func.type.return_type

    # may be slicing or indexing, we don't know
    if base_type in (unicode_type, str_type):
        # these types always returns their own type on Python indexing/slicing
        return base_type
    else:
        # TODO: Handle buffers (hopefully without too much redundancy).
        return py_object_type
def analyse_types(self, env):
    # Read access: delegate with getting=True.
    return self.analyse_base_and_index_types(env, getting=True)
def analyse_target_types(self, env):
    # Write access: analyse with setting=True, then reject targets
    # that cannot legally be assigned to.
    node = self.analyse_base_and_index_types(env, setting=True)
    if node.type.is_const:
        error(self.pos, "Assignment to const dereference")
    if not node.is_lvalue():
        error(self.pos, "Assignment to non-lvalue of type '%s'" % node.type)
    return node
def analyse_base_and_index_types(self, env, getting=False, setting=False,
                                 analyse_base=True):
    """Analyse base and index, dispatching on the base's type:
    memoryview copy/index/slice, buffer access, Python object
    subscripting, C pointer/array indexing, C++ operator[], or
    indexing of (fused/template) C functions.
    """
    # Note: This might be cleaned up by having IndexNode
    # parsed in a saner way and only construct the tuple if
    # needed.

    # Note that this function must leave IndexNode in a cloneable state.
    # For buffers, self.index is packed out on the initial analysis, and
    # when cloning self.indices is copied.
    self.is_buffer_access = False

    # a[...] = b
    self.is_memslice_copy = False
    # incomplete indexing, Ellipsis indexing or slicing
    self.memslice_slice = False
    # integer indexing
    self.memslice_index = False

    if analyse_base:
        self.base = self.base.analyse_types(env)

    if self.base.type.is_error:
        # Do not visit child tree if base is undeclared to avoid confusing
        # error messages
        self.type = PyrexTypes.error_type
        return self

    is_slice = isinstance(self.index, SliceNode)

    if not env.directives['wraparound']:
        if is_slice:
            check_negative_indices(self.index.start, self.index.stop)
        else:
            check_negative_indices(self.index)

    # Potentially overflowing index value.
    if not is_slice and isinstance(self.index, IntNode) and Utils.long_literal(self.index.value):
        self.index = self.index.coerce_to_pyobject(env)

    is_memslice = self.base.type.is_memoryviewslice

    # Handle the case where base is a literal char* (and we expect a string, not an int)
    if not is_memslice and (isinstance(self.base, BytesNode) or is_slice):
        if self.base.type.is_string or not (self.base.type.is_ptr or self.base.type.is_array):
            self.base = self.base.coerce_to_pyobject(env)

    skip_child_analysis = False
    buffer_access = False

    if self.indices:
        indices = self.indices
    elif isinstance(self.index, TupleNode):
        indices = self.index.args
    else:
        indices = [self.index]

    if (is_memslice and not self.indices and
            isinstance(self.index, EllipsisNode)):
        # Memoryviewslice copying
        self.is_memslice_copy = True

    elif is_memslice:
        # memoryviewslice indexing or slicing
        import MemoryView

        skip_child_analysis = True
        newaxes = [newaxis for newaxis in indices if newaxis.is_none]
        have_slices, indices = MemoryView.unellipsify(indices,
                                                      newaxes,
                                                      self.base.type.ndim)

        self.memslice_index = (not newaxes and
                               len(indices) == self.base.type.ndim)
        axes = []

        index_type = PyrexTypes.c_py_ssize_t_type
        new_indices = []

        if len(indices) - len(newaxes) > self.base.type.ndim:
            self.type = error_type
            error(indices[self.base.type.ndim].pos,
                  "Too many indices specified for type %s" %
                  self.base.type)
            return self

        axis_idx = 0
        for i, index in enumerate(indices[:]):
            index = index.analyse_types(env)
            if not index.is_none:
                # None inserts a new axis, so only real indices consume
                # an axis of the base type.
                access, packing = self.base.type.axes[axis_idx]
                axis_idx += 1

            if isinstance(index, SliceNode):
                self.memslice_slice = True
                if index.step.is_none:
                    axes.append((access, packing))
                else:
                    axes.append((access, 'strided'))

                # Coerce start, stop and step to temps of the right type
                for attr in ('start', 'stop', 'step'):
                    value = getattr(index, attr)
                    if not value.is_none:
                        value = value.coerce_to(index_type, env)
                        #value = value.coerce_to_temp(env)
                        setattr(index, attr, value)
                        new_indices.append(value)

            elif index.is_none:
                self.memslice_slice = True
                new_indices.append(index)
                axes.append(('direct', 'strided'))

            elif index.type.is_int or index.type.is_pyobject:
                if index.type.is_pyobject and not self.warned_untyped_idx:
                    warning(index.pos, "Index should be typed for more "
                                       "efficient access", level=2)
                    # warn once per compilation, hence the class attribute
                    IndexNode.warned_untyped_idx = True

                self.memslice_index = True
                index = index.coerce_to(index_type, env)
                indices[i] = index
                new_indices.append(index)

            else:
                self.type = error_type
                error(index.pos, "Invalid index for memoryview specified")
                return self

        # a mix of slices and indices is a slice overall
        self.memslice_index = self.memslice_index and not self.memslice_slice
        self.original_indices = indices
        # All indices with all start/stop/step for slices.
        # We need to keep this around
        self.indices = new_indices
        self.env = env

    elif self.base.type.is_buffer:
        # Buffer indexing
        if len(indices) == self.base.type.ndim:
            buffer_access = True
            skip_child_analysis = True
            for x in indices:
                x = x.analyse_types(env)
                if not x.type.is_int:
                    buffer_access = False

        if buffer_access and not self.base.type.is_memoryviewslice:
            assert hasattr(self.base, "entry") # Must be a NameNode-like node

    # On cloning, indices is cloned. Otherwise, unpack index into indices
    assert not (buffer_access and isinstance(self.index, CloneNode))

    self.nogil = env.nogil

    if buffer_access or self.memslice_index:
        #if self.base.type.is_memoryviewslice and not self.base.is_name:
        #    self.base = self.base.coerce_to_temp(env)
        self.base = self.base.coerce_to_simple(env)

        self.indices = indices
        self.index = None
        self.type = self.base.type.dtype
        self.is_buffer_access = True
        self.buffer_type = self.base.type #self.base.entry.type

        if getting and self.type.is_pyobject:
            self.is_temp = True

        if setting and self.base.type.is_memoryviewslice:
            self.base.type.writable_needed = True
        elif setting:
            if not self.base.entry.type.writable:
                error(self.pos, "Writing to readonly buffer")
            else:
                self.writable_needed = True
                if self.base.type.is_buffer:
                    self.base.entry.buffer_aux.writable_needed = True

    elif self.is_memslice_copy:
        self.type = self.base.type
        if getting:
            self.memslice_ellipsis_noop = True
        else:
            self.memslice_broadcast = True

    elif self.memslice_slice:
        self.index = None
        self.is_temp = True
        self.use_managed_ref = True

        if not MemoryView.validate_axes(self.pos, axes):
            self.type = error_type
            return self

        self.type = PyrexTypes.MemoryViewSliceType(
            self.base.type.dtype, axes)

        if (self.base.type.is_memoryviewslice and not
                self.base.is_name and not
                self.base.result_in_temp()):
            self.base = self.base.coerce_to_temp(env)

        if setting:
            self.memslice_broadcast = True

    else:
        base_type = self.base.type

        if not base_type.is_cfunction:
            if isinstance(self.index, TupleNode):
                self.index = self.index.analyse_types(
                    env, skip_children=skip_child_analysis)
            elif not skip_child_analysis:
                self.index = self.index.analyse_types(env)
            self.original_index_type = self.index.type

        if base_type.is_unicode_char:
            # we infer Py_UNICODE/Py_UCS4 for unicode strings in some
            # cases, but indexing must still work for them
            if setting:
                warning(self.pos, "cannot assign to Unicode string index", level=1)
            elif self.index.constant_result in (0, -1):
                # uchar[0] => uchar
                return self.base
            self.base = self.base.coerce_to_pyobject(env)
            base_type = self.base.type

        if base_type.is_pyobject:
            if self.index.type.is_int and base_type is not dict_type:
                # Cheap C-level item access is possible when the result
                # does not need an owned temp (known non-negative index
                # or no bounds/wraparound handling required).
                if (getting
                        and (base_type in (list_type, tuple_type, bytearray_type))
                        and (not self.index.type.signed
                             or not env.directives['wraparound']
                             or (isinstance(self.index, IntNode) and
                                 self.index.has_constant_result() and self.index.constant_result >= 0))
                        and not env.directives['boundscheck']):
                    self.is_temp = 0
                else:
                    self.is_temp = 1
                self.index = self.index.coerce_to(PyrexTypes.c_py_ssize_t_type, env).coerce_to_simple(env)
                self.original_index_type.create_to_py_utility_code(env)
            else:
                self.index = self.index.coerce_to_pyobject(env)
                self.is_temp = 1

            if self.index.type.is_int and base_type is unicode_type:
                # Py_UNICODE/Py_UCS4 will automatically coerce to a unicode string
                # if required, so this is fast and safe
                self.type = PyrexTypes.c_py_ucs4_type
            elif self.index.type.is_int and base_type is bytearray_type:
                if setting:
                    self.type = PyrexTypes.c_uchar_type
                else:
                    # not using 'uchar' to enable fast and safe error reporting as '-1'
                    self.type = PyrexTypes.c_int_type
            elif is_slice and base_type in (bytes_type, str_type, unicode_type, list_type, tuple_type):
                self.type = base_type
            else:
                item_type = None
                if base_type in (list_type, tuple_type) and self.index.type.is_int:
                    item_type = infer_sequence_item_type(
                        env, self.base, self.index, seq_type=base_type)
                if item_type is None:
                    item_type = py_object_type
                self.type = item_type
                if base_type in (list_type, tuple_type, dict_type):
                    # do the None check explicitly (not in a helper) to allow optimising it away
                    self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
        else:
            if base_type.is_ptr or base_type.is_array:
                self.type = base_type.base_type
                if is_slice:
                    self.type = base_type
                elif self.index.type.is_pyobject:
                    self.index = self.index.coerce_to(
                        PyrexTypes.c_py_ssize_t_type, env)
                elif not self.index.type.is_int:
                    error(self.pos,
                          "Invalid index type '%s'" %
                          self.index.type)
            elif base_type.is_cpp_class:
                function = env.lookup_operator("[]", [self.base, self.index])
                if function is None:
                    error(self.pos, "Indexing '%s' not supported for index type '%s'" % (base_type, self.index.type))
                    self.type = PyrexTypes.error_type
                    self.result_code = "<error>"
                    return self
                func_type = function.type
                if func_type.is_ptr:
                    func_type = func_type.base_type
                self.index = self.index.coerce_to(func_type.args[0].type, env)
                self.type = func_type.return_type
                if setting and not func_type.return_type.is_reference:
                    error(self.pos, "Can't set non-reference result '%s'" % self.type)
            elif base_type.is_cfunction:
                if base_type.is_fused:
                    self.parse_indexed_fused_cdef(env)
                else:
                    # Indexing a template function with explicit types.
                    self.type_indices = self.parse_index_as_types(env)
                    if base_type.templates is None:
                        error(self.pos, "Can only parameterize template functions.")
                    elif len(base_type.templates) != len(self.type_indices):
                        error(self.pos, "Wrong number of template arguments: expected %s, got %s" % (
                            (len(base_type.templates), len(self.type_indices))))
                    self.type = base_type.specialize(dict(zip(base_type.templates, self.type_indices)))
            else:
                error(self.pos,
                      "Attempting to index non-array type '%s'" %
                      base_type)
                self.type = PyrexTypes.error_type

    self.wrap_in_nonecheck_node(env, getting)
    return self
def wrap_in_nonecheck_node(self, env, getting):
    # Guard against a None base when the 'nonecheck' directive is on
    # and the base can actually be None.
    if not env.directives['nonecheck'] or not self.base.may_be_none():
        return

    if self.base.type.is_memoryviewslice:
        if self.is_memslice_copy and not getting:
            msg = "Cannot assign to None memoryview slice"
        elif self.memslice_slice:
            msg = "Cannot slice None memoryview slice"
        else:
            msg = "Cannot index None memoryview slice"
    else:
        msg = "'NoneType' object is not subscriptable"

    self.base = self.base.as_none_safe_node(msg)
def parse_index_as_types(self, env, required=True):
    """Interpret the index (or each element of a tuple index) as a type.

    Returns the list of types, or None if some element is not a type
    (raising a compile error in that case when 'required' is true).
    """
    if isinstance(self.index, TupleNode):
        indices = self.index.args
    else:
        indices = [self.index]

    type_indices = []
    for index in indices:
        type_indices.append(index.analyse_as_type(env))
        if type_indices[-1] is None:
            if required:
                error(index.pos, "not parsable as a type")
            return None

    return type_indices
def parse_indexed_fused_cdef(self, env):
    """
    Interpret fused_cdef_func[specific_type1, ...]

    Note that if this method is called, we are an indexed cdef function
    with fused argument types, and this IndexNode will be replaced by the
    NameNode with specific entry just after analysis of expressions by
    AnalyseExpressionsTransform.
    """
    self.type = PyrexTypes.error_type

    self.is_fused_index = True

    base_type = self.base.type
    specific_types = []
    positions = []

    if self.index.is_name or self.index.is_attribute:
        positions.append(self.index.pos)
    elif isinstance(self.index, TupleNode):
        for arg in self.index.args:
            positions.append(arg.pos)

    specific_types = self.parse_index_as_types(env, required=False)

    if specific_types is None:
        # The index is not a (tuple of) type(s): this must be a cpdef
        # function indexed with Python objects instead.
        self.index = self.index.analyse_types(env)

        if not self.base.entry.as_variable:
            error(self.pos, "Can only index fused functions with types")
        else:
            # A cpdef function indexed with Python objects
            self.base.entry = self.entry = self.base.entry.as_variable
            self.base.type = self.type = self.entry.type

            self.base.is_temp = True
            self.is_temp = True

            self.entry.used = True

        self.is_fused_index = False
        return

    for i, type in enumerate(specific_types):
        specific_types[i] = type.specialize_fused(env)

    fused_types = base_type.get_fused_types()
    if len(specific_types) > len(fused_types):
        return error(self.pos, "Too many types specified")
    elif len(specific_types) < len(fused_types):
        t = fused_types[len(specific_types)]
        return error(self.pos, "Not enough types specified to specialize "
                               "the function, %s is still fused" % t)

    # See if our index types form valid specializations
    for pos, specific_type, fused_type in zip(positions,
                                              specific_types,
                                              fused_types):
        if not Utils.any([specific_type.same_as(t)
                          for t in fused_type.types]):
            return error(pos, "Type not in fused type")

        if specific_type is None or specific_type.is_error:
            return

    fused_to_specific = dict(zip(fused_types, specific_types))
    type = base_type.specialize(fused_to_specific)

    if type.is_fused:
        # Only partially specific, this is invalid
        error(self.pos,
              "Index operation makes function only partially specific")
    else:
        # Fully specific, find the signature with the specialized entry
        for signature in self.base.type.get_all_specialized_function_types():
            if type.same_as(signature):
                self.type = signature

                if self.base.is_attribute:
                    # Pretend to be a normal attribute, for cdef extension
                    # methods
                    self.entry = signature.entry
                    self.is_attribute = True
                    self.obj = self.base.obj

                self.type.entry.used = True
                self.base.type = signature
                self.base.entry = signature.entry

                break
        else:
            # This is a bug
            raise InternalError("Couldn't find the right signature")
gil_message = "Indexing Python object"

def nogil_check(self, env):
    # Buffer/memoryview element access is allowed without the GIL as
    # long as the element type is not a Python object.
    if self.is_buffer_access or self.memslice_index or self.memslice_slice:
        if not self.memslice_slice and env.directives['boundscheck']:
            # error(self.pos, "Cannot check buffer index bounds without gil; "
            #                 "use boundscheck(False) directive")
            warning(self.pos, "Use boundscheck(False) for faster access",
                    level=1)
        if self.type.is_pyobject:
            error(self.pos, "Cannot access buffer with object dtype without gil")
        return
    super(IndexNode, self).nogil_check(env)
def check_const_addr(self):
    # The address is constant if the base's address and the index are.
    return self.base.check_const_addr() and self.index.check_const()
def is_lvalue(self):
    # NOTE: references currently have both is_reference and is_ptr
    # set. Since pointers and references have different lvalue
    # rules, we must be careful to separate the two.
    if self.type.is_reference:
        if self.type.ref_base_type.is_array:
            # fixed-sized arrays aren't l-values
            return False
    elif self.type.is_ptr:
        # non-const pointers can always be reassigned
        return True
    elif self.type.is_array:
        # fixed-sized arrays aren't l-values
        return False

    # Just about everything else returned by the index operator
    # can be an lvalue.
    return True
def calculate_result_code(self):
    # Produce the C expression for this subscript, specialized per base
    # type (buffer, memslice copy, builtin sequence, template function,
    # plain C indexing).
    if self.is_buffer_access:
        return "(*%s)" % self.buffer_ptr_code
    elif self.is_memslice_copy:
        return self.base.result()
    elif self.base.type in (list_type, tuple_type, bytearray_type):
        if self.base.type is list_type:
            index_code = "PyList_GET_ITEM(%s, %s)"
        elif self.base.type is tuple_type:
            index_code = "PyTuple_GET_ITEM(%s, %s)"
        elif self.base.type is bytearray_type:
            index_code = "((unsigned char)(PyByteArray_AS_STRING(%s)[%s]))"
        else:
            assert False, "unexpected base type in indexing: %s" % self.base.type
    elif self.base.type.is_cfunction:
        # Template function specialization: func<T1,T2>
        return "%s<%s>" % (
            self.base.result(),
            ",".join([param.declaration_code("") for param in self.type_indices]))
    else:
        if (self.type.is_ptr or self.type.is_array) and self.type == self.base.type:
            error(self.pos, "Invalid use of pointer slice")
            return
        index_code = "(%s[%s])"
    return index_code % (self.base.result(), self.index.result())
def extra_index_params(self, code):
    # Extra arguments passed to the __Pyx_{Get,Set,Del}ItemInt utility
    # functions (index type, signedness, converter, and the list /
    # wraparound / boundscheck flags); empty for object indices.
    if self.index.type.is_int:
        is_list = self.base.type is list_type
        wraparound = (
            bool(code.globalstate.directives['wraparound']) and
            self.original_index_type.signed and
            not (isinstance(self.index.constant_result, (int, long))
                 and self.index.constant_result >= 0))
        boundscheck = bool(code.globalstate.directives['boundscheck'])
        return ", %s, %d, %s, %d, %d, %d" % (
            self.original_index_type.declaration_code(""),
            self.original_index_type.signed and 1 or 0,
            self.original_index_type.to_py_function,
            is_list, wraparound, boundscheck)
    else:
        return ""
def generate_subexpr_evaluation_code(self, code):
    # Evaluate the base, then either the single index or the unpacked
    # index list; type indices generate no runtime code.
    self.base.generate_evaluation_code(code)
    if self.type_indices is not None:
        pass
    elif self.indices is None:
        self.index.generate_evaluation_code(code)
    else:
        for i in self.indices:
            i.generate_evaluation_code(code)
def generate_subexpr_disposal_code(self, code):
    # Mirror of generate_subexpr_evaluation_code for disposal.
    self.base.generate_disposal_code(code)
    if self.type_indices is not None:
        pass
    elif self.indices is None:
        self.index.generate_disposal_code(code)
    else:
        for i in self.indices:
            i.generate_disposal_code(code)
def free_subexpr_temps(self, code):
    # Release temps held by the base and the index/indices.
    self.base.free_temps(code)
    if self.indices is None:
        self.index.free_temps(code)
    else:
        for i in self.indices:
            i.free_temps(code)
def generate_result_code(self, code):
    # Emit the C code that loads the indexed value into the result,
    # specialized per access kind (buffer, memoryview slice, or one of
    # the __Pyx_GetItem* / PyObject_GetItem helpers).
    if self.is_buffer_access or self.memslice_index:
        buffer_entry, self.buffer_ptr_code = self.buffer_lookup_code(code)
        if self.type.is_pyobject:
            # is_temp is True, so must pull out value and incref it.
            # NOTE: object temporary results for nodes are declared
            #       as PyObject *, so we need a cast
            code.putln("%s = (PyObject *) *%s;" % (self.temp_code,
                                                   self.buffer_ptr_code))
            code.putln("__Pyx_INCREF((PyObject*)%s);" % self.temp_code)

    elif self.memslice_slice:
        self.put_memoryviewslice_slice_code(code)

    elif self.is_temp:
        if self.type.is_pyobject:
            error_value = 'NULL'
            if self.index.type.is_int:
                if self.base.type is list_type:
                    function = "__Pyx_GetItemInt_List"
                elif self.base.type is tuple_type:
                    function = "__Pyx_GetItemInt_Tuple"
                else:
                    function = "__Pyx_GetItemInt"
                code.globalstate.use_utility_code(
                    TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c"))
            else:
                if self.base.type is dict_type:
                    function = "__Pyx_PyDict_GetItem"
                    code.globalstate.use_utility_code(
                        UtilityCode.load_cached("DictGetItem", "ObjectHandling.c"))
                else:
                    function = "PyObject_GetItem"
        elif self.type.is_unicode_char and self.base.type is unicode_type:
            assert self.index.type.is_int
            function = "__Pyx_GetItemInt_Unicode"
            error_value = '(Py_UCS4)-1'
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("GetItemIntUnicode", "StringTools.c"))
        elif self.base.type is bytearray_type:
            assert self.index.type.is_int
            assert self.type.is_int
            function = "__Pyx_GetItemInt_ByteArray"
            error_value = '-1'
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("GetItemIntByteArray", "StringTools.c"))
        else:
            assert False, "unexpected type %s and base type %s for indexing" % (
                self.type, self.base.type)

        if self.index.type.is_int:
            index_code = self.index.result()
        else:
            index_code = self.index.py_result()

        code.putln(
            "%s = %s(%s, %s%s); if (unlikely(%s == %s)) %s;" % (
                self.result(),
                function,
                self.base.py_result(),
                index_code,
                self.extra_index_params(code),
                self.result(),
                error_value,
                code.error_goto(self.pos)))
        if self.type.is_pyobject:
            code.put_gotref(self.py_result())
def generate_setitem_code(self, value_code, code):
    # Emit C code for base[index] = value, picking the matching
    # __Pyx_SetItemInt* / PyDict_SetItem / PyObject_SetItem helper.
    if self.index.type.is_int:
        if self.base.type is bytearray_type:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("SetItemIntByteArray", "StringTools.c"))
            function = "__Pyx_SetItemInt_ByteArray"
        else:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("SetItemInt", "ObjectHandling.c"))
            function = "__Pyx_SetItemInt"
        index_code = self.index.result()
    else:
        index_code = self.index.py_result()
        if self.base.type is dict_type:
            function = "PyDict_SetItem"
        # It would seem that we could specialized lists/tuples, but that
        # shouldn't happen here.
        # Both PyList_SetItem() and PyTuple_SetItem() take a Py_ssize_t as
        # index instead of an object, and bad conversion here would give
        # the wrong exception. Also, tuples are supposed to be immutable,
        # and raise a TypeError when trying to set their entries
        # (PyTuple_SetItem() is for creating new tuples from scratch).
        else:
            function = "PyObject_SetItem"
    code.putln(
        "if (unlikely(%s(%s, %s, %s%s) < 0)) %s" % (
            function,
            self.base.py_result(),
            index_code,
            value_code,
            self.extra_index_params(code),
            code.error_goto(self.pos)))
def generate_buffer_setitem_code(self, rhs, code, op=""):
    # Used from generate_assignment_code and InPlaceAssignmentNode
    buffer_entry, ptrexpr = self.buffer_lookup_code(code)

    if self.buffer_type.dtype.is_pyobject:
        # Must manage refcounts. Decref what is already there
        # and incref what we put in.
        ptr = code.funcstate.allocate_temp(buffer_entry.buf_ptr_type,
                                           manage_ref=False)
        rhs_code = rhs.result()
        code.putln("%s = %s;" % (ptr, ptrexpr))
        code.put_gotref("*%s" % ptr)
        code.putln("__Pyx_INCREF(%s); __Pyx_DECREF(*%s);" % (
            rhs_code, ptr))
        code.putln("*%s %s= %s;" % (ptr, op, rhs_code))
        code.put_giveref("*%s" % ptr)
        code.funcstate.release_temp(ptr)
    else:
        # Simple case
        code.putln("*%s %s= %s;" % (ptrexpr, op, rhs.result()))
def generate_assignment_code(self, rhs, code):
    # Memoryview slice targets evaluate the whole node; other targets
    # only evaluate their subexpressions.
    generate_evaluation_code = (self.is_memslice_scalar_assignment or
                                self.memslice_slice)
    if generate_evaluation_code:
        self.generate_evaluation_code(code)
    else:
        self.generate_subexpr_evaluation_code(code)

    if self.is_buffer_access or self.memslice_index:
        self.generate_buffer_setitem_code(rhs, code)
    elif self.is_memslice_scalar_assignment:
        self.generate_memoryviewslice_assign_scalar_code(rhs, code)
    elif self.memslice_slice or self.is_memslice_copy:
        self.generate_memoryviewslice_setslice_code(rhs, code)
    elif self.type.is_pyobject:
        self.generate_setitem_code(rhs.py_result(), code)
    elif self.base.type is bytearray_type:
        # bytearray elements need a range check / cast on the value
        value_code = self._check_byte_value(code, rhs)
        self.generate_setitem_code(value_code, code)
    else:
        code.putln(
            "%s = %s;" % (
                self.result(), rhs.result()))

    if generate_evaluation_code:
        self.generate_disposal_code(code)
    else:
        self.generate_subexpr_disposal_code(code)
        self.free_subexpr_temps(code)

    rhs.generate_disposal_code(code)
    rhs.free_temps(code)
def _check_byte_value(self, code, rhs):
    # TODO: should we do this generally on downcasts, or just here?
    # Validate (and cast) a value assigned into a bytearray element:
    # it must fit in range(0, 256).
    assert rhs.type.is_int, repr(rhs.type)
    value_code = rhs.result()
    if rhs.has_constant_result():
        if 0 <= rhs.constant_result < 256:
            return value_code
        needs_cast = True  # make at least the C compiler happy
        warning(rhs.pos,
                "value outside of range(0, 256)"
                " when assigning to byte: %s" % rhs.constant_result,
                level=1)
    else:
        needs_cast = rhs.type != PyrexTypes.c_uchar_type

    if not self.nogil:
        conditions = []
        if rhs.is_literal or rhs.type.signed:
            conditions.append('%s < 0' % value_code)
        # unsigned char temps are already range-limited
        if (rhs.is_literal or not
                (rhs.is_temp and rhs.type in (
                    PyrexTypes.c_uchar_type, PyrexTypes.c_char_type,
                    PyrexTypes.c_schar_type))):
            conditions.append('%s > 255' % value_code)
        if conditions:
            code.putln("if (unlikely(%s)) {" % ' || '.join(conditions))
            code.putln(
                'PyErr_SetString(PyExc_ValueError,'
                ' "byte must be in range(0, 256)"); %s' %
                code.error_goto(self.pos))
            code.putln("}")

    if needs_cast:
        value_code = '((unsigned char)%s)' % value_code
    return value_code
def generate_deletion_code(self, code, ignore_nonexisting=False):
    # Emit C code for 'del base[index]'.
    self.generate_subexpr_evaluation_code(code)
    #if self.type.is_pyobject:
    if self.index.type.is_int:
        function = "__Pyx_DelItemInt"
        index_code = self.index.result()
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("DelItemInt", "ObjectHandling.c"))
    else:
        index_code = self.index.py_result()
        if self.base.type is dict_type:
            function = "PyDict_DelItem"
        else:
            function = "PyObject_DelItem"
    code.putln(
        "if (%s(%s, %s%s) < 0) %s" % (
            function,
            self.base.py_result(),
            index_code,
            self.extra_index_params(code),
            code.error_goto(self.pos)))
    self.generate_subexpr_disposal_code(code)
    self.free_subexpr_temps(code)
def buffer_entry(self):
    # Build the Buffer/MemoryView entry wrapper used for code
    # generation, synthesizing a Symtab entry for non-name bases.
    import Buffer, MemoryView

    base = self.base
    if self.base.is_nonecheck:
        base = base.arg

    if base.is_name:
        entry = base.entry
    else:
        # SimpleCallNode is_simple is not consistent with coerce_to_simple
        assert base.is_simple() or base.is_temp
        cname = base.result()
        entry = Symtab.Entry(cname, cname, self.base.type, self.base.pos)

    if entry.type.is_buffer:
        buffer_entry = Buffer.BufferEntry(entry)
    else:
        buffer_entry = MemoryView.MemoryViewSliceBufferEntry(entry)

    return buffer_entry
def buffer_lookup_code(self, code):
    "ndarray[1, 2, 3] and memslice[1, 2, 3]"
    # Assign indices to temps
    index_temps = [code.funcstate.allocate_temp(i.type, manage_ref=False)
                   for i in self.indices]

    for temp, index in zip(index_temps, self.indices):
        code.putln("%s = %s;" % (temp, index.result()))

    # Generate buffer access code using these temps
    import Buffer
    buffer_entry = self.buffer_entry()

    if buffer_entry.type.is_buffer:
        negative_indices = buffer_entry.type.negative_indices
    else:
        negative_indices = Buffer.buffer_defaults['negative_indices']

    return buffer_entry, Buffer.put_buffer_lookup_code(
        entry=buffer_entry,
        index_signeds=[i.type.signed for i in self.indices],
        index_cnames=index_temps,
        directives=code.globalstate.directives,
        pos=self.pos, code=code,
        negative_indices=negative_indices,
        in_nogil_context=self.in_nogil_context)
def put_memoryviewslice_slice_code(self, code):
    "memslice[:]"
    buffer_entry = self.buffer_entry()
    have_gil = not self.in_nogil_context

    # Py2/Py3 compatible way to advance an iterator.
    if sys.version_info < (3,):
        def next_(it):
            return it.next()
    else:
        next_ = next

    have_slices = False
    # self.indices holds only the analysed, non-None slice bounds and
    # plain indices in order; redistribute them back onto the original
    # index nodes.
    it = iter(self.indices)
    for index in self.original_indices:
        is_slice = isinstance(index, SliceNode)
        have_slices = have_slices or is_slice
        if is_slice:
            if not index.start.is_none:
                index.start = next_(it)
            if not index.stop.is_none:
                index.stop = next_(it)
            if not index.step.is_none:
                index.step = next_(it)
        else:
            next_(it)

    assert not list(it)

    buffer_entry.generate_buffer_slice_code(code, self.original_indices,
                                            self.result(),
                                            have_gil=have_gil,
                                            have_slices=have_slices,
                                            directives=code.globalstate.directives)
def generate_memoryviewslice_setslice_code(self, rhs, code):
    "memslice1[...] = memslice2 or memslice1[:] = memslice2"
    import MemoryView
    MemoryView.copy_broadcast_memview_src_to_dst(rhs, self, code)
def generate_memoryviewslice_assign_scalar_code(self, rhs, code):
    "memslice1[...] = 0.0 or memslice1[:] = 0.0"
    import MemoryView
    MemoryView.assign_scalar(self, rhs, code)
class SliceIndexNode(ExprNode):
    # 2-element slice indexing
    #
    # base ExprNode
    # start ExprNode or None
    # stop ExprNode or None
    # slice ExprNode or None constant slice object

    subexprs = ['base', 'start', 'stop', 'slice']

    # pre-built constant slice object, when available
    slice = None
def infer_type(self, env):
base_type = self.base.infer_type(env)
if base_type.is_string or base_type.is_cpp_class:
return bytes_type
elif base_type.is_pyunicode_ptr:
return unicode_type
elif base_type in (bytes_type, str_type, unicode_type,
basestring_type, list_type, tuple_type):
return base_type
elif base_type.is_ptr or base_type.is_array:
return PyrexTypes.c_array_type(base_type.base_type, None)
return py_object_type
def may_be_none(self):
base_type = self.base.type
if base_type:
if base_type.is_string:
return False
if base_type in (bytes_type, str_type, unicode_type,
basestring_type, list_type, tuple_type):
return False
return ExprNode.may_be_none(self)
def calculate_constant_result(self):
if self.start is None:
start = None
else:
start = self.start.constant_result
if self.stop is None:
stop = None
else:
stop = self.stop.constant_result
self.constant_result = self.base.constant_result[start:stop]
def compile_time_value(self, denv):
base = self.base.compile_time_value(denv)
if self.start is None:
start = 0
else:
start = self.start.compile_time_value(denv)
if self.stop is None:
stop = None
else:
stop = self.stop.compile_time_value(denv)
try:
return base[start:stop]
except Exception, e:
self.compile_time_value_error(e)
def analyse_target_declaration(self, env):
pass
def analyse_target_types(self, env):
node = self.analyse_types(env, getting=False)
# when assigning, we must accept any Python type
if node.type.is_pyobject:
node.type = py_object_type
return node
def analyse_types(self, env, getting=True):
self.base = self.base.analyse_types(env)
if self.base.type.is_memoryviewslice:
none_node = NoneNode(self.pos)
index = SliceNode(self.pos,
start=self.start or none_node,
stop=self.stop or none_node,
step=none_node)
index_node = IndexNode(self.pos, index, base=self.base)
return index_node.analyse_base_and_index_types(
env, getting=getting, setting=not getting,
analyse_base=False)
if self.start:
self.start = self.start.analyse_types(env)
if self.stop:
self.stop = self.stop.analyse_types(env)
if not env.directives['wraparound']:
check_negative_indices(self.start, self.stop)
base_type = self.base.type
if base_type.is_string or base_type.is_cpp_string:
self.type = default_str_type(env)
elif base_type.is_pyunicode_ptr:
self.type = unicode_type
elif base_type.is_ptr:
self.type = base_type
elif base_type.is_array:
# we need a ptr type here instead of an array type, as
# array types can result in invalid type casts in the C
# code
self.type = PyrexTypes.CPtrType(base_type.base_type)
else:
self.base = self.base.coerce_to_pyobject(env)
self.type = py_object_type
if base_type.is_builtin_type:
# slicing builtin types returns something of the same type
self.type = base_type
self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
if self.type is py_object_type:
if (not self.start or self.start.is_literal) and \
(not self.stop or self.stop.is_literal):
# cache the constant slice object, in case we need it
none_node = NoneNode(self.pos)
self.slice = SliceNode(
self.pos,
start=copy.deepcopy(self.start or none_node),
stop=copy.deepcopy(self.stop or none_node),
step=none_node
).analyse_types(env)
else:
c_int = PyrexTypes.c_py_ssize_t_type
if self.start:
self.start = self.start.coerce_to(c_int, env)
if self.stop:
self.stop = self.stop.coerce_to(c_int, env)
self.is_temp = 1
return self
nogil_check = Node.gil_error
gil_message = "Slicing Python object"
get_slice_utility_code = TempitaUtilityCode.load(
"SliceObject", "ObjectHandling.c", context={'access': 'Get'})
set_slice_utility_code = TempitaUtilityCode.load(
"SliceObject", "ObjectHandling.c", context={'access': 'Set'})
def coerce_to(self, dst_type, env):
if ((self.base.type.is_string or self.base.type.is_cpp_string)
and dst_type in (bytes_type, bytearray_type, str_type, unicode_type)):
if (dst_type not in (bytes_type, bytearray_type)
and not env.directives['c_string_encoding']):
error(self.pos,
"default encoding required for conversion from '%s' to '%s'" %
(self.base.type, dst_type))
self.type = dst_type
return super(SliceIndexNode, self).coerce_to(dst_type, env)
def generate_result_code(self, code):
if not self.type.is_pyobject:
error(self.pos,
"Slicing is not currently supported for '%s'." % self.type)
return
base_result = self.base.result()
result = self.result()
start_code = self.start_code()
stop_code = self.stop_code()
if self.base.type.is_string:
base_result = self.base.result()
if self.base.type != PyrexTypes.c_char_ptr_type:
base_result = '((const char*)%s)' % base_result
if self.type is bytearray_type:
type_name = 'ByteArray'
else:
type_name = self.type.name.title()
if self.stop is None:
code.putln(
"%s = __Pyx_Py%s_FromString(%s + %s); %s" % (
result,
type_name,
base_result,
start_code,
code.error_goto_if_null(result, self.pos)))
else:
code.putln(
"%s = __Pyx_Py%s_FromStringAndSize(%s + %s, %s - %s); %s" % (
result,
type_name,
base_result,
start_code,
stop_code,
start_code,
code.error_goto_if_null(result, self.pos)))
elif self.base.type.is_pyunicode_ptr:
base_result = self.base.result()
if self.base.type != PyrexTypes.c_py_unicode_ptr_type:
base_result = '((const Py_UNICODE*)%s)' % base_result
if self.stop is None:
code.putln(
"%s = __Pyx_PyUnicode_FromUnicode(%s + %s); %s" % (
result,
base_result,
start_code,
code.error_goto_if_null(result, self.pos)))
else:
code.putln(
"%s = __Pyx_PyUnicode_FromUnicodeAndLength(%s + %s, %s - %s); %s" % (
result,
base_result,
start_code,
stop_code,
start_code,
code.error_goto_if_null(result, self.pos)))
elif self.base.type is unicode_type:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyUnicode_Substring", "StringTools.c"))
code.putln(
"%s = __Pyx_PyUnicode_Substring(%s, %s, %s); %s" % (
result,
base_result,
start_code,
stop_code,
code.error_goto_if_null(result, self.pos)))
elif self.type is py_object_type:
code.globalstate.use_utility_code(self.get_slice_utility_code)
(has_c_start, has_c_stop, c_start, c_stop,
py_start, py_stop, py_slice) = self.get_slice_config()
code.putln(
"%s = __Pyx_PyObject_GetSlice(%s, %s, %s, %s, %s, %s, %d, %d, %d); %s" % (
result,
self.base.py_result(),
c_start, c_stop,
py_start, py_stop, py_slice,
has_c_start, has_c_stop,
bool(code.globalstate.directives['wraparound']),
code.error_goto_if_null(result, self.pos)))
else:
if self.base.type is list_type:
code.globalstate.use_utility_code(
TempitaUtilityCode.load_cached("SliceTupleAndList", "ObjectHandling.c"))
cfunc = '__Pyx_PyList_GetSlice'
elif self.base.type is tuple_type:
code.globalstate.use_utility_code(
TempitaUtilityCode.load_cached("SliceTupleAndList", "ObjectHandling.c"))
cfunc = '__Pyx_PyTuple_GetSlice'
else:
cfunc = '__Pyx_PySequence_GetSlice'
code.putln(
"%s = %s(%s, %s, %s); %s" % (
result,
cfunc,
self.base.py_result(),
start_code,
stop_code,
code.error_goto_if_null(result, self.pos)))
code.put_gotref(self.py_result())
def generate_assignment_code(self, rhs, code):
self.generate_subexpr_evaluation_code(code)
if self.type.is_pyobject:
code.globalstate.use_utility_code(self.set_slice_utility_code)
(has_c_start, has_c_stop, c_start, c_stop,
py_start, py_stop, py_slice) = self.get_slice_config()
code.put_error_if_neg(self.pos,
"__Pyx_PyObject_SetSlice(%s, %s, %s, %s, %s, %s, %s, %d, %d, %d)" % (
self.base.py_result(),
rhs.py_result(),
c_start, c_stop,
py_start, py_stop, py_slice,
has_c_start, has_c_stop,
bool(code.globalstate.directives['wraparound'])))
else:
start_offset = ''
if self.start:
start_offset = self.start_code()
if start_offset == '0':
start_offset = ''
else:
start_offset += '+'
if rhs.type.is_array:
array_length = rhs.type.size
self.generate_slice_guard_code(code, array_length)
else:
error(self.pos,
"Slice assignments from pointers are not yet supported.")
# FIXME: fix the array size according to start/stop
array_length = self.base.type.size
for i in range(array_length):
code.putln("%s[%s%s] = %s[%d];" % (
self.base.result(), start_offset, i,
rhs.result(), i))
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
def generate_deletion_code(self, code, ignore_nonexisting=False):
if not self.base.type.is_pyobject:
error(self.pos,
"Deleting slices is only supported for Python types, not '%s'." % self.type)
return
self.generate_subexpr_evaluation_code(code)
code.globalstate.use_utility_code(self.set_slice_utility_code)
(has_c_start, has_c_stop, c_start, c_stop,
py_start, py_stop, py_slice) = self.get_slice_config()
code.put_error_if_neg(self.pos,
"__Pyx_PyObject_DelSlice(%s, %s, %s, %s, %s, %s, %d, %d, %d)" % (
self.base.py_result(),
c_start, c_stop,
py_start, py_stop, py_slice,
has_c_start, has_c_stop,
bool(code.globalstate.directives['wraparound'])))
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
def get_slice_config(self):
has_c_start, c_start, py_start = False, '0', 'NULL'
if self.start:
has_c_start = not self.start.type.is_pyobject
if has_c_start:
c_start = self.start.result()
else:
py_start = '&%s' % self.start.py_result()
has_c_stop, c_stop, py_stop = False, '0', 'NULL'
if self.stop:
has_c_stop = not self.stop.type.is_pyobject
if has_c_stop:
c_stop = self.stop.result()
else:
py_stop = '&%s' % self.stop.py_result()
py_slice = self.slice and '&%s' % self.slice.py_result() or 'NULL'
return (has_c_start, has_c_stop, c_start, c_stop,
py_start, py_stop, py_slice)
def generate_slice_guard_code(self, code, target_size):
if not self.base.type.is_array:
return
slice_size = self.base.type.size
start = stop = None
if self.stop:
stop = self.stop.result()
try:
stop = int(stop)
if stop < 0:
slice_size = self.base.type.size + stop
else:
slice_size = stop
stop = None
except ValueError:
pass
if self.start:
start = self.start.result()
try:
start = int(start)
if start < 0:
start = self.base.type.size + start
slice_size -= start
start = None
except ValueError:
pass
check = None
if slice_size < 0:
if target_size > 0:
error(self.pos, "Assignment to empty slice.")
elif start is None and stop is None:
# we know the exact slice length
if target_size != slice_size:
error(self.pos, "Assignment to slice of wrong length, expected %d, got %d" % (
slice_size, target_size))
elif start is not None:
if stop is None:
stop = slice_size
check = "(%s)-(%s)" % (stop, start)
else: # stop is not None:
check = stop
if check:
code.putln("if (unlikely((%s) != %d)) {" % (check, target_size))
code.putln('PyErr_Format(PyExc_ValueError, "Assignment to slice of wrong length, expected %%" CYTHON_FORMAT_SSIZE_T "d, got %%" CYTHON_FORMAT_SSIZE_T "d", (Py_ssize_t)%d, (Py_ssize_t)(%s));' % (
target_size, check))
code.putln(code.error_goto(self.pos))
code.putln("}")
def start_code(self):
if self.start:
return self.start.result()
else:
return "0"
def stop_code(self):
if self.stop:
return self.stop.result()
elif self.base.type.is_array:
return self.base.type.size
else:
return "PY_SSIZE_T_MAX"
def calculate_result_code(self):
# self.result() is not used, but this method must exist
return "<unused>"
class SliceNode(ExprNode):
    # start:stop:step in subscript list
    #
    # start ExprNode
    # stop ExprNode
    # step ExprNode
    subexprs = ['start', 'stop', 'step']
    type = slice_type
    is_temp = 1
    def calculate_constant_result(self):
        """Build a real slice() from constant components."""
        self.constant_result = slice(
            self.start.constant_result,
            self.stop.constant_result,
            self.step.constant_result)
    def compile_time_value(self, denv):
        """Evaluate to a slice object in a compile-time (DEF) environment."""
        start = self.start.compile_time_value(denv)
        stop = self.stop.compile_time_value(denv)
        step = self.step.compile_time_value(denv)
        try:
            return slice(start, stop, step)
        except Exception, e:
            self.compile_time_value_error(e)
    def may_be_none(self):
        # This node constructs a slice object, which is never None.
        return False
    def analyse_types(self, env):
        """Coerce all three components to Python objects.
        A slice with only literal components becomes a cached module-level
        constant instead of a per-evaluation temporary.
        """
        start = self.start.analyse_types(env)
        stop = self.stop.analyse_types(env)
        step = self.step.analyse_types(env)
        self.start = start.coerce_to_pyobject(env)
        self.stop = stop.coerce_to_pyobject(env)
        self.step = step.coerce_to_pyobject(env)
        if self.start.is_literal and self.stop.is_literal and self.step.is_literal:
            self.is_literal = True
            self.is_temp = False
        return self
    gil_message = "Constructing Python slice object"
    def calculate_result_code(self):
        return self.result_code
    def generate_result_code(self, code):
        """Emit PySlice_New(); literal slices are written through the
        cached-constants writer so they are built once at module init."""
        if self.is_literal:
            self.result_code = code.get_py_const(py_object_type, 'slice', cleanup_level=2)
            # Redirect the remaining output into the constants section.
            code = code.get_cached_constants_writer()
            code.mark_pos(self.pos)
        code.putln(
            "%s = PySlice_New(%s, %s, %s); %s" % (
                self.result(),
                self.start.py_result(),
                self.stop.py_result(),
                self.step.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
        if self.is_literal:
            code.put_giveref(self.py_result())
    def __deepcopy__(self, memo):
        """
        There is a copy bug in python 2.4 for slice objects.
        """
        return SliceNode(
            self.pos,
            start=copy.deepcopy(self.start, memo),
            stop=copy.deepcopy(self.stop, memo),
            step=copy.deepcopy(self.step, memo),
            is_temp=self.is_temp,
            is_literal=self.is_literal,
            constant_result=self.constant_result)
class CallNode(ExprNode):
    # Abstract base for all call nodes.
    # allow overriding the default 'may_be_none' behaviour
    may_return_none = None
    def infer_type(self, env):
        """Infer the call's result type from the inferred callee type."""
        function = self.function
        func_type = function.infer_type(env)
        if isinstance(function, NewExprNode):
            # note: needs call to infer_type() above
            return PyrexTypes.CPtrType(function.class_type)
        if func_type is py_object_type:
            # function might have lied for safety => try to find better type
            entry = getattr(function, 'entry', None)
            if entry is not None:
                func_type = entry.type or func_type
        if func_type.is_ptr:
            func_type = func_type.base_type
        if func_type.is_cfunction:
            return func_type.return_type
        elif func_type is type_type:
            # Calling a type: constructors of extension/builtin types have
            # a known result type.
            if function.is_name and function.entry and function.entry.type:
                result_type = function.entry.type
                if result_type.is_extension_type:
                    return result_type
                elif result_type.is_builtin_type:
                    if function.entry.name == 'float':
                        return PyrexTypes.c_double_type
                    elif function.entry.name in Builtin.types_that_construct_their_instance:
                        return result_type
        return py_object_type
    def type_dependencies(self, env):
        # TODO: Update when Danilo's C++ code merged in to handle the
        # the case of function overloading.
        return self.function.type_dependencies(env)
    def is_simple(self):
        # C function calls could be considered simple, but they may
        # have side-effects that may hit when multiple operations must
        # be effected in order, e.g. when constructing the argument
        # sequence for a function call or comparing values.
        return False
    def may_be_none(self):
        """Constructor calls of extension/builtin types never return None."""
        if self.may_return_none is not None:
            return self.may_return_none
        func_type = self.function.type
        if func_type is type_type and self.function.is_name:
            entry = self.function.entry
            if entry.type.is_extension_type:
                return False
            if (entry.type.is_builtin_type and
                entry.name in Builtin.types_that_construct_their_instance):
                return False
        return ExprNode.may_be_none(self)
    def analyse_as_type_constructor(self, env):
        """Try to treat this call as a struct/union literal or a C++
        constructor call.  Returns True on success; otherwise falls
        through (returning None, which is falsy for callers).
        """
        type = self.function.analyse_as_type(env)
        if type and type.is_struct_or_union:
            # Convert the call node in place into a DictNode mapping
            # member names to argument values.
            args, kwds = self.explicit_args_kwds()
            items = []
            for arg, member in zip(args, type.scope.var_entries):
                items.append(DictItemNode(pos=arg.pos, key=StringNode(pos=arg.pos, value=member.name), value=arg))
            if kwds:
                items += kwds.key_value_pairs
            self.key_value_pairs = items
            self.__class__ = DictNode
            self.analyse_types(env) # FIXME
            self.coerce_to(type, env)
            return True
        elif type and type.is_cpp_class:
            self.args = [ arg.analyse_types(env) for arg in self.args ]
            constructor = type.scope.lookup("<init>")
            self.function = RawCNameExprNode(self.function.pos, constructor.type)
            self.function.entry = constructor
            self.function.set_cname(type.declaration_code(""))
            self.analyse_c_function_call(env)
            self.type = type
            return True
    def is_lvalue(self):
        # Only calls returning a C++ reference are assignable.
        return self.type.is_reference
    def nogil_check(self, env):
        """Report an error for calls that require the GIL in nogil context."""
        func_type = self.function_type()
        if func_type.is_pyobject:
            self.gil_error()
        elif not getattr(func_type, 'nogil', False):
            self.gil_error()
    gil_message = "Calling gil-requiring function"
class SimpleCallNode(CallNode):
    # Function call without keyword, * or ** args.
    #
    # function ExprNode
    # args [ExprNode]
    # arg_tuple ExprNode or None used internally
    # self ExprNode or None used internally
    # coerced_self ExprNode or None used internally
    # wrapper_call bool used internally
    # has_optional_args bool used internally
    # nogil bool used internally
    subexprs = ['self', 'coerced_self', 'function', 'args', 'arg_tuple']
    self = None
    coerced_self = None
    arg_tuple = None
    wrapper_call = False
    has_optional_args = False
    nogil = False
    analysed = False
    def compile_time_value(self, denv):
        """Evaluate the call in a compile-time (DEF) environment."""
        function = self.function.compile_time_value(denv)
        args = [arg.compile_time_value(denv) for arg in self.args]
        try:
            return function(*args)
        except Exception, e:
            self.compile_time_value_error(e)
    def analyse_as_type(self, env):
        """Support 'cython.pointer(T)' when the call is used as a type."""
        attr = self.function.as_cython_attribute()
        if attr == 'pointer':
            if len(self.args) != 1:
                error(self.args.pos, "only one type allowed.")
            else:
                type = self.args[0].analyse_as_type(env)
                if not type:
                    error(self.args[0].pos, "Unknown type")
                else:
                    return PyrexTypes.CPtrType(type)
    def explicit_args_kwds(self):
        # Simple calls have no keyword arguments by construction.
        return self.args, None
    def analyse_types(self, env):
        """Analyse callee and arguments; dispatch to Python-object call
        handling (args packed in a tuple) or C function call analysis."""
        if self.analyse_as_type_constructor(env):
            return self
        if self.analysed:
            return self
        self.analysed = True
        self.function.is_called = 1
        self.function = self.function.analyse_types(env)
        function = self.function
        if function.is_attribute and function.entry and function.entry.is_cmethod:
            # Take ownership of the object from which the attribute
            # was obtained, because we need to pass it as 'self'.
            self.self = function.obj
            function.obj = CloneNode(self.self)
        func_type = self.function_type()
        if func_type.is_pyobject:
            self.arg_tuple = TupleNode(self.pos, args = self.args)
            self.arg_tuple = self.arg_tuple.analyse_types(env)
            self.args = None
            if func_type is Builtin.type_type and function.is_name and \
                   function.entry and \
                   function.entry.is_builtin and \
                   function.entry.name in Builtin.types_that_construct_their_instance:
                # calling a builtin type that returns a specific object type
                if function.entry.name == 'float':
                    # the following will come true later on in a transform
                    self.type = PyrexTypes.c_double_type
                    self.result_ctype = PyrexTypes.c_double_type
                else:
                    self.type = Builtin.builtin_types[function.entry.name]
                    self.result_ctype = py_object_type
                self.may_return_none = False
            elif function.is_name and function.type_entry:
                # We are calling an extension type constructor. As
                # long as we do not support __new__(), the result type
                # is clear
                self.type = function.type_entry.type
                self.result_ctype = py_object_type
                self.may_return_none = False
            else:
                self.type = py_object_type
            self.is_temp = 1
        else:
            self.args = [ arg.analyse_types(env) for arg in self.args ]
            self.analyse_c_function_call(env)
        return self
    def function_type(self):
        # Return the type of the function being called, coercing a function
        # pointer to a function if necessary. If the function has fused
        # arguments, return the specific type.
        func_type = self.function.type
        if func_type.is_ptr:
            func_type = func_type.base_type
        return func_type
    def analyse_c_function_call(self, env):
        """Resolve overloads, check argument counts, handle the 'self'
        argument of bound C methods, and coerce all arguments to their
        formal types (forcing temps where evaluation order matters)."""
        if self.function.type is error_type:
            self.type = error_type
            return
        if self.self:
            # Bound C method: prepend the bound object as first argument.
            args = [self.self] + self.args
        else:
            args = self.args
        if self.function.type.is_cpp_class:
            overloaded_entry = self.function.type.scope.lookup("operator()")
            if overloaded_entry is None:
                self.type = PyrexTypes.error_type
                self.result_code = "<error>"
                return
        elif hasattr(self.function, 'entry'):
            overloaded_entry = self.function.entry
        elif (isinstance(self.function, IndexNode) and
              self.function.is_fused_index):
            overloaded_entry = self.function.type.entry
        else:
            overloaded_entry = None
        if overloaded_entry:
            if self.function.type.is_fused:
                functypes = self.function.type.get_all_specialized_function_types()
                alternatives = [f.entry for f in functypes]
            else:
                alternatives = overloaded_entry.all_alternatives()
            entry = PyrexTypes.best_match(args, alternatives, self.pos, env)
            if not entry:
                self.type = PyrexTypes.error_type
                self.result_code = "<error>"
                return
            entry.used = True
            self.function.entry = entry
            self.function.type = entry.type
            func_type = self.function_type()
        else:
            entry = None
            func_type = self.function_type()
            if not func_type.is_cfunction:
                error(self.pos, "Calling non-function type '%s'" % func_type)
                self.type = PyrexTypes.error_type
                self.result_code = "<error>"
                return
        # Check no. of args
        max_nargs = len(func_type.args)
        expected_nargs = max_nargs - func_type.optional_arg_count
        actual_nargs = len(args)
        if func_type.optional_arg_count and expected_nargs != actual_nargs:
            self.has_optional_args = 1
            self.is_temp = 1
        # check 'self' argument
        if entry and entry.is_cmethod and func_type.args:
            formal_arg = func_type.args[0]
            arg = args[0]
            if formal_arg.not_none:
                if self.self:
                    self.self = self.self.as_none_safe_node(
                        "'NoneType' object has no attribute '%s'",
                        error='PyExc_AttributeError',
                        format_args=[entry.name])
                else:
                    # unbound method
                    arg = arg.as_none_safe_node(
                        "descriptor '%s' requires a '%s' object but received a 'NoneType'",
                        format_args=[entry.name, formal_arg.type.name])
            if self.self:
                if formal_arg.accept_builtin_subtypes:
                    arg = CMethodSelfCloneNode(self.self)
                else:
                    arg = CloneNode(self.self)
                arg = self.coerced_self = arg.coerce_to(formal_arg.type, env)
            elif formal_arg.type.is_builtin_type:
                # special case: unbound methods of builtins accept subtypes
                arg = arg.coerce_to(formal_arg.type, env)
                if arg.type.is_builtin_type and isinstance(arg, PyTypeTestNode):
                    arg.exact_builtin_type = False
            args[0] = arg
        # Coerce arguments
        some_args_in_temps = False
        for i in xrange(min(max_nargs, actual_nargs)):
            formal_arg = func_type.args[i]
            formal_type = formal_arg.type
            arg = args[i].coerce_to(formal_type, env)
            if formal_arg.not_none:
                # C methods must do the None checks at *call* time
                arg = arg.as_none_safe_node(
                    "cannot pass None into a C function argument that is declared 'not None'")
            if arg.is_temp:
                if i > 0:
                    # first argument in temp doesn't impact subsequent arguments
                    some_args_in_temps = True
            elif arg.type.is_pyobject and not env.nogil:
                if i == 0 and self.self is not None:
                    # a method's cloned "self" argument is ok
                    pass
                elif arg.nonlocally_immutable():
                    # plain local variables are ok
                    pass
                else:
                    # we do not safely own the argument's reference,
                    # but we must make sure it cannot be collected
                    # before we return from the function, so we create
                    # an owned temp reference to it
                    if i > 0: # first argument doesn't matter
                        some_args_in_temps = True
                    arg = arg.coerce_to_temp(env)
            args[i] = arg
        # handle additional varargs parameters
        for i in xrange(max_nargs, actual_nargs):
            arg = args[i]
            if arg.type.is_pyobject:
                # Python objects cannot pass through C varargs directly.
                arg_ctype = arg.type.default_coerced_ctype()
                if arg_ctype is None:
                    error(self.args[i].pos,
                          "Python object cannot be passed as a varargs parameter")
                else:
                    args[i] = arg = arg.coerce_to(arg_ctype, env)
            if arg.is_temp and i > 0:
                some_args_in_temps = True
        if some_args_in_temps:
            # if some args are temps and others are not, they may get
            # constructed in the wrong order (temps first) => make
            # sure they are either all temps or all not temps (except
            # for the last argument, which is evaluated last in any
            # case)
            for i in xrange(actual_nargs-1):
                if i == 0 and self.self is not None:
                    continue # self is ok
                arg = args[i]
                if arg.nonlocally_immutable():
                    # locals, C functions, unassignable types are safe.
                    pass
                elif arg.type.is_cpp_class:
                    # Assignment has side effects, avoid.
                    pass
                elif env.nogil and arg.type.is_pyobject:
                    # can't copy a Python reference into a temp in nogil
                    # env (this is safe: a construction would fail in
                    # nogil anyway)
                    pass
                else:
                    #self.args[i] = arg.coerce_to_temp(env)
                    # instead: issue a warning
                    # NOTE(review): 'i == 1 and ...' looks redundant given
                    # 'i > 0' already holds for i == 1 — confirm intent.
                    if i > 0 or i == 1 and self.self is not None: # skip first arg
                        warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0)
                        break
        self.args[:] = args
        # Calc result type and code fragment
        if isinstance(self.function, NewExprNode):
            self.type = PyrexTypes.CPtrType(self.function.class_type)
        else:
            self.type = func_type.return_type
        if self.function.is_name or self.function.is_attribute:
            if self.function.entry and self.function.entry.utility_code:
                self.is_temp = 1 # currently doesn't work for self.calculate_result_code()
        if self.type.is_pyobject:
            self.result_ctype = py_object_type
            self.is_temp = 1
        elif func_type.exception_value is not None \
                 or func_type.exception_check:
            self.is_temp = 1
        elif self.type.is_memoryviewslice:
            self.is_temp = 1
            # func_type.exception_check = True
        # Called in 'nogil' context?
        self.nogil = env.nogil
        if (self.nogil and
            func_type.exception_check and
            func_type.exception_check != '+'):
            env.use_utility_code(pyerr_occurred_withgil_utility_code)
        # C++ exception handler
        if func_type.exception_check == '+':
            if func_type.exception_value is None:
                env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
    def calculate_result_code(self):
        return self.c_call_code()
    def c_call_code(self):
        """Build the C call expression string, including any optional-args
        struct pointer and varargs arguments."""
        func_type = self.function_type()
        if self.type is PyrexTypes.error_type or not func_type.is_cfunction:
            return "<error>"
        formal_args = func_type.args
        arg_list_code = []
        args = list(zip(formal_args, self.args))
        max_nargs = len(func_type.args)
        expected_nargs = max_nargs - func_type.optional_arg_count
        actual_nargs = len(self.args)
        for formal_arg, actual_arg in args[:expected_nargs]:
            arg_code = actual_arg.result_as(formal_arg.type)
            arg_list_code.append(arg_code)
        if func_type.is_overridable:
            # Extra flag tells the function whether to skip the Python
            # override dispatch.
            arg_list_code.append(str(int(self.wrapper_call or self.function.entry.is_unbound_cmethod)))
        if func_type.optional_arg_count:
            if expected_nargs == actual_nargs:
                optional_args = 'NULL'
            else:
                optional_args = "&%s" % self.opt_arg_struct
            arg_list_code.append(optional_args)
        for actual_arg in self.args[len(formal_args):]:
            arg_list_code.append(actual_arg.result())
        result = "%s(%s)" % (self.function.result(), ', '.join(arg_list_code))
        return result
    def generate_result_code(self, code):
        """Emit the call: Python calls via __Pyx_PyObject_Call, C calls
        with optional-args struct setup, error checks and (for '+')
        C++ exception translation."""
        func_type = self.function_type()
        if self.function.is_name or self.function.is_attribute:
            if self.function.entry and self.function.entry.utility_code:
                code.globalstate.use_utility_code(self.function.entry.utility_code)
        if func_type.is_pyobject:
            arg_code = self.arg_tuple.py_result()
            code.globalstate.use_utility_code(UtilityCode.load_cached(
                "PyObjectCall", "ObjectHandling.c"))
            code.putln(
                "%s = __Pyx_PyObject_Call(%s, %s, NULL); %s" % (
                    self.result(),
                    self.function.py_result(),
                    arg_code,
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        elif func_type.is_cfunction:
            if self.has_optional_args:
                # Fill the optional-argument struct with the extra args.
                actual_nargs = len(self.args)
                expected_nargs = len(func_type.args) - func_type.optional_arg_count
                self.opt_arg_struct = code.funcstate.allocate_temp(
                    func_type.op_arg_struct.base_type, manage_ref=True)
                code.putln("%s.%s = %s;" % (
                        self.opt_arg_struct,
                        Naming.pyrex_prefix + "n",
                        len(self.args) - expected_nargs))
                args = list(zip(func_type.args, self.args))
                for formal_arg, actual_arg in args[expected_nargs:actual_nargs]:
                    code.putln("%s.%s = %s;" % (
                            self.opt_arg_struct,
                            func_type.opt_arg_cname(formal_arg.name),
                            actual_arg.result_as(formal_arg.type)))
            exc_checks = []
            if self.type.is_pyobject and self.is_temp:
                exc_checks.append("!%s" % self.result())
            elif self.type.is_memoryviewslice:
                assert self.is_temp
                exc_checks.append(self.type.error_condition(self.result()))
            else:
                exc_val = func_type.exception_value
                exc_check = func_type.exception_check
                if exc_val is not None:
                    exc_checks.append("%s == %s" % (self.result(), exc_val))
                if exc_check:
                    if self.nogil:
                        exc_checks.append("__Pyx_ErrOccurredWithGIL()")
                    else:
                        exc_checks.append("PyErr_Occurred()")
            if self.is_temp or exc_checks:
                rhs = self.c_call_code()
                if self.result():
                    lhs = "%s = " % self.result()
                    if self.is_temp and self.type.is_pyobject:
                        #return_type = self.type # func_type.return_type
                        #print "SimpleCallNode.generate_result_code: casting", rhs, \
                        # "from", return_type, "to pyobject" ###
                        rhs = typecast(py_object_type, self.type, rhs)
                else:
                    lhs = ""
                if func_type.exception_check == '+':
                    # Translate a thrown C++ exception into a Python one.
                    if func_type.exception_value is None:
                        raise_py_exception = "__Pyx_CppExn2PyErr();"
                    elif func_type.exception_value.type.is_pyobject:
                        raise_py_exception = 'try { throw; } catch(const std::exception& exn) { PyErr_SetString(%s, exn.what()); } catch(...) { PyErr_SetNone(%s); }' % (
                            func_type.exception_value.entry.cname,
                            func_type.exception_value.entry.cname)
                    else:
                        raise_py_exception = '%s(); if (!PyErr_Occurred()) PyErr_SetString(PyExc_RuntimeError , "Error converting c++ exception.");' % func_type.exception_value.entry.cname
                    code.putln("try {")
                    code.putln("%s%s;" % (lhs, rhs))
                    code.putln("} catch(...) {")
                    if self.nogil:
                        code.put_ensure_gil(declare_gilstate=True)
                    code.putln(raise_py_exception)
                    if self.nogil:
                        code.put_release_ensured_gil()
                    code.putln(code.error_goto(self.pos))
                    code.putln("}")
                else:
                    if exc_checks:
                        goto_error = code.error_goto_if(" && ".join(exc_checks), self.pos)
                    else:
                        goto_error = ""
                    code.putln("%s%s; %s" % (lhs, rhs, goto_error))
                if self.type.is_pyobject and self.result():
                    code.put_gotref(self.py_result())
            if self.has_optional_args:
                code.funcstate.release_temp(self.opt_arg_struct)
class InlinedDefNodeCallNode(CallNode):
    # Inline call to defnode
    #
    # function PyCFunctionNode
    # function_name NameNode
    # args [ExprNode]
    subexprs = ['args', 'function_name']
    is_temp = 1
    type = py_object_type
    function = None
    function_name = None
    def can_be_inlined(self):
        """A def function can be inlined only for plain positional calls
        that exactly match its argument count (no */** parameters)."""
        func_type= self.function.def_node
        if func_type.star_arg or func_type.starstar_arg:
            return False
        if len(func_type.args) != len(self.args):
            return False
        return True
    def analyse_types(self, env):
        """Coerce arguments to the def function's formal types, forcing
        temps where C evaluation order could otherwise differ."""
        self.function_name = self.function_name.analyse_types(env)
        self.args = [ arg.analyse_types(env) for arg in self.args ]
        func_type = self.function.def_node
        actual_nargs = len(self.args)
        # Coerce arguments
        some_args_in_temps = False
        for i in xrange(actual_nargs):
            formal_type = func_type.args[i].type
            arg = self.args[i].coerce_to(formal_type, env)
            if arg.is_temp:
                if i > 0:
                    # first argument in temp doesn't impact subsequent arguments
                    some_args_in_temps = True
            elif arg.type.is_pyobject and not env.nogil:
                if arg.nonlocally_immutable():
                    # plain local variables are ok
                    pass
                else:
                    # we do not safely own the argument's reference,
                    # but we must make sure it cannot be collected
                    # before we return from the function, so we create
                    # an owned temp reference to it
                    if i > 0: # first argument doesn't matter
                        some_args_in_temps = True
                    arg = arg.coerce_to_temp(env)
            self.args[i] = arg
        if some_args_in_temps:
            # if some args are temps and others are not, they may get
            # constructed in the wrong order (temps first) => make
            # sure they are either all temps or all not temps (except
            # for the last argument, which is evaluated last in any
            # case)
            for i in xrange(actual_nargs-1):
                arg = self.args[i]
                if arg.nonlocally_immutable():
                    # locals, C functions, unassignable types are safe.
                    pass
                elif arg.type.is_cpp_class:
                    # Assignment has side effects, avoid.
                    pass
                elif env.nogil and arg.type.is_pyobject:
                    # can't copy a Python reference into a temp in nogil
                    # env (this is safe: a construction would fail in
                    # nogil anyway)
                    pass
                else:
                    #self.args[i] = arg.coerce_to_temp(env)
                    # instead: issue a warning
                    if i > 0:
                        warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0)
                        break
        return self
    def generate_result_code(self, code):
        """Emit a direct C call to the def function's implementation,
        passing the function object itself as the first argument."""
        arg_code = [self.function_name.py_result()]
        func_type = self.function.def_node
        for arg, proto_arg in zip(self.args, func_type.args):
            if arg.type.is_pyobject:
                arg_code.append(arg.result_as(proto_arg.type))
            else:
                arg_code.append(arg.result())
        arg_code = ', '.join(arg_code)
        code.putln(
            "%s = %s(%s); %s" % (
                self.result(),
                self.function.def_node.entry.pyfunc_cname,
                arg_code,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class PythonCapiFunctionNode(ExprNode):
    """Reference to a C-level (C-API style) function, identified by its
    cname and C function type.  Used as the callee of PythonCapiCallNode.
    """
    subexprs = []
    def __init__(self, pos, py_name, cname, func_type, utility_code=None):
        # Store everything through the generic ExprNode keyword mechanism.
        ExprNode.__init__(
            self, pos, name=py_name, cname=cname,
            type=func_type, utility_code=utility_code)
    def analyse_types(self, env):
        # Fully typed at construction time; nothing to analyse.
        return self
    def generate_result_code(self, code):
        # Only make sure the implementing utility code gets emitted.
        utility = self.utility_code
        if utility:
            code.globalstate.use_utility_code(utility)
    def calculate_result_code(self):
        # The C name of the function is the call expression itself.
        return self.cname
class PythonCapiCallNode(SimpleCallNode):
    """Python C-API function call (only created in transforms).

    Wraps the callee in a PythonCapiFunctionNode and inherits the call
    machinery from SimpleCallNode.
    """
    # By default, we assume that the call never returns None, as this
    # is true for most C-API functions in CPython. If this does not
    # apply to a call, set the following to True (or None to inherit
    # the default behaviour).
    may_return_none = False
    def __init__(self, pos, function_name, func_type,
                 utility_code = None, py_name=None, **kwargs):
        self.type = func_type.return_type
        self.result_ctype = self.type
        self.function = PythonCapiFunctionNode(
            pos, py_name, function_name, func_type,
            utility_code = utility_code)
        # call this last so that we can override the constructed
        # attributes above with explicit keyword arguments if required
        SimpleCallNode.__init__(self, pos, **kwargs)
class GeneralCallNode(CallNode):
    """General Python function call, including keyword, * and ** arguments.

    Attributes:
        function         ExprNode
        positional_args  ExprNode          tuple of positional arguments
        keyword_args     ExprNode or None  dict of keyword arguments
    """
    type = py_object_type
    subexprs = ['function', 'positional_args', 'keyword_args']
    nogil_check = Node.gil_error
    def compile_time_value(self, denv):
        """Evaluate the call at compile time (DEF/IF context)."""
        function = self.function.compile_time_value(denv)
        positional_args = self.positional_args.compile_time_value(denv)
        keyword_args = self.keyword_args.compile_time_value(denv)
        try:
            return function(*positional_args, **keyword_args)
        # NOTE(review): Python 2-only 'except E, e' syntax; file-wide issue.
        except Exception, e:
            self.compile_time_value_error(e)
    def explicit_args_kwds(self):
        """Return (positional arg list, keyword DictNode); error out if the
        arguments are not statically explicit (starred/computed)."""
        if (self.keyword_args and not isinstance(self.keyword_args, DictNode) or
            not isinstance(self.positional_args, TupleNode)):
            raise CompileError(self.pos,
                'Compile-time keyword arguments must be explicit.')
        return self.positional_args.args, self.keyword_args
    def analyse_types(self, env):
        """Analyse the call; may rewrite into a SimpleCallNode when the
        callee is a C function whose keywords map onto positional args."""
        if self.analyse_as_type_constructor(env):
            return self
        self.function = self.function.analyse_types(env)
        if not self.function.type.is_pyobject:
            if self.function.type.is_error:
                self.type = error_type
                return self
            if hasattr(self.function, 'entry'):
                node = self.map_to_simple_call_node()
                if node is not None and node is not self:
                    return node.analyse_types(env)
                elif self.function.entry.as_variable:
                    # fall back to calling the Python object wrapper
                    self.function = self.function.coerce_to_pyobject(env)
                elif node is self:
                    error(self.pos,
                          "Non-trivial keyword arguments and starred "
                          "arguments not allowed in cdef functions.")
                else:
                    # error was already reported
                    pass
            else:
                self.function = self.function.coerce_to_pyobject(env)
        if self.keyword_args:
            self.keyword_args = self.keyword_args.analyse_types(env)
        self.positional_args = self.positional_args.analyse_types(env)
        self.positional_args = \
            self.positional_args.coerce_to_pyobject(env)
        function = self.function
        if function.is_name and function.type_entry:
            # We are calling an extension type constructor. As long
            # as we do not support __new__(), the result type is clear
            self.type = function.type_entry.type
            self.result_ctype = py_object_type
            self.may_return_none = False
        else:
            self.type = py_object_type
        self.is_temp = 1
        return self
    def map_to_simple_call_node(self):
        """
        Tries to map keyword arguments to declared positional arguments.
        Returns self to try a Python call, None to report an error
        or a SimpleCallNode if the mapping succeeds.
        """
        if not isinstance(self.positional_args, TupleNode):
            # has starred argument
            return self
        if not isinstance(self.keyword_args, DictNode):
            # keywords come from arbitrary expression => nothing to do here
            return self
        function = self.function
        entry = getattr(function, 'entry', None)
        if not entry:
            return self
        function_type = entry.type
        if function_type.is_ptr:
            function_type = function_type.base_type
        if not function_type.is_cfunction:
            return self
        pos_args = self.positional_args.args
        kwargs = self.keyword_args
        declared_args = function_type.args
        if entry.is_cmethod:
            declared_args = declared_args[1:] # skip 'self'
        if len(pos_args) > len(declared_args):
            error(self.pos, "function call got too many positional arguments, "
                            "expected %d, got %s" % (len(declared_args),
                                                     len(pos_args)))
            return None
        matched_args = set([ arg.name for arg in declared_args[:len(pos_args)]
                             if arg.name ])
        unmatched_args = declared_args[len(pos_args):]
        matched_kwargs_count = 0
        args = list(pos_args)
        # check for duplicate keywords
        seen = set(matched_args)
        has_errors = False
        for arg in kwargs.key_value_pairs:
            name = arg.key.value
            if name in seen:
                error(arg.pos, "argument '%s' passed twice" % name)
                has_errors = True
                # continue to report more errors if there are any
            seen.add(name)
        # match keywords that are passed in order
        for decl_arg, arg in zip(unmatched_args, kwargs.key_value_pairs):
            name = arg.key.value
            if decl_arg.name == name:
                matched_args.add(name)
                matched_kwargs_count += 1
                args.append(arg.value)
            else:
                break
        # match keyword arguments that are passed out-of-order, but keep
        # the evaluation of non-simple arguments in order by moving them
        # into temps
        from Cython.Compiler.UtilNodes import EvalWithTempExprNode, LetRefNode
        temps = []
        if len(kwargs.key_value_pairs) > matched_kwargs_count:
            unmatched_args = declared_args[len(args):]
            keywords = dict([ (arg.key.value, (i+len(pos_args), arg))
                              for i, arg in enumerate(kwargs.key_value_pairs) ])
            first_missing_keyword = None
            for decl_arg in unmatched_args:
                name = decl_arg.name
                if name not in keywords:
                    # missing keyword argument => either done or error
                    if not first_missing_keyword:
                        first_missing_keyword = name
                    continue
                elif first_missing_keyword:
                    if entry.as_variable:
                        # we might be able to convert the function to a Python
                        # object, which then allows full calling semantics
                        # with default values in gaps - currently, we only
                        # support optional arguments at the end
                        return self
                    # wasn't the last keyword => gaps are not supported
                    error(self.pos, "C function call is missing "
                                    "argument '%s'" % first_missing_keyword)
                    return None
                pos, arg = keywords[name]
                matched_args.add(name)
                matched_kwargs_count += 1
                if arg.value.is_simple():
                    args.append(arg.value)
                else:
                    temp = LetRefNode(arg.value)
                    assert temp.is_simple()
                    args.append(temp)
                    # remember the original evaluation position of the temp
                    temps.append((pos, temp))
            if temps:
                # may have to move preceding non-simple args into temps
                final_args = []
                new_temps = []
                first_temp_arg = temps[0][-1]
                for arg_value in args:
                    if arg_value is first_temp_arg:
                        break # done
                    if arg_value.is_simple():
                        final_args.append(arg_value)
                    else:
                        temp = LetRefNode(arg_value)
                        new_temps.append(temp)
                        final_args.append(temp)
                if new_temps:
                    args = final_args
                # sort by original position to restore evaluation order
                temps = new_temps + [ arg for i,arg in sorted(temps) ]
        # check for unexpected keywords
        for arg in kwargs.key_value_pairs:
            name = arg.key.value
            if name not in matched_args:
                has_errors = True
                error(arg.pos,
                      "C function got unexpected keyword argument '%s'" %
                      name)
        if has_errors:
            # error was reported already
            return None
        # all keywords mapped to positional arguments
        # if we are missing arguments, SimpleCallNode will figure it out
        node = SimpleCallNode(self.pos, function=function, args=args)
        for temp in temps[::-1]:
            node = EvalWithTempExprNode(temp, node)
        return node
    def generate_result_code(self, code):
        """Emit a __Pyx_PyObject_Call with the positional tuple and the
        keyword dict (or NULL when no keywords were given)."""
        if self.type.is_error: return
        if self.keyword_args:
            kwargs = self.keyword_args.py_result()
        else:
            kwargs = 'NULL'
        code.globalstate.use_utility_code(UtilityCode.load_cached(
            "PyObjectCall", "ObjectHandling.c"))
        code.putln(
            "%s = __Pyx_PyObject_Call(%s, %s, %s); %s" % (
                self.result(),
                self.function.py_result(),
                self.positional_args.py_result(),
                kwargs,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class AsTupleNode(ExprNode):
    """Convert argument to tuple. Used for normalising the * argument
    of a function call.

    Attributes:
        arg    ExprNode
    """
    subexprs = ['arg']
    def calculate_constant_result(self):
        self.constant_result = tuple(self.arg.constant_result)
    def compile_time_value(self, denv):
        """Evaluate tuple() on the compile-time value of the argument."""
        arg = self.arg.compile_time_value(denv)
        try:
            return tuple(arg)
        # NOTE(review): Python 2-only 'except E, e' syntax; file-wide issue.
        except Exception, e:
            self.compile_time_value_error(e)
    def analyse_types(self, env):
        self.arg = self.arg.analyse_types(env)
        self.arg = self.arg.coerce_to_pyobject(env)
        self.type = tuple_type
        self.is_temp = 1
        return self
    def may_be_none(self):
        # PySequence_Tuple() either returns a tuple or raises.
        return False
    nogil_check = Node.gil_error
    gil_message = "Constructing Python tuple"
    def generate_result_code(self, code):
        code.putln(
            "%s = PySequence_Tuple(%s); %s" % (
                self.result(),
                self.arg.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class AttributeNode(ExprNode):
    #  obj.attribute
    #
    #  obj              ExprNode
    #  attribute        string
    #  needs_none_check boolean   Used if obj is an extension type.
    #                             If set to True, it is known that the
    #                             type is not None.
    #
    #  Used internally:
    #
    #  is_py_attr       boolean   Is a Python getattr operation
    #  member           string    C name of struct member
    #  is_called        boolean   Function call is being done on result
    #  entry            Entry     Symbol table entry of attribute
    is_attribute = 1
    subexprs = ['obj']
    type = PyrexTypes.error_type
    entry = None
    is_called = 0
    needs_none_check = True
    is_memslice_transpose = False
    is_special_lookup = False
    def as_cython_attribute(self):
        """Return the dotted name if this is an attribute of the cython
        module (or a chain of such attributes), else None."""
        if (isinstance(self.obj, NameNode) and
                self.obj.is_cython_module and not
                self.attribute == u"parallel"):
            return self.attribute
        cy = self.obj.as_cython_attribute()
        if cy:
            return "%s.%s" % (cy, self.attribute)
        return None
    def coerce_to(self, dst_type, env):
        """Coerce to dst_type; a cpdef function attribute coerced to a
        generic Python object switches to its Python wrapper entry."""
        # If coercing to a generic pyobject and this is a cpdef function
        # we can create the corresponding attribute
        if dst_type is py_object_type:
            entry = self.entry
            if entry and entry.is_cfunction and entry.as_variable:
                # must be a cpdef function
                self.is_temp = 1
                self.entry = entry.as_variable
                self.analyse_as_python_attribute(env)
                return self
        return ExprNode.coerce_to(self, dst_type, env)
    def calculate_constant_result(self):
        # Double-underscore attributes are never constant-folded.
        attr = self.attribute
        if attr.startswith("__") and attr.endswith("__"):
            return
        self.constant_result = getattr(self.obj.constant_result, attr)
    def compile_time_value(self, denv):
        """Evaluate getattr() at compile time; dunder names are rejected."""
        attr = self.attribute
        if attr.startswith("__") and attr.endswith("__"):
            error(self.pos,
                  "Invalid attribute name '%s' in compile-time expression" % attr)
            return None
        obj = self.obj.compile_time_value(denv)
        try:
            return getattr(obj, attr)
        # NOTE(review): Python 2-only 'except E, e' syntax; file-wide issue.
        except Exception, e:
            self.compile_time_value_error(e)
    def type_dependencies(self, env):
        return self.obj.type_dependencies(env)
    def infer_type(self, env):
        """Infer the attribute's type without mutating this node's obj."""
        # FIXME: this is way too redundant with analyse_types()
        node = self.analyse_as_cimported_attribute_node(env, target=False)
        if node is not None:
            return node.entry.type
        node = self.analyse_as_unbound_cmethod_node(env)
        if node is not None:
            return node.entry.type
        obj_type = self.obj.infer_type(env)
        self.analyse_attribute(env, obj_type=obj_type)
        if obj_type.is_builtin_type and self.type.is_cfunction:
            # special case: C-API replacements for C methods of
            # builtin types cannot be inferred as C functions as
            # that would prevent their use as bound methods
            return py_object_type
        return self.type
    def analyse_target_declaration(self, env):
        pass
    def analyse_target_types(self, env):
        """Analyse this node as an assignment target; reject const and
        non-lvalue attributes."""
        node = self.analyse_types(env, target = 1)
        if node.type.is_const:
            error(self.pos, "Assignment to const attribute '%s'" % self.attribute)
        if not node.is_lvalue():
            error(self.pos, "Assignment to non-lvalue of type '%s'" % self.type)
        return node
    def analyse_types(self, env, target = 0):
        """Resolve the attribute: cimported C name, unbound C method, or
        ordinary (C-struct or Python) attribute, in that order."""
        self.initialized_check = env.directives['initializedcheck']
        node = self.analyse_as_cimported_attribute_node(env, target)
        if node is None and not target:
            node = self.analyse_as_unbound_cmethod_node(env)
        if node is None:
            node = self.analyse_as_ordinary_attribute_node(env, target)
            assert node is not None
        if node.entry:
            node.entry.used = True
        if node.is_attribute:
            node.wrap_obj_in_nonecheck(env)
        return node
    def analyse_as_cimported_attribute_node(self, env, target):
        # Try to interpret this as a reference to an imported
        # C const, type, var or function. If successful, returns
        # a corresponding NameNode, otherwise returns None.
        module_scope = self.obj.analyse_as_module(env)
        if module_scope:
            entry = module_scope.lookup_here(self.attribute)
            if entry and (
                    entry.is_cglobal or entry.is_cfunction
                    or entry.is_type or entry.is_const):
                return self.as_name_node(env, entry, target)
        return None
    def analyse_as_unbound_cmethod_node(self, env):
        # Try to interpret this as a reference to an unbound
        # C method of an extension type or builtin type. If successful,
        # creates a corresponding NameNode and returns it, otherwise
        # returns None.
        type = self.obj.analyse_as_extension_type(env)
        if type:
            entry = type.scope.lookup_here(self.attribute)
            if entry and entry.is_cmethod:
                if type.is_builtin_type:
                    if not self.is_called:
                        # must handle this as Python object
                        return None
                    ubcm_entry = entry
                else:
                    # Create a temporary entry describing the C method
                    # as an ordinary function.
                    ubcm_entry = Symtab.Entry(entry.name,
                                              "%s->%s" % (type.vtabptr_cname, entry.cname),
                                              entry.type)
                    ubcm_entry.is_cfunction = 1
                    ubcm_entry.func_cname = entry.func_cname
                    ubcm_entry.is_unbound_cmethod = 1
                return self.as_name_node(env, ubcm_entry, target=False)
        return None
    def analyse_as_type(self, env):
        """Return the type named by 'module.attr' or 'Type.attr', or None."""
        module_scope = self.obj.analyse_as_module(env)
        if module_scope:
            return module_scope.lookup_type(self.attribute)
        if not self.obj.is_string_literal:
            base_type = self.obj.analyse_as_type(env)
            if base_type and hasattr(base_type, 'scope') and base_type.scope is not None:
                return base_type.scope.lookup_type(self.attribute)
        return None
    def analyse_as_extension_type(self, env):
        # Try to interpret this as a reference to an extension type
        # in a cimported module. Returns the extension type, or None.
        module_scope = self.obj.analyse_as_module(env)
        if module_scope:
            entry = module_scope.lookup_here(self.attribute)
            if entry and entry.is_type:
                if entry.type.is_extension_type or entry.type.is_builtin_type:
                    return entry.type
        return None
    def analyse_as_module(self, env):
        # Try to interpret this as a reference to a cimported module
        # in another cimported module. Returns the module scope, or None.
        module_scope = self.obj.analyse_as_module(env)
        if module_scope:
            entry = module_scope.lookup_here(self.attribute)
            if entry and entry.as_module:
                return entry.as_module
        return None
    def as_name_node(self, env, entry, target):
        # Create a corresponding NameNode from this node and complete the
        # analyse_types phase.
        node = NameNode.from_node(self, name=self.attribute, entry=entry)
        if target:
            node = node.analyse_target_types(env)
        else:
            node = node.analyse_rvalue_entry(env)
        node.entry.used = 1
        return node
    def analyse_as_ordinary_attribute_node(self, env, target):
        """Analyse as a plain C-struct or Python attribute access."""
        self.obj = self.obj.analyse_types(env)
        self.analyse_attribute(env)
        if self.entry and self.entry.is_cmethod and not self.is_called:
            # error(self.pos, "C method can only be called")
            pass
        ## Reference to C array turns into pointer to first element.
        #while self.type.is_array:
        #    self.type = self.type.element_ptr_type()
        if self.is_py_attr:
            if not target:
                self.is_temp = 1
                self.result_ctype = py_object_type
        elif target and self.obj.type.is_builtin_type:
            error(self.pos, "Assignment to an immutable object field")
        #elif self.type.is_memoryviewslice and not target:
        #    self.is_temp = True
        return self
    def analyse_attribute(self, env, obj_type = None):
        # Look up attribute and set self.type and self.member.
        immutable_obj = obj_type is not None # used during type inference
        self.is_py_attr = 0
        self.member = self.attribute
        if obj_type is None:
            if self.obj.type.is_string or self.obj.type.is_pyunicode_ptr:
                self.obj = self.obj.coerce_to_pyobject(env)
            obj_type = self.obj.type
        else:
            if obj_type.is_string or obj_type.is_pyunicode_ptr:
                obj_type = py_object_type
        # Select the C access operator for the generated code.
        if obj_type.is_ptr or obj_type.is_array:
            obj_type = obj_type.base_type
            self.op = "->"
        elif obj_type.is_extension_type or obj_type.is_builtin_type:
            self.op = "->"
        else:
            self.op = "."
        if obj_type.has_attributes:
            if obj_type.attributes_known():
                if (obj_type.is_memoryviewslice and not
                        obj_type.scope.lookup_here(self.attribute)):
                    if self.attribute == 'T':
                        # memoryview transpose: result has the same type
                        self.is_memslice_transpose = True
                        self.is_temp = True
                        self.use_managed_ref = True
                        self.type = self.obj.type
                        return
                    else:
                        obj_type.declare_attribute(self.attribute, env, self.pos)
                entry = obj_type.scope.lookup_here(self.attribute)
                if entry and entry.is_member:
                    entry = None
            else:
                error(self.pos,
                      "Cannot select attribute of incomplete type '%s'"
                      % obj_type)
                self.type = PyrexTypes.error_type
                return
            self.entry = entry
            if entry:
                if obj_type.is_extension_type and entry.name == "__weakref__":
                    error(self.pos, "Illegal use of special attribute __weakref__")
                # def methods need the normal attribute lookup
                # because they do not have struct entries
                # fused function go through assignment synthesis
                # (foo = pycfunction(foo_func_obj)) and need to go through
                # regular Python lookup as well
                if (entry.is_variable and not entry.fused_cfunction) or entry.is_cmethod:
                    self.type = entry.type
                    self.member = entry.cname
                    return
                else:
                    # If it's not a variable or C method, it must be a Python
                    # method of an extension type, so we treat it like a Python
                    # attribute.
                    pass
        # If we get here, the base object is not a struct/union/extension
        # type, or it is an extension type and the attribute is either not
        # declared or is declared as a Python method. Treat it as a Python
        # attribute reference.
        self.analyse_as_python_attribute(env, obj_type, immutable_obj)
    def analyse_as_python_attribute(self, env, obj_type=None, immutable_obj=False):
        """Treat this as a Python-level attribute (getattr) access."""
        if obj_type is None:
            obj_type = self.obj.type
        # mangle private '__*' Python attributes used inside of a class
        self.attribute = env.mangle_class_private_name(self.attribute)
        self.member = self.attribute
        self.type = py_object_type
        self.is_py_attr = 1
        if not obj_type.is_pyobject and not obj_type.is_error:
            if obj_type.can_coerce_to_pyobject(env):
                if not immutable_obj:
                    self.obj = self.obj.coerce_to_pyobject(env)
            elif (obj_type.is_cfunction and (self.obj.is_name or self.obj.is_attribute)
                  and self.obj.entry.as_variable
                  and self.obj.entry.as_variable.type.is_pyobject):
                # might be an optimised builtin function => unpack it
                if not immutable_obj:
                    self.obj = self.obj.coerce_to_pyobject(env)
            else:
                error(self.pos,
                      "Object of type '%s' has no attribute '%s'" %
                      (obj_type, self.attribute))
    def wrap_obj_in_nonecheck(self, env):
        """Under the 'nonecheck' directive, wrap obj so that attribute
        access on None raises AttributeError instead of crashing."""
        if not env.directives['nonecheck']:
            return
        msg = None
        format_args = ()
        if (self.obj.type.is_extension_type and self.needs_none_check and not
                self.is_py_attr):
            msg = "'NoneType' object has no attribute '%s'"
            format_args = (self.attribute,)
        elif self.obj.type.is_memoryviewslice:
            if self.is_memslice_transpose:
                msg = "Cannot transpose None memoryview slice"
            else:
                entry = self.obj.type.scope.lookup_here(self.attribute)
                if entry:
                    # copy/is_c_contig/shape/strides etc
                    msg = "Cannot access '%s' attribute of None memoryview slice"
                    format_args = (entry.name,)
        if msg:
            self.obj = self.obj.as_none_safe_node(msg, 'PyExc_AttributeError',
                                                  format_args=format_args)
    def nogil_check(self, env):
        if self.is_py_attr:
            self.gil_error()
        elif self.type.is_memoryviewslice:
            import MemoryView
            MemoryView.err_if_nogil_initialized_check(self.pos, env, 'attribute')
    gil_message = "Accessing Python attribute"
    def is_simple(self):
        # NOTE(review): falls back to NameNode behaviour when self.obj is
        # unset -- presumably for entry-based nodes; confirm with callers.
        if self.obj:
            return self.result_in_temp() or self.obj.is_simple()
        else:
            return NameNode.is_simple(self)
    def is_lvalue(self):
        if self.obj:
            return not self.type.is_array
        else:
            return NameNode.is_lvalue(self)
    def is_ephemeral(self):
        if self.obj:
            return self.obj.is_ephemeral()
        else:
            return NameNode.is_ephemeral(self)
    def calculate_result_code(self):
        #print "AttributeNode.calculate_result_code:", self.member ###
        #print "...obj node =", self.obj, "code", self.obj.result() ###
        #print "...obj type", self.obj.type, "ctype", self.obj.ctype() ###
        obj = self.obj
        obj_code = obj.result_as(obj.type)
        #print "...obj_code =", obj_code ###
        if self.entry and self.entry.is_cmethod:
            if obj.type.is_extension_type and not self.entry.is_builtin_cmethod:
                if self.entry.final_func_cname:
                    return self.entry.final_func_cname
                if self.type.from_fused:
                    # If the attribute was specialized through indexing, make
                    # sure to get the right fused name, as our entry was
                    # replaced by our parent index node
                    # (AnalyseExpressionsTransform)
                    self.member = self.entry.cname
                return "((struct %s *)%s%s%s)->%s" % (
                    obj.type.vtabstruct_cname, obj_code, self.op,
                    obj.type.vtabslot_cname, self.member)
            elif self.result_is_used:
                return self.member
            # Generating no code at all for unused access to optimised builtin
            # methods fixes the problem that some optimisations only exist as
            # macros, i.e. there is no function pointer to them, so we would
            # generate invalid C code here.
            return
        elif obj.type.is_complex:
            return "__Pyx_C%s(%s)" % (self.member.upper(), obj_code)
        else:
            if obj.type.is_builtin_type and self.entry and self.entry.is_variable:
                # accessing a field of a builtin type, need to cast better than result_as() does
                obj_code = obj.type.cast_code(obj.result(), to_object_struct = True)
            return "%s%s%s" % (obj_code, self.op, self.member)
    def generate_result_code(self, code):
        """Emit code for the attribute access (getattr for Python
        attributes, transpose/check for memoryview slices, nothing or
        utility-code registration for plain C members)."""
        if self.is_py_attr:
            if self.is_special_lookup:
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("PyObjectLookupSpecial", "ObjectHandling.c"))
                lookup_func_name = '__Pyx_PyObject_LookupSpecial'
            else:
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("PyObjectGetAttrStr", "ObjectHandling.c"))
                lookup_func_name = '__Pyx_PyObject_GetAttrStr'
            code.putln(
                '%s = %s(%s, %s); %s' % (
                    self.result(),
                    lookup_func_name,
                    self.obj.py_result(),
                    code.intern_identifier(self.attribute),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        elif self.type.is_memoryviewslice:
            if self.is_memslice_transpose:
                # transpose the slice
                for access, packing in self.type.axes:
                    if access == 'ptr':
                        error(self.pos, "Transposing not supported for slices "
                                        "with indirect dimensions")
                        return
                code.putln("%s = %s;" % (self.result(), self.obj.result()))
                if self.obj.is_name or (self.obj.is_attribute and
                                        self.obj.is_memslice_transpose):
                    code.put_incref_memoryviewslice(self.result(), have_gil=True)
                T = "__pyx_memslice_transpose(&%s) == 0"
                code.putln(code.error_goto_if(T % self.result(), self.pos))
            elif self.initialized_check:
                code.putln(
                    'if (unlikely(!%s.memview)) {'
                    'PyErr_SetString(PyExc_AttributeError,'
                    '"Memoryview is not initialized");'
                    '%s'
                    '}' % (self.result(), code.error_goto(self.pos)))
        else:
            # result_code contains what is needed, but we may need to insert
            # a check and raise an exception
            if self.obj.type.is_extension_type:
                pass
            elif self.entry and self.entry.is_cmethod and self.entry.utility_code:
                # C method implemented as function call with utility code
                code.globalstate.use_utility_code(self.entry.utility_code)
    def generate_disposal_code(self, code):
        if self.is_temp and self.type.is_memoryviewslice and self.is_memslice_transpose:
            # mirror condition for putting the memview incref here:
            if self.obj.is_name or (self.obj.is_attribute and
                                    self.obj.is_memslice_transpose):
                code.put_xdecref_memoryviewslice(
                    self.result(), have_gil=True)
        else:
            ExprNode.generate_disposal_code(self, code)
    def generate_assignment_code(self, rhs, code):
        """Emit code assigning rhs to this attribute (setattr for Python
        attributes, struct member / memoryview assignment otherwise)."""
        self.obj.generate_evaluation_code(code)
        if self.is_py_attr:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
            code.put_error_if_neg(self.pos,
                '__Pyx_PyObject_SetAttrStr(%s, %s, %s)' % (
                    self.obj.py_result(),
                    code.intern_identifier(self.attribute),
                    rhs.py_result()))
            rhs.generate_disposal_code(code)
            rhs.free_temps(code)
        elif self.obj.type.is_complex:
            code.putln("__Pyx_SET_C%s(%s, %s);" % (
                self.member.upper(),
                self.obj.result_as(self.obj.type),
                rhs.result_as(self.ctype())))
        else:
            select_code = self.result()
            if self.type.is_pyobject and self.use_managed_ref:
                rhs.make_owned_reference(code)
                code.put_giveref(rhs.py_result())
                code.put_gotref(select_code)
                # drop the old reference held by the attribute slot
                code.put_decref(select_code, self.ctype())
            elif self.type.is_memoryviewslice:
                import MemoryView
                MemoryView.put_assign_to_memviewslice(
                    select_code, rhs, rhs.result(), self.type, code)
            if not self.type.is_memoryviewslice:
                code.putln(
                    "%s = %s;" % (
                        select_code,
                        rhs.result_as(self.ctype())))
                        #rhs.result()))
            rhs.generate_post_assignment_code(code)
            rhs.free_temps(code)
        self.obj.generate_disposal_code(code)
        self.obj.free_temps(code)
    def generate_deletion_code(self, code, ignore_nonexisting=False):
        """Emit code for 'del obj.attr'; only valid for Python attributes
        and properties with a __del__."""
        self.obj.generate_evaluation_code(code)
        if self.is_py_attr or (self.entry.scope.is_property_scope
                               and u'__del__' in self.entry.scope.entries):
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
            code.put_error_if_neg(self.pos,
                '__Pyx_PyObject_DelAttrStr(%s, %s)' % (
                    self.obj.py_result(),
                    code.intern_identifier(self.attribute)))
        else:
            error(self.pos, "Cannot delete C attribute of extension type")
        self.obj.generate_disposal_code(code)
        self.obj.free_temps(code)
    def annotate(self, code):
        # Annotated-HTML output: colour Python vs C attribute accesses.
        if self.is_py_attr:
            style, text = 'py_attr', 'python attribute (%s)'
        else:
            style, text = 'c_attr', 'c attribute (%s)'
        code.annotate(self.pos, AnnotationItem(style, text % self.type, size=len(self.attribute)))
#-------------------------------------------------------------------
#
# Constructor nodes
#
#-------------------------------------------------------------------
class StarredTargetNode(ExprNode):
    """A starred expression like "*a".

    This is only allowed in sequence assignment targets such as

        a, *b = (1,2,3,4)  =>  a = 1 ; b = [2,3,4]

    and will be removed during type analysis (or generate an error
    if it's found at unexpected places).

    Attributes:
        target   ExprNode
    """
    subexprs = ['target']
    is_starred = 1
    type = py_object_type
    is_temp = 1
    def __init__(self, pos, target):
        ExprNode.__init__(self, pos)
        self.target = target
    def analyse_declarations(self, env):
        # Reaching this outside of an assignment target is an error, but
        # we still analyse the child to report further errors.
        error(self.pos, "can use starred expression only as assignment target")
        self.target.analyse_declarations(env)
    def analyse_types(self, env):
        error(self.pos, "can use starred expression only as assignment target")
        self.target = self.target.analyse_types(env)
        self.type = self.target.type
        return self
    def analyse_target_declaration(self, env):
        self.target.analyse_target_declaration(env)
    def analyse_target_types(self, env):
        self.target = self.target.analyse_target_types(env)
        self.type = self.target.type
        return self
    def calculate_result_code(self):
        return ""
    def generate_result_code(self, code):
        pass
class SequenceNode(ExprNode):
# Base class for list and tuple constructor nodes.
# Contains common code for performing sequence unpacking.
#
# args [ExprNode]
# unpacked_items [ExprNode] or None
# coerced_unpacked_items [ExprNode] or None
# mult_factor ExprNode the integer number of content repetitions ([1,2]*3)
subexprs = ['args', 'mult_factor']
is_sequence_constructor = 1
unpacked_items = None
mult_factor = None
slow = False # trade speed for code size (e.g. use PyTuple_Pack())
    def compile_time_value_list(self, denv):
        """Return the list of compile-time values of all argument nodes."""
        return [arg.compile_time_value(denv) for arg in self.args]
    def replace_starred_target_node(self):
        """Replace a starred node in the targets by the contained
        expression, and record whether this is a starred assignment.
        At most one starred target is allowed per assignment."""
        self.starred_assignment = False
        args = []
        for arg in self.args:
            if arg.is_starred:
                if self.starred_assignment:
                    error(arg.pos, "more than 1 starred expression in assignment")
                self.starred_assignment = True
                # unwrap the StarredTargetNode but keep the starred flag
                arg = arg.target
                arg.is_starred = True
            args.append(arg)
        self.args = args
    def analyse_target_declaration(self, env):
        """Declare all unpacking targets, after unwrapping any '*' node."""
        self.replace_starred_target_node()
        for arg in self.args:
            arg.analyse_target_declaration(env)
    def analyse_types(self, env, skip_children=False):
        """Analyse all items (coerced to Python objects) and the optional
        multiplication factor; subtypes set self.type themselves."""
        for i in range(len(self.args)):
            arg = self.args[i]
            if not skip_children: arg = arg.analyse_types(env)
            self.args[i] = arg.coerce_to_pyobject(env)
        if self.mult_factor:
            self.mult_factor = self.mult_factor.analyse_types(env)
            if not self.mult_factor.type.is_int:
                self.mult_factor = self.mult_factor.coerce_to_pyobject(env)
        self.is_temp = 1
        # not setting self.type here, subtypes do this
        return self
    def may_be_none(self):
        # A freshly constructed sequence is never None.
        return False
    def analyse_target_types(self, env):
        """Analyse the node as an unpacking target: analyse each target,
        allocate a temp per item and a coerced view of it."""
        if self.mult_factor:
            error(self.pos, "can't assign to multiplied sequence")
        self.unpacked_items = []
        self.coerced_unpacked_items = []
        self.any_coerced_items = False
        for i, arg in enumerate(self.args):
            arg = self.args[i] = arg.analyse_target_types(env)
            if arg.is_starred:
                # the starred target collects the rest into a list
                if not arg.type.assignable_from(Builtin.list_type):
                    error(arg.pos,
                          "starred target must have Python object (list) type")
                if arg.type is py_object_type:
                    arg.type = Builtin.list_type
            unpacked_item = PyTempNode(self.pos, env)
            coerced_unpacked_item = unpacked_item.coerce_to(arg.type, env)
            if unpacked_item is not coerced_unpacked_item:
                self.any_coerced_items = True
            self.unpacked_items.append(unpacked_item)
            self.coerced_unpacked_items.append(coerced_unpacked_item)
        self.type = py_object_type
        return self
    def generate_result_code(self, code):
        # Delegates to the subtype-specific sequence construction code.
        self.generate_operation_code(code)
    def generate_sequence_packing_code(self, code, target=None, plain=False):
        """Emit C code building the tuple/list into 'target' (defaults to
        this node's result), optionally repeated by self.mult_factor
        (skipped when 'plain' is true).
        """
        if target is None:
            target = self.result()
        size_factor = c_mult = ''
        mult_factor = None
        if self.mult_factor and not plain:
            mult_factor = self.mult_factor
            if mult_factor.type.is_int:
                c_mult = mult_factor.result()
                # NOTE(review): 'long' is Python 2-only; file-wide issue.
                if isinstance(mult_factor.constant_result, (int,long)) \
                       and mult_factor.constant_result > 0:
                    size_factor = ' * %s' % mult_factor.constant_result
                else:
                    # clamp a possibly negative runtime factor to zero
                    size_factor = ' * ((%s<0) ? 0:%s)' % (c_mult, c_mult)
        if self.type is Builtin.tuple_type and (self.is_literal or self.slow) and not c_mult:
            # use PyTuple_Pack() to avoid generating huge amounts of one-time code
            code.putln('%s = PyTuple_Pack(%d, %s); %s' % (
                target,
                len(self.args),
                ', '.join([ arg.py_result() for arg in self.args ]),
                code.error_goto_if_null(target, self.pos)))
            code.put_gotref(target)
        else:
            # build the tuple/list step by step, potentially multiplying it as we go
            if self.type is Builtin.list_type:
                create_func, set_item_func = 'PyList_New', 'PyList_SET_ITEM'
            elif self.type is Builtin.tuple_type:
                create_func, set_item_func = 'PyTuple_New', 'PyTuple_SET_ITEM'
            else:
                raise InternalError("sequence packing for unexpected type %s" % self.type)
            arg_count = len(self.args)
            code.putln("%s = %s(%s%s); %s" % (
                target, create_func, arg_count, size_factor,
                code.error_goto_if_null(target, self.pos)))
            code.put_gotref(target)
            if c_mult:
                # FIXME: can't use a temp variable here as the code may
                # end up in the constant building function. Temps
                # currently don't work there.
                #counter = code.funcstate.allocate_temp(mult_factor.type, manage_ref=False)
                counter = Naming.quick_temp_cname
                code.putln('{ Py_ssize_t %s;' % counter)
                if arg_count == 1:
                    offset = counter
                else:
                    offset = '%s * %s' % (counter, arg_count)
                code.putln('for (%s=0; %s < %s; %s++) {' % (
                    counter, counter, c_mult, counter
                    ))
            else:
                offset = ''
            # NOTE(review): 'xrange' is Python 2-only; file-wide issue.
            for i in xrange(arg_count):
                arg = self.args[i]
                if c_mult or not arg.result_in_temp():
                    # SET_ITEM steals a reference, so own one first
                    code.put_incref(arg.result(), arg.ctype())
                code.putln("%s(%s, %s, %s);" % (
                    set_item_func,
                    target,
                    (offset and i) and ('%s + %s' % (offset, i)) or (offset or i),
                    arg.py_result()))
                code.put_giveref(arg.py_result())
            if c_mult:
                code.putln('}')
                #code.funcstate.release_temp(counter)
                code.putln('}')
        if mult_factor is not None and mult_factor.type.is_pyobject:
            # Python-object factor: build once, then repeat in place.
            code.putln('{ PyObject* %s = PyNumber_InPlaceMultiply(%s, %s); %s' % (
                Naming.quick_temp_cname, target, mult_factor.py_result(),
                code.error_goto_if_null(Naming.quick_temp_cname, self.pos)
                ))
            code.put_gotref(Naming.quick_temp_cname)
            code.put_decref(target, py_object_type)
            code.putln('%s = %s;' % (target, Naming.quick_temp_cname))
            code.putln('}')
    def generate_subexpr_disposal_code(self, code):
        """Dispose of subexpressions, accounting for the reference-stealing
        SET_ITEM path used by step-by-step sequence construction."""
        if self.mult_factor and self.mult_factor.type.is_int:
            super(SequenceNode, self).generate_subexpr_disposal_code(code)
        elif self.type is Builtin.tuple_type and (self.is_literal or self.slow):
            super(SequenceNode, self).generate_subexpr_disposal_code(code)
        else:
            # We call generate_post_assignment_code here instead
            # of generate_disposal_code, because values were stored
            # in the tuple using a reference-stealing operation.
            for arg in self.args:
                arg.generate_post_assignment_code(code)
            # Should NOT call free_temps -- this is invoked by the default
            # generate_evaluation_code which will do that.
            if self.mult_factor:
                self.mult_factor.generate_disposal_code(code)
def generate_assignment_code(self, rhs, code):
    """Unpack ``rhs`` into this sequence's assignment targets.

    Dispatches to the starred or the plain parallel unpacking strategy,
    then releases the per-item temps and frees the RHS temps.
    """
    unpack = (self.generate_starred_assignment_code
              if self.starred_assignment
              else self.generate_parallel_assignment_code)
    unpack(rhs, code)
    # Clean up: per-item temps first, then whatever the RHS allocated.
    for unpacked in self.unpacked_items:
        unpacked.release(code)
    rhs.free_temps(code)
# C type of the tp_iternext slot: PyObject *(*)(PyObject *it).
# Used to cache an iterator's iternext function pointer in a temp
# during generic parallel unpacking (see generate_generic_parallel_unpacking_code).
_func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
    PyrexTypes.py_object_type, [
        PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None),
    ]))
def generate_parallel_assignment_code(self, rhs, code):
    """Generate code unpacking ``rhs`` into the fixed list of targets.

    Chooses between a fast exact-list/tuple path (with generic fallback)
    and purely generic iteration, then coerces and assigns each value.
    """
    # Need to work around the fact that generate_evaluation_code
    # allocates the temps in a rather hacky way -- the assignment
    # is evaluated twice, within each if-block.
    for item in self.unpacked_items:
        item.allocate(code)
    # Values that are (or may be) a plain Python object get the special
    # list/tuple fast path; known non-builtin C types go generic.
    special_unpack = (rhs.type is py_object_type
                      or rhs.type in (tuple_type, list_type)
                      or not rhs.type.is_builtin_type)
    # Only worth a C for-loop once there are enough targets to unroll.
    long_enough_for_a_loop = len(self.unpacked_items) > 3
    if special_unpack:
        self.generate_special_parallel_unpacking_code(
            code, rhs, use_loop=long_enough_for_a_loop)
    else:
        code.putln("{")
        self.generate_generic_parallel_unpacking_code(
            code, rhs, self.unpacked_items, use_loop=long_enough_for_a_loop)
        code.putln("}")
    # Coerce each unpacked value and assign it to its target.
    for value_node in self.coerced_unpacked_items:
        value_node.generate_evaluation_code(code)
    for i in range(len(self.args)):
        self.args[i].generate_assignment_code(
            self.coerced_unpacked_items[i], code)
def generate_special_parallel_unpacking_code(self, code, rhs, use_loop):
    """Emit the fast unpacking path for exact lists/tuples.

    Generates a runtime type test, a size check, and direct GET_ITEM
    access (CPython) or PySequence_ITEM (other implementations).  Values
    that fail the type test fall back to generic iteration, except that
    a possible None raises directly.
    """
    sequence_type_test = '1'
    none_check = "likely(%s != Py_None)" % rhs.py_result()
    # Decide which exact sequence types need checking at runtime.
    if rhs.type is list_type:
        sequence_types = ['List']
        if rhs.may_be_none():
            sequence_type_test = none_check
    elif rhs.type is tuple_type:
        sequence_types = ['Tuple']
        if rhs.may_be_none():
            sequence_type_test = none_check
    else:
        sequence_types = ['Tuple', 'List']
        tuple_check = 'likely(PyTuple_CheckExact(%s))' % rhs.py_result()
        list_check = 'PyList_CheckExact(%s)' % rhs.py_result()
        sequence_type_test = "(%s) || (%s)" % (tuple_check, list_check)
    code.putln("if (%s) {" % sequence_type_test)
    code.putln("PyObject* sequence = %s;" % rhs.py_result())
    # list/tuple => check size
    code.putln("#if CYTHON_COMPILING_IN_CPYTHON")
    code.putln("Py_ssize_t size = Py_SIZE(sequence);")
    code.putln("#else")
    code.putln("Py_ssize_t size = PySequence_Size(sequence);")  # < 0 => exception
    code.putln("#endif")
    code.putln("if (unlikely(size != %d)) {" % len(self.args))
    code.globalstate.use_utility_code(raise_too_many_values_to_unpack)
    code.putln("if (size > %d) __Pyx_RaiseTooManyValuesError(%d);" % (
        len(self.args), len(self.args)))
    code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
    code.putln("else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);")
    code.putln(code.error_goto(self.pos))
    code.putln("}")
    code.putln("#if CYTHON_COMPILING_IN_CPYTHON")
    # unpack items from list/tuple in unrolled loop (can't fail)
    if len(sequence_types) == 2:
        code.putln("if (likely(Py%s_CheckExact(sequence))) {" % sequence_types[0])
    for i, item in enumerate(self.unpacked_items):
        code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % (
            item.result(), sequence_types[0], i))
    if len(sequence_types) == 2:
        # second branch for the other exact sequence type
        code.putln("} else {")
        for i, item in enumerate(self.unpacked_items):
            code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % (
                item.result(), sequence_types[1], i))
        code.putln("}")
    # GET_ITEM borrows its reference => take our own for each item
    for item in self.unpacked_items:
        code.put_incref(item.result(), item.ctype())
    code.putln("#else")
    # in non-CPython, use the PySequence protocol (which can fail)
    if not use_loop:
        for i, item in enumerate(self.unpacked_items):
            code.putln("%s = PySequence_ITEM(sequence, %d); %s" % (
                item.result(), i,
                code.error_goto_if_null(item.result(), self.pos)))
            code.put_gotref(item.result())
    else:
        # loop over an array of pointers to the item temps
        code.putln("{")
        code.putln("Py_ssize_t i;")
        code.putln("PyObject** temps[%s] = {%s};" % (
            len(self.unpacked_items),
            ','.join(['&%s' % item.result() for item in self.unpacked_items])))
        code.putln("for (i=0; i < %s; i++) {" % len(self.unpacked_items))
        code.putln("PyObject* item = PySequence_ITEM(sequence, i); %s" % (
            code.error_goto_if_null('item', self.pos)))
        code.put_gotref('item')
        code.putln("*(temps[i]) = item;")
        code.putln("}")
        code.putln("}")
    code.putln("#endif")
    rhs.generate_disposal_code(code)
    if sequence_type_test == '1':
        code.putln("}")  # all done
    elif sequence_type_test == none_check:
        # either tuple/list or None => save some code by generating the error directly
        code.putln("} else {")
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("RaiseNoneIterError", "ObjectHandling.c"))
        code.putln("__Pyx_RaiseNoneNotIterableError(); %s" % code.error_goto(self.pos))
        code.putln("}")  # all done
    else:
        code.putln("} else {")  # needs iteration fallback code
        self.generate_generic_parallel_unpacking_code(
            code, rhs, self.unpacked_items, use_loop=use_loop)
        code.putln("}")
def generate_generic_parallel_unpacking_code(self, code, rhs, unpacked_items, use_loop, terminate=True):
    """Emit generic iterator-based unpacking of ``rhs`` into ``unpacked_items``.

    Obtains an iterator, caches its tp_iternext slot and pulls the values
    either in an unrolled sequence of calls or in a C for-loop over an
    array of pointers to the item temps.

    When ``terminate`` is true, also checks that the iterator is exhausted
    and releases it; otherwise the iterator temp is returned to the caller
    (used by starred assignment to keep consuming the remaining items).
    Returns the iterator temp name, or None when terminated.
    """
    code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
    code.globalstate.use_utility_code(UtilityCode.load_cached("IterFinish", "ObjectHandling.c"))
    code.putln("Py_ssize_t index = -1;")  # must be at the start of a C block!
    if use_loop:
        # BUG FIX: the C array must be sized by the list actually being
        # unpacked here, not by self.unpacked_items -- starred assignment
        # calls this with a subset of the items.
        code.putln("PyObject** temps[%s] = {%s};" % (
            len(unpacked_items),
            ','.join(['&%s' % item.result() for item in unpacked_items])))
    iterator_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
    code.putln(
        "%s = PyObject_GetIter(%s); %s" % (
            iterator_temp,
            rhs.py_result(),
            code.error_goto_if_null(iterator_temp, self.pos)))
    code.put_gotref(iterator_temp)
    rhs.generate_disposal_code(code)
    # Cache the iternext slot to avoid repeated type lookups.
    iternext_func = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False)
    code.putln("%s = Py_TYPE(%s)->tp_iternext;" % (
        iternext_func, iterator_temp))
    unpacking_error_label = code.new_label('unpacking_failed')
    unpack_code = "%s(%s)" % (iternext_func, iterator_temp)
    if use_loop:
        code.putln("for (index=0; index < %s; index++) {" % len(unpacked_items))
        code.put("PyObject* item = %s; if (unlikely(!item)) " % unpack_code)
        code.put_goto(unpacking_error_label)
        code.put_gotref("item")
        code.putln("*(temps[index]) = item;")
        code.putln("}")
    else:
        # unrolled: one iternext call per target, tracking 'index' for errors
        for i, item in enumerate(unpacked_items):
            code.put(
                "index = %d; %s = %s; if (unlikely(!%s)) " % (
                    i,
                    item.result(),
                    unpack_code,
                    item.result()))
            code.put_goto(unpacking_error_label)
            code.put_gotref(item.py_result())
    if terminate:
        # verify the iterator is exhausted, then drop it
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("UnpackItemEndCheck", "ObjectHandling.c"))
        code.put_error_if_neg(self.pos, "__Pyx_IternextUnpackEndCheck(%s, %d)" % (
            unpack_code,
            len(unpacked_items)))
        code.putln("%s = NULL;" % iternext_func)
        code.put_decref_clear(iterator_temp, py_object_type)
    unpacking_done_label = code.new_label('unpacking_done')
    code.put_goto(unpacking_done_label)
    # error path: distinguish "too few values" from a real exception
    code.put_label(unpacking_error_label)
    code.put_decref_clear(iterator_temp, py_object_type)
    code.putln("%s = NULL;" % iternext_func)
    code.putln("if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);")
    code.putln(code.error_goto(self.pos))
    code.put_label(unpacking_done_label)
    code.funcstate.release_temp(iternext_func)
    if terminate:
        code.funcstate.release_temp(iterator_temp)
        iterator_temp = None
    return iterator_temp
def generate_starred_assignment_code(self, rhs, code):
    """Emit code for an assignment with a starred target, e.g. ``a, *b, c = x``.

    The fixed items before the star are pulled off the iterator first,
    the rest is collected into a list, and the fixed items after the
    star are then popped from the end of that list.
    """
    # Locate the starred target and split the fixed targets around it.
    for i, arg in enumerate(self.args):
        if arg.is_starred:
            starred_target = self.unpacked_items[i]
            unpacked_fixed_items_left = self.unpacked_items[:i]
            unpacked_fixed_items_right = self.unpacked_items[i+1:]
            break
    else:
        assert False  # parser guarantees exactly one starred target here
    iterator_temp = None
    if unpacked_fixed_items_left:
        for item in unpacked_fixed_items_left:
            item.allocate(code)
        code.putln('{')
        # terminate=False: keep the iterator alive for the starred part
        iterator_temp = self.generate_generic_parallel_unpacking_code(
            code, rhs, unpacked_fixed_items_left,
            use_loop=True, terminate=False)
        for i, item in enumerate(unpacked_fixed_items_left):
            value_node = self.coerced_unpacked_items[i]
            value_node.generate_evaluation_code(code)
        code.putln('}')
    # Collect everything that remains into the starred list target.
    starred_target.allocate(code)
    target_list = starred_target.result()
    code.putln("%s = PySequence_List(%s); %s" % (
        target_list,
        iterator_temp or rhs.py_result(),
        code.error_goto_if_null(target_list, self.pos)))
    code.put_gotref(target_list)
    if iterator_temp:
        code.put_decref_clear(iterator_temp, py_object_type)
        code.funcstate.release_temp(iterator_temp)
    else:
        rhs.generate_disposal_code(code)
    if unpacked_fixed_items_right:
        # Pop the trailing fixed targets off the end of the list.
        code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
        length_temp = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
        code.putln('%s = PyList_GET_SIZE(%s);' % (length_temp, target_list))
        code.putln("if (unlikely(%s < %d)) {" % (length_temp, len(unpacked_fixed_items_right)))
        code.putln("__Pyx_RaiseNeedMoreValuesError(%d+%s); %s" % (
            len(unpacked_fixed_items_left), length_temp,
            code.error_goto(self.pos)))
        code.putln('}')
        for item in unpacked_fixed_items_right[::-1]:
            item.allocate(code)
        for i, (item, coerced_arg) in enumerate(zip(unpacked_fixed_items_right[::-1],
                                                    self.coerced_unpacked_items[::-1])):
            code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
            code.putln("%s = PyList_GET_ITEM(%s, %s-%d); " % (
                item.py_result(), target_list, length_temp, i+1))
            # resize the list the hard way
            code.putln("((PyVarObject*)%s)->ob_size--;" % target_list)
            code.putln('#else')
            code.putln("%s = PySequence_ITEM(%s, %s-%d); " % (
                item.py_result(), target_list, length_temp, i+1))
            code.putln('#endif')
            code.put_gotref(item.py_result())
            coerced_arg.generate_evaluation_code(code)
        # non-CPython: the list was not resized above, so slice it down
        code.putln('#if !CYTHON_COMPILING_IN_CPYTHON')
        sublist_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
        code.putln('%s = PySequence_GetSlice(%s, 0, %s-%d); %s' % (
            sublist_temp, target_list, length_temp, len(unpacked_fixed_items_right),
            code.error_goto_if_null(sublist_temp, self.pos)))
        code.put_gotref(sublist_temp)
        code.funcstate.release_temp(length_temp)
        code.put_decref(target_list, py_object_type)
        code.putln('%s = %s; %s = NULL;' % (target_list, sublist_temp, sublist_temp))
        code.putln('#else')
        code.putln('%s = %s;' % (sublist_temp, sublist_temp))  # avoid warning about unused variable
        code.funcstate.release_temp(sublist_temp)
        code.putln('#endif')
    # Finally, assign each unpacked value to its target.
    for i, arg in enumerate(self.args):
        arg.generate_assignment_code(self.coerced_unpacked_items[i], code)
def annotate(self, code):
    """Annotate this sequence node and all of its child value nodes."""
    for child in self.args:
        child.annotate(code)
    if not self.unpacked_items:
        return
    for child in self.unpacked_items:
        child.annotate(code)
    for child in self.coerced_unpacked_items:
        child.annotate(code)
class TupleNode(SequenceNode):
    # Tuple constructor.
    #
    # Empty tuples and all-literal tuples are turned into cached global
    # constants; only tuples with runtime content (or a runtime
    # multiplication factor) are built at execution time.

    type = tuple_type
    # True when the tuple content is constant but the mult_factor is not
    is_partly_literal = False

    gil_message = "Constructing Python tuple"

    def analyse_types(self, env, skip_children=False):
        if len(self.args) == 0:
            # the empty tuple is a shared constant, no construction needed
            node = self
            node.is_temp = False
            node.is_literal = True
        else:
            node = SequenceNode.analyse_types(self, env, skip_children)
            for child in node.args:
                if not child.is_literal:
                    break
            else:
                # all arguments are literals => tuple can be a cached
                # constant, unless multiplied by a runtime factor
                if not node.mult_factor or node.mult_factor.is_literal and \
                       isinstance(node.mult_factor.constant_result, (int, long)):
                    node.is_temp = False
                    node.is_literal = True
                else:
                    if not node.mult_factor.type.is_pyobject:
                        node.mult_factor = node.mult_factor.coerce_to_pyobject(env)
                    node.is_temp = True
                    node.is_partly_literal = True
        return node

    def is_simple(self):
        # either temp or constant => always simple
        return True

    def nonlocally_immutable(self):
        # either temp or constant => always safe
        return True

    def calculate_result_code(self):
        if len(self.args) > 0:
            # result_code is set by generate_operation_code() for literals
            return self.result_code
        else:
            return Naming.empty_tuple

    def calculate_constant_result(self):
        self.constant_result = tuple([
            arg.constant_result for arg in self.args])

    def compile_time_value(self, denv):
        values = self.compile_time_value_list(denv)
        try:
            return tuple(values)
        except Exception, e:
            self.compile_time_value_error(e)

    def generate_operation_code(self, code):
        if len(self.args) == 0:
            # result_code is Naming.empty_tuple
            return
        if self.is_partly_literal:
            # underlying tuple is const, but factor is not
            tuple_target = code.get_py_const(py_object_type, 'tuple', cleanup_level=2)
            const_code = code.get_cached_constants_writer()
            const_code.mark_pos(self.pos)
            self.generate_sequence_packing_code(const_code, tuple_target, plain=True)
            const_code.put_giveref(tuple_target)
            code.putln('%s = PyNumber_Multiply(%s, %s); %s' % (
                self.result(), tuple_target, self.mult_factor.py_result(),
                code.error_goto_if_null(self.result(), self.pos)
            ))
            code.put_gotref(self.py_result())
        elif self.is_literal:
            # non-empty cached tuple => result is global constant,
            # creation code goes into separate code writer
            self.result_code = code.get_py_const(py_object_type, 'tuple', cleanup_level=2)
            code = code.get_cached_constants_writer()
            code.mark_pos(self.pos)
            self.generate_sequence_packing_code(code)
            code.put_giveref(self.py_result())
        else:
            self.generate_sequence_packing_code(code)
class ListNode(SequenceNode):
    """List constructor.

    Depending on the type the node is coerced to, the result may be a
    Python list, a C array or a struct value (see coerce_to()).

    obj_conversion_errors  [PyrexError]  used internally
    original_args          [ExprNode]    used internally
    """
    obj_conversion_errors = []
    type = list_type
    in_module_scope = False

    gil_message = "Constructing Python list"

    def type_dependencies(self, env):
        return ()

    def infer_type(self, env):
        # TODO: Infer non-object list arrays.
        return list_type

    def analyse_expressions(self, env):
        node = SequenceNode.analyse_expressions(self, env)
        return node.coerce_to_pyobject(env)

    def analyse_types(self, env):
        # Hold conversion errors: they only matter if the list actually
        # ends up being used as a Python object (see coerce_to()).
        hold_errors()
        self.original_args = list(self.args)
        node = SequenceNode.analyse_types(self, env)
        node.obj_conversion_errors = held_errors()
        release_errors(ignore=True)
        if env.is_module_scope:
            self.in_module_scope = True
        return node

    def coerce_to(self, dst_type, env):
        if dst_type.is_pyobject:
            # now the held conversion errors become real
            for err in self.obj_conversion_errors:
                report_error(err)
            self.obj_conversion_errors = []
            if not self.type.subtype_of(dst_type):
                error(self.pos, "Cannot coerce list to type '%s'" % dst_type)
        elif self.mult_factor:
            error(self.pos, "Cannot coerce multiplied list to '%s'" % dst_type)
        elif dst_type.is_ptr and dst_type.base_type is not PyrexTypes.c_void_type:
            # reinterpret the literal as a C array of the pointer's base type
            base_type = dst_type.base_type
            self.type = PyrexTypes.CArrayType(base_type, len(self.args))
            for i in range(len(self.original_args)):
                arg = self.args[i]
                if isinstance(arg, CoerceToPyTypeNode):
                    arg = arg.arg
                self.args[i] = arg.coerce_to(base_type, env)
        elif dst_type.is_struct:
            # interpret the list items as consecutive struct members
            if len(self.args) > len(dst_type.scope.var_entries):
                # fixed typo in the error message ("Too may" -> "Too many")
                error(self.pos, "Too many members for '%s'" % dst_type)
            else:
                if len(self.args) < len(dst_type.scope.var_entries):
                    warning(self.pos, "Too few members for '%s'" % dst_type, 1)
                for i, (arg, member) in enumerate(zip(self.original_args, dst_type.scope.var_entries)):
                    if isinstance(arg, CoerceToPyTypeNode):
                        arg = arg.arg
                    self.args[i] = arg.coerce_to(member.type, env)
            self.type = dst_type
        else:
            self.type = error_type
            error(self.pos, "Cannot coerce list to type '%s'" % dst_type)
        return self

    def as_tuple(self):
        """Return an equivalent TupleNode (used by optimisations)."""
        t = TupleNode(self.pos, args=self.args, mult_factor=self.mult_factor)
        if isinstance(self.constant_result, list):
            t.constant_result = tuple(self.constant_result)
        return t

    def allocate_temp_result(self, code):
        if self.type.is_array and self.in_module_scope:
            # module-level C array literals need a static temp
            self.temp_code = code.funcstate.allocate_temp(
                self.type, manage_ref=False, static=True)
        else:
            SequenceNode.allocate_temp_result(self, code)

    def release_temp_result(self, env):
        if self.type.is_array:
            # To be valid C++, we must allocate the memory on the stack
            # manually and be sure not to reuse it for something else.
            pass
        else:
            SequenceNode.release_temp_result(self, env)

    def calculate_constant_result(self):
        if self.mult_factor:
            raise ValueError()  # may exceed the compile time memory
        self.constant_result = [
            arg.constant_result for arg in self.args]

    def compile_time_value(self, denv):
        l = self.compile_time_value_list(denv)
        if self.mult_factor:
            l *= self.mult_factor.compile_time_value(denv)
        return l

    def generate_operation_code(self, code):
        if self.type.is_pyobject:
            for err in self.obj_conversion_errors:
                report_error(err)
            self.generate_sequence_packing_code(code)
        elif self.type.is_array:
            # fill the C array element by element
            for i, arg in enumerate(self.args):
                code.putln("%s[%s] = %s;" % (
                    self.result(),
                    i,
                    arg.result()))
        elif self.type.is_struct:
            # assign each struct member in declaration order
            for arg, member in zip(self.args, self.type.scope.var_entries):
                code.putln("%s.%s = %s;" % (
                    self.result(),
                    member.cname,
                    arg.result()))
        else:
            raise InternalError("List type never specified")
class ScopedExprNode(ExprNode):
    # Abstract base class for ExprNodes that have their own local
    # scope, such as generator expressions.
    #
    # expr_scope    Scope   the inner scope of the expression

    subexprs = []
    expr_scope = None

    # does this node really have a local scope, e.g. does it leak loop
    # variables or not?  non-leaking Py3 behaviour is default, except
    # for list comprehensions where the behaviour differs in Py2 and
    # Py3 (set in Parsing.py based on parser context)
    has_local_scope = True

    def init_scope(self, outer_scope, expr_scope=None):
        # Attach an explicit scope, create a fresh generator-expression
        # scope, or run without one, depending on has_local_scope.
        if expr_scope is not None:
            self.expr_scope = expr_scope
        elif self.has_local_scope:
            self.expr_scope = Symtab.GeneratorExpressionScope(outer_scope)
        else:
            self.expr_scope = None

    def analyse_declarations(self, env):
        self.init_scope(env)

    def analyse_scoped_declarations(self, env):
        # this is called with the expr_scope as env
        pass

    def analyse_types(self, env):
        # no recursion here, the children will be analysed separately below
        return self

    def analyse_scoped_expressions(self, env):
        # this is called with the expr_scope as env
        return self

    def generate_evaluation_code(self, code):
        # set up local variables and free their references on exit
        generate_inner_evaluation_code = super(ScopedExprNode, self).generate_evaluation_code
        if not self.has_local_scope or not self.expr_scope.var_entries:
            # no local variables => delegate, done
            generate_inner_evaluation_code(code)
            return

        code.putln('{ /* enter inner scope */')
        py_entries = []
        for entry in self.expr_scope.var_entries:
            if not entry.in_closure:
                code.put_var_declaration(entry)
                if entry.type.is_pyobject and entry.used:
                    py_entries.append(entry)
        if not py_entries:
            # no local Python references => no cleanup required
            generate_inner_evaluation_code(code)
            code.putln('} /* exit inner scope */')
            return

        # must free all local Python references at each exit point:
        # temporarily redirect the error and loop labels so every
        # jump out of the scope passes through the decref block below.
        old_loop_labels = tuple(code.new_loop_labels())
        old_error_label = code.new_error_label()

        generate_inner_evaluation_code(code)

        # normal (non-error) exit
        for entry in py_entries:
            code.put_var_decref(entry)

        # error/loop body exit points
        exit_scope = code.new_label('exit_scope')
        code.put_goto(exit_scope)
        for label, old_label in ([(code.error_label, old_error_label)] +
                                 list(zip(code.get_loop_labels(), old_loop_labels))):
            if code.label_used(label):
                code.put_label(label)
                for entry in py_entries:
                    code.put_var_decref(entry)
                code.put_goto(old_label)
        code.put_label(exit_scope)
        code.putln('} /* exit inner scope */')

        # restore the original labels
        code.set_loop_labels(old_loop_labels)
        code.error_label = old_error_label
class ComprehensionNode(ScopedExprNode):
    # A list/set/dict comprehension
    #
    # The loop body appends into self.result() via an append node whose
    # 'target' attribute is wired up in analyse_declarations().

    child_attrs = ["loop"]

    is_temp = True

    def infer_type(self, env):
        return self.type

    def analyse_declarations(self, env):
        self.append.target = self  # this is used in the PyList_Append of the inner loop
        self.init_scope(env)

    def analyse_scoped_declarations(self, env):
        self.loop.analyse_declarations(env)

    def analyse_types(self, env):
        if not self.has_local_scope:
            self.loop = self.loop.analyse_expressions(env)
        return self

    def analyse_scoped_expressions(self, env):
        if self.has_local_scope:
            self.loop = self.loop.analyse_expressions(env)
        return self

    def may_be_none(self):
        return False

    def generate_result_code(self, code):
        self.generate_operation_code(code)

    def generate_operation_code(self, code):
        # create the empty result container, then run the loop that fills it
        if self.type is Builtin.list_type:
            create_code = 'PyList_New(0)'
        elif self.type is Builtin.set_type:
            create_code = 'PySet_New(NULL)'
        elif self.type is Builtin.dict_type:
            create_code = 'PyDict_New()'
        else:
            raise InternalError("illegal type for comprehension: %s" % self.type)
        code.putln('%s = %s; %s' % (
            self.result(), create_code,
            code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.result())
        self.loop.generate_execution_code(code)

    def annotate(self, code):
        self.loop.annotate(code)
class ComprehensionAppendNode(Node):
    # Appends the computed value to a list/set comprehension's result.
    #
    # Need to be careful to avoid infinite recursion:
    # target must not be in child_attrs/subexprs

    child_attrs = ['expr']
    target = None  # set by ComprehensionNode.analyse_declarations()

    type = PyrexTypes.c_int_type

    def analyse_expressions(self, env):
        self.expr = self.expr.analyse_expressions(env)
        if not self.expr.type.is_pyobject:
            self.expr = self.expr.coerce_to_pyobject(env)
        return self

    def generate_execution_code(self, code):
        # pick the append function matching the comprehension's container
        if self.target.type is list_type:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("ListCompAppend", "Optimize.c"))
            function = "__Pyx_ListComp_Append"
        elif self.target.type is set_type:
            function = "PySet_Add"
        else:
            raise InternalError(
                "Invalid type for comprehension node: %s" % self.target.type)

        self.expr.generate_evaluation_code(code)
        code.putln(code.error_goto_if("%s(%s, (PyObject*)%s)" % (
            function,
            self.target.result(),
            self.expr.result()
            ), self.pos))
        self.expr.generate_disposal_code(code)
        self.expr.free_temps(code)

    def generate_function_definitions(self, env, code):
        self.expr.generate_function_definitions(env, code)

    def annotate(self, code):
        self.expr.annotate(code)
class DictComprehensionAppendNode(ComprehensionAppendNode):
    """Stores one key/value pair into a dict comprehension's result dict."""
    child_attrs = ['key_expr', 'value_expr']

    def analyse_expressions(self, env):
        # Analyse and coerce the key first, then the value, preserving
        # the order in which errors are reported.
        for attr in ('key_expr', 'value_expr'):
            expr = getattr(self, attr).analyse_expressions(env)
            if not expr.type.is_pyobject:
                expr = expr.coerce_to_pyobject(env)
            setattr(self, attr, expr)
        return self

    def generate_execution_code(self, code):
        self.key_expr.generate_evaluation_code(code)
        self.value_expr.generate_evaluation_code(code)
        setitem_call = "PyDict_SetItem(%s, (PyObject*)%s, (PyObject*)%s)" % (
            self.target.result(),
            self.key_expr.result(),
            self.value_expr.result()
            )
        code.putln(code.error_goto_if(setitem_call, self.pos))
        for expr in (self.key_expr, self.value_expr):
            expr.generate_disposal_code(code)
            expr.free_temps(code)

    def generate_function_definitions(self, env, code):
        for expr in (self.key_expr, self.value_expr):
            expr.generate_function_definitions(env, code)

    def annotate(self, code):
        for expr in (self.key_expr, self.value_expr):
            expr.annotate(code)
class InlinedGeneratorExpressionNode(ScopedExprNode):
    # An inlined generator expression for which the result is
    # calculated inside of the loop.  This will only be created by
    # transforms when replacing builtin calls on generator
    # expressions.
    #
    # loop           ForStatNode      the for-loop, not containing any YieldExprNodes
    # result_node    ResultRefNode    the reference to the result value temp
    # orig_func      String           the name of the builtin function this node replaces

    child_attrs = ["loop"]
    loop_analysed = False
    type = py_object_type

    def analyse_scoped_declarations(self, env):
        self.loop.analyse_declarations(env)

    def may_be_none(self):
        return False

    def annotate(self, code):
        self.loop.annotate(code)

    def infer_type(self, env):
        return self.result_node.infer_type(env)

    def analyse_types(self, env):
        if not self.has_local_scope:
            self.loop_analysed = True
            self.loop = self.loop.analyse_expressions(env)
        self.type = self.result_node.type
        self.is_temp = True
        return self

    def analyse_scoped_expressions(self, env):
        self.loop_analysed = True
        if self.has_local_scope:
            self.loop = self.loop.analyse_expressions(env)
        return self

    def coerce_to(self, dst_type, env):
        if self.orig_func == 'sum' and dst_type.is_numeric and not self.loop_analysed:
            # We can optimise by dropping the aggregation variable and
            # the add operations into C.  This can only be done safely
            # before analysing the loop body, after that, the result
            # reference type will have infected expressions and
            # assignments.
            self.result_node.type = self.type = dst_type
            return self
        return super(InlinedGeneratorExpressionNode, self).coerce_to(dst_type, env)

    def generate_result_code(self, code):
        # the loop writes directly into this node's result temp
        self.result_node.result_code = self.result()
        self.loop.generate_execution_code(code)
class SetNode(ExprNode):
    # Set constructor.

    type = set_type

    subexprs = ['args']

    gil_message = "Constructing Python set"

    def analyse_types(self, env):
        for i in range(len(self.args)):
            arg = self.args[i]
            arg = arg.analyse_types(env)
            self.args[i] = arg.coerce_to_pyobject(env)
        self.type = set_type
        self.is_temp = 1
        return self

    def may_be_none(self):
        return False

    def calculate_constant_result(self):
        self.constant_result = set([
            arg.constant_result for arg in self.args])

    def compile_time_value(self, denv):
        values = [arg.compile_time_value(denv) for arg in self.args]
        try:
            return set(values)
        except Exception, e:
            self.compile_time_value_error(e)

    def generate_evaluation_code(self, code):
        # Custom evaluation: create the empty set first, then evaluate
        # and add the elements one at a time, disposing of each in turn.
        code.globalstate.use_utility_code(Builtin.py_set_utility_code)
        self.allocate_temp_result(code)
        code.putln(
            "%s = PySet_New(0); %s" % (
                self.result(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
        for arg in self.args:
            arg.generate_evaluation_code(code)
            code.put_error_if_neg(
                self.pos,
                "PySet_Add(%s, %s)" % (self.result(), arg.py_result()))
            arg.generate_disposal_code(code)
            arg.free_temps(code)
class DictNode(ExprNode):
    # Dictionary constructor.
    #
    # key_value_pairs         [DictItemNode]
    # exclude_null_values     [boolean]        Do not add NULL values to dict
    #
    # obj_conversion_errors   [PyrexError]     used internally

    subexprs = ['key_value_pairs']
    is_temp = 1
    exclude_null_values = False
    type = dict_type

    obj_conversion_errors = []

    @classmethod
    def from_pairs(cls, pos, pairs):
        # Convenience constructor from (key, value) expression pairs.
        return cls(pos, key_value_pairs=[
            DictItemNode(pos, key=k, value=v) for k, v in pairs])

    def calculate_constant_result(self):
        self.constant_result = dict([
            item.constant_result for item in self.key_value_pairs])

    def compile_time_value(self, denv):
        pairs = [(item.key.compile_time_value(denv), item.value.compile_time_value(denv))
            for item in self.key_value_pairs]
        try:
            return dict(pairs)
        except Exception, e:
            self.compile_time_value_error(e)

    def type_dependencies(self, env):
        return ()

    def infer_type(self, env):
        # TODO: Infer struct constructors.
        return dict_type

    def analyse_types(self, env):
        # Hold conversion errors: they only matter if the dict is
        # actually used as a Python object (see coerce_to()).
        hold_errors()
        self.key_value_pairs = [ item.analyse_types(env)
                                 for item in self.key_value_pairs ]
        self.obj_conversion_errors = held_errors()
        release_errors(ignore=True)
        return self

    def may_be_none(self):
        return False

    def coerce_to(self, dst_type, env):
        if dst_type.is_pyobject:
            self.release_errors()
            if not self.type.subtype_of(dst_type):
                error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
        elif dst_type.is_struct_or_union:
            # interpret the dict literal as a struct/union value with
            # the keys naming the members
            self.type = dst_type
            if not dst_type.is_struct and len(self.key_value_pairs) != 1:
                error(self.pos, "Exactly one field must be specified to convert to union '%s'" % dst_type)
            elif dst_type.is_struct and len(self.key_value_pairs) < len(dst_type.scope.var_entries):
                warning(self.pos, "Not all members given for struct '%s'" % dst_type, 1)
            for item in self.key_value_pairs:
                if isinstance(item.key, CoerceToPyTypeNode):
                    item.key = item.key.arg
                if not item.key.is_string_literal:
                    error(item.key.pos, "Invalid struct field identifier")
                    item.key = StringNode(item.key.pos, value="<error>")
                else:
                    key = str(item.key.value)  # converts string literals to unicode in Py3
                    member = dst_type.scope.lookup_here(key)
                    if not member:
                        error(item.key.pos, "struct '%s' has no field '%s'" % (dst_type, key))
                    else:
                        value = item.value
                        if isinstance(value, CoerceToPyTypeNode):
                            value = value.arg
                        item.value = value.coerce_to(member.type, env)
        else:
            self.type = error_type
            error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
        return self

    def release_errors(self):
        # report the conversion errors held during analyse_types()
        for err in self.obj_conversion_errors:
            report_error(err)
        self.obj_conversion_errors = []

    gil_message = "Constructing Python dict"

    def generate_evaluation_code(self, code):
        # Custom method used here because key-value
        # pairs are evaluated and used one at a time.
        code.mark_pos(self.pos)
        self.allocate_temp_result(code)
        if self.type.is_pyobject:
            self.release_errors()
            code.putln(
                "%s = PyDict_New(); %s" % (
                    self.result(),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        for item in self.key_value_pairs:
            item.generate_evaluation_code(code)
            if self.type.is_pyobject:
                if self.exclude_null_values:
                    code.putln('if (%s) {' % item.value.py_result())
                code.put_error_if_neg(self.pos,
                    "PyDict_SetItem(%s, %s, %s)" % (
                        self.result(),
                        item.key.py_result(),
                        item.value.py_result()))
                if self.exclude_null_values:
                    code.putln('}')
            else:
                # struct/union target: assign the member directly
                code.putln("%s.%s = %s;" % (
                        self.result(),
                        item.key.value,
                        item.value.result()))
            item.generate_disposal_code(code)
            item.free_temps(code)

    def annotate(self, code):
        for item in self.key_value_pairs:
            item.annotate(code)
class DictItemNode(ExprNode):
    """A single key/value pair inside a DictNode."""
    subexprs = ['key', 'value']
    nogil_check = None  # Parent DictNode takes care of it

    def calculate_constant_result(self):
        key_const = self.key.constant_result
        value_const = self.value.constant_result
        self.constant_result = (key_const, value_const)

    def analyse_types(self, env):
        # analyse both children first, then coerce both to Python objects
        self.key = self.key.analyse_types(env)
        self.value = self.value.analyse_types(env)
        self.key = self.key.coerce_to_pyobject(env)
        self.value = self.value.coerce_to_pyobject(env)
        return self

    def generate_evaluation_code(self, code):
        for subexpr in (self.key, self.value):
            subexpr.generate_evaluation_code(code)

    def generate_disposal_code(self, code):
        for subexpr in (self.key, self.value):
            subexpr.generate_disposal_code(code)

    def free_temps(self, code):
        for subexpr in (self.key, self.value):
            subexpr.free_temps(code)

    def __iter__(self):
        yield self.key
        yield self.value
class SortedDictKeysNode(ExprNode):
    """Builds a sorted list of a dict/mapping's keys, e.g. for dir()."""
    subexprs = ['arg']
    is_temp = True

    def __init__(self, arg):
        ExprNode.__init__(self, arg.pos, arg=arg)
        self.type = Builtin.list_type

    def analyse_types(self, env):
        analysed = self.arg.analyse_types(env)
        if analysed.type is Builtin.dict_type:
            # real dicts get an explicit None check before PyDict_Keys()
            analysed = analysed.as_none_safe_node(
                "'NoneType' object is not iterable")
        self.arg = analysed
        return self

    def may_be_none(self):
        return False

    def generate_result_code(self, code):
        keys_func = ('PyDict_Keys'
                     if self.arg.type is Builtin.dict_type
                     else 'PyMapping_Keys')
        code.putln('%s = %s(%s); %s' % (
            self.result(), keys_func, self.arg.py_result(),
            code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
        code.put_error_if_neg(
            self.pos, 'PyList_Sort(%s)' % self.py_result())
class ModuleNameMixin(object):
    """Mixin providing interned Python string constants for a node's
    defining-module name and qualified name."""

    def get_py_mod_name(self, code):
        # module name as an interned identifier string constant
        return code.get_py_string_const(self.module_name, identifier=True)

    def get_py_qualified_name(self, code):
        # qualified name (e.g. "Outer.Inner") as an interned identifier
        return code.get_py_string_const(self.qualname, identifier=True)
class ClassNode(ExprNode, ModuleNameMixin):
    # Helper class used in the implementation of Python
    # class definitions.  Constructs a class object given
    # a name, tuple of bases and class dictionary.
    #
    # name         EncodedString      Name of the class
    # bases        ExprNode           Base class tuple
    # dict         ExprNode           Class dict (not owned by this node)
    # doc          ExprNode or None   Doc string
    # module_name  EncodedString      Name of defining module

    subexprs = ['bases', 'doc']

    def analyse_types(self, env):
        self.bases = self.bases.analyse_types(env)
        if self.doc:
            self.doc = self.doc.analyse_types(env)
            self.doc = self.doc.coerce_to_pyobject(env)
        self.type = py_object_type
        self.is_temp = 1
        env.use_utility_code(UtilityCode.load_cached("CreateClass", "ObjectHandling.c"))
        return self

    def may_be_none(self):
        return True

    gil_message = "Constructing Python class"

    def generate_result_code(self, code):
        cname = code.intern_identifier(self.name)

        # store the docstring into the class dict before creating the class
        if self.doc:
            code.put_error_if_neg(self.pos,
                'PyDict_SetItem(%s, %s, %s)' % (
                    self.dict.py_result(),
                    code.intern_identifier(
                        StringEncoding.EncodedString("__doc__")),
                    self.doc.py_result()))
        py_mod_name = self.get_py_mod_name(code)
        qualname = self.get_py_qualified_name(code)
        code.putln(
            '%s = __Pyx_CreateClass(%s, %s, %s, %s, %s); %s' % (
                self.result(),
                self.bases.py_result(),
                self.dict.py_result(),
                cname,
                qualname,
                py_mod_name,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class Py3ClassNode(ExprNode):
    # Helper class used in the implementation of Python3+
    # class definitions. Constructs a class object given
    # a name, tuple of bases and class dictionary.
    #
    # name EncodedString Name of the class
    # dict ExprNode Class dict (not owned by this node)
    # module_name EncodedString Name of defining module
    # calculate_metaclass bool should call CalculateMetaclass()
    # allow_py2_metaclass bool should look for Py2 metaclass
    subexprs = []
    def analyse_types(self, env):
        # Subexpressions (bases, dict, mkw, metaclass) are owned and
        # analysed elsewhere; only set up our own result here.
        self.type = py_object_type
        self.is_temp = 1
        return self
    def may_be_none(self):
        # Conservative: do not assume the created object is non-None.
        return True
    gil_message = "Constructing Python class"
    def generate_result_code(self, code):
        code.globalstate.use_utility_code(UtilityCode.load_cached("Py3ClassCreate", "ObjectHandling.c"))
        cname = code.intern_identifier(self.name)
        # Class keyword arguments and metaclass are both optional.
        if self.mkw:
            mkw = self.mkw.py_result()
        else:
            mkw = 'NULL'
        if self.metaclass:
            metaclass = self.metaclass.result()
        else:
            # No explicit metaclass: let the runtime use the default.
            metaclass = "((PyObject*)&__Pyx_DefaultClassType)"
        code.putln(
            '%s = __Pyx_Py3ClassCreate(%s, %s, %s, %s, %s, %d, %d); %s' % (
                self.result(),
                metaclass,
                cname,
                self.bases.py_result(),
                self.dict.py_result(),
                mkw,
                self.calculate_metaclass,
                self.allow_py2_metaclass,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class KeywordArgsNode(ExprNode):
    # Helper class for keyword arguments that merges an explicit **arg
    # mapping with additional keyword items into one dict, rejecting
    # duplicate keys.
    #
    # starstar_arg      DictNode
    # keyword_args      [DictItemNode]

    subexprs = ['starstar_arg', 'keyword_args']
    is_temp = 1
    type = dict_type

    def calculate_constant_result(self):
        # Merge at compile time; a duplicate key is a hard error.
        result = dict(self.starstar_arg.constant_result)
        for item in self.keyword_args:
            key, value = item.constant_result
            if key in result:
                raise ValueError("duplicate keyword argument found: %s" % key)
            result[key] = value
        self.constant_result = result

    def compile_time_value(self, denv):
        result = self.starstar_arg.compile_time_value(denv)
        pairs = [(item.key.compile_time_value(denv), item.value.compile_time_value(denv))
                 for item in self.keyword_args]
        try:
            result = dict(result)
            for key, value in pairs:
                if key in result:
                    raise ValueError("duplicate keyword argument found: %s" % key)
                result[key] = value
        except Exception as e:  # fixed: was Py2-only "except Exception, e"
            self.compile_time_value_error(e)
        return result

    def type_dependencies(self, env):
        return ()

    def infer_type(self, env):
        return dict_type

    def analyse_types(self, env):
        arg = self.starstar_arg.analyse_types(env)
        arg = arg.coerce_to_pyobject(env)
        self.starstar_arg = arg.as_none_safe_node(
            # FIXME: CPython's error message starts with the runtime function name
            'argument after ** must be a mapping, not NoneType')
        self.keyword_args = [item.analyse_types(env)
                             for item in self.keyword_args]
        return self

    def may_be_none(self):
        # The result is always a freshly created (or copied) dict.
        return False

    gil_message = "Constructing Python dict"

    def generate_evaluation_code(self, code):
        code.mark_pos(self.pos)
        self.allocate_temp_result(code)
        self.starstar_arg.generate_evaluation_code(code)
        if self.starstar_arg.type is not Builtin.dict_type:
            # CPython supports calling functions with non-dicts, so do we
            code.putln('if (likely(PyDict_Check(%s))) {' %
                       self.starstar_arg.py_result())
        if self.keyword_args:
            # Copy, since we will be inserting the extra keyword items.
            code.putln(
                "%s = PyDict_Copy(%s); %s" % (
                    self.result(),
                    self.starstar_arg.py_result(),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        else:
            # No extra items: reuse the **arg dict directly.
            code.putln("%s = %s;" % (
                self.result(),
                self.starstar_arg.py_result()))
            code.put_incref(self.result(), py_object_type)
        if self.starstar_arg.type is not Builtin.dict_type:
            # Non-dict mapping: convert it with dict(arg).
            code.putln('} else {')
            code.putln(
                "%s = PyObject_CallFunctionObjArgs("
                "(PyObject*)&PyDict_Type, %s, NULL); %s" % (
                    self.result(),
                    self.starstar_arg.py_result(),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
            code.putln('}')
        self.starstar_arg.generate_disposal_code(code)
        self.starstar_arg.free_temps(code)
        if not self.keyword_args:
            return
        code.globalstate.use_utility_code(
            UtilityCode.load_cached("RaiseDoubleKeywords", "FunctionArguments.c"))
        for item in self.keyword_args:
            item.generate_evaluation_code(code)
            # Raise TypeError on duplicate keys, like CPython does.
            code.putln("if (unlikely(PyDict_GetItem(%s, %s))) {" % (
                self.result(),
                item.key.py_result()))
            # FIXME: find out function name at runtime!
            code.putln('__Pyx_RaiseDoubleKeywordsError("function", %s); %s' % (
                item.key.py_result(),
                code.error_goto(self.pos)))
            code.putln("}")
            code.put_error_if_neg(self.pos,
                "PyDict_SetItem(%s, %s, %s)" % (
                    self.result(),
                    item.key.py_result(),
                    item.value.py_result()))
            item.generate_disposal_code(code)
            item.free_temps(code)

    def annotate(self, code):
        self.starstar_arg.annotate(code)
        for item in self.keyword_args:
            item.annotate(code)
class PyClassMetaclassNode(ExprNode):
    # Helper class holds Python3 metaclass object
    #
    # bases ExprNode Base class tuple (not owned by this node)
    # mkw ExprNode Class keyword arguments (not owned by this node)
    subexprs = []
    def analyse_types(self, env):
        self.type = py_object_type
        self.is_temp = True
        return self
    def may_be_none(self):
        # Conservative: the metaclass lookup result is not guaranteed
        # non-None here.
        return True
    def generate_result_code(self, code):
        # With class keyword arguments, the metaclass may be passed
        # explicitly (metaclass=...); otherwise it is calculated from
        # the bases alone.
        if self.mkw:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("Py3MetaclassGet", "ObjectHandling.c"))
            call = "__Pyx_Py3MetaclassGet(%s, %s)" % (
                self.bases.result(),
                self.mkw.result())
        else:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("CalculateMetaclass", "ObjectHandling.c"))
            call = "__Pyx_CalculateMetaclass(NULL, %s)" % (
                self.bases.result())
        code.putln(
            "%s = %s; %s" % (
                self.result(), call,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class PyClassNamespaceNode(ExprNode, ModuleNameMixin):
    # Helper class holds Python3 namespace object
    #
    # All this are not owned by this node
    # metaclass ExprNode Metaclass object
    # bases ExprNode Base class tuple
    # mkw ExprNode Class keyword arguments
    # doc ExprNode or None Doc string (owned)
    subexprs = ['doc']
    def analyse_types(self, env):
        if self.doc:
            self.doc = self.doc.analyse_types(env)
            # The docstring ends up in the namespace mapping, so it must
            # be a Python object.
            self.doc = self.doc.coerce_to_pyobject(env)
        self.type = py_object_type
        self.is_temp = 1
        return self
    def may_be_none(self):
        # Conservative: do not assume the prepared namespace is non-None.
        return True
    def generate_result_code(self, code):
        cname = code.intern_identifier(self.name)
        py_mod_name = self.get_py_mod_name(code)
        qualname = self.get_py_qualified_name(code)
        # Optional parts default to NULL in the C call below.
        if self.doc:
            doc_code = self.doc.result()
        else:
            doc_code = '(PyObject *) NULL'
        if self.mkw:
            mkw = self.mkw.py_result()
        else:
            mkw = '(PyObject *) NULL'
        if self.metaclass:
            metaclass = self.metaclass.result()
        else:
            metaclass = "(PyObject *) NULL"
        # __Pyx_Py3MetaclassPrepare() implements the PEP 3115
        # __prepare__() protocol for the class body namespace.
        code.putln(
            "%s = __Pyx_Py3MetaclassPrepare(%s, %s, %s, %s, %s, %s, %s); %s" % (
                self.result(),
                metaclass,
                self.bases.result(),
                cname,
                qualname,
                mkw,
                py_mod_name,
                doc_code,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class ClassCellInjectorNode(ExprNode):
    # Initialize CyFunction.func_classobj
    # Collects the functions defined in a class body into a list so that
    # their __class__ cell can be filled in once the class object exists.
    is_temp = True
    type = py_object_type
    subexprs = []
    is_active = False
    def analyse_expressions(self, env):
        if self.is_active:
            env.use_utility_code(
                UtilityCode.load_cached("CyFunctionClassCell", "CythonFunction.c"))
        return self
    def generate_evaluation_code(self, code):
        # Only emits code when activated (i.e. some method needs the
        # class cell); the list starts out empty.
        if self.is_active:
            self.allocate_temp_result(code)
            code.putln(
                '%s = PyList_New(0); %s' % (
                    self.result(),
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.result())
    def generate_injection_code(self, code, classobj_cname):
        # Called after class creation to patch the collected functions'
        # class cells with the finished class object.
        if self.is_active:
            code.putln('__Pyx_CyFunction_InitClassCell(%s, %s);' % (
                self.result(), classobj_cname))
class ClassCellNode(ExprNode):
    # Class Cell for noargs super()
    # Loads the __class__ cell of the current function (or generator) so
    # that argument-less super() can find the defining class.
    subexprs = []
    is_temp = True
    is_generator = False
    type = py_object_type
    def analyse_types(self, env):
        return self
    def generate_result_code(self, code):
        # Generators keep the class object on the generator struct;
        # plain functions get it from the CyFunction self object.
        if not self.is_generator:
            code.putln('%s = __Pyx_CyFunction_GetClassObj(%s);' % (
                self.result(),
                Naming.self_cname))
        else:
            code.putln('%s = %s->classobj;' % (
                self.result(), Naming.generator_cname))
        # Mirrors CPython's runtime error for an unfilled __class__ cell.
        code.putln(
            'if (!%s) { PyErr_SetString(PyExc_SystemError, '
            '"super(): empty __class__ cell"); %s }' % (
                self.result(),
                code.error_goto(self.pos)))
        code.put_incref(self.result(), py_object_type)
class BoundMethodNode(ExprNode):
    # Helper class used in the implementation of Python
    # class definitions. Constructs a bound method
    # object from a class and a function.
    #
    # function ExprNode Function object
    # self_object ExprNode self object
    subexprs = ['function']
    def analyse_types(self, env):
        self.function = self.function.analyse_types(env)
        self.type = py_object_type
        self.is_temp = 1
        return self
    gil_message = "Constructing a bound method"
    def generate_result_code(self, code):
        # PyMethod_New(func, self, class): the class argument is taken
        # from the runtime type of the self object.
        code.putln(
            "%s = PyMethod_New(%s, %s, (PyObject*)%s->ob_type); %s" % (
                self.result(),
                self.function.py_result(),
                self.self_object.py_result(),
                self.self_object.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class UnboundMethodNode(ExprNode):
    # Helper class used in the implementation of Python
    # class definitions. Constructs an unbound method
    # object from a class and a function.
    #
    # function ExprNode Function object
    type = py_object_type
    is_temp = 1
    subexprs = ['function']
    def analyse_types(self, env):
        self.function = self.function.analyse_types(env)
        return self
    def may_be_none(self):
        # PyMethod_New() returns a method object or raises.
        return False
    gil_message = "Constructing an unbound method"
    def generate_result_code(self, code):
        # The class being defined is found on top of the code writer's
        # pyclass stack (we are inside its class body).
        class_cname = code.pyclass_stack[-1].classobj.result()
        code.putln(
            "%s = PyMethod_New(%s, 0, %s); %s" % (
                self.result(),
                self.function.py_result(),
                class_cname,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class PyCFunctionNode(ExprNode, ModuleNameMixin):
    # Helper class used in the implementation of Python
    # functions. Constructs a PyCFunction object
    # from a PyMethodDef struct.
    #
    # pymethdef_cname string PyMethodDef structure
    # self_object ExprNode or None
    # binding bool
    # def_node DefNode the Python function node
    # module_name EncodedString Name of defining module
    # code_object CodeObjectNode the PyCodeObject creator node
    subexprs = ['code_object', 'defaults_tuple', 'defaults_kwdict',
                'annotations_dict']
    self_object = None
    code_object = None
    binding = False
    def_node = None
    defaults = None
    defaults_struct = None
    defaults_pyobjects = 0
    defaults_tuple = None
    defaults_kwdict = None
    annotations_dict = None
    type = py_object_type
    is_temp = 1
    specialized_cpdefs = None
    is_specialization = False
    @classmethod
    def from_defnode(cls, node, binding):
        # Factory: wrap a DefNode; fused/specialised cpdef functions
        # always need binding semantics.
        return cls(node.pos,
                   def_node=node,
                   pymethdef_cname=node.entry.pymethdef_cname,
                   binding=binding or node.specialized_cpdefs,
                   specialized_cpdefs=node.specialized_cpdefs,
                   code_object=CodeObjectNode(node))
    def analyse_types(self, env):
        # Non-literal defaults/annotations only matter for binding
        # (CyFunction) functions.
        if self.binding:
            self.analyse_default_args(env)
        return self
    def analyse_default_args(self, env):
        """
        Handle non-literal function's default arguments.

        Splits the defaults into literal vs. non-literal and positional
        vs. keyword-only, declares a C struct to hold the non-literal
        values, and builds the nodes for __defaults__, __kwdefaults__
        and __annotations__.
        """
        nonliteral_objects = []
        nonliteral_other = []
        default_args = []
        default_kwargs = []
        annotations = []
        for arg in self.def_node.args:
            if arg.default:
                if not arg.default.is_literal:
                    # Non-literal default: evaluated at function creation
                    # time and stored in the defaults struct.
                    arg.is_dynamic = True
                    if arg.type.is_pyobject:
                        nonliteral_objects.append(arg)
                    else:
                        nonliteral_other.append(arg)
                else:
                    # Literal default: evaluated once, shared directly.
                    arg.default = DefaultLiteralArgNode(arg.pos, arg.default)
                if arg.kw_only:
                    default_kwargs.append(arg)
                else:
                    default_args.append(arg)
            if arg.annotation:
                # Annotations must be Python objects for __annotations__.
                arg.annotation = arg.annotation.analyse_types(env)
                if not arg.annotation.type.is_pyobject:
                    arg.annotation = arg.annotation.coerce_to_pyobject(env)
                annotations.append((arg.pos, arg.name, arg.annotation))
        if self.def_node.return_type_annotation:
            annotations.append((self.def_node.return_type_annotation.pos,
                                StringEncoding.EncodedString("return"),
                                self.def_node.return_type_annotation))
        if nonliteral_objects or nonliteral_other:
            # Declare a module-level struct type that stores all
            # non-literal default values for this function.
            module_scope = env.global_scope()
            cname = module_scope.next_id(Naming.defaults_struct_prefix)
            scope = Symtab.StructOrUnionScope(cname)
            self.defaults = []
            for arg in nonliteral_objects:
                entry = scope.declare_var(arg.name, arg.type, None,
                                          Naming.arg_prefix + arg.name,
                                          allow_pyobject=True)
                self.defaults.append((arg, entry))
            for arg in nonliteral_other:
                entry = scope.declare_var(arg.name, arg.type, None,
                                          Naming.arg_prefix + arg.name,
                                          allow_pyobject=False)
                self.defaults.append((arg, entry))
            entry = module_scope.declare_struct_or_union(
                None, 'struct', scope, 1, None, cname=cname)
            self.defaults_struct = scope
            self.defaults_pyobjects = len(nonliteral_objects)
            for arg, entry in self.defaults:
                # The argument's default value is read out of the struct
                # at call time.
                arg.default_value = '%s->%s' % (
                    Naming.dynamic_args_cname, entry.cname)
            self.def_node.defaults_struct = self.defaults_struct.name
        if default_args or default_kwargs:
            if self.defaults_struct is None:
                # All defaults are literal: plain tuple/dict nodes do.
                if default_args:
                    defaults_tuple = TupleNode(self.pos, args=[
                        arg.default for arg in default_args])
                    self.defaults_tuple = defaults_tuple.analyse_types(env)
                if default_kwargs:
                    defaults_kwdict = DictNode(self.pos, key_value_pairs=[
                        DictItemNode(
                            arg.pos,
                            key=IdentifierStringNode(arg.pos, value=arg.name),
                            value=arg.default)
                        for arg in default_kwargs])
                    self.defaults_kwdict = defaults_kwdict.analyse_types(env)
            else:
                # Some defaults live in the struct: __defaults__ and
                # __kwdefaults__ must be computed lazily by a getter.
                if default_args:
                    defaults_tuple = DefaultsTupleNode(
                        self.pos, default_args, self.defaults_struct)
                else:
                    defaults_tuple = NoneNode(self.pos)
                if default_kwargs:
                    defaults_kwdict = DefaultsKwDictNode(
                        self.pos, default_kwargs, self.defaults_struct)
                else:
                    defaults_kwdict = NoneNode(self.pos)
                # Synthesise "def __defaults__(): return (tuple, dict)"
                # as the lazy getter for both attributes.
                defaults_getter = Nodes.DefNode(
                    self.pos, args=[], star_arg=None, starstar_arg=None,
                    body=Nodes.ReturnStatNode(
                        self.pos, return_type=py_object_type,
                        value=TupleNode(
                            self.pos, args=[defaults_tuple, defaults_kwdict])),
                    decorators=None,
                    name=StringEncoding.EncodedString("__defaults__"))
                defaults_getter.analyse_declarations(env)
                defaults_getter = defaults_getter.analyse_expressions(env)
                defaults_getter.body = defaults_getter.body.analyse_expressions(
                    defaults_getter.local_scope)
                defaults_getter.py_wrapper_required = False
                defaults_getter.pymethdef_required = False
                self.def_node.defaults_getter = defaults_getter
        if annotations:
            annotations_dict = DictNode(self.pos, key_value_pairs=[
                DictItemNode(
                    pos, key=IdentifierStringNode(pos, value=name),
                    value=value)
                for pos, name, value in annotations])
            self.annotations_dict = annotations_dict.analyse_types(env)
    def may_be_none(self):
        # Function creation either succeeds or raises.
        return False
    gil_message = "Constructing Python function"
    def self_result_code(self):
        # C expression for the function's self object ("NULL" if unbound).
        if self.self_object is None:
            self_result = "NULL"
        else:
            self_result = self.self_object.py_result()
        return self_result
    def generate_result_code(self, code):
        # Binding functions become CyFunction objects, plain ones are
        # ordinary PyCFunctions.
        if self.binding:
            self.generate_cyfunction_code(code)
        else:
            self.generate_pycfunction_code(code)
    def generate_pycfunction_code(self, code):
        py_mod_name = self.get_py_mod_name(code)
        code.putln(
            '%s = PyCFunction_NewEx(&%s, %s, %s); %s' % (
                self.result(),
                self.pymethdef_cname,
                self.self_result_code(),
                py_mod_name,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
    def generate_cyfunction_code(self, code):
        if self.specialized_cpdefs:
            def_node = self.specialized_cpdefs[0]
        else:
            def_node = self.def_node
        # Fused (specialised) functions use a dedicated subtype.
        if self.specialized_cpdefs or self.is_specialization:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("FusedFunction", "CythonFunction.c"))
            constructor = "__pyx_FusedFunction_NewEx"
        else:
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("CythonFunction", "CythonFunction.c"))
            constructor = "__Pyx_CyFunction_NewEx"
        if self.code_object:
            code_object_result = self.code_object.py_result()
        else:
            code_object_result = 'NULL'
        flags = []
        if def_node.is_staticmethod:
            flags.append('__Pyx_CYFUNCTION_STATICMETHOD')
        elif def_node.is_classmethod:
            flags.append('__Pyx_CYFUNCTION_CLASSMETHOD')
        if def_node.local_scope.parent_scope.is_c_class_scope:
            flags.append('__Pyx_CYFUNCTION_CCLASS')
        if flags:
            flags = ' | '.join(flags)
        else:
            flags = '0'
        code.putln(
            '%s = %s(&%s, %s, %s, %s, %s, %s, %s); %s' % (
                self.result(),
                constructor,
                self.pymethdef_cname,
                flags,
                self.get_py_qualified_name(code),
                self.self_result_code(),
                self.get_py_mod_name(code),
                "PyModule_GetDict(%s)" % Naming.module_cname,
                code_object_result,
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
        if def_node.requires_classobj:
            # Register in the enclosing class's class-cell list so the
            # __class__ cell can be filled after class creation.
            assert code.pyclass_stack, "pyclass_stack is empty"
            class_node = code.pyclass_stack[-1]
            code.put_incref(self.py_result(), py_object_type)
            code.putln(
                'PyList_Append(%s, %s);' % (
                    class_node.class_cell.result(),
                    self.result()))
            code.put_giveref(self.py_result())
        if self.defaults:
            # Allocate the defaults struct on the function object and
            # fill in the non-literal default values.
            code.putln(
                'if (!__Pyx_CyFunction_InitDefaults(%s, sizeof(%s), %d)) %s' % (
                    self.result(), self.defaults_struct.name,
                    self.defaults_pyobjects, code.error_goto(self.pos)))
            defaults = '__Pyx_CyFunction_Defaults(%s, %s)' % (
                self.defaults_struct.name, self.result())
            for arg, entry in self.defaults:
                arg.generate_assignment_code(code, target='%s->%s' % (
                    defaults, entry.cname))
        if self.defaults_tuple:
            code.putln('__Pyx_CyFunction_SetDefaultsTuple(%s, %s);' % (
                self.result(), self.defaults_tuple.py_result()))
        if self.defaults_kwdict:
            code.putln('__Pyx_CyFunction_SetDefaultsKwDict(%s, %s);' % (
                self.result(), self.defaults_kwdict.py_result()))
        if def_node.defaults_getter:
            code.putln('__Pyx_CyFunction_SetDefaultsGetter(%s, %s);' % (
                self.result(), def_node.defaults_getter.entry.pyfunc_cname))
        if self.annotations_dict:
            code.putln('__Pyx_CyFunction_SetAnnotationsDict(%s, %s);' % (
                self.result(), self.annotations_dict.py_result()))
class InnerFunctionNode(PyCFunctionNode):
    """Special PyCFunctionNode that depends on a closure class.

    While the closure scope object is available it is passed as the
    function's self object.
    """
    binding = True
    needs_self_code = True

    def self_result_code(self):
        if not self.needs_self_code:
            return "NULL"
        # Hand the current closure scope over as the CyFunction's self.
        return "((PyObject*)%s)" % Naming.cur_scope_cname
class CodeObjectNode(ExprNode):
    # Create a PyCodeObject for a CyFunction instance.
    #
    # def_node DefNode the Python function node
    # varnames TupleNode a tuple with all local variable names
    subexprs = ['varnames']
    is_temp = False
    def __init__(self, def_node):
        ExprNode.__init__(self, def_node.pos, def_node=def_node)
        args = list(def_node.args)
        # if we have args/kwargs, then the first two in var_entries are those
        local_vars = [arg for arg in def_node.local_scope.var_entries if arg.name]
        self.varnames = TupleNode(
            def_node.pos,
            args=[IdentifierStringNode(arg.pos, value=arg.name)
                  for arg in args + local_vars],
            is_temp=0,
            is_literal=1)
    def may_be_none(self):
        return False
    def calculate_result_code(self):
        # The code object is a cached module-level constant.
        return self.result_code
    def generate_result_code(self, code):
        self.result_code = code.get_py_const(py_object_type, 'codeobj', cleanup_level=2)
        # Emit into the cached-constants section, not the current function.
        code = code.get_cached_constants_writer()
        code.mark_pos(self.pos)
        func = self.def_node
        func_name = code.get_py_string_const(
            func.name, identifier=True, is_str=False, unicode_value=func.name)
        # FIXME: better way to get the module file path at module init time? Encoding to use?
        file_path = StringEncoding.BytesLiteral(func.pos[0].get_filenametable_entry().encode('utf8'))
        file_path_const = code.get_py_string_const(file_path, identifier=False, is_str=True)
        flags = []
        if self.def_node.star_arg:
            flags.append('CO_VARARGS')
        if self.def_node.starstar_arg:
            flags.append('CO_VARKEYWORDS')
        # Arguments mirror PyCode_New(); unused slots get empty constants.
        code.putln("%s = (PyObject*)__Pyx_PyCode_New(%d, %d, %d, 0, %s, %s, %s, %s, %s, %s, %s, %s, %s, %d, %s); %s" % (
            self.result_code,
            len(func.args) - func.num_kwonly_args, # argcount
            func.num_kwonly_args,      # kwonlyargcount (Py3 only)
            len(self.varnames.args),   # nlocals
            '|'.join(flags) or '0',    # flags
            Naming.empty_bytes,        # code
            Naming.empty_tuple,        # consts
            Naming.empty_tuple,        # names (FIXME)
            self.varnames.result(),    # varnames
            Naming.empty_tuple,        # freevars (FIXME)
            Naming.empty_tuple,        # cellvars (FIXME)
            file_path_const,           # filename
            func_name,                 # name
            self.pos[1],               # firstlineno
            Naming.empty_bytes,        # lnotab
            code.error_goto_if_null(self.result_code, self.pos),
            ))
class DefaultLiteralArgNode(ExprNode):
    """Wraps a literal default value of a CyFunction argument so that the
    literal's evaluation code is emitted at most once, no matter how often
    this wrapper is asked to evaluate.
    """
    subexprs = []
    is_literal = True
    is_temp = False

    def __init__(self, pos, arg):
        super(DefaultLiteralArgNode, self).__init__(pos)
        self.arg = arg
        self.type = arg.type
        self.evaluated = False

    def analyse_types(self, env):
        # The wrapped literal has already been analysed.
        return self

    def generate_result_code(self, code):
        # Nothing to emit: the literal itself carries the result.
        pass

    def generate_evaluation_code(self, code):
        # Guard so repeated requests emit the evaluation code only once.
        if self.evaluated:
            return
        self.arg.generate_evaluation_code(code)
        self.evaluated = True

    def result(self):
        return self.type.cast_code(self.arg.result())
class DefaultNonLiteralArgNode(ExprNode):
    """Represents a non-literal default value of a CyFunction argument.

    The value is not computed here; it is read out of the function's
    defaults struct at call time.
    """
    subexprs = []

    def __init__(self, pos, arg, defaults_struct):
        super(DefaultNonLiteralArgNode, self).__init__(pos)
        self.arg = arg
        self.defaults_struct = defaults_struct

    def analyse_types(self, env):
        self.type = self.arg.type
        self.is_temp = False
        return self

    def generate_result_code(self, code):
        # No code needed: the value already lives in the defaults struct.
        pass

    def result(self):
        # Read this argument's field from the CyFunction defaults struct.
        struct = self.defaults_struct
        field_cname = struct.lookup(self.arg.name).cname
        return '__Pyx_CyFunction_Defaults(%s, %s)->%s' % (
            struct.name, Naming.self_cname, field_cname)
class DefaultsTupleNode(TupleNode):
    """The TupleNode behind a CyFunction's __defaults__ attribute.

    Literal defaults are used as-is; non-literal ones are read from the
    function's defaults struct via DefaultNonLiteralArgNode.
    """
    def __init__(self, pos, defaults, defaults_struct):
        args = [
            arg.default if arg.default.is_literal
            else DefaultNonLiteralArgNode(pos, arg, defaults_struct)
            for arg in defaults
        ]
        super(DefaultsTupleNode, self).__init__(pos, args=args)
class DefaultsKwDictNode(DictNode):
    """The DictNode behind a CyFunction's __kwdefaults__ attribute.

    Maps keyword-only argument names to their defaults; non-literal
    defaults are read from the function's defaults struct.
    """
    def __init__(self, pos, defaults, defaults_struct):
        items = []
        for arg in defaults:
            key = IdentifierStringNode(arg.pos, value=arg.name)
            if arg.default.is_literal:
                value = arg.default
            else:
                value = DefaultNonLiteralArgNode(pos, arg, defaults_struct)
            items.append(DictItemNode(value.pos, key=key, value=value))
        super(DefaultsKwDictNode, self).__init__(pos, key_value_pairs=items)
class LambdaNode(InnerFunctionNode):
    # Lambda expression node (only used as a function reference)
    #
    # args [CArgDeclNode] formal arguments
    # star_arg PyArgDeclNode or None * argument
    # starstar_arg PyArgDeclNode or None ** argument
    # lambda_name string a module-globally unique lambda name
    # result_expr ExprNode
    # def_node DefNode the underlying function 'def' node
    child_attrs = ['def_node']
    name = StringEncoding.EncodedString('<lambda>')
    def analyse_declarations(self, env):
        # A lambda is a def without an assignment of its name; the
        # surrounding module tracks it so its code gets generated.
        self.def_node.no_assignment_synthesis = True
        self.def_node.pymethdef_required = True
        self.def_node.analyse_declarations(env)
        self.def_node.is_cyfunction = True
        self.pymethdef_cname = self.def_node.entry.pymethdef_cname
        env.add_lambda_def(self.def_node)
    def analyse_types(self, env):
        self.def_node = self.def_node.analyse_expressions(env)
        return super(LambdaNode, self).analyse_types(env)
    def generate_result_code(self, code):
        # Emit the underlying def before constructing the function object.
        self.def_node.generate_execution_code(code)
        super(LambdaNode, self).generate_result_code(code)
class GeneratorExpressionNode(LambdaNode):
    # A generator expression, e.g. (i for i in range(10))
    #
    # Result is a generator.
    #
    # loop ForStatNode the for-loop, containing a YieldExprNode
    # def_node DefNode the underlying generator 'def' node
    name = StringEncoding.EncodedString('genexpr')
    binding = False
    def analyse_declarations(self, env):
        super(GeneratorExpressionNode, self).analyse_declarations(env)
        # No pymethdef required
        self.def_node.pymethdef_required = False
        self.def_node.py_wrapper_required = False
        self.def_node.is_cyfunction = False
        # Force genexpr signature
        self.def_node.entry.signature = TypeSlots.pyfunction_noargs
    def generate_result_code(self, code):
        # Call the generator's C function directly (no CyFunction object
        # needed for a genexpr).
        code.putln(
            '%s = %s(%s); %s' % (
                self.result(),
                self.def_node.entry.pyfunc_cname,
                self.self_result_code(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())
class YieldExprNode(ExprNode):
    # Yield expression node
    #
    # arg ExprNode the value to return from the generator
    # label_num integer yield label number
    # is_yield_from boolean is a YieldFromExprNode to delegate to another generator
    subexprs = ['arg']
    type = py_object_type
    label_num = 0
    is_yield_from = False
    def analyse_types(self, env):
        # label_num == 0 means we are not inside a generator body.
        if not self.label_num:
            error(self.pos, "'yield' not supported here")
        self.is_temp = 1
        if self.arg is not None:
            self.arg = self.arg.analyse_types(env)
            if not self.arg.type.is_pyobject:
                self.coerce_yield_argument(env)
        return self
    def coerce_yield_argument(self, env):
        self.arg = self.arg.coerce_to_pyobject(env)
    def generate_evaluation_code(self, code):
        # Move the yielded value (or None) into the generator's return
        # value slot, then emit the suspend/resume code.
        if self.arg:
            self.arg.generate_evaluation_code(code)
            self.arg.make_owned_reference(code)
            code.putln(
                "%s = %s;" % (
                    Naming.retval_cname,
                    self.arg.result_as(py_object_type)))
            self.arg.generate_post_assignment_code(code)
            self.arg.free_temps(code)
        else:
            code.put_init_to_py_none(Naming.retval_cname, py_object_type)
        self.generate_yield_code(code)
    def generate_yield_code(self, code):
        """
        Generate the code to return the argument in 'Naming.retval_cname'
        and to continue at the yield label.
        """
        label_num, label_name = code.new_yield_label()
        code.use_label(label_name)
        saved = []
        code.funcstate.closure_temps.reset()
        # Save all live temps into the closure so they survive the
        # suspension; Python object temps give up their reference.
        for cname, type, manage_ref in code.funcstate.temps_in_use():
            save_cname = code.funcstate.closure_temps.allocate_temp(type)
            saved.append((cname, save_cname, type))
            if type.is_pyobject:
                code.put_xgiveref(cname)
            code.putln('%s->%s = %s;' % (Naming.cur_scope_cname, save_cname, cname))
        code.put_xgiveref(Naming.retval_cname)
        code.put_finish_refcount_context()
        code.putln("/* return from generator, yielding value */")
        # Remember where to resume, then leave the generator function.
        code.putln("%s->resume_label = %d;" % (
            Naming.generator_cname, label_num))
        code.putln("return %s;" % Naming.retval_cname)
        code.put_label(label_name)
        # Resume point: restore the saved temps from the closure.
        for cname, save_cname, type in saved:
            code.putln('%s = %s->%s;' % (cname, Naming.cur_scope_cname, save_cname))
            if type.is_pyobject:
                code.putln('%s->%s = 0;' % (Naming.cur_scope_cname, save_cname))
                code.put_xgotref(cname)
        # A NULL sent value means an exception was thrown into us.
        code.putln(code.error_goto_if_null(Naming.sent_value_cname, self.pos))
        if self.result_is_used:
            # The value passed via gen.send() becomes this expression's
            # result.
            self.allocate_temp_result(code)
            code.put('%s = %s; ' % (self.result(), Naming.sent_value_cname))
            code.put_incref(self.result(), py_object_type)
class YieldFromExprNode(YieldExprNode):
    # "yield from GEN" expression
    is_yield_from = True
    def coerce_yield_argument(self, env):
        if not self.arg.type.is_string:
            # FIXME: support C arrays and C++ iterators?
            error(self.pos, "yielding from non-Python object not supported")
        self.arg = self.arg.coerce_to_pyobject(env)
    def generate_evaluation_code(self, code):
        code.globalstate.use_utility_code(UtilityCode.load_cached("YieldFrom", "Generator.c"))
        self.arg.generate_evaluation_code(code)
        # Delegate to the sub-generator; a non-NULL result is the next
        # value to yield out of this generator.
        code.putln("%s = __Pyx_Generator_Yield_From(%s, %s);" % (
            Naming.retval_cname,
            Naming.generator_cname,
            self.arg.result_as(py_object_type)))
        self.arg.generate_disposal_code(code)
        self.arg.free_temps(code)
        code.put_xgotref(Naming.retval_cname)
        code.putln("if (likely(%s)) {" % Naming.retval_cname)
        self.generate_yield_code(code)
        code.putln("} else {")
        # either error or sub-generator has normally terminated: return value => node result
        if self.result_is_used:
            # YieldExprNode has allocated the result temp for us
            code.putln("%s = NULL;" % self.result())
            code.putln("if (unlikely(__Pyx_PyGen_FetchStopIterationValue(&%s) < 0)) %s" % (
                self.result(),
                code.error_goto(self.pos)))
            code.put_gotref(self.result())
        else:
            # Result unused: only distinguish StopIteration (normal end)
            # from a real error.
            code.putln("PyObject* exc_type = PyErr_Occurred();")
            code.putln("if (exc_type) {")
            code.putln("if (likely(exc_type == PyExc_StopIteration ||"
                       " PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();")
            code.putln("else %s" % code.error_goto(self.pos))
            code.putln("}")
        code.putln("}")
class GlobalsExprNode(AtomicExprNode):
    """Implements the globals() builtin via the __Pyx_Globals() helper."""
    type = dict_type
    is_temp = 1

    def analyse_types(self, env):
        env.use_utility_code(Builtin.globals_utility_code)
        return self

    gil_message = "Constructing globals dict"

    def may_be_none(self):
        # __Pyx_Globals() yields a dict or raises, never None.
        return False

    def generate_result_code(self, code):
        result = self.result()
        code.putln('%s = __Pyx_Globals(); %s' % (
            result,
            code.error_goto_if_null(result, self.pos)))
        code.put_gotref(result)
class LocalsDictItemNode(DictItemNode):
    """DictItemNode for locals() entries.

    Entries whose value type cannot be represented as a Python object
    get their value set to None so the enclosing dict node can drop them.
    """
    def analyse_types(self, env):
        self.key = self.key.analyse_types(env)
        self.value = self.value.analyse_types(env)
        self.key = self.key.coerce_to_pyobject(env)
        if not self.value.type.can_coerce_to_pyobject(env):
            # Not representable as a Python object: mark for removal.
            self.value = None
        else:
            self.value = self.value.coerce_to_pyobject(env)
        return self
class FuncLocalsExprNode(DictNode):
    """Implements locals() inside a function.

    Builds a dict literal mapping each named local variable (sorted by
    name) to its current, possibly NULL, value.
    """
    def __init__(self, pos, env):
        names = sorted(entry.name for entry in env.entries.values()
                       if entry.name)
        items = []
        for var in names:
            items.append(LocalsDictItemNode(
                pos, key=IdentifierStringNode(pos, value=var),
                value=NameNode(pos, name=var, allow_null=True)))
        DictNode.__init__(self, pos, key_value_pairs=items,
                          exclude_null_values=True)

    def analyse_types(self, env):
        node = super(FuncLocalsExprNode, self).analyse_types(env)
        # LocalsDictItemNode nulls out values that cannot become Python
        # objects; drop those entries entirely.
        node.key_value_pairs = [item for item in node.key_value_pairs
                                if item.value is not None]
        return node
class PyClassLocalsExprNode(AtomicExprNode):
    """Implements locals() inside a Python class body.

    Simply aliases the class dictionary that is currently being built,
    so no code of its own needs to be generated.
    """
    def __init__(self, pos, pyclass_dict):
        AtomicExprNode.__init__(self, pos)
        self.pyclass_dict = pyclass_dict

    def analyse_types(self, env):
        # Borrow the type and result directly from the class dict node.
        self.type = self.pyclass_dict.type
        self.is_temp = False
        return self

    def may_be_none(self):
        return False

    def result(self):
        return self.pyclass_dict.result()

    def generate_result_code(self, code):
        # Nothing to emit: the class dict already exists.
        pass
def LocalsExprNode(pos, scope_node, env):
    # Factory returning the node implementing locals() in 'env':
    # module scope -> globals(); class body -> the class dict being
    # built; otherwise -> a snapshot dict of the function's locals.
    if env.is_module_scope:
        return GlobalsExprNode(pos)
    if env.is_py_class_scope:
        return PyClassLocalsExprNode(pos, scope_node.dict)
    return FuncLocalsExprNode(pos, env)
#-------------------------------------------------------------------
#
# Unary operator nodes
#
#-------------------------------------------------------------------
# Map unary operator spellings to their Python-level implementations,
# used to fold constant expressions at compile time.
compile_time_unary_operators = {
    '+': operator.pos,
    '-': operator.neg,
    '~': operator.inv,
    'not': operator.not_,
}
class UnopNode(ExprNode):
    # Base class for unary operator nodes.
    #
    # operator string
    # operand ExprNode
    #
    # Processing during analyse_expressions phase:
    #
    # analyse_c_operation
    # Called when the operand is not a pyobject.
    # - Check operand type and coerce if needed.
    # - Determine result type and result code fragment.
    # - Allocate temporary for result if needed.
    subexprs = ['operand']
    infix = True

    def calculate_constant_result(self):
        func = compile_time_unary_operators[self.operator]
        self.constant_result = func(self.operand.constant_result)

    def compile_time_value(self, denv):
        func = compile_time_unary_operators.get(self.operator)
        if not func:
            error(self.pos,
                  "Unary '%s' not supported in compile-time expression"
                  % self.operator)
        operand = self.operand.compile_time_value(denv)
        try:
            return func(operand)
        except Exception as e:  # fixed: was Py2-only "except Exception, e"
            self.compile_time_value_error(e)

    def infer_type(self, env):
        # C++ classes and pointers may overload the operator.
        operand_type = self.operand.infer_type(env)
        if operand_type.is_cpp_class or operand_type.is_ptr:
            cpp_type = operand_type.find_cpp_operation_type(self.operator)
            if cpp_type is not None:
                return cpp_type
        return self.infer_unop_type(env, operand_type)

    def infer_unop_type(self, env, operand_type):
        if operand_type.is_pyobject:
            return py_object_type
        else:
            return operand_type

    def may_be_none(self):
        # Unary ops on builtin types (other than 'type') never yield None.
        if self.operand.type and self.operand.type.is_builtin_type:
            if self.operand.type is not type_type:
                return False
        return ExprNode.may_be_none(self)

    def analyse_types(self, env):
        self.operand = self.operand.analyse_types(env)
        if self.is_py_operation():
            self.coerce_operand_to_pyobject(env)
            self.type = py_object_type
            self.is_temp = 1
        elif self.is_cpp_operation():
            self.analyse_cpp_operation(env)
        else:
            self.analyse_c_operation(env)
        return self

    def check_const(self):
        return self.operand.check_const()

    def is_py_operation(self):
        return self.operand.type.is_pyobject

    def nogil_check(self, env):
        if self.is_py_operation():
            self.gil_error()

    def is_cpp_operation(self):
        type = self.operand.type
        return type.is_cpp_class

    def coerce_operand_to_pyobject(self, env):
        self.operand = self.operand.coerce_to_pyobject(env)

    def generate_result_code(self, code):
        if self.operand.type.is_pyobject:
            self.generate_py_operation_code(code)

    def generate_py_operation_code(self, code):
        # Subclasses supply the C-API function via py_operation_function().
        function = self.py_operation_function()
        code.putln(
            "%s = %s(%s); %s" % (
                self.result(),
                function,
                self.operand.py_result(),
                code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.py_result())

    def type_error(self):
        if not self.operand.type.is_error:
            error(self.pos, "Invalid operand type for '%s' (%s)" %
                  (self.operator, self.operand.type))
        self.type = PyrexTypes.error_type

    def analyse_cpp_operation(self, env):
        cpp_type = self.operand.type.find_cpp_operation_type(self.operator)
        if cpp_type is None:
            # BUG FIX: the message previously formatted the builtin
            # 'type' instead of the operand's type.
            error(self.pos, "'%s' operator not defined for %s" % (
                self.operator, self.operand.type))
            self.type_error()
            return
        self.type = cpp_type
class NotNode(UnopNode):
    # 'not' operator
    #
    # operand ExprNode
    """Python 'not' operator; the result is always a C bint."""
    operator = '!'
    type = PyrexTypes.c_bint_type
    def calculate_constant_result(self):
        self.constant_result = not self.operand.constant_result
    def compile_time_value(self, denv):
        # Evaluate the operand in the compile-time environment and negate.
        operand = self.operand.compile_time_value(denv)
        try:
            return not operand
        except Exception, e:
            self.compile_time_value_error(e)
    def infer_unop_type(self, env, operand_type):
        # 'not' always yields a boolean, whatever the operand type.
        return PyrexTypes.c_bint_type
    def analyse_types(self, env):
        self.operand = self.operand.analyse_types(env)
        operand_type = self.operand.type
        if operand_type.is_cpp_class:
            # C++ classes may overload operator!; use its result type.
            cpp_type = operand_type.find_cpp_operation_type(self.operator)
            if not cpp_type:
                error(self.pos, "'!' operator not defined for %s" % operand_type)
                self.type = PyrexTypes.error_type
                return
            self.type = cpp_type
        else:
            # Everything else is coerced to a boolean first.
            self.operand = self.operand.coerce_to_boolean(env)
        return self
    def calculate_result_code(self):
        return "(!%s)" % self.operand.result()
    def generate_result_code(self, code):
        # Result is emitted inline via calculate_result_code().
        pass
class UnaryPlusNode(UnopNode):
    """Unary '+' operator node."""
    operator = '+'

    def analyse_c_operation(self, env):
        # C integer promotion: the result is at least as wide as int.
        self.type = PyrexTypes.widest_numeric_type(
            self.operand.type, PyrexTypes.c_int_type)

    def py_operation_function(self):
        return "PyNumber_Positive"

    def calculate_result_code(self):
        # For plain C values unary '+' is a no-op; only an overloaded
        # C++ operator needs the explicit prefix.
        if not self.is_cpp_operation():
            return self.operand.result()
        return "(+%s)" % self.operand.result()
class UnaryMinusNode(UnopNode):
    # unary '-' operator
    operator = '-'
    # Default emission mode: infix "(-x)".  analyse_c_operation() switches
    # this off for complex types, which negate through a helper function.
    # BUG FIX: a class-level default prevents an AttributeError in
    # calculate_result_code() when the complex branch was never taken.
    infix = True
    def analyse_c_operation(self, env):
        if self.operand.type.is_numeric:
            # Promote to at least int, as C does.
            self.type = PyrexTypes.widest_numeric_type(
                self.operand.type, PyrexTypes.c_int_type)
        elif self.operand.type.is_enum:
            self.type = PyrexTypes.c_int_type
        else:
            self.type_error()
        if self.type.is_complex:
            self.infix = False
    def py_operation_function(self):
        return "PyNumber_Negative"
    def calculate_result_code(self):
        if self.infix:
            return "(-%s)" % self.operand.result()
        else:
            # Complex types negate via their unary-op helper function.
            return "%s(%s)" % (self.operand.type.unary_op('-'), self.operand.result())
    def get_constant_c_result_code(self):
        # Implicitly returns None when the operand has no constant code.
        value = self.operand.get_constant_c_result_code()
        if value:
            return "(-%s)" % value
class TildeNode(UnopNode):
    # unary '~' operator
    # CONSISTENCY: declare the operator symbol on the class like the
    # sibling nodes (UnaryPlusNode/UnaryMinusNode) do; callers that pass
    # operator='~' explicitly are unaffected.
    operator = '~'
    def analyse_c_operation(self, env):
        # '~' is only defined for integer (and enum) operands in C.
        if self.operand.type.is_int:
            self.type = PyrexTypes.widest_numeric_type(
                self.operand.type, PyrexTypes.c_int_type)
        elif self.operand.type.is_enum:
            self.type = PyrexTypes.c_int_type
        else:
            self.type_error()
    def py_operation_function(self):
        return "PyNumber_Invert"
    def calculate_result_code(self):
        return "(~%s)" % self.operand.result()
class CUnopNode(UnopNode):
    """Base class for unary operators that exist only at the C level."""

    def is_py_operation(self):
        # C-only operators never dispatch to a Python C-API call.
        return False
class DereferenceNode(CUnopNode):
    # unary * operator
    """C pointer dereference; the result has the pointer's base type."""
    operator = '*'
    def infer_unop_type(self, env, operand_type):
        # Dereferencing only makes sense for pointers.
        if operand_type.is_ptr:
            return operand_type.base_type
        else:
            return PyrexTypes.error_type
    def analyse_c_operation(self, env):
        if self.operand.type.is_ptr:
            self.type = self.operand.type.base_type
        else:
            self.type_error()
    def calculate_result_code(self):
        return "(*%s)" % self.operand.result()
class DecrementIncrementNode(CUnopNode):
    """C '++'/'--' operator, in either prefix or postfix position."""

    def analyse_c_operation(self, env):
        operand_type = self.operand.type
        if operand_type.is_numeric:
            # Promote to at least int, as C does.
            self.type = PyrexTypes.widest_numeric_type(
                operand_type, PyrexTypes.c_int_type)
        elif operand_type.is_ptr:
            # Pointer arithmetic keeps the pointer type.
            self.type = operand_type
        else:
            self.type_error()

    def calculate_result_code(self):
        op = self.operator
        value = self.operand.result()
        if self.is_prefix:
            return "(%s%s)" % (op, value)
        return "(%s%s)" % (value, op)
def inc_dec_constructor(is_prefix, operator):
    """Return a node factory for DecrementIncrementNode with the given
    prefix/postfix flag and operator symbol baked in."""
    def construct(pos, **kwds):
        return DecrementIncrementNode(
            pos, is_prefix=is_prefix, operator=operator, **kwds)
    return construct
class AmpersandNode(CUnopNode):
    # The C address-of operator.
    #
    # operand ExprNode
    """C '&' (address-of); also supports overloaded C++ operator&."""
    operator = '&'
    def infer_unop_type(self, env, operand_type):
        return PyrexTypes.c_ptr_type(operand_type)
    def analyse_types(self, env):
        self.operand = self.operand.analyse_types(env)
        argtype = self.operand.type
        if argtype.is_cpp_class:
            # C++ classes may overload operator&; use its result type.
            cpp_type = argtype.find_cpp_operation_type(self.operator)
            if cpp_type is not None:
                self.type = cpp_type
                return self
        if not (argtype.is_cfunction or argtype.is_reference or self.operand.is_addressable()):
            # Only lvalue-like operands have an address.
            if argtype.is_memoryviewslice:
                self.error("Cannot take address of memoryview slice")
            else:
                self.error("Taking address of non-lvalue")
            return self
        if argtype.is_pyobject:
            self.error("Cannot take address of Python variable")
            return self
        self.type = PyrexTypes.c_ptr_type(argtype)
        return self
    def check_const(self):
        # '&x' is constant when x has a constant address.
        return self.operand.check_const_addr()
    def error(self, mess):
        # Report and poison this node so later phases do not crash on it.
        error(self.pos, mess)
        self.type = PyrexTypes.error_type
        self.result_code = "<error>"
    def calculate_result_code(self):
        return "(&%s)" % self.operand.result()
    def generate_result_code(self, code):
        # Result emitted inline via calculate_result_code().
        pass
# Maps unary operator symbols to their expression-node classes;
# consumed by unop_node() below.
unop_node_classes = {
    "+": UnaryPlusNode,
    "-": UnaryMinusNode,
    "~": TildeNode,
}
def unop_node(pos, operator, operand):
    """Construct a unary-operator node of the class matching *operator*.

    '-' applied to an integer literal is folded directly into a new
    IntNode.  A doubled '+'/'-' (C-style '++x'/'--x') triggers a warning,
    since Python parses it as two unary operators.
    """
    if operator == '-' and isinstance(operand, IntNode):
        negated_value = str(-Utils.str_to_number(operand.value))
        return IntNode(pos=operand.pos, value=negated_value,
                       longness=operand.longness, unsigned=operand.unsigned)
    if (operator in '+-' and isinstance(operand, UnopNode)
            and operand.operator == operator):
        warning(pos, "Python has no increment/decrement operator: "
                     "%s%sx == %s(%sx) == x" % ((operator,) * 4), 5)
    return unop_node_classes[operator](pos,
        operator=operator,
        operand=operand)
class TypecastNode(ExprNode):
    # C type cast
    #
    # operand ExprNode
    # base_type CBaseTypeNode
    # declarator CDeclaratorNode
    # typecheck boolean
    #
    # If used from a transform, one can if wanted specify the attribute
    # "type" directly and leave base_type and declarator to None
    subexprs = ['operand']
    base_type = declarator = type = None
    def type_dependencies(self, env):
        return ()
    def infer_type(self, env):
        # Resolve the declared target type lazily on first request.
        if self.type is None:
            base_type = self.base_type.analyse(env)
            _, self.type = self.declarator.analyse(base_type, env)
        return self.type
    def analyse_types(self, env):
        if self.type is None:
            base_type = self.base_type.analyse(env)
            _, self.type = self.declarator.analyse(base_type, env)
        if self.operand.has_constant_result():
            # Must be done after self.type is resolved.
            self.calculate_constant_result()
        if self.type.is_cfunction:
            error(self.pos,
                "Cannot cast to a function type")
            self.type = PyrexTypes.error_type
        self.operand = self.operand.analyse_types(env)
        if self.type is PyrexTypes.c_bint_type:
            # short circuit this to a coercion
            return self.operand.coerce_to_boolean(env)
        to_py = self.type.is_pyobject
        from_py = self.operand.type.is_pyobject
        if from_py and not to_py and self.operand.is_ephemeral():
            # Casting away the Python reference could leave a dangling value.
            if not self.type.is_numeric and not self.type.is_cpp_class:
                error(self.pos, "Casting temporary Python object to non-numeric non-Python type")
        if to_py and not from_py:
            # C value -> Python object.
            if self.type is bytes_type and self.operand.type.is_int:
                return CoerceIntToBytesNode(self.operand, env)
            elif self.operand.type.can_coerce_to_pyobject(env):
                self.result_ctype = py_object_type
                base_type = self.base_type.analyse(env)
                self.operand = self.operand.coerce_to(base_type, env)
            else:
                if self.operand.type.is_ptr:
                    if not (self.operand.type.base_type.is_void or self.operand.type.base_type.is_struct):
                        error(self.pos, "Python objects cannot be cast from pointers of primitive types")
                else:
                    # Should this be an error?
                    warning(self.pos, "No conversion from %s to %s, python object pointer used." % (self.operand.type, self.type))
                self.operand = self.operand.coerce_to_simple(env)
        elif from_py and not to_py:
            # Python object -> C value.
            if self.type.create_from_py_utility_code(env):
                self.operand = self.operand.coerce_to(self.type, env)
            elif self.type.is_ptr:
                if not (self.type.base_type.is_void or self.type.base_type.is_struct):
                    error(self.pos, "Python objects cannot be cast to pointers of primitive types")
            else:
                warning(self.pos, "No conversion from %s to %s, python object pointer used." % (self.type, self.operand.type))
        elif from_py and to_py:
            if self.typecheck:
                self.operand = PyTypeTestNode(self.operand, self.type, env, notnone=True)
            elif isinstance(self.operand, SliceIndexNode):
                # This cast can influence the created type of string slices.
                self.operand = self.operand.coerce_to(self.type, env)
        elif self.type.is_complex and self.operand.type.is_complex:
            self.operand = self.operand.coerce_to_simple(env)
        elif self.operand.type.is_fused:
            self.operand = self.operand.coerce_to(self.type, env)
            #self.type = self.operand.type
        return self
    def is_simple(self):
        # either temp or a C cast => no side effects other than the operand's
        return self.operand.is_simple()
    def nonlocally_immutable(self):
        return self.is_temp or self.operand.nonlocally_immutable()
    def nogil_check(self, env):
        if self.type and self.type.is_pyobject and self.is_temp:
            self.gil_error()
    def check_const(self):
        return self.operand.check_const()
    def calculate_constant_result(self):
        self.constant_result = self.calculate_result_code(self.operand.constant_result)
    def calculate_result_code(self, operand_result = None):
        if operand_result is None:
            operand_result = self.operand.result()
        if self.type.is_complex:
            # Build the complex value from real/imaginary parts.
            operand_result = self.operand.result()
            if self.operand.type.is_complex:
                real_part = self.type.real_type.cast_code("__Pyx_CREAL(%s)" % operand_result)
                imag_part = self.type.real_type.cast_code("__Pyx_CIMAG(%s)" % operand_result)
            else:
                real_part = self.type.real_type.cast_code(operand_result)
                imag_part = "0"
            return "%s(%s, %s)" % (
                self.type.from_parts,
                real_part,
                imag_part)
        else:
            return self.type.cast_code(operand_result)
    def get_constant_c_result_code(self):
        operand_result = self.operand.get_constant_c_result_code()
        if operand_result:
            return self.type.cast_code(operand_result)
    def result_as(self, type):
        if self.type.is_pyobject and not self.is_temp:
            # Optimise away some unnecessary casting
            return self.operand.result_as(type)
        else:
            return ExprNode.result_as(self, type)
    def generate_result_code(self, code):
        if self.is_temp:
            code.putln(
                "%s = (PyObject *)%s;" % (
                    self.result(),
                    self.operand.result()))
            code.put_incref(self.result(), self.ctype())
# Error messages used by CythonArrayNode.analyse_types() below.
ERR_START = "Start may not be given"
ERR_NOT_STOP = "Stop must be provided to indicate shape"
ERR_STEPS = ("Strides may only be given to indicate contiguity. "
             "Consider slicing it after conversion")
ERR_NOT_POINTER = "Can only create cython.array from pointer or array"
ERR_BASE_TYPE = "Pointer base type does not match cython.array base type"
class CythonArrayNode(ExprNode):
    """
    Used when a pointer of base_type is cast to a memoryviewslice with that
    base type. i.e.

        <int[:M:1, :N]> p

    creates a fortran-contiguous cython.array.

    We leave the type set to object so coercions to object are more efficient
    and less work. Acquiring a memoryviewslice from this will be just as
    efficient. ExprNode.coerce_to() will do the additional typecheck on
    self.compile_time_type

    This also handles <int[:, :]> my_c_array

    operand         ExprNode                  the thing we're casting
    base_type_node  MemoryViewSliceTypeNode   the cast expression node
    """
    subexprs = ['operand', 'shapes']
    shapes = None
    is_temp = True
    mode = "c"          # 'c' or 'fortran' contiguity of the result
    array_dtype = None
    shape_type = PyrexTypes.c_py_ssize_t_type
    def analyse_types(self, env):
        import MemoryView
        self.operand = self.operand.analyse_types(env)
        if self.array_dtype:
            array_dtype = self.array_dtype
        else:
            array_dtype = self.base_type_node.base_type_node.analyse(env)
        axes = self.base_type_node.axes
        MemoryView.validate_memslice_dtype(self.pos, array_dtype)
        self.type = error_type
        self.shapes = []
        ndim = len(axes)
        # Base type of the pointer or C array we are converting
        base_type = self.operand.type
        if not self.operand.type.is_ptr and not self.operand.type.is_array:
            error(self.operand.pos, ERR_NOT_POINTER)
            return self
        # Dimension sizes of C array
        array_dimension_sizes = []
        if base_type.is_array:
            while base_type.is_array:
                array_dimension_sizes.append(base_type.size)
                base_type = base_type.base_type
        elif base_type.is_ptr:
            base_type = base_type.base_type
        else:
            error(self.pos, "unexpected base type %s found" % base_type)
            return self
        if not (base_type.same_as(array_dtype) or base_type.is_void):
            error(self.operand.pos, ERR_BASE_TYPE)
            return self
        elif self.operand.type.is_array and len(array_dimension_sizes) != ndim:
            error(self.operand.pos,
                  "Expected %d dimensions, array has %d dimensions" %
                  (ndim, len(array_dimension_sizes)))
            return self
        # Verify the start, stop and step values
        # In case of a C array, use the size of C array in each dimension to
        # get an automatic cast
        for axis_no, axis in enumerate(axes):
            if not axis.start.is_none:
                error(axis.start.pos, ERR_START)
                return self
            if axis.stop.is_none:
                if array_dimension_sizes:
                    dimsize = array_dimension_sizes[axis_no]
                    axis.stop = IntNode(self.pos, value=str(dimsize),
                                        constant_result=dimsize,
                                        type=PyrexTypes.c_int_type)
                else:
                    error(axis.pos, ERR_NOT_STOP)
                    return self
            axis.stop = axis.stop.analyse_types(env)
            shape = axis.stop.coerce_to(self.shape_type, env)
            if not shape.is_literal:
                shape.coerce_to_temp(env)
            self.shapes.append(shape)
            first_or_last = axis_no in (0, ndim - 1)
            if not axis.step.is_none and first_or_last:
                # '1' in the first or last dimension denotes F or C contiguity
                axis.step = axis.step.analyse_types(env)
                if (not axis.step.type.is_int and axis.step.is_literal and not
                        axis.step.type.is_error):
                    error(axis.step.pos, "Expected an integer literal")
                    return self
                if axis.step.compile_time_value(env) != 1:
                    error(axis.step.pos, ERR_STEPS)
                    return self
                if axis_no == 0:
                    self.mode = "fortran"
            elif not axis.step.is_none and not first_or_last:
                # step provided in some other dimension
                error(axis.step.pos, ERR_STEPS)
                return self
        if not self.operand.is_name:
            self.operand = self.operand.coerce_to_temp(env)
        axes = [('direct', 'follow')] * len(axes)
        if self.mode == "fortran":
            axes[0] = ('direct', 'contig')
        else:
            axes[-1] = ('direct', 'contig')
        self.coercion_type = PyrexTypes.MemoryViewSliceType(array_dtype, axes)
        self.type = self.get_cython_array_type(env)
        MemoryView.use_cython_array_utility_code(env)
        env.use_utility_code(MemoryView.typeinfo_to_format_code)
        return self
    def allocate_temp_result(self, code):
        if self.temp_code:
            # BUG FIX: message previously read "mulitple".
            raise RuntimeError("temp allocated multiple times")
        self.temp_code = code.funcstate.allocate_temp(self.type, True)
    def infer_type(self, env):
        return self.get_cython_array_type(env)
    def get_cython_array_type(self, env):
        # Look up cython.view.array's type in the shared cython scope.
        return env.global_scope().context.cython_scope.viewscope.lookup("array").type
    def generate_result_code(self, code):
        import Buffer
        shapes = [self.shape_type.cast_code(shape.result())
                  for shape in self.shapes]
        dtype = self.coercion_type.dtype
        shapes_temp = code.funcstate.allocate_temp(py_object_type, True)
        format_temp = code.funcstate.allocate_temp(py_object_type, True)
        itemsize = "sizeof(%s)" % dtype.declaration_code("")
        type_info = Buffer.get_type_information_cname(code, dtype)
        if self.operand.type.is_ptr:
            # Guard against constructing a cython.array around NULL.
            code.putln("if (!%s) {" % self.operand.result())
            code.putln( 'PyErr_SetString(PyExc_ValueError,'
                            '"Cannot create cython.array from NULL pointer");')
            code.putln(code.error_goto(self.operand.pos))
            code.putln("}")
        code.putln("%s = __pyx_format_from_typeinfo(&%s);" %
                   (format_temp, type_info))
        buildvalue_fmt = " __PYX_BUILD_PY_SSIZE_T " * len(shapes)
        code.putln('%s = Py_BuildValue((char*) "(" %s ")", %s);' % (
            shapes_temp, buildvalue_fmt, ", ".join(shapes)))
        err = "!%s || !%s || !PyBytes_AsString(%s)" % (format_temp,
                                                       shapes_temp,
                                                       format_temp)
        code.putln(code.error_goto_if(err, self.pos))
        code.put_gotref(format_temp)
        code.put_gotref(shapes_temp)
        tup = (self.result(), shapes_temp, itemsize, format_temp,
               self.mode, self.operand.result())
        code.putln('%s = __pyx_array_new('
                       '%s, %s, PyBytes_AS_STRING(%s), '
                       '(char *) "%s", (char *) %s);' % tup)
        code.putln(code.error_goto_if_null(self.result(), self.pos))
        code.put_gotref(self.result())
        def dispose(temp):
            # Release the helper temporaries once the array is built.
            code.put_decref_clear(temp, py_object_type)
            code.funcstate.release_temp(temp)
        dispose(shapes_temp)
        dispose(format_temp)
    @classmethod
    def from_carray(cls, src_node, env):
        """
        Given a C array type, return a CythonArrayNode
        """
        pos = src_node.pos
        base_type = src_node.type
        none_node = NoneNode(pos)
        axes = []
        while base_type.is_array:
            axes.append(SliceNode(pos, start=none_node, stop=none_node,
                                  step=none_node))
            base_type = base_type.base_type
        # Mark the innermost dimension contiguous.
        axes[-1].step = IntNode(pos, value="1", is_c_literal=True)
        memslicenode = Nodes.MemoryViewSliceTypeNode(pos, axes=axes,
                                                     base_type_node=base_type)
        result = CythonArrayNode(pos, base_type_node=memslicenode,
                                 operand=src_node, array_dtype=base_type)
        result = result.analyse_types(env)
        return result
class SizeofNode(ExprNode):
    """Abstract base class for sizeof(x) expression nodes."""

    # sizeof() always yields a size_t.
    type = PyrexTypes.c_size_t_type

    def check_const(self):
        # sizeof() is a compile-time constant in C.
        return True

    def generate_result_code(self, code):
        # Nothing to emit: the result is produced inline by the
        # subclasses' calculate_result_code().
        pass
class SizeofTypeNode(SizeofNode):
    # C sizeof function applied to a type
    #
    # base_type CBaseTypeNode
    # declarator CDeclaratorNode
    subexprs = []
    arg_type = None
    def analyse_types(self, env):
        # we may have incorrectly interpreted a dotted name as a type rather than an attribute
        # this could be better handled by more uniformly treating types as runtime-available objects
        # NOTE: the "if 0 and" below deliberately disables this fallback
        # path; it is kept for reference only.
        if 0 and self.base_type.module_path:
            path = self.base_type.module_path
            obj = env.lookup(path[0])
            if obj.as_module is None:
                operand = NameNode(pos=self.pos, name=path[0])
                for attr in path[1:]:
                    operand = AttributeNode(pos=self.pos, obj=operand, attribute=attr)
                operand = AttributeNode(pos=self.pos, obj=operand, attribute=self.base_type.name)
                self.operand = operand
                # Morph into a SizeofVarNode and re-analyse as a variable.
                self.__class__ = SizeofVarNode
                node = self.analyse_types(env)
                return node
        if self.arg_type is None:
            base_type = self.base_type.analyse(env)
            _, arg_type = self.declarator.analyse(base_type, env)
            self.arg_type = arg_type
        self.check_type()
        return self
    def check_type(self):
        # Reject types whose size is meaningless or unknown at the C level.
        arg_type = self.arg_type
        if arg_type.is_pyobject and not arg_type.is_extension_type:
            error(self.pos, "Cannot take sizeof Python object")
        elif arg_type.is_void:
            error(self.pos, "Cannot take sizeof void")
        elif not arg_type.is_complete():
            error(self.pos, "Cannot take sizeof incomplete type '%s'" % arg_type)
    def calculate_result_code(self):
        if self.arg_type.is_extension_type:
            # the size of the pointer is boring
            # we want the size of the actual struct
            arg_code = self.arg_type.declaration_code("", deref=1)
        else:
            arg_code = self.arg_type.declaration_code("")
        return "(sizeof(%s))" % arg_code
class SizeofVarNode(SizeofNode):
    # C sizeof function applied to a variable
    #
    # operand ExprNode
    subexprs = ['operand']
    def analyse_types(self, env):
        # We may actually be looking at a type rather than a variable...
        # If we are, traditional analysis would fail...
        operand_as_type = self.operand.analyse_as_type(env)
        if operand_as_type:
            self.arg_type = operand_as_type
            if self.arg_type.is_fused:
                # Resolve the fused type to the current specialization.
                self.arg_type = self.arg_type.specialize(env.fused_to_specific)
            # Morph into a SizeofTypeNode now that we know it is a type.
            self.__class__ = SizeofTypeNode
            self.check_type()
        else:
            self.operand = self.operand.analyse_types(env)
        return self
    def calculate_result_code(self):
        return "(sizeof(%s))" % self.operand.result()
    def generate_result_code(self, code):
        # Result emitted inline via calculate_result_code().
        pass
class TypeofNode(ExprNode):
    # Compile-time type of an expression, as a string.
    #
    # operand ExprNode
    # literal StringNode # internal
    literal = None
    type = py_object_type
    subexprs = ['literal'] # 'operand' will be ignored after type analysis!
    def analyse_types(self, env):
        # Analyse the operand only to learn its type, then replace it by a
        # string literal naming that type.
        self.operand = self.operand.analyse_types(env)
        value = StringEncoding.EncodedString(str(self.operand.type)) #self.operand.type.typeof_name())
        literal = StringNode(self.pos, value=value)
        literal = literal.analyse_types(env)
        self.literal = literal.coerce_to_pyobject(env)
        return self
    def may_be_none(self):
        # Always yields the literal type string, never None.
        return False
    def generate_evaluation_code(self, code):
        self.literal.generate_evaluation_code(code)
    def calculate_result_code(self):
        return self.literal.calculate_result_code()
#-------------------------------------------------------------------
#
# Binary operator nodes
#
#-------------------------------------------------------------------
# Maps binary operator symbols to the Python functions used to evaluate
# them at compile time (see get_compile_time_binop below).
compile_time_binary_operators = {
    '<': operator.lt,
    '<=': operator.le,
    '==': operator.eq,
    '!=': operator.ne,
    '>=': operator.ge,
    '>': operator.gt,
    'is': operator.is_,
    'is_not': operator.is_not,
    '+': operator.add,
    '&': operator.and_,
    '/': operator.truediv,
    '//': operator.floordiv,
    '<<': operator.lshift,
    '%': operator.mod,
    '*': operator.mul,
    '|': operator.or_,
    '**': operator.pow,
    '>>': operator.rshift,
    '-': operator.sub,
    '^': operator.xor,
    'in': lambda x, seq: x in seq,
    'not_in': lambda x, seq: x not in seq,
}
def get_compile_time_binop(node):
    """Look up the Python function implementing *node*'s binary operator
    for compile-time evaluation; report an error when unsupported."""
    func = compile_time_binary_operators.get(node.operator)
    if func is None:
        error(node.pos,
              "Binary '%s' not supported in compile-time expression"
              % node.operator)
    return func
class BinopNode(ExprNode):
    # operator string
    # operand1 ExprNode
    # operand2 ExprNode
    #
    # Processing during analyse_expressions phase:
    #
    # analyse_c_operation
    # Called when neither operand is a pyobject.
    # - Check operand types and coerce if needed.
    # - Determine result type and result code fragment.
    # - Allocate temporary for result if needed.
    """Base class for binary operator expression nodes."""
    subexprs = ['operand1', 'operand2']
    inplace = False
    def calculate_constant_result(self):
        # Fold the operation over already-known constant operands.
        func = compile_time_binary_operators[self.operator]
        self.constant_result = func(
            self.operand1.constant_result,
            self.operand2.constant_result)
    def compile_time_value(self, denv):
        # Evaluate both operands in the compile-time environment, then
        # apply the corresponding Python-level operator function.
        func = get_compile_time_binop(self)
        operand1 = self.operand1.compile_time_value(denv)
        operand2 = self.operand2.compile_time_value(denv)
        try:
            return func(operand1, operand2)
        except Exception, e:
            self.compile_time_value_error(e)
    def infer_type(self, env):
        return self.result_type(self.operand1.infer_type(env),
                                self.operand2.infer_type(env))
    def analyse_types(self, env):
        self.operand1 = self.operand1.analyse_types(env)
        self.operand2 = self.operand2.analyse_types(env)
        self.analyse_operation(env)
        return self
    def analyse_operation(self, env):
        # Dispatch on operand kinds: Python objects, C++ overloads, or C.
        if self.is_py_operation():
            self.coerce_operands_to_pyobjects(env)
            self.type = self.result_type(self.operand1.type,
                                         self.operand2.type)
            assert self.type.is_pyobject
            self.is_temp = 1
        elif self.is_cpp_operation():
            self.analyse_cpp_operation(env)
        else:
            self.analyse_c_operation(env)
    def is_py_operation(self):
        return self.is_py_operation_types(self.operand1.type, self.operand2.type)
    def is_py_operation_types(self, type1, type2):
        # A Python operation is needed as soon as either side is a
        # Python object.
        return type1.is_pyobject or type2.is_pyobject
    def is_cpp_operation(self):
        return (self.operand1.type.is_cpp_class
                or self.operand2.type.is_cpp_class)
    def analyse_cpp_operation(self, env):
        # Resolve an overloaded C++ operator: member operators take one
        # argument (the RHS), free functions take both operands.
        entry = env.lookup_operator(self.operator, [self.operand1, self.operand2])
        if not entry:
            self.type_error()
            return
        func_type = entry.type
        if func_type.is_ptr:
            func_type = func_type.base_type
        if len(func_type.args) == 1:
            self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env)
        else:
            self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
            self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
        self.type = func_type.return_type
    def result_type(self, type1, type2):
        if self.is_py_operation_types(type1, type2):
            # Normalise C string types to their Python counterparts first.
            if type2.is_string:
                type2 = Builtin.bytes_type
            elif type2.is_pyunicode_ptr:
                type2 = Builtin.unicode_type
            if type1.is_string:
                type1 = Builtin.bytes_type
            elif type1.is_pyunicode_ptr:
                type1 = Builtin.unicode_type
            if type1.is_builtin_type or type2.is_builtin_type:
                if type1 is type2 and self.operator in '**%+|&^':
                    # FIXME: at least these operators should be safe - others?
                    return type1
                result_type = self.infer_builtin_types_operation(type1, type2)
                if result_type is not None:
                    return result_type
            return py_object_type
        else:
            return self.compute_c_result_type(type1, type2)
    def infer_builtin_types_operation(self, type1, type2):
        # Overridden by subclasses that can predict builtin result types.
        return None
    def nogil_check(self, env):
        if self.is_py_operation():
            self.gil_error()
    def coerce_operands_to_pyobjects(self, env):
        self.operand1 = self.operand1.coerce_to_pyobject(env)
        self.operand2 = self.operand2.coerce_to_pyobject(env)
    def check_const(self):
        return self.operand1.check_const() and self.operand2.check_const()
    def generate_result_code(self, code):
        #print "BinopNode.generate_result_code:", self.operand1, self.operand2 ###
        if self.operand1.type.is_pyobject:
            function = self.py_operation_function()
            if self.operator == '**':
                # PyNumber_Power() takes a third (modulus) argument.
                extra_args = ", Py_None"
            else:
                extra_args = ""
            code.putln(
                "%s = %s(%s, %s%s); %s" % (
                    self.result(),
                    function,
                    self.operand1.py_result(),
                    self.operand2.py_result(),
                    extra_args,
                    code.error_goto_if_null(self.result(), self.pos)))
            code.put_gotref(self.py_result())
        elif self.is_temp:
            code.putln("%s = %s;" % (self.result(), self.calculate_result_code()))
    def type_error(self):
        # Report unless an operand is already an error (avoid cascades).
        if not (self.operand1.type.is_error
                or self.operand2.type.is_error):
            error(self.pos, "Invalid operand types for '%s' (%s; %s)" %
                (self.operator, self.operand1.type,
                 self.operand2.type))
        self.type = PyrexTypes.error_type
class CBinopNode(BinopNode):
    """Binary operator restricted to C-level semantics — a Python-object
    operand is an error rather than a fallback."""

    def analyse_types(self, env):
        node = BinopNode.analyse_types(self, env)
        if node.is_py_operation():
            # No Python dispatch for C-only operators.
            node.type = PyrexTypes.error_type
        return node

    def py_operation_function(self):
        return ""

    def calculate_result_code(self):
        return "(%s %s %s)" % (
            self.operand1.result(), self.operator, self.operand2.result())

    def compute_c_result_type(self, type1, type2):
        # FIXME: handle the reversed case (type2 providing the operator)
        # and other combinations?
        if type1.is_cpp_class or type1.is_ptr:
            return type1.find_cpp_operation_type(self.operator, type2)
        return None
def c_binop_constructor(operator):
    """Return a node factory building CBinopNode with *operator* baked in."""
    def make_binop_node(pos, **operands):
        return CBinopNode(pos, operator=operator, **operands)
    return make_binop_node
class NumBinopNode(BinopNode):
    # Binary operation taking numeric arguments.
    infix = True
    overflow_check = False
    overflow_bit_node = None
    def analyse_c_operation(self, env):
        type1 = self.operand1.type
        type2 = self.operand2.type
        self.type = self.compute_c_result_type(type1, type2)
        if not self.type:
            self.type_error()
            return
        if self.type.is_complex:
            # Complex types combine via helper functions, not infix ops.
            self.infix = False
        if (self.type.is_int
                and env.directives['overflowcheck']
                and self.operator in self.overflow_op_names):
            if (self.operator in ('+', '*')
                    and self.operand1.has_constant_result()
                    and not self.operand2.has_constant_result()):
                # Commutative op: put the constant on the RHS so the
                # overflow helper can specialise for a constant rhs.
                self.operand1, self.operand2 = self.operand2, self.operand1
            self.overflow_check = True
            self.overflow_fold = env.directives['overflowcheck.fold']
            self.func = self.type.overflow_check_binop(
                self.overflow_op_names[self.operator],
                env,
                const_rhs = self.operand2.has_constant_result())
            self.is_temp = True
        if not self.infix or (type1.is_numeric and type2.is_numeric):
            self.operand1 = self.operand1.coerce_to(self.type, env)
            self.operand2 = self.operand2.coerce_to(self.type, env)
    def compute_c_result_type(self, type1, type2):
        if self.c_types_okay(type1, type2):
            widest_type = PyrexTypes.widest_numeric_type(type1, type2)
            if widest_type is PyrexTypes.c_bint_type:
                if self.operator not in '|^&':
                    # False + False == 0 # not False!
                    widest_type = PyrexTypes.c_int_type
            else:
                widest_type = PyrexTypes.widest_numeric_type(
                    widest_type, PyrexTypes.c_int_type)
            return widest_type
        else:
            return None
    def may_be_none(self):
        if self.type and self.type.is_builtin_type:
            # if we know the result type, we know the operation, so it can't be None
            return False
        type1 = self.operand1.type
        type2 = self.operand2.type
        if type1 and type1.is_builtin_type and type2 and type2.is_builtin_type:
            # XXX: I can't think of any case where a binary operation
            # on builtin types evaluates to None - add a special case
            # here if there is one.
            return False
        return super(NumBinopNode, self).may_be_none()
    def get_constant_c_result_code(self):
        value1 = self.operand1.get_constant_c_result_code()
        value2 = self.operand2.get_constant_c_result_code()
        if value1 and value2:
            return "(%s %s %s)" % (value1, self.operator, value2)
        else:
            return None
    def c_types_okay(self, type1, type2):
        #print "NumBinopNode.c_types_okay:", type1, type2 ###
        return (type1.is_numeric or type1.is_enum) \
            and (type2.is_numeric or type2.is_enum)
    def generate_evaluation_code(self, code):
        # Wrap the evaluation in an overflow-flag check when requested.
        if self.overflow_check:
            self.overflow_bit_node = self
            self.overflow_bit = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
            code.putln("%s = 0;" % self.overflow_bit)
        super(NumBinopNode, self).generate_evaluation_code(code)
        if self.overflow_check:
            code.putln("if (unlikely(%s)) {" % self.overflow_bit)
            code.putln('PyErr_SetString(PyExc_OverflowError, "value too large");')
            code.putln(code.error_goto(self.pos))
            code.putln("}")
            code.funcstate.release_temp(self.overflow_bit)
    def calculate_result_code(self):
        if self.overflow_bit_node is not None:
            # Call the overflow-checking helper with the flag out-param.
            return "%s(%s, %s, &%s)" % (
                self.func,
                self.operand1.result(),
                self.operand2.result(),
                self.overflow_bit_node.overflow_bit)
        elif self.infix:
            return "(%s %s %s)" % (
                self.operand1.result(),
                self.operator,
                self.operand2.result())
        else:
            func = self.type.binary_op(self.operator)
            if func is None:
                error(self.pos, "binary operator %s not supported for %s" % (self.operator, self.type))
            return "%s(%s, %s)" % (
                func,
                self.operand1.result(),
                self.operand2.result())
    def is_py_operation_types(self, type1, type2):
        # Unicode character types also need the Python-level operation.
        return (type1.is_unicode_char or
                type2.is_unicode_char or
                BinopNode.is_py_operation_types(self, type1, type2))
    def py_operation_function(self):
        function_name = self.py_functions[self.operator]
        if self.inplace:
            # e.g. PyNumber_Add -> PyNumber_InPlaceAdd
            function_name = function_name.replace('PyNumber_', 'PyNumber_InPlace')
        return function_name
    # C-API function implementing each operator on Python objects.
    py_functions = {
        "|": "PyNumber_Or",
        "^": "PyNumber_Xor",
        "&": "PyNumber_And",
        "<<": "PyNumber_Lshift",
        ">>": "PyNumber_Rshift",
        "+": "PyNumber_Add",
        "-": "PyNumber_Subtract",
        "*": "PyNumber_Multiply",
        "/": "__Pyx_PyNumber_Divide",
        "//": "PyNumber_FloorDivide",
        "%": "PyNumber_Remainder",
        "**": "PyNumber_Power"
    }
    # Operators with overflow-checking helper implementations.
    overflow_op_names = {
        "+": "add",
        "-": "sub",
        "*": "mul",
        "<<": "lshift",
    }
class IntBinopNode(NumBinopNode):
    """Binary operation whose C operands must be integers (or enums)."""

    def c_types_okay(self, type1, type2):
        # Both sides must be integral at the C level.
        for operand_type in (type1, type2):
            if not (operand_type.is_int or operand_type.is_enum):
                return False
        return True
class AddNode(NumBinopNode):
    # '+' operator.
    def is_py_operation_types(self, type1, type2):
        # C string / pyunicode-ptr concatenation goes through Python.
        if type1.is_string and type2.is_string or type1.is_pyunicode_ptr and type2.is_pyunicode_ptr:
            return 1
        else:
            return NumBinopNode.is_py_operation_types(self, type1, type2)
    def infer_builtin_types_operation(self, type1, type2):
        # b'abc' + 'abc' raises an exception in Py3,
        # so we can safely infer the Py2 type for bytes here
        string_types = [bytes_type, str_type, basestring_type, unicode_type] # Py2.4 lacks tuple.index()
        if type1 in string_types and type2 in string_types:
            # The result has the "wider" of the two string types.
            return string_types[max(string_types.index(type1),
                                    string_types.index(type2))]
        return None
    def compute_c_result_type(self, type1, type2):
        #print "AddNode.compute_c_result_type:", type1, self.operator, type2 ###
        # Pointer + integer keeps the pointer type (C pointer arithmetic).
        if (type1.is_ptr or type1.is_array) and (type2.is_int or type2.is_enum):
            return type1
        elif (type2.is_ptr or type2.is_array) and (type1.is_int or type1.is_enum):
            return type2
        else:
            return NumBinopNode.compute_c_result_type(
                self, type1, type2)
    def py_operation_function(self):
        type1, type2 = self.operand1.type, self.operand2.type
        if type1 is unicode_type or type2 is unicode_type:
            if type1.is_builtin_type and type2.is_builtin_type:
                # Use the specialised unicode concat helpers; the 'Safe'
                # variant tolerates a None operand.
                if self.operand1.may_be_none() or self.operand2.may_be_none():
                    return '__Pyx_PyUnicode_ConcatSafe'
                else:
                    return '__Pyx_PyUnicode_Concat'
        return super(AddNode, self).py_operation_function()
class SubNode(NumBinopNode):
    """'-' operator."""

    def compute_c_result_type(self, type1, type2):
        lhs_is_ptr = type1.is_ptr or type1.is_array
        if lhs_is_ptr and (type2.is_int or type2.is_enum):
            # pointer - integer keeps the pointer type
            return type1
        if lhs_is_ptr and (type2.is_ptr or type2.is_array):
            # pointer - pointer yields ptrdiff_t
            return PyrexTypes.c_ptrdiff_t_type
        return NumBinopNode.compute_c_result_type(self, type1, type2)
class MulNode(NumBinopNode):
    # '*' operator.
    def is_py_operation_types(self, type1, type2):
        # C string * int repetition goes through Python.
        if ((type1.is_string and type2.is_int) or
                (type2.is_string and type1.is_int)):
            return 1
        else:
            return NumBinopNode.is_py_operation_types(self, type1, type2)
    def infer_builtin_types_operation(self, type1, type2):
        # let's assume that whatever builtin type you multiply a string with
        # will either return a string of the same type or fail with an exception
        string_types = (bytes_type, str_type, basestring_type, unicode_type)
        if type1 in string_types and type2.is_builtin_type:
            return type1
        if type2 in string_types and type1.is_builtin_type:
            return type2
        # multiplication of containers/numbers with an integer value
        # always (?) returns the same type
        if type1.is_int:
            return type2
        if type2.is_int:
            return type1
        return None
class DivNode(NumBinopNode):
    # '/' or '//' operator.
    #
    # cdivision            bool/None  use plain C division semantics (no zero
    #                                 check, C truncation); None = decide later
    # truedivision         bool/None  '/' means true division; None = unknown
    # ctruedivision        bool       generate C-level true (float) division
    # cdivision_warnings   bool       emit runtime warning where C and Python
    #                                 division semantics would differ
    # zerodivision_check   bool/None  runtime zero-divisor check is required
    cdivision = None
    truedivision = None # == "unknown" if operator == '/'
    ctruedivision = False
    cdivision_warnings = False
    zerodivision_check = None
    def find_compile_time_binary_operator(self, op1, op2):
        # Pick the Python function used to fold this division at compile
        # time: '/' on two plain integers means floor division as long as
        # true division has not been enabled.
        func = compile_time_binary_operators[self.operator]
        if self.operator == '/' and self.truedivision is None:
            # => true div for floats, floor div for integers
            if isinstance(op1, (int,long)) and isinstance(op2, (int,long)):
                func = compile_time_binary_operators['//']
        return func
    def calculate_constant_result(self):
        # Fold the division of two constant operands.
        op1 = self.operand1.constant_result
        op2 = self.operand2.constant_result
        func = self.find_compile_time_binary_operator(op1, op2)
        self.constant_result = func(
            self.operand1.constant_result,
            self.operand2.constant_result)
    def compile_time_value(self, denv):
        # Evaluate the division in the compile-time environment, reporting
        # (rather than raising) errors such as ZeroDivisionError.
        operand1 = self.operand1.compile_time_value(denv)
        operand2 = self.operand2.compile_time_value(denv)
        try:
            func = self.find_compile_time_binary_operator(
                operand1, operand2)
            return func(operand1, operand2)
        except Exception, e:
            self.compile_time_value_error(e)
    def analyse_operation(self, env):
        # Decide between C and Python division semantics and determine
        # whether a runtime zero-division check is needed.
        if self.cdivision or env.directives['cdivision']:
            self.ctruedivision = False
        else:
            self.ctruedivision = self.truedivision
        NumBinopNode.analyse_operation(self, env)
        if self.is_cpp_operation():
            # C++ operator/ always has C semantics
            self.cdivision = True
        if not self.type.is_pyobject:
            # a check is only needed when the divisor is not a known
            # non-zero constant
            self.zerodivision_check = (
                self.cdivision is None and not env.directives['cdivision']
                and (not self.operand2.has_constant_result() or
                     self.operand2.constant_result == 0))
            if self.zerodivision_check or env.directives['cdivision_warnings']:
                # Need to check ahead of time to warn or raise zero division error
                self.operand1 = self.operand1.coerce_to_simple(env)
                self.operand2 = self.operand2.coerce_to_simple(env)
    def compute_c_result_type(self, type1, type2):
        # True division of two non-float operands widens to at least double.
        if self.operator == '/' and self.ctruedivision:
            if not type1.is_float and not type2.is_float:
                widest_type = PyrexTypes.widest_numeric_type(type1, PyrexTypes.c_double_type)
                widest_type = PyrexTypes.widest_numeric_type(type2, widest_type)
                return widest_type
        return NumBinopNode.compute_c_result_type(self, type1, type2)
    def zero_division_message(self):
        # Message used for the runtime ZeroDivisionError.
        if self.type.is_int:
            return "integer division or modulo by zero"
        else:
            return "float division"
    def generate_evaluation_code(self, code):
        # Emit the division; pull in the __Pyx_div_* helper when Python
        # (floor) semantics must be emulated for a C operation.
        if not self.type.is_pyobject and not self.type.is_complex:
            if self.cdivision is None:
                # unsigned and float division behave identically in C
                # and Python, so C division is safe there
                self.cdivision = (code.globalstate.directives['cdivision']
                                  or not self.type.signed
                                  or self.type.is_float)
            if not self.cdivision:
                code.globalstate.use_utility_code(div_int_utility_code.specialize(self.type))
        NumBinopNode.generate_evaluation_code(self, code)
        self.generate_div_warning_code(code)
    def generate_div_warning_code(self, code):
        # Emit the runtime zero-divisor check, the INT_MIN / -1 overflow
        # check for signed integer division, and (when requested) the
        # Py2 cdivision semantics warning.
        if not self.type.is_pyobject:
            if self.zerodivision_check:
                if not self.infix:
                    zero_test = "%s(%s)" % (self.type.unary_op('zero'), self.operand2.result())
                else:
                    zero_test = "%s == 0" % self.operand2.result()
                code.putln("if (unlikely(%s)) {" % zero_test)
                # raising needs the GIL, which C code may not hold
                code.put_ensure_gil()
                code.putln('PyErr_SetString(PyExc_ZeroDivisionError, "%s");' % self.zero_division_message())
                code.put_release_ensured_gil()
                code.putln(code.error_goto(self.pos))
                code.putln("}")
                if self.type.is_int and self.type.signed and self.operator != '%':
                    # INT_MIN / -1 overflows; only test for -1 when the
                    # divisor's type can actually represent it
                    code.globalstate.use_utility_code(division_overflow_test_code)
                    if self.operand2.type.signed == 2:
                        # explicitly signed, no runtime check needed
                        minus1_check = 'unlikely(%s == -1)' % self.operand2.result()
                    else:
                        type_of_op2 = self.operand2.type.declaration_code('')
                        minus1_check = '(!(((%s)-1) > 0)) && unlikely(%s == (%s)-1)' % (
                            type_of_op2, self.operand2.result(), type_of_op2)
                    code.putln("else if (sizeof(%s) == sizeof(long) && %s "
                               " && unlikely(UNARY_NEG_WOULD_OVERFLOW(%s))) {" % (
                               self.type.declaration_code(''),
                               minus1_check,
                               self.operand1.result()))
                    code.put_ensure_gil()
                    code.putln('PyErr_SetString(PyExc_OverflowError, "value too large to perform division");')
                    code.put_release_ensured_gil()
                    code.putln(code.error_goto(self.pos))
                    code.putln("}")
            if code.globalstate.directives['cdivision_warnings'] and self.operator != '/':
                # warn at runtime when C and Python results would differ,
                # i.e. when the operands have opposite signs
                code.globalstate.use_utility_code(cdivision_warning_utility_code)
                code.putln("if (unlikely((%s < 0) ^ (%s < 0))) {" % (
                                self.operand1.result(),
                                self.operand2.result()))
                code.put_ensure_gil()
                code.putln(code.set_error_info(self.pos, used=True))
                code.putln("if (__Pyx_cdivision_warning(%(FILENAME)s, "
                                                       "%(LINENO)s)) {" % {
                    'FILENAME': Naming.filename_cname,
                    'LINENO': Naming.lineno_cname,
                    })
                code.put_release_ensured_gil()
                code.put_goto(code.error_label)
                code.putln("}")
                code.put_release_ensured_gil()
                code.putln("}")
    def calculate_result_code(self):
        # C expression for the division result, depending on the chosen
        # semantics (true/C division inline, floor semantics via helper).
        if self.type.is_complex:
            return NumBinopNode.calculate_result_code(self)
        elif self.type.is_float and self.operator == '//':
            # float floor division: divide, then floor()
            return "floor(%s / %s)" % (
                self.operand1.result(),
                self.operand2.result())
        elif self.truedivision or self.cdivision:
            op1 = self.operand1.result()
            op2 = self.operand2.result()
            if self.truedivision:
                # force float division by casting operands to the result type
                if self.type != self.operand1.type:
                    op1 = self.type.cast_code(op1)
                if self.type != self.operand2.type:
                    op2 = self.type.cast_code(op2)
            return "(%s / %s)" % (op1, op2)
        else:
            # Python floor semantics via the generated __Pyx_div_* helper
            return "__Pyx_div_%s(%s, %s)" % (
                self.type.specialization_name(),
                self.operand1.result(),
                self.operand2.result())
class ModNode(DivNode):
    # '%' operator.
    def is_py_operation_types(self, type1, type2):
        # '%' with a string operand is Python string formatting,
        # not arithmetic.
        return (type1.is_string
                or type2.is_string
                or NumBinopNode.is_py_operation_types(self, type1, type2))
    def infer_builtin_types_operation(self, type1, type2):
        # b'%s' % xyz raises an exception in Py3, so it's safe to infer the type for Py2
        if type1 is unicode_type:
            # None + xyz may be implemented by RHS
            if type2.is_builtin_type or not self.operand1.may_be_none():
                return type1
        elif type1 in (bytes_type, str_type, basestring_type):
            if type2 is unicode_type:
                return type2
            elif type2.is_numeric:
                # formatting a number never changes the string type
                return type1
            elif type1 is bytes_type and not type2.is_builtin_type:
                return None # RHS might implement the '%' operator differently in Py3
            else:
                return basestring_type # either str or unicode, can't tell
        return None
    def zero_division_message(self):
        # Message used for the runtime ZeroDivisionError.
        if self.type.is_int:
            return "integer division or modulo by zero"
        else:
            return "float divmod()"
    def analyse_operation(self, env):
        # Like DivNode, but Python-semantics mod is only supported for
        # int and float types.
        DivNode.analyse_operation(self, env)
        if not self.type.is_pyobject:
            if self.cdivision is None:
                self.cdivision = env.directives['cdivision'] or not self.type.signed
            if not self.cdivision and not self.type.is_int and not self.type.is_float:
                error(self.pos, "mod operator not supported for type '%s'" % self.type)
    def generate_evaluation_code(self, code):
        # Pull in the __Pyx_mod_* helper when Python sign semantics must
        # be emulated for a C operation.
        if not self.type.is_pyobject and not self.cdivision:
            if self.type.is_int:
                code.globalstate.use_utility_code(
                    mod_int_utility_code.specialize(self.type))
            else: # float
                code.globalstate.use_utility_code(
                    mod_float_utility_code.specialize(
                        self.type, math_h_modifier=self.type.math_h_modifier))
        # note: skipping over DivNode here
        NumBinopNode.generate_evaluation_code(self, code)
        self.generate_div_warning_code(code)
    def calculate_result_code(self):
        # C expression for the mod result: fmod()/'%' for C semantics,
        # generated __Pyx_mod_* helper for Python sign semantics.
        if self.cdivision:
            if self.type.is_float:
                return "fmod%s(%s, %s)" % (
                    self.type.math_h_modifier,
                    self.operand1.result(),
                    self.operand2.result())
            else:
                return "(%s %% %s)" % (
                    self.operand1.result(),
                    self.operand2.result())
        else:
            return "__Pyx_mod_%s(%s, %s)" % (
                    self.type.specialization_name(),
                    self.operand1.result(),
                    self.operand2.result())
    def py_operation_function(self):
        # Use the dedicated string-formatting helpers for unicode/str
        # left-hand sides; the 'Safe' variants handle a possibly-None LHS.
        if self.operand1.type is unicode_type:
            if self.operand1.may_be_none():
                return '__Pyx_PyUnicode_FormatSafe'
            else:
                return 'PyUnicode_Format'
        elif self.operand1.type is str_type:
            if self.operand1.may_be_none():
                return '__Pyx_PyString_FormatSafe'
            else:
                return '__Pyx_PyString_Format'
        return super(ModNode, self).py_operation_function()
class PowNode(NumBinopNode):
    # '**' operator.
    def analyse_c_operation(self, env):
        # Choose the C-level power function matching the result type and
        # request the utility code that implements it where necessary.
        NumBinopNode.analyse_c_operation(self, env)
        if self.type.is_complex:
            if not self.type.real_type.is_float:
                # integer complex powers have no C implementation
                error(self.pos, "complex int powers not supported")
                self.pow_func = "<error>"
            else:
                self.operand1 = self.operand1.coerce_to(self.type, env)
                self.operand2 = self.operand2.coerce_to(self.type, env)
                self.pow_func = "__Pyx_c_pow" + self.type.real_type.math_h_modifier
        elif self.type.is_float:
            self.pow_func = "pow" + self.type.math_h_modifier
        elif self.type.is_int:
            c_type = self.type.declaration_code('')
            self.pow_func = "__Pyx_pow_%s" % c_type.replace(' ', '_')
            env.use_utility_code(
                int_pow_utility_code.specialize(
                    func_name=self.pow_func,
                    type=c_type,
                    signed=self.type.signed and 1 or 0))
        elif not self.type.is_error:
            error(self.pos, "got unexpected types for C power operator: %s, %s" %
                            (self.operand1.type, self.operand2.type))
    def calculate_result_code(self):
        # Cast both operands to the result type to work around MSVC
        # overloading ambiguity.
        def typecast(operand):
            if self.type == operand.type:
                return operand.result()
            return self.type.cast_code(operand.result())
        return "%s(%s, %s)" % (
            self.pow_func,
            typecast(self.operand1),
            typecast(self.operand2))
# Note: This class is temporarily "shut down" into an ineffective temp
# allocation mode.
#
# More sophisticated temp reuse was going on before, one could have a
# look at adding this again after /all/ classes are converted to the
# new temp scheme. (The temp juggling cannot work otherwise).
class BoolBinopNode(ExprNode):
    # Short-circuiting boolean operation ('and' / 'or').
    #
    # operator string
    # operand1 ExprNode
    # operand2 ExprNode
    subexprs = ['operand1', 'operand2']
    def infer_type(self, env):
        # The result is one of the two operands, so infer the smallest
        # type spanning both.
        type1 = self.operand1.infer_type(env)
        type2 = self.operand2.infer_type(env)
        return PyrexTypes.independent_spanning_type(type1, type2)
    def may_be_none(self):
        # 'x or y' is None only if y can be (a truthy x is never None);
        # 'x and y' may return either operand.
        if self.operator == 'or':
            return self.operand2.may_be_none()
        else:
            return self.operand1.may_be_none() or self.operand2.may_be_none()
    def calculate_constant_result(self):
        # Constant folding with Python's own short-circuit semantics.
        if self.operator == 'and':
            self.constant_result = \
                self.operand1.constant_result and \
                self.operand2.constant_result
        else:
            self.constant_result = \
                self.operand1.constant_result or \
                self.operand2.constant_result
    def compile_time_value(self, denv):
        # Short-circuiting evaluation in the compile-time environment.
        if self.operator == 'and':
            return self.operand1.compile_time_value(denv) \
                and self.operand2.compile_time_value(denv)
        else:
            return self.operand1.compile_time_value(denv) \
                or self.operand2.compile_time_value(denv)
    def coerce_to_boolean(self, env):
        # Rebuild the node with both operands coerced to booleans, so the
        # whole expression evaluates as a C bint.
        return BoolBinopNode(
            self.pos,
            operator = self.operator,
            operand1 = self.operand1.coerce_to_boolean(env),
            operand2 = self.operand2.coerce_to_boolean(env),
            type = PyrexTypes.c_bint_type,
            is_temp = self.is_temp)
    def analyse_types(self, env):
        # Coerce both operands to a common spanning type.
        self.operand1 = self.operand1.analyse_types(env)
        self.operand2 = self.operand2.analyse_types(env)
        self.type = PyrexTypes.independent_spanning_type(self.operand1.type, self.operand2.type)
        self.operand1 = self.operand1.coerce_to(self.type, env)
        self.operand2 = self.operand2.coerce_to(self.type, env)
        # For what we're about to do, it's vital that
        # both operands be temp nodes.
        self.operand1 = self.operand1.coerce_to_simple(env)
        self.operand2 = self.operand2.coerce_to_simple(env)
        self.is_temp = 1
        return self
    gil_message = "Truth-testing Python object"
    def check_const(self):
        return self.operand1.check_const() and self.operand2.check_const()
    def generate_evaluation_code(self, code):
        # Evaluate operand1, test its truth, and only evaluate operand2
        # in the branch that needs it ('and' => operand1 true,
        # 'or' => operand1 false); the other branch reuses operand1.
        code.mark_pos(self.pos)
        self.operand1.generate_evaluation_code(code)
        test_result, uses_temp = self.generate_operand1_test(code)
        if self.operator == 'and':
            sense = ""
        else:
            sense = "!"
        code.putln(
            "if (%s%s) {" % (
            sense,
            test_result))
        if uses_temp:
            code.funcstate.release_temp(test_result)
        self.operand1.generate_disposal_code(code)
        self.operand2.generate_evaluation_code(code)
        self.allocate_temp_result(code)
        self.operand2.make_owned_reference(code)
        code.putln("%s = %s;" % (self.result(), self.operand2.result()))
        self.operand2.generate_post_assignment_code(code)
        self.operand2.free_temps(code)
        code.putln("} else {")
        self.operand1.make_owned_reference(code)
        code.putln("%s = %s;" % (self.result(), self.operand1.result()))
        self.operand1.generate_post_assignment_code(code)
        self.operand1.free_temps(code)
        code.putln("}")
    def generate_operand1_test(self, code):
        # Generate code to test the truth of the first operand.
        # Returns (result_expression, needs_temp_release).
        if self.type.is_pyobject:
            test_result = code.funcstate.allocate_temp(PyrexTypes.c_bint_type,
                                                       manage_ref=False)
            code.putln(
                "%s = __Pyx_PyObject_IsTrue(%s); %s" % (
                    test_result,
                    self.operand1.py_result(),
                    code.error_goto_if_neg(test_result, self.pos)))
        else:
            test_result = self.operand1.result()
        return (test_result, self.type.is_pyobject)
class CondExprNode(ExprNode):
    # Short-circuiting conditional expression (x if test else y).
    #
    # test ExprNode
    # true_val ExprNode
    # false_val ExprNode
    true_val = None
    false_val = None
    subexprs = ['test', 'true_val', 'false_val']
    def type_dependencies(self, env):
        # The result type depends on both value branches (not the test).
        return self.true_val.type_dependencies(env) + self.false_val.type_dependencies(env)
    def infer_type(self, env):
        # Smallest type spanning both branches.
        return PyrexTypes.independent_spanning_type(
            self.true_val.infer_type(env),
            self.false_val.infer_type(env))
    def calculate_constant_result(self):
        # Constant folding: pick the branch selected by the constant test.
        if self.test.constant_result:
            self.constant_result = self.true_val.constant_result
        else:
            self.constant_result = self.false_val.constant_result
    def analyse_types(self, env):
        self.test = self.test.analyse_types(env).coerce_to_boolean(env)
        self.true_val = self.true_val.analyse_types(env)
        self.false_val = self.false_val.analyse_types(env)
        self.is_temp = 1
        return self.analyse_result_type(env)
    def analyse_result_type(self, env):
        # Unify the branch types; explicit branch coercion is only needed
        # when Python objects are involved.
        self.type = PyrexTypes.independent_spanning_type(
            self.true_val.type, self.false_val.type)
        if self.type.is_pyobject:
            self.result_ctype = py_object_type
        if self.true_val.type.is_pyobject or self.false_val.type.is_pyobject:
            self.true_val = self.true_val.coerce_to(self.type, env)
            self.false_val = self.false_val.coerce_to(self.type, env)
        if self.type == PyrexTypes.error_type:
            self.type_error()
        return self
    def coerce_to(self, dst_type, env):
        # Push the coercion into both branches instead of wrapping the
        # whole conditional in a coercion node.
        self.true_val = self.true_val.coerce_to(dst_type, env)
        self.false_val = self.false_val.coerce_to(dst_type, env)
        self.result_ctype = None
        return self.analyse_result_type(env)
    def type_error(self):
        if not (self.true_val.type.is_error or self.false_val.type.is_error):
            error(self.pos, "Incompatible types in conditional expression (%s; %s)" %
                (self.true_val.type, self.false_val.type))
        self.type = PyrexTypes.error_type
    def check_const(self):
        return (self.test.check_const()
            and self.true_val.check_const()
            and self.false_val.check_const())
    def generate_evaluation_code(self, code):
        # Because subexprs may not be evaluated we can use a more optimal
        # subexpr allocation strategy than the default, so override evaluation_code.
        code.mark_pos(self.pos)
        self.allocate_temp_result(code)
        self.test.generate_evaluation_code(code)
        code.putln("if (%s) {" % self.test.result() )
        self.eval_and_get(code, self.true_val)
        code.putln("} else {")
        self.eval_and_get(code, self.false_val)
        code.putln("}")
        self.test.generate_disposal_code(code)
        self.test.free_temps(code)
    def eval_and_get(self, code, expr):
        # Evaluate one branch and assign its (owned) result to our temp.
        expr.generate_evaluation_code(code)
        expr.make_owned_reference(code)
        code.putln('%s = %s;' % (self.result(), expr.result_as(self.ctype())))
        expr.generate_post_assignment_code(code)
        expr.free_temps(code)
# Mapping from comparison operator strings to CPython's rich comparison
# constants.  'in'/'not_in' have no real Py_* constant; they are faked by
# special compare functions and mapped to EQ/NE here.
richcmp_constants = dict([
    ("<", "Py_LT"),
    ("<=", "Py_LE"),
    ("==", "Py_EQ"),
    ("!=", "Py_NE"),
    ("<>", "Py_NE"),
    (">", "Py_GT"),
    (">=", "Py_GE"),
    # the following are faked by special compare functions
    ("in", "Py_EQ"),
    ("not_in", "Py_NE"),
])
class CmpNode(object):
    # Mixin class containing code common to PrimaryCmpNodes
    # and CascadedCmpNodes.
    #
    # special_bool_cmp_function      C helper name for an optimised
    #                                bool-result comparison, if any
    # special_bool_cmp_utility_code  utility code implementing that helper
    special_bool_cmp_function = None
    special_bool_cmp_utility_code = None
    def infer_type(self, env):
        # TODO: Actually implement this (after merging with -unstable).
        return py_object_type
    def calculate_cascaded_constant_result(self, operand1_result):
        # Constant-fold one step of a (possibly cascaded) comparison,
        # given the already-folded left operand.
        func = compile_time_binary_operators[self.operator]
        operand2_result = self.operand2.constant_result
        if (isinstance(operand1_result, (bytes, unicode)) and
                isinstance(operand2_result, (bytes, unicode)) and
                type(operand1_result) != type(operand2_result)):
            # string comparison of different types isn't portable
            return
        if self.operator in ('in', 'not_in'):
            if isinstance(self.operand2, (ListNode, TupleNode, SetNode)):
                if not self.operand2.args:
                    # containment in an empty container is decided statically
                    self.constant_result = self.operator == 'not_in'
                    return
                elif isinstance(self.operand2, ListNode) and not self.cascade:
                    # tuples are more efficient to store than lists
                    self.operand2 = self.operand2.as_tuple()
            elif isinstance(self.operand2, DictNode):
                if not self.operand2.key_value_pairs:
                    self.constant_result = self.operator == 'not_in'
                    return
        self.constant_result = func(operand1_result, operand2_result)
    def cascaded_compile_time_value(self, operand1, denv):
        # Evaluate this comparison step at compile time; per Python
        # semantics, the cascade only continues while results are true.
        func = get_compile_time_binop(self)
        operand2 = self.operand2.compile_time_value(denv)
        try:
            result = func(operand1, operand2)
        except Exception, e:
            self.compile_time_value_error(e)
            result = None
        if result:
            cascade = self.cascade
            if cascade:
                result = result and cascade.cascaded_compile_time_value(operand2, denv)
        return result
    def is_cpp_comparison(self):
        return self.operand1.type.is_cpp_class or self.operand2.type.is_cpp_class
    def find_common_int_type(self, env, op, operand1, operand2):
        # type1 != type2 and at least one of the types is not a C int
        type1 = operand1.type
        type2 = operand2.type
        type1_can_be_int = False
        type2_can_be_int = False
        if operand1.is_string_literal and operand1.can_coerce_to_char_literal():
            type1_can_be_int = True
        if operand2.is_string_literal and operand2.can_coerce_to_char_literal():
            type2_can_be_int = True
        if type1.is_int:
            if type2_can_be_int:
                return type1
        elif type2.is_int:
            if type1_can_be_int:
                return type2
        elif type1_can_be_int:
            if type2_can_be_int:
                # two single-character literals: compare as code points
                if Builtin.unicode_type in (type1, type2):
                    return PyrexTypes.c_py_ucs4_type
                else:
                    return PyrexTypes.c_uchar_type
        return None
    def find_common_type(self, env, op, operand1, common_type=None):
        # Determine the type both operands of this comparison step get
        # coerced to, recursing down the cascade and merging the results.
        operand2 = self.operand2
        type1 = operand1.type
        type2 = operand2.type
        new_common_type = None
        # catch general errors
        if type1 == str_type and (type2.is_string or type2 in (bytes_type, unicode_type)) or \
                type2 == str_type and (type1.is_string or type1 in (bytes_type, unicode_type)):
            error(self.pos, "Comparisons between bytes/unicode and str are not portable to Python 3")
            new_common_type = error_type
        # try to use numeric comparisons where possible
        elif type1.is_complex or type2.is_complex:
            if op not in ('==', '!=') \
                    and (type1.is_complex or type1.is_numeric) \
                    and (type2.is_complex or type2.is_numeric):
                error(self.pos, "complex types are unordered")
                new_common_type = error_type
            elif type1.is_pyobject:
                new_common_type = type1
            elif type2.is_pyobject:
                new_common_type = type2
            else:
                new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
        elif type1.is_numeric and type2.is_numeric:
            new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
        elif common_type is None or not common_type.is_pyobject:
            new_common_type = self.find_common_int_type(env, op, operand1, operand2)
        if new_common_type is None:
            # fall back to generic type compatibility tests
            if type1 == type2:
                new_common_type = type1
            elif type1.is_pyobject or type2.is_pyobject:
                if type2.is_numeric or type2.is_string:
                    if operand2.check_for_coercion_error(type1, env):
                        new_common_type = error_type
                    else:
                        new_common_type = py_object_type
                elif type1.is_numeric or type1.is_string:
                    if operand1.check_for_coercion_error(type2, env):
                        new_common_type = error_type
                    else:
                        new_common_type = py_object_type
                elif py_object_type.assignable_from(type1) and py_object_type.assignable_from(type2):
                    new_common_type = py_object_type
                else:
                    # one Python type and one non-Python type, not assignable
                    self.invalid_types_error(operand1, op, operand2)
                    new_common_type = error_type
            elif type1.assignable_from(type2):
                new_common_type = type1
            elif type2.assignable_from(type1):
                new_common_type = type2
            else:
                # C types that we couldn't handle up to here are an error
                self.invalid_types_error(operand1, op, operand2)
                new_common_type = error_type
        if new_common_type.is_string and (isinstance(operand1, BytesNode) or
                                          isinstance(operand2, BytesNode)):
            # special case when comparing char* to bytes literal: must
            # compare string values!
            new_common_type = bytes_type
        # recursively merge types
        if common_type is None or new_common_type.is_error:
            common_type = new_common_type
        else:
            # we could do a lot better by splitting the comparison
            # into a non-Python part and a Python part, but this is
            # safer for now
            common_type = PyrexTypes.spanning_type(common_type, new_common_type)
        if self.cascade:
            common_type = self.cascade.find_common_type(env, self.operator, operand2, common_type)
        return common_type
    def invalid_types_error(self, operand1, op, operand2):
        error(self.pos, "Invalid types for '%s' (%s, %s)" %
              (op, operand1.type, operand2.type))
    def is_python_comparison(self):
        # True if the comparison must go through the Python protocol.
        return (not self.is_ptr_contains()
            and not self.is_c_string_contains()
            and (self.has_python_operands()
                 or (self.cascade and self.cascade.is_python_comparison())
                 or self.operator in ('in', 'not_in')))
    def coerce_operands_to(self, dst_type, env):
        # Coerce operand2 (and the entire cascade) to the given type.
        operand2 = self.operand2
        if operand2.type != dst_type:
            self.operand2 = operand2.coerce_to(dst_type, env)
        if self.cascade:
            self.cascade.coerce_operands_to(dst_type, env)
    def is_python_result(self):
        # True if the result is a Python object rather than a C bint.
        return ((self.has_python_operands() and
                 self.special_bool_cmp_function is None and
                 self.operator not in ('is', 'is_not', 'in', 'not_in') and
                 not self.is_c_string_contains() and
                 not self.is_ptr_contains())
            or (self.cascade and self.cascade.is_python_result()))
    def is_c_string_contains(self):
        # 'char in bytes/str' or 'ucs4 in unicode': both can be done in C.
        return self.operator in ('in', 'not_in') and \
               ((self.operand1.type.is_int
                 and (self.operand2.type.is_string or self.operand2.type is bytes_type)) or
                (self.operand1.type.is_unicode_char
                 and self.operand2.type is unicode_type))
    def is_ptr_contains(self):
        # 'x in ptr[a:b]': containment test over a sliced pointer/array.
        if self.operator in ('in', 'not_in'):
            container_type = self.operand2.type
            return (container_type.is_ptr or container_type.is_array) \
                and not container_type.is_string
    def find_special_bool_compare_function(self, env, operand1, result_is_bool=False):
        # Try to select an optimised C helper for this comparison that
        # produces a C boolean directly; sets the special_bool_cmp_*
        # attributes and returns True on success.
        # note: currently operand1 must get coerced to a Python object if we succeed here!
        if self.operator in ('==', '!='):
            type1, type2 = operand1.type, self.operand2.type
            if result_is_bool or (type1.is_builtin_type and type2.is_builtin_type):
                if type1 is Builtin.unicode_type or type2 is Builtin.unicode_type:
                    self.special_bool_cmp_utility_code = UtilityCode.load_cached("UnicodeEquals", "StringTools.c")
                    self.special_bool_cmp_function = "__Pyx_PyUnicode_Equals"
                    return True
                elif type1 is Builtin.bytes_type or type2 is Builtin.bytes_type:
                    self.special_bool_cmp_utility_code = UtilityCode.load_cached("BytesEquals", "StringTools.c")
                    self.special_bool_cmp_function = "__Pyx_PyBytes_Equals"
                    return True
                elif type1 is Builtin.basestring_type or type2 is Builtin.basestring_type:
                    self.special_bool_cmp_utility_code = UtilityCode.load_cached("UnicodeEquals", "StringTools.c")
                    self.special_bool_cmp_function = "__Pyx_PyUnicode_Equals"
                    return True
                elif type1 is Builtin.str_type or type2 is Builtin.str_type:
                    self.special_bool_cmp_utility_code = UtilityCode.load_cached("StrEquals", "StringTools.c")
                    self.special_bool_cmp_function = "__Pyx_PyString_Equals"
                    return True
        elif self.operator in ('in', 'not_in'):
            if self.operand2.type is Builtin.dict_type:
                self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
                self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyDictContains", "ObjectHandling.c")
                self.special_bool_cmp_function = "__Pyx_PyDict_Contains"
                return True
            elif self.operand2.type is Builtin.unicode_type:
                self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
                self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyUnicodeContains", "StringTools.c")
                self.special_bool_cmp_function = "__Pyx_PyUnicode_Contains"
                return True
            else:
                if not self.operand2.type.is_pyobject:
                    self.operand2 = self.operand2.coerce_to_pyobject(env)
                self.special_bool_cmp_utility_code = UtilityCode.load_cached("PySequenceContains", "ObjectHandling.c")
                self.special_bool_cmp_function = "__Pyx_PySequence_Contains"
                return True
        return False
    def generate_operation_code(self, code, result_code,
            operand1, op , operand2):
        # Emit the C code for one comparison step, storing the outcome in
        # result_code.  Strategies, in order: special helper function,
        # PyObject_RichCompare, complex equality, plain C comparison.
        if self.type.is_pyobject:
            error_clause = code.error_goto_if_null
            got_ref = "__Pyx_XGOTREF(%s); " % result_code
            if self.special_bool_cmp_function:
                code.globalstate.use_utility_code(
                    UtilityCode.load_cached("PyBoolOrNullFromLong", "ObjectHandling.c"))
                coerce_result = "__Pyx_PyBoolOrNull_FromLong"
            else:
                coerce_result = "__Pyx_PyBool_FromLong"
        else:
            error_clause = code.error_goto_if_neg
            got_ref = ""
            coerce_result = ""
        if self.special_bool_cmp_function:
            if operand1.type.is_pyobject:
                result1 = operand1.py_result()
            else:
                result1 = operand1.result()
            if operand2.type.is_pyobject:
                result2 = operand2.py_result()
            else:
                result2 = operand2.result()
            if self.special_bool_cmp_utility_code:
                code.globalstate.use_utility_code(self.special_bool_cmp_utility_code)
            code.putln(
                "%s = %s(%s(%s, %s, %s)); %s%s" % (
                    result_code,
                    coerce_result,
                    self.special_bool_cmp_function,
                    result1, result2, richcmp_constants[op],
                    got_ref,
                    error_clause(result_code, self.pos)))
        elif operand1.type.is_pyobject and op not in ('is', 'is_not'):
            assert op not in ('in', 'not_in'), op
            code.putln("%s = PyObject_RichCompare(%s, %s, %s); %s%s" % (
                    result_code,
                    operand1.py_result(),
                    operand2.py_result(),
                    richcmp_constants[op],
                    got_ref,
                    error_clause(result_code, self.pos)))
        elif operand1.type.is_complex:
            # only equality is defined for complex: negate 'eq' for '!='
            code.putln("%s = %s(%s%s(%s, %s));" % (
                result_code,
                coerce_result,
                op == "!=" and "!" or "",
                operand1.type.unary_op('eq'),
                operand1.result(),
                operand2.result()))
        else:
            type1 = operand1.type
            type2 = operand2.type
            if (type1.is_extension_type or type2.is_extension_type) \
                    and not type1.same_as(type2):
                common_type = py_object_type
            elif type1.is_numeric:
                common_type = PyrexTypes.widest_numeric_type(type1, type2)
            else:
                common_type = type1
            code1 = operand1.result_as(common_type)
            code2 = operand2.result_as(common_type)
            code.putln("%s = %s(%s %s %s);" % (
                result_code,
                coerce_result,
                code1,
                self.c_operator(op),
                code2))
    def c_operator(self, op):
        # Map Python identity operators onto C pointer comparison.
        if op == 'is':
            return "=="
        elif op == 'is_not':
            return "!="
        else:
            return op
class PrimaryCmpNode(ExprNode, CmpNode):
# Non-cascaded comparison or first comparison of
# a cascaded sequence.
#
# operator string
# operand1 ExprNode
# operand2 ExprNode
# cascade CascadedCmpNode
# We don't use the subexprs mechanism, because
# things here are too complicated for it to handle.
# Instead, we override all the framework methods
# which use it.
child_attrs = ['operand1', 'operand2', 'coerced_operand2', 'cascade']
cascade = None
coerced_operand2 = None
is_memslice_nonecheck = False
    def infer_type(self, env):
        # Comparisons are conservatively typed as Python objects for now.
        # TODO: Actually implement this (after merging with -unstable).
        return py_object_type
    def type_dependencies(self, env):
        # Type inference never depends on other nodes here (see infer_type).
        return ()
    def calculate_constant_result(self):
        # Non-cascaded comparisons only; folding uses the CmpNode helper.
        assert not self.cascade
        self.calculate_cascaded_constant_result(self.operand1.constant_result)
    def compile_time_value(self, denv):
        # Evaluate the whole (possibly cascaded) comparison at compile time.
        operand1 = self.operand1.compile_time_value(denv)
        return self.cascaded_compile_time_value(operand1, denv)
    def analyse_types(self, env):
        # Analyse both operands, pick a comparison strategy (C++ operator,
        # memoryview None-check, C string containment, optimised bool
        # helper, or generic Python/C comparison) and coerce accordingly.
        self.operand1 = self.operand1.analyse_types(env)
        self.operand2 = self.operand2.analyse_types(env)
        if self.is_cpp_comparison():
            self.analyse_cpp_comparison(env)
            if self.cascade:
                error(self.pos, "Cascading comparison not yet supported for cpp types.")
            return self
        if self.analyse_memoryviewslice_comparison(env):
            return self
        if self.cascade:
            self.cascade = self.cascade.analyse_types(env)
        if self.operator in ('in', 'not_in'):
            if self.is_c_string_contains():
                # character-in-string test, done entirely in C
                self.is_pycmp = False
                common_type = None
                if self.cascade:
                    error(self.pos, "Cascading comparison not yet supported for 'int_val in string'.")
                    return self
                if self.operand2.type is unicode_type:
                    env.use_utility_code(UtilityCode.load_cached("PyUCS4InUnicode", "StringTools.c"))
                else:
                    if self.operand1.type is PyrexTypes.c_uchar_type:
                        self.operand1 = self.operand1.coerce_to(PyrexTypes.c_char_type, env)
                    if self.operand2.type is not bytes_type:
                        self.operand2 = self.operand2.coerce_to(bytes_type, env)
                    env.use_utility_code(UtilityCode.load_cached("BytesContains", "StringTools.c"))
                self.operand2 = self.operand2.as_none_safe_node(
                    "argument of type 'NoneType' is not iterable")
            elif self.is_ptr_contains():
                if self.cascade:
                    error(self.pos, "Cascading comparison not supported for 'val in sliced pointer'.")
                self.type = PyrexTypes.c_bint_type
                # Will be transformed by IterationTransform
                return self
            elif self.find_special_bool_compare_function(env, self.operand1):
                if not self.operand1.type.is_pyobject:
                    self.operand1 = self.operand1.coerce_to_pyobject(env)
                common_type = None # if coercion needed, the method call above has already done it
                self.is_pycmp = False # result is bint
            else:
                # generic Python containment protocol
                common_type = py_object_type
                self.is_pycmp = True
        elif self.find_special_bool_compare_function(env, self.operand1):
            if not self.operand1.type.is_pyobject:
                self.operand1 = self.operand1.coerce_to_pyobject(env)
            common_type = None # if coercion needed, the method call above has already done it
            self.is_pycmp = False # result is bint
        else:
            common_type = self.find_common_type(env, self.operator, self.operand1)
            self.is_pycmp = common_type.is_pyobject
        if common_type is not None and not common_type.is_error:
            if self.operand1.type != common_type:
                self.operand1 = self.operand1.coerce_to(common_type, env)
            self.coerce_operands_to(common_type, env)
        if self.cascade:
            # operand2 is shared with the cascade, so it must be simple
            self.operand2 = self.operand2.coerce_to_simple(env)
            self.cascade.coerce_cascaded_operands_to_temp(env)
            operand2 = self.cascade.optimise_comparison(self.operand2, env)
            if operand2 is not self.operand2:
                self.coerced_operand2 = operand2
        if self.is_python_result():
            self.type = PyrexTypes.py_object_type
        else:
            self.type = PyrexTypes.c_bint_type
            cdr = self.cascade
            while cdr:
                cdr.type = self.type
                cdr = cdr.cascade
        if self.is_pycmp or self.cascade or self.special_bool_cmp_function:
            # 1) owned reference, 2) reused value, 3) potential function error return value
            self.is_temp = 1
        return self
    def analyse_cpp_comparison(self, env):
        # Resolve an overloaded C++ comparison operator and coerce the
        # operands to the operator's declared argument types.
        type1 = self.operand1.type
        type2 = self.operand2.type
        entry = env.lookup_operator(self.operator, [self.operand1, self.operand2])
        if entry is None:
            error(self.pos, "Invalid types for '%s' (%s, %s)" %
                (self.operator, type1, type2))
            self.type = PyrexTypes.error_type
            self.result_code = "<error>"
            return
        func_type = entry.type
        if func_type.is_ptr:
            func_type = func_type.base_type
        if len(func_type.args) == 1:
            # member operator: LHS is the implicit object, only coerce RHS
            self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env)
        else:
            self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
            self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
        self.is_pycmp = False
        self.type = func_type.return_type
def analyse_memoryviewslice_comparison(self, env):
have_none = self.operand1.is_none or self.operand2.is_none
have_slice = (self.operand1.type.is_memoryviewslice or
self.operand2.type.is_memoryviewslice)
ops = ('==', '!=', 'is', 'is_not')
if have_slice and have_none and self.operator in ops:
self.is_pycmp = False
self.type = PyrexTypes.c_bint_type
self.is_memslice_nonecheck = True
return True
return False
    def coerce_to_boolean(self, env):
        # A Python comparison whose result is only truth-tested can often
        # be replaced by an optimised bool-producing helper.
        if self.is_pycmp:
            # coercing to bool => may allow for more efficient comparison code
            if self.find_special_bool_compare_function(
                    env, self.operand1, result_is_bool=True):
                self.is_pycmp = False
                self.type = PyrexTypes.c_bint_type
                self.is_temp = 1
                if self.cascade:
                    operand2 = self.cascade.optimise_comparison(
                        self.operand2, env, result_is_bool=True)
                    if operand2 is not self.operand2:
                        self.coerced_operand2 = operand2
                return self
        # TODO: check if we can optimise parts of the cascade here
        return ExprNode.coerce_to_boolean(self, env)
def has_python_operands(self):
return (self.operand1.type.is_pyobject
or self.operand2.type.is_pyobject)
def check_const(self):
if self.cascade:
self.not_const()
return False
else:
return self.operand1.check_const() and self.operand2.check_const()
    def calculate_result_code(self):
        """Render this comparison as a C expression string.

        Three cases: complex numbers (only ==/!= are meaningful, != is
        emitted as a negated ==), character-in-string containment tests
        (dispatched to a __Pyx_ runtime helper), and the plain C binary
        operator form.
        """
        if self.operand1.type.is_complex:
            # Complex types only support equality; express '!=' as '!(==)'.
            if self.operator == "!=":
                negation = "!"
            else:
                negation = ""
            return "(%s%s(%s, %s))" % (
                negation,
                self.operand1.type.binary_op('=='),
                self.operand1.result(),
                self.operand2.result())
        elif self.is_c_string_contains():
            if self.operand2.type is unicode_type:
                method = "__Pyx_UnicodeContainsUCS4"
            else:
                method = "__Pyx_BytesContains"
            if self.operator == "not_in":
                negation = "!"
            else:
                negation = ""
            # Note argument order: the helper takes (haystack, needle).
            return "(%s%s(%s, %s))" % (
                negation,
                method,
                self.operand2.result(),
                self.operand1.result())
        else:
            result1 = self.operand1.result()
            result2 = self.operand2.result()
            if self.is_memslice_nonecheck:
                # Compare the slice's owning object pointer against Py_None.
                if self.operand1.type.is_memoryviewslice:
                    result1 = "((PyObject *) %s.memview)" % result1
                else:
                    result2 = "((PyObject *) %s.memview)" % result2
            return "(%s %s %s)" % (
                result1,
                self.c_operator(self.operator),
                result2)
    def generate_evaluation_code(self, code):
        """Evaluate both operands, run the comparison (and any cascade),
        then dispose of the operands.

        When the result is not a temp, code generation happens lazily via
        calculate_result_code() instead, so only the operand evaluation is
        emitted here.
        """
        self.operand1.generate_evaluation_code(code)
        self.operand2.generate_evaluation_code(code)
        if self.is_temp:
            self.allocate_temp_result(code)
            self.generate_operation_code(code, self.result(),
                self.operand1, self.operator, self.operand2)
            if self.cascade:
                # Pass the (possibly coerced) right operand on as the next
                # link's left operand; it still needs evaluation if it was
                # coerced rather than reused directly.
                self.cascade.generate_evaluation_code(
                    code, self.result(), self.coerced_operand2 or self.operand2,
                    needs_evaluation=self.coerced_operand2 is not None)
            self.operand1.generate_disposal_code(code)
            self.operand1.free_temps(code)
            self.operand2.generate_disposal_code(code)
            self.operand2.free_temps(code)
def generate_subexpr_disposal_code(self, code):
# If this is called, it is a non-cascaded cmp,
# so only need to dispose of the two main operands.
self.operand1.generate_disposal_code(code)
self.operand2.generate_disposal_code(code)
def free_subexpr_temps(self, code):
# If this is called, it is a non-cascaded cmp,
# so only need to dispose of the two main operands.
self.operand1.free_temps(code)
self.operand2.free_temps(code)
def annotate(self, code):
self.operand1.annotate(code)
self.operand2.annotate(code)
if self.cascade:
self.cascade.annotate(code)
class CascadedCmpNode(Node, CmpNode):
    #  A CascadedCmpNode is not a complete expression node. It
    #  hangs off the side of another comparison node, shares
    #  its left operand with that node, and shares its result
    #  with the PrimaryCmpNode at the head of the chain.
    #
    #  operator      string
    #  operand2      ExprNode
    #  cascade       CascadedCmpNode

    child_attrs = ['operand2', 'coerced_operand2', 'cascade']

    cascade = None
    coerced_operand2 = None
    constant_result = constant_value_not_set  # FIXME: where to calculate this?

    def infer_type(self, env):
        # TODO: Actually implement this (after merging with -unstable).
        return py_object_type

    def type_dependencies(self, env):
        return ()

    def has_constant_result(self):
        return self.constant_result is not constant_value_not_set and \
               self.constant_result is not not_a_constant

    def analyse_types(self, env):
        # The shared left operand is analysed by the PrimaryCmpNode.
        self.operand2 = self.operand2.analyse_types(env)
        if self.cascade:
            self.cascade = self.cascade.analyse_types(env)
        return self

    def has_python_operands(self):
        return self.operand2.type.is_pyobject

    def optimise_comparison(self, operand1, env, result_is_bool=False):
        """Try to replace the generic Python comparison with a specialised
        C helper; returns the (possibly coerced) left operand to use."""
        if self.find_special_bool_compare_function(env, operand1, result_is_bool):
            self.is_pycmp = False
            self.type = PyrexTypes.c_bint_type
            if not operand1.type.is_pyobject:
                operand1 = operand1.coerce_to_pyobject(env)
        if self.cascade:
            operand2 = self.cascade.optimise_comparison(self.operand2, env, result_is_bool)
            if operand2 is not self.operand2:
                self.coerced_operand2 = operand2
        return operand1

    def coerce_operands_to_pyobjects(self, env):
        self.operand2 = self.operand2.coerce_to_pyobject(env)
        if self.operand2.type is dict_type and self.operator in ('in', 'not_in'):
            # 'x in d' on a dict must raise the iterable TypeError for None.
            self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
        if self.cascade:
            self.cascade.coerce_operands_to_pyobjects(env)

    def coerce_cascaded_operands_to_temp(self, env):
        if self.cascade:
            #self.operand2 = self.operand2.coerce_to_temp(env)  #CTT
            # The operand is reused as the next link's left side, so it must
            # be safely re-evaluable (simple), not necessarily a temp.
            self.operand2 = self.operand2.coerce_to_simple(env)
            self.cascade.coerce_cascaded_operands_to_temp(env)

    def generate_evaluation_code(self, code, result, operand1, needs_evaluation=False):
        """Emit short-circuit code: only run this link if the previous
        comparison in the chain was true."""
        if self.type.is_pyobject:
            code.putln("if (__Pyx_PyObject_IsTrue(%s)) {" % result)
            # The previous Python result is replaced below; drop its ref.
            code.put_decref(result, self.type)
        else:
            code.putln("if (%s) {" % result)
        if needs_evaluation:
            operand1.generate_evaluation_code(code)
        self.operand2.generate_evaluation_code(code)
        self.generate_operation_code(code, result,
            operand1, self.operator, self.operand2)
        if self.cascade:
            self.cascade.generate_evaluation_code(
                code, result, self.coerced_operand2 or self.operand2,
                needs_evaluation=self.coerced_operand2 is not None)
        if needs_evaluation:
            operand1.generate_disposal_code(code)
            operand1.free_temps(code)
        # Cascaded cmp result is always temp
        self.operand2.generate_disposal_code(code)
        self.operand2.free_temps(code)
        code.putln("}")

    def annotate(self, code):
        self.operand2.annotate(code)
        if self.cascade:
            self.cascade.annotate(code)
# Maps a binary operator token to the ExprNode subclass that implements it;
# consumed by binop_node() below.  Note that '/' and '//' both map to
# DivNode, and 'and'/'or' share BoolBinopNode.
binop_node_classes = {
    "or":       BoolBinopNode,
    "and":      BoolBinopNode,
    "|":        IntBinopNode,
    "^":        IntBinopNode,
    "&":        IntBinopNode,
    "<<":       IntBinopNode,
    ">>":       IntBinopNode,
    "+":        AddNode,
    "-":        SubNode,
    "*":        MulNode,
    "/":        DivNode,
    "//":       DivNode,
    "%":        ModNode,
    "**":       PowNode
}
def binop_node(pos, operator, operand1, operand2, inplace=False):
    """Construct a binop node of the class appropriate for *operator*.

    Raises KeyError for operators without an entry in binop_node_classes.
    """
    node_class = binop_node_classes[operator]
    return node_class(pos,
                      operator=operator,
                      operand1=operand1,
                      operand2=operand2,
                      inplace=inplace)
#-------------------------------------------------------------------
#
# Coercion nodes
#
# Coercion nodes are special in that they are created during
# the analyse_types phase of parse tree processing.
# Their __init__ methods consequently incorporate some aspects
# of that phase.
#
#-------------------------------------------------------------------
class CoercionNode(ExprNode):
    #  Abstract base class for coercion nodes.
    #
    #  arg       ExprNode       node being coerced

    subexprs = ['arg']
    constant_result = not_a_constant

    def __init__(self, arg):
        # Coercion nodes inherit their position from the coerced node.
        super(CoercionNode, self).__init__(arg.pos)
        self.arg = arg
        if debug_coercion:
            print("%s Coercing %s" % (self, self.arg))

    def calculate_constant_result(self):
        # constant folding can break type coercion, so this is disabled
        pass

    def annotate(self, code):
        """Annotate the argument and, when the coercion actually changes
        the type, record a 'coerce' annotation item for HTML output."""
        self.arg.annotate(code)
        if self.arg.type != self.type:
            file, line, col = self.pos
            code.annotate((file, line, col-1), AnnotationItem(
                style='coerce', tag='coerce', text='[%s] to [%s]' % (self.arg.type, self.type)))
class CoerceToMemViewSliceNode(CoercionNode):
    """
    Coerce an object to a memoryview slice. This holds a new reference in
    a managed temp.
    """

    def __init__(self, arg, dst_type, env):
        # Only object -> slice coercions belong here; slice -> slice
        # conversions are handled elsewhere.
        assert dst_type.is_memoryviewslice
        assert not arg.type.is_memoryviewslice
        CoercionNode.__init__(self, arg)
        self.type = dst_type
        self.is_temp = 1
        self.env = env
        self.use_managed_ref = True
        self.arg = arg

    def generate_result_code(self, code):
        # Ensure the from-Python conversion helper is emitted, then call it.
        self.type.create_from_py_utility_code(self.env)
        code.putln("%s = %s(%s);" % (self.result(),
                                     self.type.from_py_function,
                                     self.arg.py_result()))

        error_cond = self.type.error_condition(self.result())
        code.putln(code.error_goto_if(error_cond, self.pos))
class CastNode(CoercionNode):
    #  Wrap a node in a C type cast.  Purely a compile-time reinterpretation:
    #  no runtime conversion code is generated.

    def __init__(self, arg, new_type):
        CoercionNode.__init__(self, arg)
        self.type = new_type

    def may_be_none(self):
        # A cast neither introduces nor removes None-ness.
        return self.arg.may_be_none()

    def calculate_result_code(self):
        # Render the argument's result cast to the target type.
        return self.arg.result_as(self.type)

    def generate_result_code(self, code):
        self.arg.generate_result_code(code)
class PyTypeTestNode(CoercionNode):
    #  This node is used to check that a generic Python
    #  object is an instance of a particular extension type.
    #  This node borrows the result of its argument node.

    # When testing against a builtin type, require the exact type rather
    # than accepting subtypes.
    exact_builtin_type = True

    def __init__(self, arg, dst_type, env, notnone=False):
        #  The arg is known to be a Python object, and
        #  the dst_type is known to be an extension type.
        assert dst_type.is_extension_type or dst_type.is_builtin_type, "PyTypeTest on non extension type"
        CoercionNode.__init__(self, arg)
        self.type = dst_type
        self.result_ctype = arg.ctype()
        self.notnone = notnone

    nogil_check = Node.gil_error
    gil_message = "Python type test"

    def analyse_types(self, env):
        return self

    def may_be_none(self):
        if self.notnone:
            return False
        return self.arg.may_be_none()

    # The following simply delegate to the borrowed argument node.
    def is_simple(self):
        return self.arg.is_simple()

    def result_in_temp(self):
        return self.arg.result_in_temp()

    def is_ephemeral(self):
        return self.arg.is_ephemeral()

    def nonlocally_immutable(self):
        return self.arg.nonlocally_immutable()

    def calculate_constant_result(self):
        # FIXME
        pass

    def calculate_result_code(self):
        return self.arg.result()

    def generate_result_code(self, code):
        """Emit the runtime isinstance check; jumps to the error label on
        failure instead of producing a new value."""
        if self.type.typeobj_is_available():
            if self.type.is_builtin_type:
                type_test = self.type.type_test_code(
                    self.arg.py_result(),
                    self.notnone, exact=self.exact_builtin_type)
            else:
                type_test = self.type.type_test_code(
                    self.arg.py_result(), self.notnone)
            code.globalstate.use_utility_code(
                UtilityCode.load_cached("ExtTypeTest", "ObjectHandling.c"))
            code.putln("if (!(%s)) %s" % (
                type_test, code.error_goto(self.pos)))
        else:
            error(self.pos, "Cannot test type of extern C class "
                "without type object name specification")

    def generate_post_assignment_code(self, code):
        self.arg.generate_post_assignment_code(code)

    def free_temps(self, code):
        self.arg.free_temps(code)
class NoneCheckNode(CoercionNode):
    # This node is used to check that a Python object is not None and
    # raises an appropriate exception (as specified by the creating
    # transform).

    is_nonecheck = True

    def __init__(self, arg, exception_type_cname, exception_message,
                 exception_format_args):
        CoercionNode.__init__(self, arg)
        self.type = arg.type
        self.result_ctype = arg.ctype()
        self.exception_type_cname = exception_type_cname
        self.exception_message = exception_message
        # Normalise to a tuple so an empty/None argument means "no format
        # args" and PyErr_SetString is used instead of PyErr_Format.
        self.exception_format_args = tuple(exception_format_args or ())

    nogil_check = None # this node only guards an operation that would fail already

    def analyse_types(self, env):
        return self

    def may_be_none(self):
        # That's the whole point of this node: after it runs, the value
        # is guaranteed not to be None.
        return False

    # Delegations to the borrowed argument node.
    def is_simple(self):
        return self.arg.is_simple()

    def result_in_temp(self):
        return self.arg.result_in_temp()

    def nonlocally_immutable(self):
        return self.arg.nonlocally_immutable()

    def calculate_result_code(self):
        return self.arg.result()

    def condition(self):
        """C expression for the object pointer that gets compared against
        Py_None."""
        if self.type.is_pyobject:
            return self.arg.py_result()
        elif self.type.is_memoryviewslice:
            return "((PyObject *) %s.memview)" % self.arg.result()
        else:
            raise Exception("unsupported type")

    def put_nonecheck(self, code):
        """Emit the 'if (x == Py_None) raise ...' guard, acquiring the GIL
        first when running in a nogil context."""
        code.putln(
            "if (unlikely(%s == Py_None)) {" % self.condition())

        if self.in_nogil_context:
            code.put_ensure_gil()

        escape = StringEncoding.escape_byte_string
        if self.exception_format_args:
            code.putln('PyErr_Format(%s, "%s", %s);' % (
                self.exception_type_cname,
                StringEncoding.escape_byte_string(
                    self.exception_message.encode('UTF-8')),
                ', '.join([ '"%s"' % escape(str(arg).encode('UTF-8'))
                            for arg in self.exception_format_args ])))
        else:
            code.putln('PyErr_SetString(%s, "%s");' % (
                self.exception_type_cname,
                escape(self.exception_message.encode('UTF-8'))))

        if self.in_nogil_context:
            code.put_release_ensured_gil()

        code.putln(code.error_goto(self.pos))
        code.putln("}")

    def generate_result_code(self, code):
        self.put_nonecheck(code)

    def generate_post_assignment_code(self, code):
        self.arg.generate_post_assignment_code(code)

    def free_temps(self, code):
        self.arg.free_temps(code)
class CoerceToPyTypeNode(CoercionNode):
    #  This node is used to convert a C data type
    #  to a Python object.

    type = py_object_type
    is_temp = 1

    def __init__(self, arg, env, type=py_object_type):
        if not arg.type.create_to_py_utility_code(env):
            error(arg.pos, "Cannot convert '%s' to Python object" % arg.type)
        elif arg.type.is_complex:
            # special case: complex coercion is so complex that it
            # uses a macro ("__pyx_PyComplex_FromComplex()"), for
            # which the argument must be simple
            arg = arg.coerce_to_simple(env)
        CoercionNode.__init__(self, arg)
        if type is py_object_type:
            # be specific about some known types
            if arg.type.is_string or arg.type.is_cpp_string:
                self.type = default_str_type(env)
            elif arg.type.is_pyunicode_ptr or arg.type.is_unicode_char:
                self.type = unicode_type
            elif arg.type.is_complex:
                self.type = Builtin.complex_type
        elif arg.type.is_string or arg.type.is_cpp_string:
            # Converting a C string to a specific (non-default) Python
            # string type requires a known encoding unless the target is
            # a raw bytes/bytearray.
            if (type not in (bytes_type, bytearray_type)
                    and not env.directives['c_string_encoding']):
                error(arg.pos,
                      "default encoding required for conversion from '%s' to '%s'" %
                      (arg.type, type))
            self.type = type
        else:
            # FIXME: check that the target type and the resulting type are compatible
            pass

        if arg.type.is_memoryviewslice:
            # Register utility codes at this point
            arg.type.get_to_py_function(env, arg)

        self.env = env

    gil_message = "Converting to Python object"

    def may_be_none(self):
        # FIXME: is this always safe?
        return False

    def coerce_to_boolean(self, env):
        # Skip the round trip through a Python object when the underlying
        # C value is already boolean-ish.
        arg_type = self.arg.type
        if (arg_type == PyrexTypes.c_bint_type or
                (arg_type.is_pyobject and arg_type.name == 'bool')):
            return self.arg.coerce_to_temp(env)
        else:
            return CoerceToBooleanNode(self, env)

    def coerce_to_integer(self, env):
        # If not already some C integer type, coerce to longint.
        if self.arg.type.is_int:
            return self.arg
        else:
            return self.arg.coerce_to(PyrexTypes.c_long_type, env)

    def analyse_types(self, env):
        # The arg is always already analysed
        return self

    def generate_result_code(self, code):
        arg_type = self.arg.type
        if arg_type.is_memoryviewslice:
            funccall = arg_type.get_to_py_function(self.env, self.arg)
        else:
            func = arg_type.to_py_function
            if arg_type.is_string or arg_type.is_cpp_string:
                # Pick the conversion helper matching the requested Python
                # string type by patching the generic "Object" name.
                if self.type in (bytes_type, str_type, unicode_type):
                    func = func.replace("Object", self.type.name.title())
                elif self.type is bytearray_type:
                    func = func.replace("Object", "ByteArray")
            funccall = "%s(%s)" % (func, self.arg.result())

        code.putln('%s = %s; %s' % (
            self.result(),
            funccall,
            code.error_goto_if_null(self.result(), self.pos)))

        code.put_gotref(self.py_result())
class CoerceIntToBytesNode(CoerceToPyTypeNode):
    #  This node is used to convert a C int type to a Python bytes
    #  object.

    is_temp = 1

    def __init__(self, arg, env):
        arg = arg.coerce_to_simple(env)
        # Deliberately bypasses CoerceToPyTypeNode.__init__ (no generic
        # to-Python utility code is needed for this special case).
        CoercionNode.__init__(self, arg)
        self.type = Builtin.bytes_type

    def generate_result_code(self, code):
        arg = self.arg
        arg_result = arg.result()
        if arg.type not in (PyrexTypes.c_char_type,
                            PyrexTypes.c_uchar_type,
                            PyrexTypes.c_schar_type):
            # Wider int types need a runtime range check before narrowing
            # to a single byte.
            if arg.type.signed:
                code.putln("if ((%s < 0) || (%s > 255)) {" % (
                    arg_result, arg_result))
            else:
                code.putln("if (%s > 255) {" % arg_result)
            code.putln('PyErr_SetString(PyExc_OverflowError, '
                       '"value too large to pack into a byte"); %s' % (
                           code.error_goto(self.pos)))
            code.putln('}')
        temp = None
        if arg.type is not PyrexTypes.c_char_type:
            # PyBytes_FromStringAndSize needs a char*; stage through a
            # char temp so we can take its address.
            temp = code.funcstate.allocate_temp(PyrexTypes.c_char_type, manage_ref=False)
            code.putln("%s = (char)%s;" % (temp, arg_result))
            arg_result = temp
        code.putln('%s = PyBytes_FromStringAndSize(&%s, 1); %s' % (
            self.result(),
            arg_result,
            code.error_goto_if_null(self.result(), self.pos)))
        if temp is not None:
            code.funcstate.release_temp(temp)
        code.put_gotref(self.py_result())
class CoerceFromPyTypeNode(CoercionNode):
    #  This node is used to convert a Python object
    #  to a C data type.

    def __init__(self, result_type, arg, env):
        CoercionNode.__init__(self, arg)
        self.type = result_type
        self.is_temp = 1
        if not result_type.create_from_py_utility_code(env):
            error(arg.pos,
                  "Cannot convert Python object to '%s'" % result_type)
        if self.type.is_string or self.type.is_pyunicode_ptr:
            # A char*/Py_UNICODE* borrowed from a Python string is only
            # valid while the Python object lives; warn/error on unsafe
            # sources.
            if self.arg.is_ephemeral():
                error(arg.pos,
                      "Obtaining '%s' from temporary Python value" % result_type)
            elif self.arg.is_name and self.arg.entry and self.arg.entry.is_pyglobal:
                warning(arg.pos,
                        "Obtaining '%s' from externally modifiable global Python value" % result_type,
                        level=1)

    def analyse_types(self, env):
        # The arg is always already analysed
        return self

    def generate_result_code(self, code):
        function = self.type.from_py_function
        operand = self.arg.py_result()
        rhs = "%s(%s)" % (function, operand)
        if self.type.is_enum:
            # The helper converts via a C long; cast to the enum type.
            rhs = typecast(self.type, c_long_type, rhs)
        code.putln('%s = %s; %s' % (
            self.result(),
            rhs,
            code.error_goto_if(self.type.error_condition(self.result()), self.pos)))
        if self.type.is_pyobject:
            code.put_gotref(self.py_result())

    def nogil_check(self, env):
        error(self.pos, "Coercion from Python not allowed without the GIL")
class CoerceToBooleanNode(CoercionNode):
    #  This node is used when a result needs to be used
    #  in a boolean context.

    type = PyrexTypes.c_bint_type

    # Builtin container types whose truth value can be computed without
    # the generic (and GIL-requiring) __Pyx_PyObject_IsTrue call.
    _special_builtins = {
        Builtin.list_type : 'PyList_GET_SIZE',
        Builtin.tuple_type : 'PyTuple_GET_SIZE',
        Builtin.bytes_type : 'PyBytes_GET_SIZE',
        Builtin.unicode_type : 'PyUnicode_GET_SIZE',
    }

    def __init__(self, arg, env):
        CoercionNode.__init__(self, arg)
        if arg.type.is_pyobject:
            self.is_temp = 1

    def nogil_check(self, env):
        # Only generic Python truth testing requires the GIL; the special
        # builtin size macros above are safe without it.
        if self.arg.type.is_pyobject and self._special_builtins.get(self.arg.type) is None:
            self.gil_error()

    gil_message = "Truth-testing Python object"

    def check_const(self):
        if self.is_temp:
            self.not_const()
            return False
        return self.arg.check_const()

    def calculate_result_code(self):
        # Non-temp (plain C value) case: compare against zero.
        return "(%s != 0)" % self.arg.result()

    def generate_result_code(self, code):
        if not self.is_temp:
            return
        test_func = self._special_builtins.get(self.arg.type)
        if test_func is not None:
            # None is falsy, otherwise truth == non-empty container.
            code.putln("%s = (%s != Py_None) && (%s(%s) != 0);" % (
                self.result(),
                self.arg.py_result(),
                test_func,
                self.arg.py_result()))
        else:
            code.putln(
                "%s = __Pyx_PyObject_IsTrue(%s); %s" % (
                    self.result(),
                    self.arg.py_result(),
                    code.error_goto_if_neg(self.result(), self.pos)))
class CoerceToComplexNode(CoercionNode):
    # Convert a real or complex value to the destination complex type.
    # Purely an expression-level rewrite: no result code is generated.

    def __init__(self, arg, dst_type, env):
        if arg.type.is_complex:
            # The CREAL/CIMAG macros evaluate their argument twice, so it
            # must be simple.
            arg = arg.coerce_to_simple(env)
        self.type = dst_type
        CoercionNode.__init__(self, arg)
        dst_type.create_declaration_utility_code(env)

    def calculate_result_code(self):
        if self.arg.type.is_complex:
            real_part = "__Pyx_CREAL(%s)" % self.arg.result()
            imag_part = "__Pyx_CIMAG(%s)" % self.arg.result()
        else:
            # Real argument: imaginary part is zero.
            real_part = self.arg.result()
            imag_part = "0"
        return "%s(%s, %s)" % (
            self.type.from_parts,
            real_part,
            imag_part)

    def generate_result_code(self, code):
        pass
class CoerceToTempNode(CoercionNode):
    #  This node is used to force the result of another node
    #  to be stored in a temporary. It is only used if the
    #  argument node's result is not already in a temporary.

    def __init__(self, arg, env):
        CoercionNode.__init__(self, arg)
        self.type = self.arg.type.as_argument_type()
        self.constant_result = self.arg.constant_result
        self.is_temp = 1
        if self.type.is_pyobject:
            self.result_ctype = py_object_type

    gil_message = "Creating temporary Python reference"

    def analyse_types(self, env):
        # The arg is always already analysed
        return self

    def coerce_to_boolean(self, env):
        self.arg = self.arg.coerce_to_boolean(env)
        if self.arg.is_simple():
            # No temp needed anymore: the boolean arg can be used directly.
            return self.arg
        self.type = self.arg.type
        self.result_ctype = self.type
        return self

    def generate_result_code(self, code):
        #self.arg.generate_evaluation_code(code) # Already done
        # by generic generate_subexpr_evaluation_code!
        code.putln("%s = %s;" % (
            self.result(), self.arg.result_as(self.ctype())))
        if self.use_managed_ref:
            # The temp owns its own reference to the value.
            if self.type.is_pyobject:
                code.put_incref(self.result(), self.ctype())
            elif self.type.is_memoryviewslice:
                code.put_incref_memoryviewslice(self.result(),
                                                not self.in_nogil_context)
class ProxyNode(CoercionNode):
    """
    A node that should not be replaced by transforms or other means,
    and hence can be useful to wrap the argument to a clone node

    MyNode    -> ProxyNode -> ArgNode
    CloneNode -^
    """

    nogil_check = None

    def __init__(self, arg):
        super(ProxyNode, self).__init__(arg)
        self.constant_result = arg.constant_result
        self._proxy_type()

    def analyse_expressions(self, env):
        self.arg = self.arg.analyse_expressions(env)
        # Re-mirror typing info, which analysis may have changed.
        self._proxy_type()
        return self

    def _proxy_type(self):
        # Copy the wrapped node's typing/entry info so the proxy is
        # transparent to the rest of the compiler.
        if hasattr(self.arg, 'type'):
            self.type = self.arg.type
            self.result_ctype = self.arg.result_ctype
        if hasattr(self.arg, 'entry'):
            self.entry = self.arg.entry

    # All remaining behaviour delegates straight to the wrapped node.
    # (Fix: generate_result_code was previously defined twice with
    # identical bodies; the dead duplicate has been removed.)
    def generate_result_code(self, code):
        self.arg.generate_result_code(code)

    def result(self):
        return self.arg.result()

    def is_simple(self):
        return self.arg.is_simple()

    def may_be_none(self):
        return self.arg.may_be_none()

    def generate_evaluation_code(self, code):
        self.arg.generate_evaluation_code(code)

    def generate_disposal_code(self, code):
        self.arg.generate_disposal_code(code)

    def free_temps(self, code):
        self.arg.free_temps(code)
class CloneNode(CoercionNode):
    #  This node is employed when the result of another node needs
    #  to be used multiple times. The argument node's result must
    #  be in a temporary. This node "borrows" the result from the
    #  argument node, and does not generate any evaluation or
    #  disposal code for it. The original owner of the argument
    #  node is responsible for doing those things.

    subexprs = [] # Arg is not considered a subexpr
    nogil_check = None

    def __init__(self, arg):
        CoercionNode.__init__(self, arg)
        self.constant_result = arg.constant_result
        # The arg may not be analysed yet, hence the hasattr guards.
        if hasattr(arg, 'type'):
            self.type = arg.type
            self.result_ctype = arg.result_ctype
        if hasattr(arg, 'entry'):
            self.entry = arg.entry

    def result(self):
        return self.arg.result()

    def may_be_none(self):
        return self.arg.may_be_none()

    def type_dependencies(self, env):
        return self.arg.type_dependencies(env)

    def infer_type(self, env):
        return self.arg.infer_type(env)

    def analyse_types(self, env):
        # Mirror the (now analysed) argument's typing info.
        self.type = self.arg.type
        self.result_ctype = self.arg.result_ctype
        self.is_temp = 1
        if hasattr(self.arg, 'entry'):
            self.entry = self.arg.entry
        return self

    def is_simple(self):
        return True # result is always in a temp (or a name)

    # The borrowed result is managed entirely by the argument's owner,
    # so all codegen hooks are no-ops.
    def generate_evaluation_code(self, code):
        pass

    def generate_result_code(self, code):
        pass

    def generate_disposal_code(self, code):
        pass

    def free_temps(self, code):
        pass
class CMethodSelfCloneNode(CloneNode):
    # Special CloneNode for the self argument of builtin C methods
    # that accepts subtypes of the builtin type.  This is safe only
    # for 'final' subtypes, as subtypes of the declared type may
    # override the C method.

    def coerce_to(self, dst_type, env):
        # Skip the coercion entirely when self is already a (sub)type of
        # the builtin the method is declared on.
        if dst_type.is_builtin_type and self.type.subtype_of(dst_type):
            return self
        return CloneNode.coerce_to(self, dst_type, env)
class ModuleRefNode(ExprNode):
    # Simply returns the module object (the pre-existing module cname),
    # so no evaluation code is needed.

    type = py_object_type
    is_temp = False
    subexprs = []

    def analyse_types(self, env):
        return self

    def may_be_none(self):
        # The module object always exists while its code runs.
        return False

    def calculate_result_code(self):
        return Naming.module_cname

    def generate_result_code(self, code):
        pass
class DocstringRefNode(ExprNode):
    # Extracts the docstring of the body element via a runtime
    # getattr(body, '__doc__') lookup.

    subexprs = ['body']
    type = py_object_type
    is_temp = True

    def __init__(self, pos, body):
        ExprNode.__init__(self, pos)
        assert body.type.is_pyobject
        self.body = body

    def analyse_types(self, env):
        return self

    def generate_result_code(self, code):
        code.putln('%s = __Pyx_GetAttr(%s, %s); %s' % (
            self.result(), self.body.result(),
            code.intern_identifier(StringEncoding.EncodedString("__doc__")),
            code.error_goto_if_null(self.result(), self.pos)))
        code.put_gotref(self.result())
#------------------------------------------------------------------------------------
#
# Runtime support code
#
#------------------------------------------------------------------------------------
pyerr_occurred_withgil_utility_code= UtilityCode(
proto = """
static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void); /* proto */
""",
impl = """
static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void) {
int err;
#ifdef WITH_THREAD
PyGILState_STATE _save = PyGILState_Ensure();
#endif
err = !!PyErr_Occurred();
#ifdef WITH_THREAD
PyGILState_Release(_save);
#endif
return err;
}
"""
)
#------------------------------------------------------------------------------------
raise_unbound_local_error_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
""",
impl = """
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}
""")
raise_closure_name_error_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname);
""",
impl = """
static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname) {
PyErr_Format(PyExc_NameError, "free variable '%s' referenced before assignment in enclosing scope", varname);
}
""")
# Don't inline the function, it should really never be called in production
raise_unbound_memoryview_utility_code_nogil = UtilityCode(
proto = """
static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname);
""",
impl = """
static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname) {
#ifdef WITH_THREAD
PyGILState_STATE gilstate = PyGILState_Ensure();
#endif
__Pyx_RaiseUnboundLocalError(varname);
#ifdef WITH_THREAD
PyGILState_Release(gilstate);
#endif
}
""",
requires = [raise_unbound_local_error_utility_code])
#------------------------------------------------------------------------------------
raise_too_many_values_to_unpack = UtilityCode.load_cached("RaiseTooManyValuesToUnpack", "ObjectHandling.c")
raise_need_more_values_to_unpack = UtilityCode.load_cached("RaiseNeedMoreValuesToUnpack", "ObjectHandling.c")
tuple_unpacking_error_code = UtilityCode.load_cached("UnpackTupleError", "ObjectHandling.c")
#------------------------------------------------------------------------------------
int_pow_utility_code = UtilityCode(
proto="""
static CYTHON_INLINE %(type)s %(func_name)s(%(type)s, %(type)s); /* proto */
""",
impl="""
static CYTHON_INLINE %(type)s %(func_name)s(%(type)s b, %(type)s e) {
%(type)s t = b;
switch (e) {
case 3:
t *= b;
case 2:
t *= b;
case 1:
return t;
case 0:
return 1;
}
#if %(signed)s
if (unlikely(e<0)) return 0;
#endif
t = 1;
while (likely(e)) {
t *= (b * (e&1)) | ((~e)&1); /* 1 or b */
b *= b;
e >>= 1;
}
return t;
}
""")
# ------------------------------ Division ------------------------------------
div_int_utility_code = UtilityCode(
proto="""
static CYTHON_INLINE %(type)s __Pyx_div_%(type_name)s(%(type)s, %(type)s); /* proto */
""",
impl="""
static CYTHON_INLINE %(type)s __Pyx_div_%(type_name)s(%(type)s a, %(type)s b) {
%(type)s q = a / b;
%(type)s r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
return q;
}
""")
mod_int_utility_code = UtilityCode(
proto="""
static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s, %(type)s); /* proto */
""",
impl="""
static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s a, %(type)s b) {
%(type)s r = a %% b;
r += ((r != 0) & ((r ^ b) < 0)) * b;
return r;
}
""")
mod_float_utility_code = UtilityCode(
proto="""
static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s, %(type)s); /* proto */
""",
impl="""
static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s a, %(type)s b) {
%(type)s r = fmod%(math_h_modifier)s(a, b);
r += ((r != 0) & ((r < 0) ^ (b < 0))) * b;
return r;
}
""")
cdivision_warning_utility_code = UtilityCode(
proto="""
static int __Pyx_cdivision_warning(const char *, int); /* proto */
""",
impl="""
static int __Pyx_cdivision_warning(const char *filename, int lineno) {
#if CYTHON_COMPILING_IN_PYPY
filename++; // avoid compiler warnings
lineno++;
return PyErr_Warn(PyExc_RuntimeWarning,
"division with oppositely signed operands, C and Python semantics differ");
#else
return PyErr_WarnExplicit(PyExc_RuntimeWarning,
"division with oppositely signed operands, C and Python semantics differ",
filename,
lineno,
__Pyx_MODULE_NAME,
NULL);
#endif
}
""")
# from intobject.c
division_overflow_test_code = UtilityCode(
proto="""
#define UNARY_NEG_WOULD_OVERFLOW(x) \
(((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
""")
| Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/third_party/cython/src/Cython/Compiler/ExprNodes.py | Python | mit | 440,084 | [
"VisIt"
] | f97fe657f6e63c7397915ba20e2abe2003f33553efd99ccee67ccdfe9bd215a0 |
from DIRAC import gLogger
from dirac.lib.base import *
from dirac.lib.webconfig import gWebConfig
def getSelectedSetup():
  """Return the session's setup when it is a known setup, else the default."""
  if 'setup' in session:
    candidate = session[ 'setup' ]
    if candidate in gWebConfig.getSetups():
      return candidate
  return gWebConfig.getDefaultSetup()
def setSelectedSetup( setup ):
  """Persist *setup* in the session, but only if it is a known setup."""
  if setup not in gWebConfig.getSetups():
    return
  session[ 'setup' ] = setup
  session.save()
def getUsername():
  """Name of the logged-in user, or "anonymous" when none is stored."""
  if 'username' not in session:
    return "anonymous"
  return session[ 'username' ]
def getUserDN():
  """Certificate DN stored in the session, or False when absent.

  NOTE(review): near-duplicate of getDN() below, which returns "" instead
  of False for the missing case.
  """
  if 'DN' not in session:
    return False
  return session[ 'DN' ]
def getAvailableGroups():
  """Groups the current user may select; empty list when none are recorded."""
  if 'availableGroups' not in session:
    return []
  return session[ 'availableGroups' ]
def getSelectedGroup():
  """Return the session's group, else the first default group the user can
  use, else the sentinel string "no group"."""
  if 'group' in session:
    return session[ 'group' ]
  available = getAvailableGroups()
  for group in gWebConfig.getDefaultGroups():
    if group in available:
      return group
  return "no group"
def setSelectedGroup( group ):
  """Persist *group* in the session, but only if the user may use it."""
  if group not in getAvailableGroups():
    return
  session[ 'group' ] = group
  session.save()
def getDN():
  """Certificate DN stored in the session, or "" when absent.

  NOTE(review): near-duplicate of getUserDN() above, which returns False
  instead of "" for the missing case.
  """
  if 'DN' not in session:
    return ""
  return session[ 'DN' ]
| DIRACGrid/DIRACWeb | dirac/lib/sessionManager.py | Python | gpl-3.0 | 1,224 | [
"DIRAC"
] | c0b24aabb92585b8dd2ab91d525ec4e8ded294f7b974a1e7f9742fe25e402517 |
# -*- coding: utf-8 -*-
#
# brette-gerstner-fig-3d.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# Test for the adapting exponential integrate and fire model according to
# Brette and Gerstner (2005) J. Neurophysiology.
# This script reproduces figure 3.d of the paper.
# Note that Brette&Gerstner give the value for b in nA.
# To be consistent with the other parameters in the equations, b must be
# converted to pA (pico Ampere).
import nest
import nest.voltage_trace
import pylab
# Start from a clean kernel so repeated runs are reproducible.
nest.ResetKernel()

# Simulation resolution in ms.
res=0.1
nest.SetKernelStatus({"resolution": res})
# Adaptive exponential integrate-and-fire neuron with the figure-3.d
# parameter set (b is given in pA here, not nA as in the paper).
neuron=nest.Create("aeif_cond_exp")
nest.SetStatus(neuron, {"V_peak":20., "E_L":-60.0, "a":80.0, "b":80.5, "tau_w": 720.0})
# Hyperpolarising DC step: -800 pA from 0 to 400 ms.
dc=nest.Create("dc_generator")
nest.SetStatus(dc,[{"amplitude":-800.0, "start":0.0, "stop":400.0}])
nest.ConvergentConnect(dc,neuron)
# Record the membrane potential every 0.1 ms.
voltmeter= nest.Create("voltmeter")
nest.SetStatus(voltmeter, {"withgid": True, "withtime": True, 'interval':0.1})
nest.Connect(voltmeter,neuron)
nest.Simulate(1000.0)
# Plot the recorded trace and fix the axes to match the paper's figure.
nest.voltage_trace.from_device(voltmeter)
pylab.axis([0,1000,-85,0])
| gewaltig/cython-neuron | pynest/examples/brette-gerstner-fig-3d.py | Python | gpl-2.0 | 1,700 | [
"NEURON"
] | 18ae848921ac07fcc9d810de081fd9e90f987719bd5f016646eb2913525cbb05 |
"""Base class for mixture models."""
# Author: Wei Xue <xuewei4d@gmail.com>
# Modified by Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
from __future__ import print_function
import warnings
from abc import ABCMeta, abstractmethod
from time import time
import numpy as np
from .. import cluster
from ..base import BaseEstimator
from ..base import DensityMixin
from ..externals import six
from ..exceptions import ConvergenceWarning
from ..utils import check_array, check_random_state
from ..utils.fixes import logsumexp
def _check_shape(param, param_shape, name):
"""Validate the shape of the input parameter 'param'.
Parameters
----------
param : array
param_shape : tuple
name : string
"""
param = np.array(param)
if param.shape != param_shape:
raise ValueError("The parameter '%s' should have the shape of %s, "
"but got %s" % (name, param_shape, param.shape))
def _check_X(X, n_components=None, n_features=None):
    """Check the input data X.
    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
    n_components : int
    Returns
    -------
    X : array, shape (n_samples, n_features)
    """
    # Coerce to a 2-D float array first, so the shape checks below are valid.
    X = check_array(X, dtype=[np.float64, np.float32])
    n_samples = X.shape[0]
    if n_components is not None and n_samples < n_components:
        raise ValueError('Expected n_samples >= n_components '
                         'but got n_components = %d, n_samples = %d'
                         % (n_components, n_samples))
    if n_features is not None and X.shape[1] != n_features:
        raise ValueError("Expected the input data X have %d features, "
                         "but got %d features"
                         % (n_features, X.shape[1]))
    return X
class BaseMixture(six.with_metaclass(ABCMeta, DensityMixin, BaseEstimator)):
    """Base class for mixture models.
    This abstract class specifies an interface for all mixture classes and
    provides basic common methods for mixture models.
    """
    def __init__(self, n_components, tol, reg_covar,
                 max_iter, n_init, init_params, random_state, warm_start,
                 verbose, verbose_interval):
        # Hyperparameters are stored verbatim; validation is deferred to
        # fit() via _check_initial_parameters (sklearn convention).
        self.n_components = n_components
        self.tol = tol
        self.reg_covar = reg_covar
        self.max_iter = max_iter
        self.n_init = n_init
        self.init_params = init_params
        self.random_state = random_state
        self.warm_start = warm_start
        self.verbose = verbose
        self.verbose_interval = verbose_interval
    def _check_initial_parameters(self, X):
        """Check values of the basic parameters.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        """
        if self.n_components < 1:
            raise ValueError("Invalid value for 'n_components': %d "
                             "Estimation requires at least one component"
                             % self.n_components)
        if self.tol < 0.:
            raise ValueError("Invalid value for 'tol': %.5f "
                             "Tolerance used by the EM must be non-negative"
                             % self.tol)
        if self.n_init < 1:
            raise ValueError("Invalid value for 'n_init': %d "
                             "Estimation requires at least one run"
                             % self.n_init)
        if self.max_iter < 1:
            raise ValueError("Invalid value for 'max_iter': %d "
                             "Estimation requires at least one iteration"
                             % self.max_iter)
        if self.reg_covar < 0.:
            raise ValueError("Invalid value for 'reg_covar': %.5f "
                             "regularization on covariance must be "
                             "non-negative"
                             % self.reg_covar)
        # Check all the parameters values of the derived class
        self._check_parameters(X)
    @abstractmethod
    def _check_parameters(self, X):
        """Check initial parameters of the derived class.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        """
        pass
    def _initialize_parameters(self, X, random_state):
        """Initialize the model parameters.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        random_state : RandomState
            A random number generator instance.
        """
        n_samples, _ = X.shape
        if self.init_params == 'kmeans':
            # Hard (one-hot) responsibilities from a single k-means run.
            resp = np.zeros((n_samples, self.n_components))
            label = cluster.KMeans(n_clusters=self.n_components, n_init=1,
                                   random_state=random_state).fit(X).labels_
            resp[np.arange(n_samples), label] = 1
        elif self.init_params == 'random':
            # Random soft responsibilities, normalized per sample.
            resp = random_state.rand(n_samples, self.n_components)
            resp /= resp.sum(axis=1)[:, np.newaxis]
        else:
            raise ValueError("Unimplemented initialization method '%s'"
                             % self.init_params)
        self._initialize(X, resp)
    @abstractmethod
    def _initialize(self, X, resp):
        """Initialize the model parameters of the derived class.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        resp : array-like, shape (n_samples, n_components)
        """
        pass
    def fit(self, X, y=None):
        """Estimate model parameters with the EM algorithm.
        The method fit the model `n_init` times and set the parameters with
        which the model has the largest likelihood or lower bound. Within each
        trial, the method iterates between E-step and M-step for `max_iter`
        times until the change of likelihood or lower bound is less than
        `tol`, otherwise, a `ConvergenceWarning` is raised.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        self
        """
        X = _check_X(X, self.n_components)
        self._check_initial_parameters(X)
        # if we enable warm_start, we will have a unique initialisation
        do_init = not(self.warm_start and hasattr(self, 'converged_'))
        n_init = self.n_init if do_init else 1
        max_lower_bound = -np.infty
        self.converged_ = False
        random_state = check_random_state(self.random_state)
        n_samples, _ = X.shape
        for init in range(n_init):
            self._print_verbose_msg_init_beg(init)
            if do_init:
                self._initialize_parameters(X, random_state)
                self.lower_bound_ = -np.infty
            for n_iter in range(self.max_iter):
                prev_lower_bound = self.lower_bound_
                # One EM sweep: E-step yields responsibilities, M-step
                # re-estimates the component parameters from them.
                log_prob_norm, log_resp = self._e_step(X)
                self._m_step(X, log_resp)
                self.lower_bound_ = self._compute_lower_bound(
                    log_resp, log_prob_norm)
                change = self.lower_bound_ - prev_lower_bound
                self._print_verbose_msg_iter_end(n_iter, change)
                if abs(change) < self.tol:
                    self.converged_ = True
                    break
            self._print_verbose_msg_init_end(self.lower_bound_)
            # NOTE(review): if no init ever beats -inf (e.g. the bound turns
            # NaN), 'best_params' is never assigned and the final
            # _set_parameters call raises NameError. Also, 'lower_bound_'
            # keeps the value of the *last* init rather than the best one.
            if self.lower_bound_ > max_lower_bound:
                max_lower_bound = self.lower_bound_
                best_params = self._get_parameters()
                best_n_iter = n_iter
        if not self.converged_:
            warnings.warn('Initialization %d did not converge. '
                          'Try different init parameters, '
                          'or increase max_iter, tol '
                          'or check for degenerate data.'
                          % (init + 1), ConvergenceWarning)
        self._set_parameters(best_params)
        self.n_iter_ = best_n_iter
        return self
    def _e_step(self, X):
        """E step.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        Returns
        -------
        log_prob_norm : float
            Mean of the logarithms of the probabilities of each sample in X
        log_responsibility : array, shape (n_samples, n_components)
            Logarithm of the posterior probabilities (or responsibilities) of
            the point of each sample in X.
        """
        log_prob_norm, log_resp = self._estimate_log_prob_resp(X)
        return np.mean(log_prob_norm), log_resp
    @abstractmethod
    def _m_step(self, X, log_resp):
        """M step.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        log_resp : array-like, shape (n_samples, n_components)
            Logarithm of the posterior probabilities (or responsibilities) of
            the point of each sample in X.
        """
        pass
    @abstractmethod
    def _check_is_fitted(self):
        pass
    @abstractmethod
    def _get_parameters(self):
        pass
    @abstractmethod
    def _set_parameters(self, params):
        pass
    def score_samples(self, X):
        """Compute the weighted log probabilities for each sample.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        log_prob : array, shape (n_samples,)
            Log probabilities of each data point in X.
        """
        self._check_is_fitted()
        X = _check_X(X, None, self.means_.shape[1])
        # log p(x) = logsumexp_k [log pi_k + log p(x | k)]
        return logsumexp(self._estimate_weighted_log_prob(X), axis=1)
    def score(self, X, y=None):
        """Compute the per-sample average log-likelihood of the given data X.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_dimensions)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        log_likelihood : float
            Log likelihood of the Gaussian mixture given X.
        """
        return self.score_samples(X).mean()
    def predict(self, X):
        """Predict the labels for the data samples in X using trained model.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        labels : array, shape (n_samples,)
            Component labels.
        """
        self._check_is_fitted()
        X = _check_X(X, None, self.means_.shape[1])
        # MAP assignment: component with the largest weighted log-prob.
        return self._estimate_weighted_log_prob(X).argmax(axis=1)
    def predict_proba(self, X):
        """Predict posterior probability of each component given the data.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.
        Returns
        -------
        resp : array, shape (n_samples, n_components)
            Returns the probability of each Gaussian (state) in
            the model given each sample.
        """
        self._check_is_fitted()
        X = _check_X(X, None, self.means_.shape[1])
        _, log_resp = self._estimate_log_prob_resp(X)
        return np.exp(log_resp)
    def sample(self, n_samples=1):
        """Generate random samples from the fitted Gaussian distribution.
        Parameters
        ----------
        n_samples : int, optional
            Number of samples to generate. Defaults to 1.
        Returns
        -------
        X : array, shape (n_samples, n_features)
            Randomly generated sample
        y : array, shape (nsamples,)
            Component labels
        """
        self._check_is_fitted()
        if n_samples < 1:
            # NOTE(review): the message interpolates self.n_components where
            # n_samples was clearly intended.
            raise ValueError(
                "Invalid value for 'n_samples': %d . The sampling requires at "
                "least one sample." % (self.n_components))
        _, n_features = self.means_.shape
        rng = check_random_state(self.random_state)
        # Draw how many samples each component contributes.
        n_samples_comp = rng.multinomial(n_samples, self.weights_)
        if self.covariance_type == 'full':
            X = np.vstack([
                rng.multivariate_normal(mean, covariance, int(sample))
                for (mean, covariance, sample) in zip(
                    self.means_, self.covariances_, n_samples_comp)])
        elif self.covariance_type == "tied":
            X = np.vstack([
                rng.multivariate_normal(mean, self.covariances_, int(sample))
                for (mean, sample) in zip(
                    self.means_, n_samples_comp)])
        else:
            # 'diag' or 'spherical': scale standard normals per dimension.
            X = np.vstack([
                mean + rng.randn(sample, n_features) * np.sqrt(covariance)
                for (mean, covariance, sample) in zip(
                    self.means_, self.covariances_, n_samples_comp)])
        y = np.concatenate([j * np.ones(sample, dtype=int)
                            for j, sample in enumerate(n_samples_comp)])
        return (X, y)
    def _estimate_weighted_log_prob(self, X):
        """Estimate the weighted log-probabilities, log P(X | Z) + log weights.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        Returns
        -------
        weighted_log_prob : array, shape (n_features, n_component)
        """
        return self._estimate_log_prob(X) + self._estimate_log_weights()
    @abstractmethod
    def _estimate_log_weights(self):
        """Estimate log-weights in EM algorithm, E[ log pi ] in VB algorithm.
        Returns
        -------
        log_weight : array, shape (n_components, )
        """
        pass
    @abstractmethod
    def _estimate_log_prob(self, X):
        """Estimate the log-probabilities log P(X | Z).
        Compute the log-probabilities per each component for each sample.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        Returns
        -------
        log_prob : array, shape (n_samples, n_component)
        """
        pass
    def _estimate_log_prob_resp(self, X):
        """Estimate log probabilities and responsibilities for each sample.
        Compute the log probabilities, weighted log probabilities per
        component and responsibilities for each sample in X with respect to
        the current state of the model.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
        Returns
        -------
        log_prob_norm : array, shape (n_samples,)
            log p(X)
        log_responsibilities : array, shape (n_samples, n_components)
            logarithm of the responsibilities
        """
        weighted_log_prob = self._estimate_weighted_log_prob(X)
        log_prob_norm = logsumexp(weighted_log_prob, axis=1)
        with np.errstate(under='ignore'):
            # ignore underflow
            log_resp = weighted_log_prob - log_prob_norm[:, np.newaxis]
        return log_prob_norm, log_resp
    def _print_verbose_msg_init_beg(self, n_init):
        """Print verbose message on initialization."""
        if self.verbose == 1:
            print("Initialization %d" % n_init)
        elif self.verbose >= 2:
            # Verbose level 2 additionally tracks wall-clock timings.
            print("Initialization %d" % n_init)
            self._init_prev_time = time()
            self._iter_prev_time = self._init_prev_time
    def _print_verbose_msg_iter_end(self, n_iter, diff_ll):
        """Print verbose message on initialization."""
        if n_iter % self.verbose_interval == 0:
            if self.verbose == 1:
                print("  Iteration %d" % n_iter)
            elif self.verbose >= 2:
                cur_time = time()
                print("  Iteration %d\t time lapse %.5fs\t ll change %.5f" % (
                    n_iter, cur_time - self._iter_prev_time, diff_ll))
                self._iter_prev_time = cur_time
    def _print_verbose_msg_init_end(self, ll):
        """Print verbose message on the end of iteration."""
        if self.verbose == 1:
            print("Initialization converged: %s" % self.converged_)
        elif self.verbose >= 2:
            print("Initialization converged: %s\t time lapse %.5fs\t ll %.5f" %
                  (self.converged_, time() - self._init_prev_time, ll))
| raghavrv/scikit-learn | sklearn/mixture/base.py | Python | bsd-3-clause | 16,647 | [
"Gaussian"
] | 4c4a3bc20e4120fe2adf2030b30a43a6aa8c5840a5501fed64427ae89e4c9b20 |
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
'''
Blocks and utilities for digital modulation and demodulation.
'''
# The presence of this file turns this directory into a Python package
import os
try:
from .digital_python import *
except ImportError:
dirname, filename = os.path.split(os.path.abspath(__file__))
__path__.append(os.path.join(dirname, "bindings"))
from .digital_python import *
from gnuradio import analog # just need analog for the enum
class gmskmod_bc(cpmmod_bc):
    # GMSK modulator: a CPM modulator configured with a Gaussian pulse
    # shape. The 0.5 argument is presumably the modulation index h
    # (GMSK uses h = 0.5) -- confirm against the cpmmod_bc signature.
    def __init__(self, samples_per_sym = 2, L = 4, beta = 0.3):
        # beta: Gaussian filter parameter (BT product); L: pulse length
        # in symbol periods.
        cpmmod_bc.__init__(self, analog.cpm.GAUSSIAN, 0.5, samples_per_sym, L, beta)
from .psk import *
from .qam import *
from .qamlike import *
from .bpsk import *
from .qpsk import *
from .gmsk import *
from .gfsk import *
from .cpm import *
from .crc import *
from .modulation_utils import *
from .ofdm_txrx import ofdm_tx, ofdm_rx
from .soft_dec_lut_gen import *
from .psk_constellations import *
from .qam_constellations import *
from .constellation_map_generator import *
from . import packet_utils
| trabucayre/gnuradio | gr-digital/python/digital/__init__.py | Python | gpl-3.0 | 1,169 | [
"Gaussian"
] | 81f9a86c2cf7d66855e67e9b8298de5ef82349d6cbc051a67bfd108c2cd23152 |
import os
import unittest
import numpy as np
from deepchem.utils.rdkit_utils import load_molecule
from deepchem.utils.rdkit_utils import compute_ring_center
from deepchem.utils.rdkit_utils import compute_ring_normal
from deepchem.utils.noncovalent_utils import is_pi_parallel
from deepchem.utils.noncovalent_utils import is_pi_t
from deepchem.utils.noncovalent_utils import compute_pi_stack
from deepchem.utils.noncovalent_utils import is_cation_pi
from deepchem.utils.noncovalent_utils import compute_cation_pi
from deepchem.utils.noncovalent_utils import compute_binding_pocket_cation_pi
class TestPiInteractions(unittest.TestCase):
  # Tests for pi-stacking / cation-pi geometry helpers, using a synthetic
  # flat 4-ring plus a real protein/ligand pair (PDB 3ws9).
  def setUp(self):
    current_dir = os.path.dirname(os.path.realpath(__file__))
    # simple flat ring
    from rdkit.Chem import MolFromSmiles
    from rdkit.Chem.rdDepictor import Compute2DCoords
    self.cycle4 = MolFromSmiles('C1CCC1')
    # self.cycle4.Compute2DCoords()
    Compute2DCoords(self.cycle4)
    # load and sanitize two real molecules
    _, self.prot = load_molecule(
        os.path.join(current_dir,
                     '../../feat/tests/data/3ws9_protein_fixer_rdkit.pdb'),
        add_hydrogens=False,
        calc_charges=False,
        sanitize=True)
    _, self.lig = load_molecule(
        os.path.join(current_dir, '../../feat//tests/data/3ws9_ligand.sdf'),
        add_hydrogens=False,
        calc_charges=False,
        sanitize=True)
  def test_compute_ring_center(self):
    # The 2D-embedded cyclobutane ring is centered at the origin.
    self.assertTrue(np.allclose(compute_ring_center(self.cycle4, range(4)), 0))
  def test_compute_ring_normal(self):
    # A flat ring in the xy-plane has its normal along +/- z.
    normal = compute_ring_normal(self.cycle4, range(4))
    self.assertTrue(
        np.allclose(np.abs(normal / np.linalg.norm(normal)), [0, 0, 1]))
  def test_is_pi_parallel(self):
    ring1_center = np.array([0.0, 0.0, 0.0])
    ring2_center_true = np.array([4.0, 0.0, 0.0])
    ring2_center_false = np.array([10.0, 0.0, 0.0])
    ring1_normal_true = np.array([1.0, 0.0, 0.0])
    ring1_normal_false = np.array([0.0, 1.0, 0.0])
    # The result must not depend on the normals' magnitude or sign.
    for ring2_normal in (np.array([2.0, 0, 0]), np.array([-3.0, 0, 0])):
      # parallel normals
      self.assertTrue(
          is_pi_parallel(ring1_center, ring1_normal_true, ring2_center_true,
                         ring2_normal))
      # perpendicular normals
      self.assertFalse(
          is_pi_parallel(ring1_center, ring1_normal_false, ring2_center_true,
                         ring2_normal))
      # too far away
      self.assertFalse(
          is_pi_parallel(ring1_center, ring1_normal_true, ring2_center_false,
                         ring2_normal))
  def test_is_pi_t(self):
    # T-shaped stacking: normals perpendicular, centers close.
    ring1_center = np.array([0.0, 0.0, 0.0])
    ring2_center_true = np.array([4.0, 0.0, 0.0])
    ring2_center_false = np.array([10.0, 0.0, 0.0])
    ring1_normal_true = np.array([0.0, 1.0, 0.0])
    ring1_normal_false = np.array([1.0, 0.0, 0.0])
    for ring2_normal in (np.array([2.0, 0, 0]), np.array([-3.0, 0, 0])):
      # perpendicular normals
      self.assertTrue(
          is_pi_t(ring1_center, ring1_normal_true, ring2_center_true,
                  ring2_normal))
      # parallel normals
      self.assertFalse(
          is_pi_t(ring1_center, ring1_normal_false, ring2_center_true,
                  ring2_normal))
      # too far away
      self.assertFalse(
          is_pi_t(ring1_center, ring1_normal_true, ring2_center_false,
                  ring2_normal))
  def test_compute_pi_stack(self):
    # order of the molecules shouldn't matter
    dicts1 = compute_pi_stack(self.prot, self.lig)
    dicts2 = compute_pi_stack(self.lig, self.prot)
    for i, j in ((0, 2), (1, 3)):
      self.assertEqual(dicts1[i], dicts2[j])
      self.assertEqual(dicts1[j], dicts2[i])
    # with this criteria we should find both types of stacking
    for d in compute_pi_stack(
        self.lig, self.prot, dist_cutoff=7, angle_cutoff=40.):
      self.assertGreater(len(d), 0)
  def test_is_cation_pi(self):
    cation_position = np.array([[2.0, 0.0, 0.0]])
    ring_center_true = np.array([4.0, 0.0, 0.0])
    ring_center_false = np.array([10.0, 0.0, 0.0])
    ring_normal_true = np.array([1.0, 0.0, 0.0])
    ring_normal_false = np.array([0.0, 1.0, 0.0])
    # parallel normals
    self.assertTrue(
        is_cation_pi(cation_position, ring_center_true, ring_normal_true))
    # perpendicular normals
    self.assertFalse(
        is_cation_pi(cation_position, ring_center_true, ring_normal_false))
    # too far away
    self.assertFalse(
        is_cation_pi(cation_position, ring_center_false, ring_normal_true))
  # def test_compute_cation_pi(self):
  #   # TODO(rbharath): find better example, currently dicts are empty
  #   dicts1 = compute_cation_pi(self.prot, self.lig)
  #   dicts2 = compute_cation_pi(self.lig, self.prot)
  def test_compute_binding_pocket_cation_pi(self):
    # TODO find better example, currently dicts are empty
    # Expected result: the merged (prot->lig) + (lig->prot) contact counts.
    prot_dict, lig_dict = compute_binding_pocket_cation_pi(self.prot, self.lig)
    exp_prot_dict, exp_lig_dict = compute_cation_pi(self.prot, self.lig)
    add_lig, add_prot = compute_cation_pi(self.lig, self.prot)
    for exp_dict, to_add in ((exp_prot_dict, add_prot), (exp_lig_dict,
                                                         add_lig)):
      for atom_idx, count in to_add.items():
        if atom_idx not in exp_dict:
          exp_dict[atom_idx] = count
        else:
          exp_dict[atom_idx] += count
    self.assertEqual(prot_dict, exp_prot_dict)
    self.assertEqual(lig_dict, exp_lig_dict)
  def test_compute_hydrogen_bonds(self):
    pass
| deepchem/deepchem | deepchem/utils/test/test_noncovalent_utils.py | Python | mit | 5,524 | [
"RDKit"
] | 4b53933915655d826d571022b6f5bd20d593655eb9ff3756e7b6a6890cda9854 |
# Creates the data dictionary used in the ps example
from lsst.daf.persistence import Butler
import pickle
n_max=0
stats={}
# output dir of the stack processing
output_dir = ""
butler = Butler(output_dir)
for dataref in butler.subset(datasetType='src'):
if dataref.datasetExists(): # processrCcd did not fail
visit = dataref.dataId['visit']
src = dataref.get()
n = len(src)
print visit, n
if visit in stats:
stats[visit].append(n)
else:
stats[visit]=list()
stats[visit].append(n)
pickle.dump(stats, open( "../test/ps.pkl", "wb"))
| lsst-sqre/bokeh-plots | backend/get_ps_data.py | Python | gpl-3.0 | 629 | [
"VisIt"
] | f579e62554b4a5eab1203477fd3361589a7f8f0b0b9dd9c6a3bad34065946e5b |
import math
"""
This script is written to convert the FIELD file generated using DL_FIELD (v-4.3) using different
forcefields to a lammps .data file, including both FF information and structural information, i.e.,
ready to run
dl_field.FIELD
modify dl_field.control file --> FIELD,CONFIG,CONTROL,DLF_notation
Note:
harmonic potential in DL_POLY have 1/2 term, so k should divided by 2 in data file
lj potential are the same function in both packages
dihedral, cos in dlpoly == harmonic in lammps; the last 2 columns in FIELD is 1-4 couloumbic and vdw
coefficients cos3 in dlpoly (using opls2005) -- opls in lammps
improper, treated as dihedral in dlpoly; cos in dlpoly == cvff in lammps; might not be complete
"""
dirName = r"F:\simulations\asphaltenes\asphaltene-clay\illite\illite-ac-opls/"
fieldName = "dl_poly.FIELD"
configName = "dl_poly.CONFIG"
xyzName = "config.xyz" # will be converted from CONFIG file
nImpropers = 0 # no improper in DL_POLY, included in Dihedral section
a, b, c = 15.6063, 17.9594, 32 # can read from CONFIG, but it's convenient to specify here
"""
method:
read xyz --> type&index --> type list --> mass&charge&lj&Atoms [write in full]
read field --> connection --> Bonds&Angles&Dihedral [harmonic-harmonic-cvff/opls]
--->Bonds&Angle&Dihedral type can be obtained by checking whether the parameters are the same one
i.e., based on the interactions parameters rather than atoms involved.
"""
# read and parse FIELD
# Interaction *types* are deduplicated by coefficient value: a new type is
# appended only when its parameter list has not been seen before, and each
# bonded term then stores the 1-based index of its type.
with open(dirName + fieldName, 'r') as foo:
    header = [foo.readline() for i in range(5)]
    nAtoms = int(foo.readline().split()[1])
    type, mass, charge = [], [], []
    # Atom section: collect the unique atom types with their mass/charge.
    for i in range(nAtoms):
        xtype, xmass, xcharge = foo.readline().split()[:3]
        if xtype not in type:
            type.append(xtype)
            mass.append(float(xmass))
            charge.append(float(xcharge))
    nBonds = int(foo.readline().split()[1])
    bonds, bondCoefficient = [], []
    # Bonds: DL_POLY harmonic k includes a 1/2 factor, hence k/2 for LAMMPS.
    for i in range(nBonds):
        harm, atomi, atomj, k, d = foo.readline().split()
        if [float(k) / 2, float(d)] not in bondCoefficient:
            bondCoefficient.append([float(k) / 2, float(d)])
        bonds.append([bondCoefficient.index([float(k) / 2, float(d)]) + 1, int(atomi), int(atomj)])
    nAngles = int(foo.readline().split()[1])
    angles, angleCoefficient = [], []
    for i in range(nAngles):
        harm, atomi, atomj, atomk, k, theta = foo.readline().split()
        if [float(k) / 2, float(theta)] not in angleCoefficient:
            angleCoefficient.append([float(k) / 2, float(theta)])
        angles.append([angleCoefficient.index([float(k) / 2, float(theta)]) + 1, int(atomi), int(atomj), int(atomk)])
    nDihedrals = int(foo.readline().split()[1])
    dihedrals, dihedralCoefficient = [], []
    # Dihedrals: 'cos' maps to LAMMPS harmonic (d = cos(phase)),
    # 'cos3' maps to LAMMPS opls (fourth coefficient fixed to 0).
    for i in range(nDihedrals):
        cos, atomi, atomj, atomk, atoml, k, d, n, lj14, coul14 = foo.readline().split()
        if cos == 'cos':
            if [float(k), int(math.cos(float(d) * math.pi / 180)), int(float(n))] not in dihedralCoefficient:
                dihedralCoefficient.append([float(k), int(math.cos(float(d) * math.pi / 180)), int(float(n))])
            dihedrals.append([dihedralCoefficient.index([float(k), int(math.cos(float(d) * math.pi / 180)),
                                                         int(float(n))]) + 1, int(atomi), int(atomj), int(atomk), int(atoml)])
        elif cos == 'cos3':
            if [float(k), float(d), float(n), 0] not in dihedralCoefficient:
                dihedralCoefficient.append([float(k), float(d), float(n), 0])
            dihedrals.append([dihedralCoefficient.index([float(k), float(d), float(n), 0]) + 1, int(atomi),
                              int(atomj), int(atomk), int(atoml)])
    foo.readline()
    nVdw = int(foo.readline().split()[1])
    pairCoefficient = []
    # Keep only the like-like (i == j) LJ pairs; LAMMPS mixes the rest.
    for i in range(nVdw):
        atomi, atomj, lj, eps, sig = foo.readline().split()
        for itype in type:
            if atomi == itype and atomj == itype:
                pairCoefficient.append([float(eps), float(sig), "%s" % itype])
# read coordinates [from xyz]
# with open(dirName + xyzName, 'r') as foo:
#     foo.readline()  # skip first two lines
#     foo.readline()  # skip first two lines
#     coord = []
#     for i in range(nAtoms):
#         xtype, xtmp, ytmp, ztmp = foo.readline().split()
#         coord.append([xtype, float(xtmp), float(ytmp), float(ztmp)])
# from config
# CONFIG layout assumed here: 5 header lines, then 2 lines per atom
# (type line followed by a coordinate line) -- TODO confirm this matches
# the CONFIG "levcfg 0" format with no velocity/force records.
with open(dirName + configName, 'r') as foo:
    coord = []
    tmp = foo.readlines()
    tmp = tmp[5:]
    for i in range(len(tmp) / 2):
        ttype = tmp[i * 2].split()[0]
        tx, ty, tz = tmp[i * 2 + 1].split()
        coord.append([ttype, float(tx), float(ty), float(tz)])
# convert CONFIG to xyz
with open(dirName + xyzName, 'w') as foo:
    print >> foo, "%d\n xyz converted from %s" % (nAtoms, configName)
    for x in coord:
        print >> foo, "%s %16.9f%16.9f%16.9f" % (x[0], x[1], x[2], x[3])
# write lammps data
# The box is centered on the origin: [-a/2, a/2] etc., msi2lmp style.
with open(dirName + 'converted.data', 'w') as foo:
    print >> foo, "Lammps date file generated with riddle's code from %s\n" % fieldName
    print >> foo, "%7d atoms\n%7d bonds\n%7d angles\n%7d dihedrals\n%7d impropers\n" \
                  % (nAtoms, nBonds, nAngles, nDihedrals, nImpropers)
    print >> foo, "%4d atom types\n%4d bond types\n%4d angle types\n%4d dihedral types\n%4d improper types\n" \
                  % (len(type), len(bondCoefficient), len(angleCoefficient), len(dihedralCoefficient), 0)
    print >> foo, "%16.9f%16.9f xlo xhi" % (-a / 2.0, a / 2.0) # the same style as msi2lmp
    print >> foo, "%16.9f%16.9f ylo yhi" % (-b / 2.0, b / 2.0) # the same style as msi2lmp
    print >> foo, "%16.9f%16.9f zlo zhi" % (-c / 2.0, c / 2.0) # the same style as msi2lmp
    print >> foo, "\nMasses\n"
    for i, itype in enumerate(type):
        print >> foo, "%4d%11.6f # %s" % (i + 1, mass[i], itype)
    print >> foo, "\nPair Coeffs # lj/cut/coul/long\n"
    for i, x in enumerate(pairCoefficient):
        print >> foo, "%4d%15.10f%15.10f # %s" % (i + 1, x[0], x[1], x[2])
    print >> foo, "\nBond Coeffs # harmonic\n"
    for i, x in enumerate(bondCoefficient):
        print >> foo, "%4d%11.4f%11.4f" % (i + 1, x[0], x[1])
    print >> foo, "\nAngle Coeffs # harmonic\n"
    for i, x in enumerate(angleCoefficient):
        print >> foo, "%4d%11.4f%11.4f" % (i + 1, x[0], x[1])
    print >> foo, "\nDihedral Coeffs # \n"
    # 3 coefficients -> 'cos' (harmonic) type, 4 -> 'cos3' (opls) type.
    for i, x in enumerate(dihedralCoefficient):
        if len(x) == 3:
            print >> foo, "%4d%11.4f%4d%4d" % (i + 1, x[0], x[1], x[2])
        elif len(x) == 4:
            print >> foo, "%4d%11.4f%11.4f%11.4f%11.4f" % (i + 1, x[0], x[1], x[2], 0)
    #print >> foo, "\nImproper Coeffs # cvff\n" # if not present, error occurs using combinTwofile.py
    print >> foo, "\nAtoms\n"
    # Atoms written in 'full' style: id mol-id type charge x y z image-flags.
    for i, x in enumerate(coord):
        print >> foo, "%7d%7d%4d%10.6f%16.9f%16.9f%16.9f%4d%4d%4d # %s" % (
            i + 1, 1, type.index(x[0]) + 1, charge[type.index(x[0])],
            x[1], x[2], x[3], 0, 0, 0, x[0])
    print >> foo, "\nBonds\n"
    for i, x in enumerate(bonds):
        print >> foo, "%6d%4d%7d%7d" % (i + 1, x[0], x[1], x[2])
    print >> foo, "\nAngles\n"
    for i, x in enumerate(angles):
        print >> foo, "%6d%4d%7d%7d%7d" % (i + 1, x[0], x[1], x[2], x[3])
    print >> foo, "\nDihedrals\n"
    for i, x in enumerate(dihedrals):
        print >> foo, "%6d%4d%7d%7d%7d%7d" % (i + 1, x[0], x[1], x[2], x[3], x[4])
    #print >> foo, "\nImpropers\n" # if not present, error occurs using combinTwofile.py
    print >> foo, '\n'
| riddlezyc/geolab | src/io/dlfield2lmp.py | Python | gpl-3.0 | 7,742 | [
"DL_POLY",
"LAMMPS"
] | a14de9f4788f45200bd723bf25542d7dd46a25ccd8e58620587ab7b0c10a0547 |
import numpy as np
from ase import Hartree
from gpaw.aseinterface import GPAW
from gpaw.lcao.overlap import NewTwoCenterIntegrals
from gpaw.utilities import unpack
from gpaw.utilities.tools import tri2full, lowdin
from gpaw.lcao.tools import basis_subset2, get_bfi2
from gpaw.coulomb import get_vxc as get_ks_xc
from gpaw.utilities.blas import r2k, gemm
from gpaw.lcao.projected_wannier import dots, condition_number, eigvals, \
get_bfs, get_lcao_projections_HSP
def get_rot(F_MM, V_oM, L):
    """Build rotation matrices from the L highest eigenstates of F_MM.

    Returns (U_ow, U_lw, U_Ml), where the columns of the stacked
    (U_ow; U_lw) pair are normalized to unit length.
    """
    eps_M, W_MM = np.linalg.eigh(F_MM)
    # Pick the L eigenvectors with the largest (real) eigenvalues.
    sel = eps_M.real.argsort()[-L:]
    U_Ml = W_MM[:, sel]
    # Normalize each selected vector with respect to the F_MM metric.
    U_Ml /= np.sqrt(dots(U_Ml.T.conj(), F_MM, U_Ml).diagonal())
    U_ow = V_oM.copy()
    U_lw = np.dot(U_Ml.T.conj(), F_MM)
    # Joint column normalization: each (occupied, lower) column pair is
    # scaled by the norm of the two stacked together.
    for occ_col, low_col in zip(U_ow.T, U_lw.T):
        scale = np.linalg.norm(np.hstack((occ_col, low_col)))
        occ_col /= scale
        low_col /= scale
    return U_ow, U_lw, U_Ml
def get_lcao_xc(calc, P_aqMi, bfs=None, spin=0):
    # Build the XC-potential matrix in the LCAO basis, one (nao x nao)
    # matrix per IBZ k-point, in eV (converted via Hartree at the end).
    nq = len(calc.wfs.ibzk_qc)
    nao = calc.wfs.setups.nao
    dtype = calc.wfs.dtype
    if bfs is None:
        bfs = get_bfs(calc)
    # Make sure the fine-grid density exists before evaluating vxc on it.
    if calc.density.nt_sg is None:
        calc.density.interpolate()
    nt_sg = calc.density.nt_sg
    vxct_sg = calc.density.finegd.zeros(calc.wfs.nspins)
    calc.hamiltonian.xc.calculate(calc.density.finegd, nt_sg, vxct_sg)
    # Restrict the chosen spin channel from the fine to the coarse grid.
    vxct_G = calc.wfs.gd.zeros()
    calc.hamiltonian.restrict(vxct_sg[spin], vxct_G)
    Vxc_qMM = np.zeros((nq, nao, nao), dtype)
    for q, Vxc_MM in enumerate(Vxc_qMM):
        # Only the lower triangle is computed; mirror it to the full matrix.
        bfs.calculate_potential_matrix(vxct_G, Vxc_MM, q)
        tri2full(Vxc_MM, 'L')
    # Add atomic PAW corrections
    for a, P_qMi in P_aqMi.items():
        D_sp = calc.density.D_asp[a][:]
        H_sp = np.zeros_like(D_sp)
        calc.wfs.setups[a].xc_correction.calculate(calc.hamiltonian.xc,
                                                   D_sp, H_sp)
        H_ii = unpack(H_sp[spin])
        # Project the atomic correction into the LCAO basis: P H P^dagger.
        for Vxc_MM, P_Mi in zip(Vxc_qMM, P_qMi):
            Vxc_MM += dots(P_Mi, H_ii, P_Mi.T.conj())
    return Vxc_qMM * Hartree
def get_xc2(calc, w_wG, P_awi, spin=0):
    # XC matrix elements between the grid functions w_wG (e.g. projected
    # Wannier functions), including PAW corrections, returned in eV.
    if calc.density.nt_sg is None:
        calc.density.interpolate()
    nt_g = calc.density.nt_sg[spin]
    vxct_g = calc.density.finegd.zeros()
    calc.hamiltonian.xc.get_energy_and_potential(nt_g, vxct_g)
    vxct_G = calc.wfs.gd.empty()
    calc.hamiltonian.restrict(vxct_g, vxct_G)
    # Integrate pseudo part
    Nw = len(w_wG)
    xc_ww = np.empty((Nw, Nw))
    # r2k fills one triangle of <w|vxc|w>; tri2full symmetrizes it.
    r2k(.5 * calc.wfs.gd.dv, w_wG, vxct_G * w_wG, .0, xc_ww)
    tri2full(xc_ww, 'L')
    # Add atomic PAW corrections
    for a, P_wi in P_awi.items():
        D_sp = calc.density.D_asp[a][:]
        H_sp = np.zeros_like(D_sp)
        calc.wfs.setups[a].xc_correction.calculate_energy_and_derivatives(
            D_sp, H_sp)
        H_ii = unpack(H_sp[spin])
        xc_ww += dots(P_wi, H_ii, P_wi.T.conj())
    return xc_ww * Hartree
class ProjectedWannierFunctionsFBL:
    """PWF in the finite band limit.
    ::
                --N
        |w_w> = >    |psi_n> U_nw
                --n=1
    """
    def __init__(self, V_nM, No, ortho=False):
        Nw = V_nM.shape[1]
        assert No <= Nw
        # Split projections into occupied (first No bands) and unoccupied.
        V_oM, V_uM = V_nM[:No], V_nM[No:]
        F_MM = np.dot(V_uM.T.conj(), V_uM)
        U_ow, U_lw, U_Ml = get_rot(F_MM, V_oM, Nw - No)
        self.U_nw = np.vstack((U_ow, dots(V_uM, U_Ml, U_lw)))
        # stop here ?? XXX
        self.S_ww = self.rotate_matrix(np.ones(1))
        if ortho:
            # Symmetric (Lowdin) orthogonalization makes S the identity.
            lowdin(self.U_nw, self.S_ww)
            self.S_ww = np.identity(Nw)
        self.norms_n = np.dot(self.U_nw, np.linalg.solve(
            self.S_ww, self.U_nw.T.conj())).diagonal()
    def rotate_matrix(self, A_nn):
        # A 1-D A_nn is treated as a diagonal matrix (cheaper product).
        if A_nn.ndim == 1:
            return np.dot(self.U_nw.T.conj() * A_nn, self.U_nw)
        else:
            return dots(self.U_nw.T.conj(), A_nn, self.U_nw)
    def rotate_projections(self, P_ani):
        # Transform PAW projector overlaps from band to Wannier index.
        P_awi = {}
        for a, P_ni in P_ani.items():
            P_awi[a] = np.tensordot(self.U_nw, P_ni, axes=[[0], [0]])
        return P_awi
    def rotate_function(self, psit_nG):
        # Linear combination of band wave functions -> Wannier functions.
        return np.tensordot(self.U_nw, psit_nG, axes=[[0], [0]])
class ProjectedWannierFunctionsIBL:
    """PWF in the infinite band limit.
    ::
                --No               --Nw
        |w_w> = >    |psi_o> U_ow + >   |f_M> U_Mw
                --o=1              --M=1
    """
    def __init__(self, V_nM, S_MM, No, lcaoindices=None):
        Nw = V_nM.shape[1]
        assert No <= Nw
        self.V_oM, V_uM = V_nM[:No], V_nM[No:]
        # F is the LCAO overlap with the occupied-subspace part removed.
        F_MM = S_MM - np.dot(self.V_oM.T.conj(), self.V_oM)
        U_ow, U_lw, U_Ml = get_rot(F_MM, self.V_oM, Nw - No)
        self.U_Mw = np.dot(U_Ml, U_lw)
        self.U_ow = U_ow - np.dot(self.V_oM, self.U_Mw)
        if lcaoindices is not None:
            # Force the listed functions to be pure LCAO basis functions.
            for i in lcaoindices:
                self.U_ow[:, i] = 0.0
                self.U_Mw[:, i] = 0.0
                self.U_Mw[i, i] = 1.0
        # stop here ?? XXX
        self.S_ww = self.rotate_matrix(np.ones(1), S_MM)
        P_uw = np.dot(V_uM, self.U_Mw)
        self.norms_n = np.hstack((
            np.dot(U_ow, np.linalg.solve(self.S_ww, U_ow.T.conj())).diagonal(),
            np.dot(P_uw, np.linalg.solve(self.S_ww, P_uw.T.conj())).diagonal()))
    def rotate_matrix(self, A_o, A_MM):
        # A_o must be the diagonal of the band-space part (1-D only).
        assert A_o.ndim == 1
        A_ww = dots(self.U_ow.T.conj() * A_o, self.V_oM, self.U_Mw)
        # Add the Hermitian-conjugate cross term.
        A_ww += np.conj(A_ww.T)
        A_ww += np.dot(self.U_ow.T.conj() * A_o, self.U_ow)
        A_ww += dots(self.U_Mw.T.conj(), A_MM, self.U_Mw)
        return A_ww
    def rotate_projections(self, P_aoi, P_aMi, indices=None):
        # 'indices' optionally restricts the output to a subset of Wannier
        # functions.
        if indices is None:
            U_ow = self.U_ow
            U_Mw = self.U_Mw
        else:
            U_ow = self.U_ow[:, indices]
            U_Mw = self.U_Mw[:, indices]
        P_awi = {}
        for a, P_oi in P_aoi.items():
            P_awi[a] = np.tensordot(U_Mw, P_aMi[a], axes=[[0], [0]])
            if len(U_ow) > 0:
                P_awi[a] += np.tensordot(U_ow, P_oi, axes=[[0], [0]])
        return P_awi
    def rotate_function(self, psit_oG, bfs, q=-1, indices=None):
        if indices is None:
            U_ow = self.U_ow
            U_Mw = self.U_Mw
        else:
            U_ow = self.U_ow[:, indices]
            U_Mw = self.U_Mw[:, indices]
        w_wG = np.zeros((U_ow.shape[1],) + psit_oG.shape[1:])
        # Band part (BLAS gemm) plus the LCAO part interpolated to the grid.
        if len(U_ow) > 0:
            gemm(1., psit_oG, U_ow.T.copy(), 0., w_wG)
        bfs.lcao_to_grid(U_Mw.T.copy(), w_wG, q)
        return w_wG
class PWFplusLCAO(ProjectedWannierFunctionsIBL):
    # IBL projected Wannier functions where the PWF optimization is done
    # only inside the sub-basis selected by 'pwfmask'; the remaining LCAO
    # functions are kept unchanged (identity rotation).
    def __init__(self, V_nM, S_MM, No, pwfmask, lcaoindices=None):
        Nw = V_nM.shape[1]
        self.V_oM = V_nM[:No]
        dtype = V_nM.dtype
        # Do PWF optimization for pwfbasis submatrix only!
        Npwf = len(pwfmask.nonzero()[0])
        pwfmask2 = np.outer(pwfmask, pwfmask)
        s_MM = S_MM[pwfmask2].reshape(Npwf, Npwf)
        v_oM = self.V_oM[:, pwfmask]
        f_MM = s_MM - np.dot(v_oM.T.conj(), v_oM)
        nw = len(s_MM)
        assert No <= nw
        u_ow, u_lw, u_Ml = get_rot(f_MM, v_oM, nw - No)
        u_Mw = np.dot(u_Ml, u_lw)
        u_ow = u_ow - np.dot(v_oM, u_Mw)
        # Determine U for full lcao basis
        # Scatter the optimized sub-basis rotation back into full-size
        # matrices; masked-out entries stay zero (U_ow) or identity (U_Mw).
        self.U_ow = np.zeros((No, Nw), dtype)
        for U_w, u_w in zip(self.U_ow, u_ow):
            np.place(U_w, pwfmask, u_w)
        self.U_Mw = np.identity(Nw, dtype)
        np.place(self.U_Mw, pwfmask2, u_Mw.flat)
        if lcaoindices is not None:
            for i in lcaoindices:
                self.U_ow[:, i] = 0.0
                self.U_Mw[:, i] = 0.0
                self.U_Mw[i, i] = 1.0
        self.S_ww = self.rotate_matrix(np.ones(1), S_MM)
        self.norms_n = None
def set_lcaoatoms(calc, pwf, lcaoatoms):
    """Force pure-LCAO character on all basis functions of *lcaoatoms*.

    Zeroes the corresponding columns of the rotation matrices and restores
    the identity in U_Mw, mirroring the lcaoindices handling in PWFplusLCAO.
    """
    ind = get_bfi(calc, lcaoatoms)
    for i in ind:
        pwf.U_ow[:, i] = 0.0
        pwf.U_Mw[:, i] = 0.0
        # bugfix: was 'pwf_U_Mw[i, i] = 1.0' (undefined name -> NameError)
        pwf.U_Mw[i, i] = 1.0
class PWF2:
    """Projected Wannier functions built on top of a restarted GPAW
    calculation.

    Loads a gpw file, constructs projected Wannier functions per k-point
    (IBL variant, optionally with a restricted PWF sub-basis, or FBL
    variant) and exposes the Hamiltonian, overlap, projections, orbitals,
    core contribution and xc matrix in the w basis.
    """

    def __init__(self, gpwfilename, fixedenergy=0., spin=0, ibl=True,
                 basis='sz', zero_fermi=False, pwfbasis=None, lcaoatoms=None,
                 projection_data=None):
        calc = GPAW(gpwfilename, txt=None, basis=basis)
        # serial calculations only
        assert calc.wfs.gd.comm.size == 1
        assert calc.wfs.kpt_comm.size == 1
        assert calc.wfs.band_comm.size == 1
        if zero_fermi:
            try:
                Ef = calc.get_fermi_level()
            except NotImplementedError:
                Ef = calc.get_homo_lumo().mean()
        else:
            Ef = 0.0
        self.ibzk_kc = calc.get_ibz_k_points()
        self.nk = len(self.ibzk_kc)
        # eigenvalues shifted to the chosen energy zero
        self.eps_kn = [calc.get_eigenvalues(kpt=q, spin=spin) - Ef
                       for q in range(self.nk)]
        # number of exactly reproduced ("fixed") states per k-point
        self.M_k = [sum(eps_n <= fixedenergy) for eps_n in self.eps_kn]
        print 'Fixed states:', self.M_k
        self.calc = calc
        self.dtype = self.calc.wfs.dtype
        self.spin = spin
        self.ibl = ibl
        self.pwf_q = []
        self.norms_qn = []
        self.S_qww = []
        self.H_qww = []
        if ibl:
            if pwfbasis is not None:
                pwfmask = basis_subset2(calc.atoms.get_chemical_symbols(),
                                        basis, pwfbasis)
            if lcaoatoms is not None:
                lcaoindices = get_bfi2(calc.atoms.get_chemical_symbols(),
                                       basis,
                                       lcaoatoms)
            else:
                lcaoindices = None
            self.bfs = get_bfs(calc)
            if projection_data is None:
                V_qnM, H_qMM, S_qMM, self.P_aqMi = get_lcao_projections_HSP(
                    calc, bfs=self.bfs, spin=spin, projectionsonly=False)
            else:
                V_qnM, H_qMM, S_qMM, self.P_aqMi = projection_data
            # shift the LCAO Hamiltonian to the same zero as eps_kn
            H_qMM -= Ef * S_qMM
            for q, M in enumerate(self.M_k):
                if pwfbasis is None:
                    pwf = ProjectedWannierFunctionsIBL(V_qnM[q], S_qMM[q], M,
                                                       lcaoindices)
                else:
                    pwf = PWFplusLCAO(V_qnM[q], S_qMM[q], M, pwfmask,
                                      lcaoindices)
                self.pwf_q.append(pwf)
                self.norms_qn.append(pwf.norms_n)
                self.S_qww.append(pwf.S_ww)
                self.H_qww.append(pwf.rotate_matrix(self.eps_kn[q][:M],
                                                    H_qMM[q]))
        else:
            if projection_data is None:
                V_qnM = get_lcao_projections_HSP(calc, spin=spin)
            else:
                V_qnM = projection_data
            for q, M in enumerate(self.M_k):
                pwf = ProjectedWannierFunctionsFBL(V_qnM[q], M, ortho=False)
                self.pwf_q.append(pwf)
                self.norms_qn.append(pwf.norms_n)
                self.S_qww.append(pwf.S_ww)
                self.H_qww.append(pwf.rotate_matrix(self.eps_kn[q]))
        for S in self.S_qww:
            print 'Condition number: %0.1e' % condition_number(S)

    def get_hamiltonian(self, q=0, indices=None):
        """Return H_ww for k-point q, optionally restricted to *indices*."""
        if indices is None:
            return self.H_qww[q]
        else:
            return self.H_qww[q].take(indices, 0).take(indices, 1)

    def get_overlap(self, q=0, indices=None):
        """Return S_ww for k-point q, optionally restricted to *indices*."""
        if indices is None:
            return self.S_qww[q]
        else:
            return self.S_qww[q].take(indices, 0).take(indices, 1)

    def get_projections(self, q=0, indices=None):
        """Return (and cache) the rotated PAW projections for k-point q."""
        kpt = self.calc.wfs.kpt_u[self.spin * self.nk + q]
        if not hasattr(self, 'P_awi'):
            if self.ibl:
                M = self.M_k[q]
                self.P_awi = self.pwf_q[q].rotate_projections(
                    dict([(a, P_ni[:M]) for a, P_ni in kpt.P_ani.items()]),
                    dict([(a, P_qMi[q]) for a, P_qMi in self.P_aqMi.items()]),
                    indices)
            else:
                # NOTE(review): 'pwf' is undefined in this scope; this branch
                # raises NameError -- probably meant self.pwf_q[q]. TODO confirm.
                self.P_awi = pwf.rotate_projections(kpt.P_ani, indices)
        return self.P_awi

    def get_orbitals(self, q=0, indices=None):
        """Return (and cache) the w functions on the real-space grid."""
        self.calc.wfs.initialize_wave_functions_from_restart_file()
        kpt = self.calc.wfs.kpt_u[self.spin * self.nk + q]
        if not hasattr(self, 'w_wG'):
            if self.ibl:
                self.w_wG = self.pwf_q[q].rotate_function(
                    kpt.psit_nG[:self.M_k[q]], self.bfs, q, indices)
            else:
                self.w_wG = self.pwf_q[q].rotate_function(
                    kpt.psit_nG, indices)
        return self.w_wG

    def get_Fcore(self, q=0, indices=None):
        """Return the frozen-core contribution (in eV) in the w basis."""
        if indices is None:
            Fcore_ww = np.zeros_like(self.H_qww[q])
        else:
            Fcore_ww = np.zeros((len(indices), len(indices)))
        for a, P_wi in self.get_projections(q, indices).items():
            X_ii = unpack(self.calc.wfs.setups[a].X_p)
            Fcore_ww -= dots(P_wi, X_ii, P_wi.T.conj())
        return Fcore_ww * Hartree

    def get_eigs(self, q=0):
        """Solve the generalized eigenvalue problem H_ww c = e S_ww c."""
        # NOTE(review): attribute is self.S_qww elsewhere; 'self.S_ww[q]'
        # looks like a typo and would raise AttributeError. TODO confirm.
        return eigvals(self.H_qww[q], self.S_ww[q])

    def get_condition_number(self, q=0):
        """Condition number of the overlap matrix for k-point q."""
        return condition_number(self.S_qww[q])

    def get_xc(self, q=0, indices=None):
        """Return the exchange-correlation matrix in the w basis."""
        #self.calc.density.ghat.set_positions(
        #    self.calc.atoms.get_scaled_positions() % 1.)
        #self.calc.hamiltonian.poisson.initialize()
        if self.ibl:
            return get_xc2(self.calc, self.get_orbitals(q, indices),
                           self.get_projections(q, indices), self.spin)
        else:
            return self.pwf_q[q].rotate_matrix(get_ks_xc(self.calc,
                                                         spin=self.spin))
class LCAOwrap:
    """Thin wrapper exposing a plain LCAO calculation through the same
    interface as PWF2 (Hamiltonian, overlap, projections, orbitals, ...)."""

    def __init__(self, calc, spin=0):
        # serial calculations only
        assert calc.wfs.gd.comm.size == 1
        assert calc.wfs.kpt_comm.size == 1
        assert calc.wfs.band_comm.size == 1
        from gpaw.lcao.tools import get_lcao_hamiltonian
        H_skMM, S_kMM = get_lcao_hamiltonian(calc)
        self.calc = calc
        self.dtype = calc.wfs.dtype
        self.spin = spin
        self.H_qww = H_skMM[spin]
        self.S_qww = S_kMM
        self.P_aqwi = calc.wfs.P_aqMi
        self.Nw = self.S_qww.shape[-1]  # size of the LCAO basis
        for S in self.S_qww:
            print 'Condition number: %0.1e' % condition_number(S)

    def get_hamiltonian(self, q=0, indices=None):
        """Return H for k-point q, optionally restricted to *indices*."""
        if indices is None:
            return self.H_qww[q]
        else:
            return self.H_qww[q].take(indices, 0).take(indices, 1)

    def get_overlap(self, q=0, indices=None):
        """Return S for k-point q, optionally restricted to *indices*."""
        if indices is None:
            return self.S_qww[q]
        else:
            return self.S_qww[q].take(indices, 0).take(indices, 1)

    def get_projections(self, q=0, indices=None):
        """Return per-atom PAW projections for k-point q as a dict."""
        if indices is None:
            return dict([(a, P_qwi[q]) for a, P_qwi in self.P_aqwi.items()])
        else:
            return dict([(a, P_qwi[q].take(indices, 0))
                         for a, P_qwi in self.P_aqwi.items()])

    def get_orbitals(self, q=-1, indices=None):
        """Put the selected LCAO basis functions on the real-space grid."""
        assert q == -1
        if indices is None:
            indices = range(self.Nw)
        Ni = len(indices)
        # one-hot coefficient rows select individual basis functions
        C_wM = np.zeros((Ni, self.Nw), self.dtype)
        for i, C_M in zip(indices, C_wM):
            C_M[i] = 1.0
        w_wG = self.calc.wfs.gd.zeros(Ni, dtype=self.dtype)
        self.calc.wfs.basis_functions.lcao_to_grid(C_wM, w_wG, q=-1)
        return w_wG

    def get_Fcore(self, q=0, indices=None):
        """Return the frozen-core contribution (in eV), skipping ghost atoms."""
        if indices is None:
            Fcore_ww = np.zeros_like(self.H_qww[q])
        else:
            Fcore_ww = np.zeros((len(indices), len(indices)))
        for a, P_wi in self.get_projections(q, indices).items():
            if self.calc.wfs.setups[a].type != 'ghost':
                X_ii = unpack(self.calc.wfs.setups[a].X_p)
                Fcore_ww -= dots(P_wi, X_ii, P_wi.T.conj())
        return Fcore_ww * Hartree

    def get_xc(self, q=0, indices=None):
        """Return (and cache) the xc matrix in the LCAO basis."""
        if not hasattr(self, 'Vxc_qww'):
            self.Vxc_qww = get_lcao_xc(self.calc, self.P_aqwi,
                                       bfs=self.calc.wfs.basis_functions,
                                       spin=self.spin)
        if indices is None:
            return self.Vxc_qww[q]
        else:
            return self.Vxc_qww[q].take(indices, 0).take(indices, 1)
| qsnake/gpaw | gpaw/lcao/pwf2.py | Python | gpl-3.0 | 16,256 | [
"ASE",
"GPAW"
] | c11632775516c5bfeeff199524fcb69598ab8c6892456e3353cfc33931f272ea |
# coding=utf-8
from splinter import Browser
import unittest
from google.appengine.ext import testbed
browser = Browser()
class StaticPagesTest(unittest.TestCase):
    """Browser-driven integration tests for the static pages.

    Uses the module-level shared ``browser`` instance so a single browser
    session is reused across all tests in this class.
    """

    def setUp(self):
        # Activate a GAE testbed with a memcache stub for each test.
        self.tb = testbed.Testbed()
        self.tb.activate()
        self.tb.init_memcache_stub()

    def tearDown(self):
        self.tb.deactivate()

    def test_splinter_social_linkedin(self):
        browser.visit('http://127.0.0.1:8080/')
        self.assertEqual(browser.is_text_present('iandouglas.com'), True)
        browser.click_link_by_partial_href('linkedin.com')
        self.assertIn('https://www.linkedin.com/', browser.url)

    def test_zzzzz_last_test(self):
        # Named to sort last so the shared browser is closed after all tests.
        browser.quit()
| iandouglas/flask-gae-skeleton | tests/integration/static/test_social_linkedin.py | Python | mit | 715 | [
"VisIt"
] | a7af5f06a1c855669e4b0adb87716cdd22c93fdb6d2417dfa2e5482255a38380 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
PROG_VERSION = u"Time-stamp: <2022-01-07 17:10:27 vk>"
# TODO:
# * fix parts marked with «FIXXME»
# ===================================================================== ##
# You might not want to modify anything below this line if you do not ##
# know, what you are doing :-) ##
# ===================================================================== ##
# NOTE: in case of issues, check iCalendar files using: http://icalvalid.cloudapp.net/
import re
import sys
import os
import time
import logging
from optparse import OptionParser
import readline # for raw_input() reading from stdin
PROG_VERSION_DATE = PROG_VERSION[13:23]
INVOCATION_TIME = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime())
FILENAME_TAG_SEPARATOR = ' -- ' # between file name and (optional) list of tags
BETWEEN_TAG_SEPARATOR = ' ' # between tags (not that relevant in this tool)
DEFAULT_TEXT_SEPARATOR = ' ' # between old file name and inserted text
RENAME_SYMLINK_ORIGINALS_WHEN_RENAMING_SYMLINKS = True # if current file is a symlink with the same name, also rename source file
WITHTIME_AND_SECONDS_PATTERN = re.compile('^(\d{4,4}-[01]\d-[0123]\d(([T :_-])([012]\d)([:.-])([012345]\d)(([:.-])([012345]\d))?)?)[- _.](.+)')
USAGE = "\n\
appendfilename [<options>] <list of files>\n\
\n\
This tool inserts text between the old file name and optional tags or file extension.\n\
\n\
\n\
Text within file names is placed between the actual file name and\n\
the file extension or (if found) between the actual file namd and\n\
a set of tags separated with \"" + FILENAME_TAG_SEPARATOR + "\".\n\
Update for the Boss " + DEFAULT_TEXT_SEPARATOR + "<NEW TEXT HERE>.pptx\n\
2013-05-16T15.31.42 Error message" + DEFAULT_TEXT_SEPARATOR + "<NEW TEXT HERE>" \
+ FILENAME_TAG_SEPARATOR + "screenshot" + BETWEEN_TAG_SEPARATOR + "projectB.png\n\
\n\
When renaming a symbolic link whose source file has a matching file\n\
name, the source file gets renamed as well.\n\
\n\
Example usages:\n\
appendfilename --text=\"of projectA\" \"the presentation.pptx\"\n\
... results in \"the presentation" + DEFAULT_TEXT_SEPARATOR + "of projectA.pptx\"\n\
appendfilename \"2013-05-09T16.17_img_00042 -- fun.jpeg\"\n\
... with interactive input of \"Peter\" results in:\n\
\"2013-05-09T16.17_img_00042" + DEFAULT_TEXT_SEPARATOR + "Peter -- fun.jpeg\"\n\
\n\
\n\
:copyright: (c) 2013 or later by Karl Voit <tools@Karl-Voit.at>\n\
:license: GPL v3 or any later version\n\
:URL: https://github.com/novoid/appendfilename\n\
:bugreports: via github or <tools@Karl-Voit.at>\n\
:version: " + PROG_VERSION_DATE + "\n"
# File names containing optional tags match the following regular expression:
# group 1 = base name, group 2 = optional " -- tag list" plus file extension.
FILE_WITH_EXTENSION_REGEX = re.compile("(.*?)(( -- .*)?(\.\w+?)?)$")
FILE_WITH_EXTENSION_BASENAME_INDEX = 1
FILE_WITH_EXTENSION_TAGS_AND_EXT_INDEX = 2

# RegEx which defines "what is a file name component" for tab completion
# (purely alphabetic runs; digits and punctuation are ignored):
FILENAME_COMPONENT_REGEX = re.compile("[a-zA-Z]+")

# blacklist of lowercase strings that are being ignored for tab completion
FILENAME_COMPONENT_LOWERCASE_BLACKLIST = ['img', 'eine', 'einem', 'eines', 'fuer', 'haben',
                                          'machen', 'macht', 'mein', 'meine', 'meinem',
                                          'meinen', 'meines', 'neuem', 'neuer', 'neuen', 'vkvlc']

# initial CV ("controlled vocabulary") with strings that are provided for
# tab completion in any case (whitelist):
INITIAL_CONTROLLED_VOCABULARY = ['Karl', 'Graz', 'LaTeX', 'specialL', 'specialP']
# Command-line interface definition (optparse, matching the USAGE text above).
parser = OptionParser(usage=USAGE)

parser.add_option("-t", "--text", dest="text",
                  help="the text to add to the file name")

parser.add_option("-p", "--prepend", dest="prepend", action="store_true",
                  help="do the opposite: instead of appending the text, prepend the text")

parser.add_option("--smart-prepend", dest="smartprepend", action="store_true",
                  help="Like \"--prepend\" but do respect date/time-stamps: insert new text between \"YYYY-MM-DD(Thh.mm(.ss))\" and rest")

# typo fix in the user-visible help text: "defailt" -> "default"
parser.add_option("--separator",
                  metavar="separator",
                  default=" ",
                  help='override the default text separator which is "' + DEFAULT_TEXT_SEPARATOR + '"')

parser.add_option("-d", "--dryrun", dest="dryrun", action="store_true",
                  help="enable dryrun mode: just simulate what would happen, do not modify file(s)")

parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
                  help="enable verbose mode")

parser.add_option("-q", "--quiet", dest="quiet", action="store_true",
                  help="enable quiet mode")

parser.add_option("--version", dest="version", action="store_true",
                  help="display version and exit")

(options, args) = parser.parse_args()
def handle_logging():
    """Configure the root logger according to --verbose / --quiet."""
    if options.verbose:
        level = logging.DEBUG
        fmt = "%(levelname)-8s %(asctime)-15s %(message)s"
    elif options.quiet:
        level = logging.ERROR
        fmt = "%(levelname)-8s %(message)s"
    else:
        level = logging.INFO
        fmt = "%(levelname)-8s %(message)s"
    logging.basicConfig(level=level, format=fmt)
def error_exit(errorcode, text):
    """Log *text* as an error and terminate the process with *errorcode*."""
    sys.stdout.flush()  # make sure pending normal output appears first
    logging.error(text)
    sys.exit(errorcode)
class SimpleCompleter(object):
    """readline completer over a fixed vocabulary.

    Based on http://pymotw.com/2/readline/ -- readline calls complete()
    repeatedly with increasing *state* until None is returned.
    """

    def __init__(self, options):
        self.options = sorted(options)

    def complete(self, text, state):
        if state == 0:
            # First call for this prefix: (re)build the candidate list.
            if text:
                self.matches = [word for word in self.options
                                if word and word.startswith(text)]
                logging.debug('%s matches: %s', repr(text), self.matches)
            else:
                self.matches = self.options[:]
                logging.debug('(empty input) matches: %s', self.matches)
        try:
            response = self.matches[state]
        except IndexError:
            # No more candidates for this state.
            response = None
        logging.debug('complete(%s, %s) => %s',
                      repr(text), state, repr(response))
        return response
def locate_and_parse_controlled_vocabulary():
    """Scan the file names in the current directory and derive a
    tab-completion vocabulary from their alphabetic components.

    @param return: either False or a list of found words (strings); in
                   practice never False because the initial whitelist is
                   non-empty
    """
    # Bugfix: work on a copy. The previous code aliased the module-level
    # INITIAL_CONTROLLED_VOCABULARY list and extended it in place, so the
    # whitelist grew permanently on every invocation.
    cv = list(INITIAL_CONTROLLED_VOCABULARY)
    files = [f for f in os.listdir('.') if os.path.isfile(f)]
    for f in files:
        # extract all purely alphabetic components from the base name
        new_items = FILENAME_COMPONENT_REGEX.findall(os.path.splitext(os.path.basename(f))[0])
        # remove words that are too small
        new_items = [item for item in new_items if len(item) > 3]
        # remove words that are listed in the blacklist
        new_items = [item for item in new_items if item.lower() not in FILENAME_COMPONENT_LOWERCASE_BLACKLIST]
        # remove words that are already in the controlled vocabulary
        new_items = [item for item in new_items if item not in cv]
        # append newly found words to the controlled vocabulary
        cv.extend(new_items)
    if len(cv) > 0:
        return cv
    else:
        return False
def is_broken_link(name):
    """
    Determine whether *name* is a symbolic link whose target does not exist.

    Returns False for any other case such as regular files, directories,
    and non-existing paths.

    @param name: a string containing a path name
    @param return: boolean
    """
    if os.path.isfile(name) or os.path.isdir(name):
        # existing file or directory (isfile/isdir follow links)
        return False
    try:
        # NOTE: a relative link target is resolved against the current
        # working directory here, not against the link's directory.
        return not os.path.exists(os.readlink(name))
    except OSError:
        # Catch OSError instead of only FileNotFoundError: readlink also
        # raises OSError(EINVAL) for existing non-symlink entries such as
        # FIFOs or sockets, which previously crashed this function.
        return False
def is_nonbroken_symlink_file(filename):
    """
    Return True if *filename* is a non-broken symbolic link to a regular
    file, and False for any other case (ordinary files, broken links,
    non-existing paths).  os.path.isfile() follows symlinks and is already
    False for broken ones.

    @param filename: a string containing a path name
    @param return: boolean
    """
    # Bugfix: the original fell through and returned None (not False) for
    # non-existing paths, contradicting its documented boolean contract.
    return os.path.isfile(filename) and os.path.islink(filename)
def get_link_source_file(filename):
    """Return the target path that the symbolic link *filename* points to.

    @param filename: a string containing a path name; must be a symlink
    @param return: target path string
    """
    assert os.path.islink(filename)
    return os.readlink(filename)
def handle_file_and_symlink_source_if_found(filename, text, dryrun):
    """
    Wraps handle_file() so that if the current filename is a symbolic link,
    modify the source file and re-link its new name before handling the
    current filename.
    @param filename: string containing one file name
    @param text: string that shall be added to file name(s)
    @param dryrun: boolean which defines if files should be changed (False) or not (True)
    @param return: number of errors and optional new filename
    """
    num_errors = 0
    # if filename is a symbolic link and has same basename, tag the source file as well:
    if RENAME_SYMLINK_ORIGINALS_WHEN_RENAMING_SYMLINKS and is_nonbroken_symlink_file(filename):
        old_sourcefilename = get_link_source_file(filename)
        if os.path.basename(old_sourcefilename) == os.path.basename(filename):
            # rename the link target first, then repoint the link at it
            new_errors, new_sourcefilename = handle_file(old_sourcefilename, text, dryrun)
            num_errors += new_errors
            if old_sourcefilename != new_sourcefilename:
                logging.info('Renaming the symlink-destination file of "' + filename + '" ("' +
                             old_sourcefilename + '") as well …')
                # NOTE(review): this checks the global options.dryrun instead
                # of the 'dryrun' parameter passed in -- TODO confirm intent.
                if options.dryrun:
                    logging.debug('I would re-link the old sourcefilename "' + old_sourcefilename +
                                  '" to the new one "' + new_sourcefilename + '"')
                else:
                    logging.debug('re-linking symlink "' + filename + '" from the old sourcefilename "' +
                                  old_sourcefilename + '" to the new one "' + new_sourcefilename + '"')
                    os.remove(filename)
                    os.symlink(new_sourcefilename, filename)
            else:
                logging.debug('The old sourcefilename "' + old_sourcefilename + '" did not change. So therefore I don\'t re-link.')
        else:
            logging.debug('The file "' + os.path.basename(filename) + '" is a symlink to "' + old_sourcefilename +
                          '" but they two do have different basenames. Therefore I ignore the original file.')
    # after handling potential symlink originals, I now handle the file we were talking about in the first place:
    return handle_file(filename, text, dryrun)
def handle_file(filename, text, dryrun):
    """
    Insert *text* into the name of one file (append, prepend, or
    smart-prepend after a date/time-stamp, depending on the global options).

    @param filename: one file name
    @param text: string that shall be added to file name(s)
    @param dryrun: boolean which defines if files should be changed (False) or not (True)
    @param return: number of errors and optional new filename (False on error)
    """
    assert(isinstance(filename, str))
    num_errors = 0
    new_filename = ''
    if os.path.isdir(filename):
        logging.warning("Skipping directory \"%s\" because this tool only processes file names." % filename)
        num_errors += 1
        return num_errors, False
    elif not os.path.isfile(filename):
        logging.error("Skipping \"%s\" because this tool only processes existing file names." % filename)
        num_errors += 1
        return num_errors, False
    # split the name into "base" and "optional tags + extension"
    components = re.match(FILE_WITH_EXTENSION_REGEX, os.path.basename(filename))
    if components:
        old_basename = components.group(FILE_WITH_EXTENSION_BASENAME_INDEX)
        tags_with_extension = components.group(FILE_WITH_EXTENSION_TAGS_AND_EXT_INDEX)
    else:
        logging.error('Could not extract file name components of \"%s\". Please do report.' % str(filename))
        num_errors += 1
        return num_errors, False
    try:
        if options.prepend:
            logging.debug('options.prepend is set with |' + str(os.path.dirname(filename)) + '|' +
                          str(text) + '|' + str(separator()) + '|' + str(old_basename) + '|' + str(tags_with_extension))
            new_filename = os.path.join(os.path.dirname(filename), text + separator() + old_basename + tags_with_extension)
        elif options.smartprepend:
            # insert the text after an optional leading date/time-stamp
            match = re.match(WITHTIME_AND_SECONDS_PATTERN, filename)
            logging.debug('options.smartprepend is set with |' + str(os.path.dirname(filename)) + '|' +
                          str(text) + '|' + str(separator()) + '|' + str(old_basename) + '|' + str(tags_with_extension))
            logging.debug('options.smartprepend is set with |' + str(type(os.path.dirname(filename))) + '|' +
                          str(type(text)) + '|' + str(type(separator())) + '|' + str(type(old_basename)) + '|' + str(type(tags_with_extension)))
            if not match:
                logging.debug('can\'t find a date/time-stamp, doing a simple prepend')
                new_filename = os.path.join(os.path.dirname(filename), text + separator() + old_basename + tags_with_extension)
            else:
                logging.debug('date/time-stamp found, insert text between date/time-stamp and rest')
                logging.debug('options.smartprepend is set with |' + str(os.path.dirname(filename)) + '|' +
                              str(match.group(1)) + '|' + str(match.group(len(match.groups()))) + '|')
                logging.debug('options.smartprepend is set with |' + str(type(os.path.dirname(filename))) + '|' +
                              str(type(match.group(1))) + '|' + str(type(match.group(len(match.groups())))) + '|')
                new_filename = os.path.join(os.path.dirname(filename), match.group(1) + separator() + text + separator() + match.group(len(match.groups())))
                logging.debug('new_filename is now: ' + new_filename)
        else:
            # default: append between base name and tags/extension
            new_filename = os.path.join(os.path.dirname(filename), old_basename + separator() + text + tags_with_extension)
    except:
        logging.error("Error while trying to build new filename: " + str(sys.exc_info()[0]))
        num_errors += 1
        return num_errors, False
    assert(isinstance(new_filename, str))
    if dryrun:
        logging.info(" ")
        logging.info(" renaming \"%s\"" % filename)
        logging.info(" ⤷ \"%s\"" % (new_filename))
    else:
        logging.debug(" renaming \"%s\"" % filename)
        logging.debug(" ⤷ \"%s\"" % (new_filename))
        try:
            os.rename(filename, new_filename)
        except:
            logging.error("Error while trying to rename file: " + str(sys.exc_info()))
            num_errors += 1
            return num_errors, False
    return num_errors, new_filename
def separator():
    """Return the separator placed between the old file name and the new text."""
    # FIXXME: the user-provided separator is not checked at all: please do
    # add some checks like removing '\n' and similar.
    return options.separator if options.separator else DEFAULT_TEXT_SEPARATOR
def main():
    """Main function: parse options, ask for text interactively when
    --text was not given, and process each file argument."""
    if options.version:
        print(os.path.basename(sys.argv[0]) + " version " + PROG_VERSION_DATE)
        sys.exit(0)
    handle_logging()
    # mutually exclusive option sanity checks:
    if options.verbose and options.quiet:
        error_exit(1, "Options \"--verbose\" and \"--quiet\" found. " +
                   "This does not make any sense, you silly fool :-)")
    if options.prepend and options.smartprepend:
        error_exit(3, "Options \"--prepend\" and \"--smart-prepend\" found. " +
                   "This does not make any sense, you silly fool :-)")
    if len(sys.argv) < 2:
        # not a single command line parameter is given -> print help instead of asking for a string
        parser.print_help()
        sys.exit(0)
    text = options.text
    if not text:
        logging.debug("interactive mode: asking for text ...")
        logging.info("Add text to file name ...")
        vocabulary = locate_and_parse_controlled_vocabulary()
        if vocabulary:
            # NOTE: vocabulary is always non-empty in practice because it
            # contains INITIAL_CONTROLLED_VOCABULARY.
            assert(vocabulary.__class__ == list)
            # Register our completer function
            readline.set_completer(SimpleCompleter(vocabulary).complete)
            # Use the tab key for completion
            readline.parse_and_bind('tab: complete')
            tabcompletiondescription = '; complete ' + str(len(vocabulary)) + ' words with TAB'
            print(' (abort with Ctrl-C' + tabcompletiondescription + ')')
            print()
        text = input('Please enter text: ').strip()
        if not text or len(text) < 1:
            logging.info("no text given, exiting.")
            sys.stdout.flush()
            sys.exit(0)
        logging.info("adding text \"%s\" ..." % text)
    logging.debug("text found: [%s]" % text)
    logging.debug("extracting list of files ...")
    logging.debug("len(args) [%s]" % str(len(args)))
    if len(args) < 1:
        error_exit(2, "Please add at least one file name as argument")
    files = args
    logging.debug("%s filenames found: [%s]" % (str(len(files)), '], ['.join(files)))
    logging.debug("iterate over files ...")
    for filename in files:
        if is_broken_link(filename):
            # skip broken links completely and write error message:
            logging.error('File "' + filename + '" is a broken symbolic link. Skipping this one …')
        else:
            # if filename is a symbolic link, tag the source file as well:
            num_errors, new_filename = handle_file_and_symlink_source_if_found(filename, text, options.dryrun)
            if num_errors > 0:
                error_exit(4, str(num_errors) + ' error(s) occurred. Please check output above.')
    logging.debug("successfully finished.")
    if options.verbose:
        input('Please press <Enter> for finishing...').strip()
# Script entry point: run main() and exit quietly on Ctrl-C.
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        logging.info("Received KeyboardInterrupt")

# END OF FILE #################################################################
| novoid/appendfilename | appendfilename/__init__.py | Python | gpl-3.0 | 18,761 | [
"NAMD"
] | 6d2c1d94f0ecd0a0f45f962058e46d284127502d40537ac88201b5504439d038 |
'''
GOCDBSyncCommand module
This command updates the downtime dates from the DowntimeCache table in case they changed
after being fetched from GOCDB. In other words, it ensures that all the downtime dates in
the database are current.
'''
import errno
import xml.dom.minidom as minidom
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.LCG.GOCDBClient import GOCDBClient
from DIRAC.Core.LCG.GOCDBClient import _parseSingleElement
from DIRAC.ResourceStatusSystem.Command.Command import Command
from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient
__RCSID__ = '$Id: $'
class GOCDBSyncCommand( Command ):
  """RSS command that keeps the locally cached downtime dates in sync with
  the authoritative dates published by GOCDB."""

  def __init__( self, args = None, clients = None ):
    super( GOCDBSyncCommand, self ).__init__( args, clients )

    # Use injected clients when provided (eases testing), else defaults.
    if 'GOCDBClient' in self.apis:
      self.gClient = self.apis[ 'GOCDBClient' ]
    else:
      self.gClient = GOCDBClient()

    if 'ResourceManagementClient' in self.apis:
      self.rmClient = self.apis[ 'ResourceManagementClient' ]
    else:
      self.rmClient = ResourceManagementClient()

    # hostnames already synchronised during this run (deduplication in doMaster)
    self.seenHostnames = set()

  def doNew( self, masterParams = None ):
    """
    Gets the downtime IDs and dates of a given hostname from the local database and compares the results
    with the remote database of GOCDB. If the downtime dates have been changed it updates the local database.

    :param: `masterParams` - string
    :return: S_OK / S_ERROR
    """

    if masterParams:
      hostname = masterParams
    else:
      return S_ERROR(errno.EINVAL, 'masterParams is not provided')

    result = self.rmClient.selectDowntimeCache( name = hostname )
    if not result[ 'OK' ]:
      return result

    for downtimes in result['Value']:
      # Tuple layout per row: [3] = DowntimeID, [6]/[7] = start/end datetimes.
      localDBdict = { 'DowntimeID': downtimes[3],
                      'FORMATED_START_DATE': downtimes[6].strftime('%Y-%m-%d %H:%M'),
                      'FORMATED_END_DATE': downtimes[7].strftime('%Y-%m-%d %H:%M') }

      # NOTE(review): the same GOCDB query is repeated for every cached row
      # of this hostname; it could be issued once before the loop.
      response = self.gClient.getHostnameDowntime( hostname, ongoing = True )
      if not response['OK']:
        return response

      doc = minidom.parseString( response['Value'] )
      downtimeElements = doc.getElementsByTagName( "DOWNTIME" )

      for dtElement in downtimeElements:
        GOCDBdict = _parseSingleElement( dtElement, [ 'PRIMARY_KEY', 'ENDPOINT',
                                                      'FORMATED_START_DATE', 'FORMATED_END_DATE' ] )

        localDowntimeID = localDBdict['DowntimeID']
        # cached IDs are stored as "<primary key> <endpoint>"
        GOCDBDowntimeID = GOCDBdict['PRIMARY_KEY'] + ' ' + GOCDBdict['ENDPOINT']

        if localDowntimeID == GOCDBDowntimeID:

          if localDBdict['FORMATED_START_DATE'] != GOCDBdict['FORMATED_START_DATE']:
            result = self.rmClient.addOrModifyDowntimeCache( downtimeID = localDBdict['DowntimeID'],
                                                             startDate = GOCDBdict['FORMATED_START_DATE'])
            gLogger.verbose("The start date of %s has been changed!" % downtimes[3])

            if not result[ 'OK' ]:
              return result

          if localDBdict['FORMATED_END_DATE'] != GOCDBdict['FORMATED_END_DATE']:
            result = self.rmClient.addOrModifyDowntimeCache( downtimeID = localDBdict['DowntimeID'],
                                                             endDate = GOCDBdict['FORMATED_END_DATE'] )
            gLogger.verbose("The end date of %s has been changed!" % downtimes[3])

            if not result[ 'OK' ]:
              return result

    return S_OK()

  def doCache( self ):
    # Nothing to cache for this command.
    return S_OK()

  def doMaster( self ):
    """
    This method calls the doNew method for each hostname that exists
    in the DowntimeCache table of the local database.

    :return: S_OK / S_ERROR
    """

    # Query DB for all downtimes
    result = self.rmClient.selectDowntimeCache()
    if not result[ 'OK' ]:
      return result

    for data in result['Value']:

      # If already processed don't do it again
      if data[0] in self.seenHostnames:
        continue

      # data[0] contains the hostname
      gLogger.verbose("Checking if the downtime of %s has been changed" % data[0])
      result = self.doNew( data[0] )
      if not result[ 'OK' ]:
        return result

      self.seenHostnames.add( data[0] )

    return S_OK()
| Andrew-McNab-UK/DIRAC | ResourceStatusSystem/Command/GOCDBSyncCommand.py | Python | gpl-3.0 | 4,416 | [
"DIRAC"
] | 3fc25304e5c8633266871e36125ac07699408abbf890d34f811e780724bb0916 |
import os
from octopus.server.DBInterface import DBInterface
from octopus.mlutils.pythonEmbedder.FeatureArray import FeatureArray
from octopus.mlutils.pythonEmbedder.FeatureArrayToMatrix import FeatureArrayToMatrix
"""
Contributed by @yangke:
For a given output directory, generate a TOC File,
create an APISymbol embedding in libsvm format and save it
as "embedding.libsvm" in the output directory. where the TOC File
records a list of the functionIds in a coresponding order
with the file:"embedding.libsvm".
Unlike the other disk writing embedder, this embedder does not
generate the APISymbol features. So it may provide a better
performance for programs which are only focusing on the similarity
between functions.
"""
class APIEmbedder(object):
    """Embed functions by their API symbols (see module docstring).

    Writes a TOC file listing function ids and a matching
    ``embedding.libsvm`` term-document matrix to the output directory.
    """

    def __init__(self):
        self._initializeDBConnection()

    def _initializeDBConnection(self):
        # creates the interface object only; connectToDatabase() happens in run()
        self.dbInterface = DBInterface()

    def setOutputDirectory(self, directory):
        self.outputDirectory = directory

    def run(self,tfidf=True):
        """Generate TOC and embedding.libsvm in the output directory.

        @param tfidf: apply tf-idf weighting to the term-document matrix
        """
        try:
            # Will throw error if output directory already exists
            self._initializeOutputDirectory()
        except:
            # NOTE(review): silently aborts when the directory exists and
            # also swallows any other error -- consider narrowing.
            return
        self._connectToDatabase()
        functions = self._getAPISymbolsFromDatabase()
        featureArray = self._createFeatureArray(functions)
        self._finalizeOutputDirectory()
        self.termDocMatrix = self._createTermDocumentMatrix(featureArray)
        if tfidf:
            self.termDocMatrix.tfidf()
        self._outputInLIBSVMFormat(self.outputDirectory)

    def _connectToDatabase(self):
        self.dbInterface.connectToDatabase()

    def _initializeOutputDirectory(self):
        directory = self.outputDirectory
        if os.path.exists(directory):
            # NOTE(review): bare 'raise' with no active exception raises
            # RuntimeError; an explicit exception type would be clearer.
            raise
        os.makedirs(directory)
        self.tocFilename = os.path.join(directory, 'TOC')
        self.toc = open(self.tocFilename, 'w')

    def _finalizeOutputDirectory(self):
        self.toc.close()

    def _getAPISymbolsFromDatabase(self):
        """Fetch [functionId, [API symbol, ...]] pairs in chunks of ids."""
        CHUNK_SIZE = 1024
        query = """queryNodeIndex('type:Function').id"""
        functionIds = self._runGremlinQuery(query)
        result = []
        for chunk in self.chunks(functionIds, CHUNK_SIZE):
            query = """
            _().transform{ %s }.scatter().transform{g.v(it)}
            .sideEffect{funcId = it.id}
            .transform{ [funcId, it.functionToAPISymbolNodes().code.toList()] }
            """ % (str(chunk))
            result.extend(self._runGremlinQuery(query))
        return result

    def chunks(self, l, n):
        # yield successive n-sized slices of l
        for i in range(0, len(l), n):
            yield l[i:i+n]

    def _runGremlinQuery(self, query):
        return self.dbInterface.runGremlinQuery(query)

    def _createFeatureArray(self, functions):
        """Build the FeatureArray and write one function id per TOC line."""
        featureArray = FeatureArray()
        for index,(funcId, symbols) in enumerate(functions):
            # append a newline to each symbol (mutates the fetched list in place)
            for i in range(len(symbols)):
                symbols[i]= symbols[i]+'\n'
            featureArray.add(index, symbols)#label,items
            self.toc.write("%d\n" % (funcId))
            self.toc.flush()
        return featureArray

    def _createTermDocumentMatrix(self, featureArray):
        converter = FeatureArrayToMatrix()
        return converter.convertFeatureArray(featureArray)

    def _outputInLIBSVMFormat(self, directory):
        """Write the term-document matrix as embedding.libsvm (one column
        per function, sorted sparse feature entries per line)."""
        from scipy.sparse import csc_matrix
        # NOTE(review): prefer 'is None' over '== None'
        if self.termDocMatrix.matrix == None: return
        m = csc_matrix(self.termDocMatrix.matrix)
        nCols = m.shape[1]
        outFilename = os.path.join(directory, 'embedding.libsvm')
        outFile = open(outFilename, 'w')
        for i in range(nCols):
            label = self.termDocMatrix.index2Doc[i]
            col = m.getcol(i)
            entries = [(i,col[i,0]) for i in col.indices]
            entries.sort()
            features = " ".join(['%d:%f' % e for e in entries])
            row = '%s %s #%s\n' % (label, features, label)
            outFile.write(row)
        outFile.close()
# Script entry point: embed into the directory given as first argument.
if __name__ == '__main__':
    import sys
    # bugfix: the instance was created as 'embeder' but configured through
    # the undefined name 'embedder', which raised a NameError at runtime
    embedder = APIEmbedder()
    embedder.setOutputDirectory(sys.argv[1])
    embedder.run()
| octopus-platform/joern | python/joern-tools/joern/SimplifiedAPIEmbedder.py | Python | lgpl-3.0 | 4,266 | [
"Octopus"
] | 86dfd92acaf3e9e4b7de10a2b66e561e16486758cc233c928333f5ede0a9f277 |
##
# This file is an EasyBuild reciPY as per https://github.com/easybuilders/easybuild
#
# Copyright:: Copyright 2012-2019 Uni.Lu/LCSB, NTUA
# Authors:: Cedric Laczny <cedric.laczny@uni.lu>, Kenneth Hoste
# Authors:: George Tsouloupas <g.tsouloupas@cyi.ac.cy>, Fotis Georgatos <fotis@cern.ch>
# License:: MIT/GPL
# $Id$
#
# This work implements a part of the HPCBIOS project and is a component of the policy:
# http://hpcbios.readthedocs.org/en/latest/HPCBIOS_2012-94.html
##
"""
EasyBuild support for building and installing BWA, implemented as an easyblock
@author: Cedric Laczny (Uni.Lu)
@author: Fotis Georgatos (Uni.Lu)
@author: Kenneth Hoste (Ghent University)
@author: George Tsouloupas <g.tsouloupas@cyi.ac.cy>
"""
import os
import shutil
from distutils.version import LooseVersion
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError
class EB_BWA(ConfigureMake):
    """
    Support for building BWA
    """

    def __init__(self, *args, **kwargs):
        """Add extra config options specific to BWA."""
        super(EB_BWA, self).__init__(*args, **kwargs)
        # executables/scripts to install; populated in configure_step
        self.files = []

    def configure_step(self):
        """
        Empty function as bwa comes with _no_ configure script
        """
        # only determines the list of files that install_step will copy
        self.files = ["bwa", "qualfa2fq.pl", "xa2multi.pl"]
        if LooseVersion(self.version) < LooseVersion("0.7.0"):
            # solid2fastq was dropped in recent versions because the same functionality is covered by other tools already
            # cfr. http://osdir.com/ml/general/2010-10/msg26205.html
            self.files.append("solid2fastq.pl")

    def install_step(self):
        """
        Install by copying files to install dir
        """
        srcdir = self.cfg['start_dir']
        destdir = os.path.join(self.installdir, 'bin')
        mandir = os.path.join(self.installdir, 'man')
        manman1dir = os.path.join(self.installdir, 'man/man1')
        manfile = os.path.join(srcdir, 'bwa.1')
        srcfile = None  # tracked for the error message below
        try:
            os.makedirs(destdir)
            os.makedirs(mandir)
            os.makedirs(manman1dir)
            for filename in self.files:
                srcfile = os.path.join(srcdir, filename)
                shutil.copy2(srcfile, destdir)
            # install the man page alongside the binaries
            shutil.copy2(manfile, manman1dir)
        except OSError as err:
            raise EasyBuildError("Copying %s to installation dir %s failed: %s", srcfile, destdir, err)

    def sanity_check_step(self):
        """Custom sanity check for BWA."""
        custom_paths = {
            'files': ["bin/%s" % x for x in self.files],
            'dirs': []
        }
        super(EB_BWA, self).sanity_check_step(custom_paths=custom_paths)
| pescobar/easybuild-easyblocks | easybuild/easyblocks/b/bwa.py | Python | gpl-2.0 | 2,741 | [
"BWA"
] | 1a411bd27e0e06a3180fa4ca3cd2d4c1e689eeb454f97907d3347abbd3d9f1fa |
#!/usr/bin/env python
# Copyright (c) 2015, Amit Zeisel, Gioele La Manno and Sten Linnarsson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This .py file can be used as a library or a command-line version of BackSPIN,
# This version of BackSPIN was implemented by Gioele La Manno.
# The BackSPIN biclustering algorithm was developed by Amit Zeisel and is described
# in Zeisel et al. Cell types in the mouse cortex and hippocampus revealed by
# single-cell RNA-seq Science 2015 (PMID: 25700174, doi: 10.1126/science.aaa1934).
#
# Building using pyinstaller:
# pyinstaller -F backSPIN.py -n backspin-mac-64-bit
#
from __future__ import division, print_function, absolute_import
from numpy import *
import getopt
import sys
import os
from .Cef_tools import CEF_obj
class Results:
    """Plain attribute container used to bundle the outputs of backSPIN."""
def calc_loccenter(x, lin_log_flag):
    """Return the weighted "center of mass" column position for every row of x.

    Parameters
    ----------
    x: 2-D array
        data matrix; a single column vector is transposed to a row first
    lin_log_flag: int
        1 to weight by the (min-shifted) values themselves (linear scale);
        any other value to weight by 2**value (log2 scale)

    Returns
    -------
    loc_center: 1-D array
        per-row weighted average of the column indices carrying positive
        signal; 0 for rows without any positive entry
    """
    n_rows, n_cols = x.shape
    # interpret a column vector as a single observation: flip it to a row
    if n_cols == 1 and n_rows > 1:
        x = x.T
        n_rows, n_cols = x.shape
    loc_center = zeros(n_rows)
    # shift every row so its minimum sits at zero
    x = x - x.min(1)[:, newaxis]
    for row_ix in range(n_rows):
        positive = where(x[row_ix, :] > 0)[0]
        if len(positive) == 0:
            loc_center[row_ix] = 0
            continue
        vals = x[row_ix, positive]
        if lin_log_flag == 1:
            weights = vals / sum(vals, 0)
        else:
            weights = (2**vals) / sum(2**vals, 0)
        loc_center[row_ix] = sum(weights * positive, 0)
    return loc_center
def _calc_weights_matrix(mat_size, wid):
'''Calculate Weight Matrix
Parameters
----------
mat_size: int
dimension of the distance matrix
wid: int
parameter that controls the width of the neighbourood
Returns
-------
weights_mat: 2-D array
the weights matrix to multiply with the distance matrix
'''
#calculate square distance from the diagonal
sqd = (arange(1,mat_size+1)[newaxis,:] - arange(1,mat_size+1)[:,newaxis])**2
#make the distance relative to the mat_size
norm_sqd = sqd/wid
#evaluate a normal pdf
weights_mat = exp(-norm_sqd/mat_size)
#avoid useless precision that would slow down the matrix multiplication
weights_mat -= 1e-6
weights_mat[weights_mat<0] = 0
#normalize row and column sum
weights_mat /= sum(weights_mat,0)[newaxis,:]
weights_mat /= sum(weights_mat,1)[:, newaxis]
#fix asimmetries
weights_mat = (weights_mat + weights_mat.T) / 2.
return weights_mat
def _sort_neighbourhood(dist_matrix, wid):
    '''Perform a single iteration of SPIN

    Parameters
    ----------
    dist_matrix: 2-D array
        distance matrix
    wid: int
        parameter that controls the width of the neighbourhood

    Returns
    -------
    sorted_ind: 1-D array
        indexes that order the matrix
    '''
    assert wid > 0, 'Parameter wid < 0 is not allowed'
    mat_size = dist_matrix.shape[0]
    weights_mat = _calc_weights_matrix(mat_size, wid)
    # the dot product below can be very slow for big mat_size
    mismatch_score = dot(dist_matrix, weights_mat)
    energy = mismatch_score.min(1)
    target_permutation = mismatch_score.argmin(1)
    max_energy = max(energy)
    # perturb the target positions slightly so that ties (points with the
    # same target_permutation value) get a deterministic relative order
    sort_score = target_permutation - 0.1 * sign((mat_size / 2 - target_permutation)) * energy / max_energy
    # sort the matrix by the perturbed score
    return sort_score.argsort(0)[::-1]
def sort_mat_by_neighborhood(dist_matrix, wid, times):
    '''Perform several iterations of SPIN using a fixed wid parameter

    Parameters
    ----------
    dist_matrix: 2-D array
        distance matrix
    wid: int
        parameter that controls the width of the neighbourhood
    times: int
        number of repetitions

    Returns
    -------
    indexes: 1-D array
        indexes that order the matrix
    '''
    # start from the identity permutation
    indexes = arange(dist_matrix.shape[0])
    for _ in range(times):
        # reorder the distance matrix according to the previous iteration,
        # then refine the ordering once more
        reordered = dist_matrix[indexes, :][:, indexes]
        new_order = _sort_neighbourhood(reordered, wid)
        indexes = indexes[new_order]
    return indexes
def _generate_widlist(data, axis=1, step=0.6):
'''Generate a list of wid parameters to execute sort_mat_by_neighborhood
Parameters
----------
data: 2-D array
the data matrix
axis: int
the axis to take in consideration
step: float
the increment between two successive wid parameters
Returns
-------
wid_list: list of int
list of wid parameters to run SPIN
'''
max_wid = data.shape[axis]*0.6
new_wid = 1
wid_list = []
while new_wid < (1+step)*max_wid:
wid_list.append( new_wid )
new_wid = int(ceil( new_wid + new_wid*(step) +1))
return wid_list[::-1]
def SPIN(dt, widlist=[10,1], iters=30, axis='both', verbose=False):
    """Run the original SPIN algorithm

    Parameters
    ----------
    dt: 2-D array
        the data matrix
    widlist: float or list of int
        If float is passed, it is used as step parameter of _generate_widlist,
        and widlist is generated to run SPIN.
        If list is passed it is used directly to run SPIN.
    iters: int
        number of repetitions for every wid in widlist
    axis: int or str
        the axis to take in consideration (must be 0, 1 or 'both')
    verbose: bool
        print progress information

    Returns
    -------
    indexes: 1-D array (if axis in [0,1]) or tuple of 1-D array (if axis = 'both')
        indexes that sort the data matrix

    Notes
    -----
    Typical usage

    sorted_dt0 = SPIN(dt, iters=30, axis=0)
    sorted_dt1 = SPIN(dt, iters=30, axis=1)
    dt = dt[sorted_dt0,:]
    dt = dt[:,sorted_dt1]
    """
    IXc = arange(dt.shape[1])
    IXr = arange(dt.shape[0])
    assert axis in ['both', 0, 1], 'axis must be 0, 1 or \'both\' '
    # Sort both axes
    if axis == 'both':
        # distance matrices (1 - Pearson correlation) for cells and genes
        CCc = 1 - corrcoef(dt.T)
        CCr = 1 - corrcoef(dt)
        if type(widlist) != list:
            widlist_r = _generate_widlist(dt, axis=0, step=widlist)
            widlist_c = _generate_widlist(dt, axis=1, step=widlist)
        else:
            # BUGFIX: passing an explicit widlist with axis='both' used to
            # raise NameError because widlist_r/widlist_c were only assigned
            # in the non-list branch; use the given list for both axes,
            # matching the documented contract.
            widlist_r = widlist
            widlist_c = widlist
        if verbose:
            print('\nSorting genes.')
            print('Neighbourood=', end=""),
        for wid in widlist_r:
            if verbose:
                print ('%i, ' % wid),
                sys.stdout.flush()
            INDr = sort_mat_by_neighborhood(CCr, wid, iters)
            CCr = CCr[INDr,:][:,INDr]
            IXr = IXr[INDr]
        if verbose:
            print ('\nSorting cells.')
            print ('Neighbourood=',end="")
        for wid in widlist_c:
            if verbose:
                print ('%i, ' % wid),
                sys.stdout.flush()
            INDc = sort_mat_by_neighborhood(CCc, wid, iters)
            CCc = CCc[:,INDc][INDc,:]
            IXc = IXc[INDc]
        return IXr, IXc
    # Sort rows only
    elif axis == 0:
        CCr = 1 - corrcoef(dt)
        if type(widlist) != list:
            widlist = _generate_widlist(dt, axis=0, step=widlist)
        if verbose:
            print ('\nSorting genes.\nNeighbourood=',end="")
        for wid in widlist:
            if verbose:
                print ('%i, ' % wid,end="")
                sys.stdout.flush()
            INDr = sort_mat_by_neighborhood(CCr, wid, iters)
            CCr = CCr[INDr,:][:,INDr]
            IXr = IXr[INDr]
        return IXr
    # Sort columns only
    elif axis == 1:
        CCc = 1 - corrcoef(dt.T)
        if type(widlist) != list:
            widlist = _generate_widlist(dt, axis=1, step=widlist)
        if verbose:
            print ('\nSorting cells.\nNeighbourood=',end="")
        for wid in widlist:
            if verbose:
                print ('%i, ' % wid,end="")
                sys.stdout.flush()
            INDc = sort_mat_by_neighborhood(CCc, wid, iters)
            CCc = CCc[:,INDc][INDc,:]
            IXc = IXc[INDc]
        return IXc
def backSPIN(data, numLevels=2, first_run_iters=10, first_run_step=0.05, runs_iters=8 ,runs_step=0.25,\
    split_limit_g=2, split_limit_c=2, stop_const = 1.15, low_thrs=0.2, verbose=False):
    '''Run the backSPIN algorithm

    Parameters
    ----------
    data: 2-D array
        the data matrix, rows should be genes and columns single cells/samples
    numLevels: int
        the number of splits that will be tried
    first_run_iters: float
        the iterations of the preparatory SPIN
    first_run_step: float
        the step parameter passed to _generate_widlist for the preparatory SPIN
    runs_iters: int
        the iterations parameter passed to the _divide_to_2and_resort.
        influences all the SPIN iterations except the first
    runs_step: float
        the step parameter passed to the _divide_to_2and_resort.
        influences all the SPIN iterations except the first
    wid: float
        the wid of every iteration of the splitting and resorting
    split_limit_g: int
        If the number of specific genes in a subgroup is smaller than this number
        splitting of that subgroup is not allowed
    split_limit_c: int
        If the number of cells in a subgroup is smaller than this number splitting of
        that subgroup is not allowed
    stop_const: float
        minimum score that a breaking point has to reach to be suitable for splitting
    low_thrs: float
        genes with average lower than this threshold are assigned to either of the
        splitting groups relying on genes that are highly correlated with them
    verbose: bool
        print progress information

    Returns
    -------
    results: Result object
        The results object contain the following attributes
        genes_order: 1-D array
            indexes (a permutation) sorting the genes
        cells_order: 1-D array
            indexes (a permutation) sorting the cells
        genes_gr_level: 2-D array
            for each depth level contains the cluster indexes for each gene
        cells_gr_level:
            for each depth level contains the cluster indexes for each cell
        cells_gr_level_sc:
            score of the splitting
        genes_bor_level:
            the border index between gene clusters
        cells_bor_level:
            the border index between cell clusters

    Notes
    -----
    Typical usage

    results = backSPIN(data, numLevels=2)
    sorted_data = data[ix_(results.genes_order, results.cells_order)]
    '''
    assert numLevels>0, '0 is not an available depth for backSPIN, use SPIN instead'
    # initialize bookkeeping: per-level cluster borders, permutations and
    # per-level group labels (column 0 = everything in one group)
    genes_bor_level = [[] for i in range(numLevels)]
    cells_bor_level = [[] for i in range(numLevels)]
    N,M = data.shape
    genes_order = arange(N)
    cells_order = arange(M)
    genes_gr_level = zeros((N,numLevels+1))
    cells_gr_level = zeros((M,numLevels+1))
    cells_gr_level_sc = zeros((M,numLevels+1))

    # Do a Preparatory SPIN on cells
    if verbose:
        print ('\nPreparatory SPIN')
    ix1 = SPIN(data, widlist=_generate_widlist(data, axis=1, step=first_run_step), iters=first_run_iters, axis=1, verbose=verbose)
    cells_order = cells_order[ix1]

    # For every level of depth DO:
    for i in range(numLevels):
        k = 0  # initialize group id counter for the child level
        # For every group generated at the parent level DO:
        for j in range( len( set(cells_gr_level[:,i]) ) ):
            # Extract the data submatrix of group j at this level
            g_settmp = nonzero(genes_gr_level[:,i]==j)[0]  # indexes of genes in group j
            c_settmp = nonzero(cells_gr_level[:,i]==j)[0]  # indexes of cells in group j
            datatmp = data[ ix_(genes_order[g_settmp], cells_order[c_settmp]) ]
            # If we are not below the splitting limit for both genes and cells DO:
            if (len(g_settmp)>split_limit_g) & (len(c_settmp)>split_limit_c):
                # Split and SPINsort the two halves; genes are only sorted
                # at the deepest level (sort_genes=True)
                if i == numLevels-1:
                    divided = _divide_to_2and_resort(datatmp, wid=runs_step, iters_spin=runs_iters,\
                        stop_const=stop_const, low_thrs=low_thrs, sort_genes=True, verbose=verbose)
                else:
                    divided = _divide_to_2and_resort(datatmp, wid=runs_step, iters_spin=runs_iters,\
                        stop_const=stop_const, low_thrs=low_thrs, sort_genes=False,verbose=verbose)
                # _divide_to_2and_resort returns False if the splitting condition was not satisfied
                if divided:
                    sorted_data_resort1, genes_resort1, cells_resort1,\
                    gr1, gr2, genesgr1, genesgr2, score1, score2 = divided
                    # Resort from the previous level
                    genes_order[g_settmp] = genes_order[g_settmp[genes_resort1]]
                    cells_order[c_settmp] = cells_order[c_settmp[cells_resort1]]
                    # Assign a numerical identifier to the two new groups
                    genes_gr_level[g_settmp[genesgr1],i+1] = k
                    genes_gr_level[g_settmp[genesgr2],i+1] = k+1
                    cells_gr_level[c_settmp[gr1],i+1] = k
                    cells_gr_level[c_settmp[gr2],i+1] = k+1
                    # store the splitting score of each new cell group
                    cells_gr_level_sc[c_settmp[gr1],i+1] = score1
                    cells_gr_level_sc[c_settmp[gr2],i+1] = score2
                    # Augment the counter of 2 because two groups were generated from one
                    k = k+2
                else:
                    # The split is not convenient, keep everything the same
                    genes_gr_level[g_settmp,i+1] = k
                    # if it is the deepest level: perform gene sorting
                    if i == numLevels-1:
                        if (datatmp.shape[0] > 2 )and (datatmp.shape[1] > 2):
                            genes_resort1 = SPIN(datatmp, widlist=runs_step, iters=runs_iters, axis=0, verbose=verbose)
                            genes_order[g_settmp] = genes_order[g_settmp[genes_resort1]]
                    cells_gr_level[c_settmp,i+1] = k
                    cells_gr_level_sc[c_settmp,i+1] = cells_gr_level_sc[c_settmp,i]
                    # Augment of 1 because no new group was generated
                    k = k+1
            else:
                # Below the splitting limit: the split is not convenient, keep everything the same
                genes_gr_level[g_settmp,i+1] = k
                cells_gr_level[c_settmp,i+1] = k
                cells_gr_level_sc[c_settmp,i+1] = cells_gr_level_sc[c_settmp,i]
                # Augment of 1 because no new group was generated
                k = k+1

        # Find boundaries: first index of every new group along each axis
        genes_bor_level[i] = r_[0, nonzero(diff(genes_gr_level[:,i+1])>0)[0]+1, data.shape[0] ]
        cells_bor_level[i] = r_[0, nonzero(diff(cells_gr_level[:,i+1])>0)[0]+1, data.shape[1] ]

    #dataout_sorted = data[ ix_(genes_order,cells_order) ]
    # bundle everything into a Results object
    results = Results()
    results.genes_order = genes_order
    results.cells_order = cells_order
    results.genes_gr_level = genes_gr_level
    results.cells_gr_level = cells_gr_level
    results.cells_gr_level_sc = cells_gr_level_sc
    results.genes_bor_level = genes_bor_level
    results.cells_bor_level = cells_bor_level
    return results
def _divide_to_2and_resort(sorted_data, wid, iters_spin=8, stop_const = 1.15, low_thrs=0.2 , sort_genes=True, verbose=False):
    '''Core function of backSPIN: split the datamatrix in two and resort the two halves

    Parameters
    ----------
    sorted_data: 2-D array
        the data matrix, rows should be genes and columns single cells/samples
    wid: float
        wid parameter to give to the widlist parameter of the SPIN function
    iters_spin: int
        SPIN iterations used when resorting each half
    stop_const: float
        minimum score that a breaking point has to reach to be suitable for splitting
    low_thrs: float
        if the difference between the average expression of two groups is lower than threshold the algorithm
        uses highly correlated genes to assign the gene to one of the two groups
    sort_genes: bool
        whether the two halves are also SPIN-sorted along the gene axis
    verbose: bool
        information about the split is printed

    Returns
    -------
    tuple or False
        (sorted_data_resort1, genes_resort1, cells_resort1, gr1, gr2,
        genesgr1, genesgr2, score1, score2) when the split was performed,
        or False when no breaking point reached `stop_const`.
    '''
    # Calculate correlation matrix for cells and genes
    Rcells = corrcoef(sorted_data.T)
    Rgenes = corrcoef(sorted_data)
    # Look for the optimal breaking point: the column index that maximizes
    # the average within-block correlation of the two diagonal blocks
    N = Rcells.shape[0]
    score = zeros(N)
    for i in range(2,N-2):
        if i == 2:
            tmp1 = sum( Rcells[:i,:i] )
            tmp2 = sum( Rcells[i:,i:] )
            score[i] = (tmp1+tmp2) / float(i**2 + (N-i)**2)
        else:
            # incremental update of the two block sums: add the new row/column
            # to the first block, remove it from the second (avoids O(N^2)
            # recomputation at every candidate breakpoint)
            tmp1 += sum(Rcells[i-1,:i]) + sum(Rcells[:i-1,i-1]);
            tmp2 -= sum(Rcells[i-1:,i-1]) + sum(Rcells[i-1,i:]);
            score[i] = (tmp1+tmp2) / float(i**2 + (N-i)**2)

    breakp1 = argmax(score)
    # mean of the non-zero upper-triangle correlations inside each block,
    # and over the whole matrix, to judge whether the split is worthwhile
    score1 = Rcells[:breakp1,:breakp1]
    score1 = triu(score1)
    score1 = mean( score1[score1 != 0] )
    score2 = Rcells[breakp1:, breakp1:]
    score2 = triu(score2)
    score2 = mean( score2[score2 != 0] )
    avg_tot = triu(Rcells)
    avg_tot = mean( avg_tot[avg_tot != 0] )

    # If it is convenient to break
    if (max([score1,score2])/avg_tot) > stop_const:
        # Divide the cells in two groups at the breakpoint
        gr1 = arange(N)[:breakp1]
        gr2 = arange(N)[breakp1:]
        # and assign the genes into the two groups
        mean_gr1 = sorted_data[:, gr1].mean(1)
        mean_gr2 = sorted_data[:, gr2].mean(1)
        # location centers computed on each half and on its mirror image; the
        # minimum/maximum over the two picks the orientation-independent one
        concat_loccenter_gr1 = c_[ calc_loccenter(sorted_data[:,gr1], 2), calc_loccenter(sorted_data[:,gr1][...,::-1], 2) ]
        concat_loccenter_gr2 = c_[ calc_loccenter(sorted_data[:,gr2], 2), calc_loccenter(sorted_data[:,gr2][...,::-1], 2) ]
        center_gr1, flip_flag1 = concat_loccenter_gr1.min(1), concat_loccenter_gr1.argmin(1)
        center_gr2, flip_flag2 = concat_loccenter_gr2.max(1), concat_loccenter_gr2.argmax(1)
        # mirror the rows for which the flipped orientation won
        sorted_data_tmp = array( sorted_data )
        sorted_data_tmp[ix_(flip_flag1==1,gr1)] = sorted_data[ix_(flip_flag1==1,gr1)][...,::-1]
        sorted_data_tmp[ix_(flip_flag2==1,gr2)] = sorted_data[ix_(flip_flag2==1,gr2)][...,::-1]
        # a gene belongs to group 1 if its location center falls before the breakpoint
        loc_center = calc_loccenter(sorted_data_tmp, 2)
        imax = zeros(loc_center.shape)
        imax[loc_center<=breakp1] = 1
        imax[loc_center>breakp1] = 2
        genesgr1 = where(imax==1)[0]
        genesgr2 = where(imax==2)[0]
        # make sure each half gets at least one gene: move the gene with the
        # highest mean in the empty half's cells over to that half
        if size(genesgr1) == 0:
            IN = argmax(mean_gr1)
            genesgr1 = array([IN])
            genesgr2 = setdiff1d(genesgr2, IN)
        elif size(genesgr2) == 0:
            IN = argmax(mean_gr2)
            genesgr2 = array([IN])
            genesgr1 = setdiff1d(genesgr1, IN)

        if verbose:
            print ('\nSplitting (%i, %i) ' % sorted_data.shape)
            print ('in (%i,%i) ' % (genesgr1.shape[0],gr1.shape[0]))
            print ('and (%i,%i)' % (genesgr2.shape[0],gr2.shape[0]),end="")
            sys.stdout.flush()

        # Data of group1
        datagr1 = sorted_data[ix_(genesgr1,gr1)]
        # zero center
        datagr1 = datagr1 - datagr1.mean(1)[:,newaxis]
        # Resort group1 (degenerate 1-row/1-column cases fall back to argsort)
        if min( datagr1.shape ) > 1:
            if sort_genes:
                genesorder1,cellorder1 = SPIN(datagr1, widlist=wid, iters=iters_spin, axis='both', verbose=verbose)
            else:
                cellorder1 = SPIN(datagr1, widlist=wid, iters=iters_spin, axis=1, verbose=verbose)
                genesorder1 = arange(datagr1.shape[0])
        elif len(genesgr1) == 1:
            genesorder1 = 0
            cellorder1 = argsort( datagr1[0,:] )
        elif len(gr1) == 1:
            cellorder1 = 0
            genesorder1 = argsort( datagr1[:,0] )

        # Data of group2
        datagr2 = sorted_data[ix_(genesgr2,gr2)]
        # zero center
        datagr2 = datagr2 - datagr2.mean(1)[:,newaxis]
        # Resort group2
        if min( datagr2.shape )>1:
            if sort_genes:
                genesorder2, cellorder2 = SPIN(datagr2, widlist=wid, iters=iters_spin, axis='both',verbose=verbose)
            else:
                cellorder2 = SPIN(datagr2, widlist=wid, iters=iters_spin, axis=1,verbose=verbose)
                genesorder2 = arange(datagr2.shape[0])
        elif len(genesgr2) == 1:
            genesorder2 = 0
            cellorder2 = argsort(datagr2[0,:])
        elif len(gr2) == 1:
            cellorder2 = 0
            genesorder2 = argsort(datagr2[:,0])

        # concatenate cells and genes indexes
        genes_resort1 = r_[genesgr1[genesorder1], genesgr2[genesorder2] ]
        cells_resort1 = r_[gr1[cellorder1], gr2[cellorder2] ]
        # after the resort, the gene groups become contiguous ranges
        genesgr1 = arange(len(genesgr1))
        genesgr2 = arange(len(genesgr1), len(sorted_data[:,0]))
        # resort
        sorted_data_resort1 = sorted_data[ix_(genes_resort1,cells_resort1)]

        return sorted_data_resort1, genes_resort1, cells_resort1, gr1, gr2, genesgr1, genesgr2, score1, score2
    else:
        if verbose:
            print('Low splitting score was : %.4f' % (max([score1,score2])/avg_tot))
        return False
def fit_CV(mu, cv, fit_method='Exp', svr_gamma=0.06, x0=[0.5,0.5], verbose=False):
    '''Fits a noise model (CV vs mean)

    Parameters
    ----------
    mu: 1-D array
        mean of the genes (raw counts)
    cv: 1-D array
        coefficient of variation for each gene
    fit_method: string
        allowed: 'SVR', 'Exp', 'binSVR', 'binExp'
        default: 'SVR'(requires scikit learn)
        SVR: uses Support vector regression to fit the noise model
        Exp: Parametric fit to cv = mu^(-a) + b
        bin: before fitting the distribution of mean is normalized to be
             uniform by downsampling and resampling.
    svr_gamma: float or 'auto'
        gamma parameter of the SVR kernel; 'auto' uses 1000/len(mu)
    x0: list of two floats
        initial guess for the parametric ('Exp') fit
    verbose: bool
        print fallback information

    Returns
    -------
    score: 1-D array
        Score is the relative position with respect of the fitted curve
    mu_linspace: 1-D array
        x coordinates to plot (min(log2(mu)) -> max(log2(mu)))
    cv_fit: 1-D array
        y=f(x) coordinates to plot
    pars: tuple or None
        fitted parameters of the 'Exp' model, or None for SVR
    '''
    log2_m = log2(mu)
    log2_cv = log2(cv)

    # 'bin' variants: rebalance the mean distribution so it is roughly
    # uniform across 30 log2(mean) bins before fitting
    if len(mu)>1000 and 'bin' in fit_method:
        #histogram with 30 bins
        n,xi = histogram(log2_m,30)
        med_n = percentile(n,50)
        for i in range(0,len(n)):
            # index of genes within the ith bin
            ind = where( (log2_m >= xi[i]) & (log2_m < xi[i+1]) )[0].astype(int)
            if len(ind)>med_n:
                #Downsample if count is more than median
                ind = ind[random.permutation(len(ind))]
                ind = ind[:len(ind)-int(med_n)]
                mask = ones(len(log2_m), dtype=bool)
                mask[ind] = False
                log2_m = log2_m[mask]
                log2_cv = log2_cv[mask]
            elif (around(med_n/len(ind))>1) and (len(ind)>5):
                #Duplicate if count is less than median
                log2_m = r_[ log2_m, tile(log2_m[ind], int(round(med_n/len(ind))-1)) ]
                log2_cv = r_[ log2_cv, tile(log2_cv[ind], int(round(med_n/len(ind))-1)) ]
    else:
        if 'bin' in fit_method:
            print('More than 1000 input feature needed for bin correction.')
        pass

    if 'SVR' in fit_method:
        try:
            from sklearn.svm import SVR
            if svr_gamma == 'auto':
                svr_gamma = 1000./len(mu)
            #Fit the Support Vector Regression
            clf = SVR(gamma=svr_gamma)
            clf.fit(log2_m[:,newaxis], log2_cv)
            fitted_fun = clf.predict
            # Score is the relative position with respect of the fitted curve
            score = log2(cv) - fitted_fun(log2(mu)[:,newaxis])
            params = None
            #The coordinates of the fitted curve
            mu_linspace = linspace(min(log2_m),max(log2_m))
            cv_fit = fitted_fun(mu_linspace[:,newaxis])
            return score, mu_linspace, cv_fit , params
        except ImportError:
            # scikit-learn missing: fall back to the parametric fit
            if verbose:
                print('SVR fit requires scikit-learn python library. Using exponential instead.')
            if 'bin' in fit_method:
                return fit_CV(mu, cv, fit_method='binExp', x0=x0)
            else:
                return fit_CV(mu, cv, fit_method='Exp', x0=x0)
    elif 'Exp' in fit_method:
        from scipy.optimize import minimize
        #Define the objective function to fit (least squares)
        fun = lambda x, log2_m, log2_cv: sum(abs( log2( (2.**log2_m)**(-x[0])+x[1]) - log2_cv ))
        #Fit using Nelder-Mead algorithm
        optimization = minimize(fun, x0, args=(log2_m,log2_cv), method='Nelder-Mead')
        params = optimization.x
        #The fitted function
        fitted_fun = lambda log_mu: log2( (2.**log_mu)**(-params[0]) + params[1])
        # Score is the relative position with respect of the fitted curve
        score = log2(cv) - fitted_fun(log2(mu))
        #The coordinates of the fitted curve
        mu_linspace = linspace(min(log2_m),max(log2_m))
        cv_fit = fitted_fun(mu_linspace)
        return score, mu_linspace, cv_fit , params
def feature_selection(data, thrs, verbose=False):
    """Rank genes by excess noise (CV above the fitted CV-vs-mean trend)
    and return the indexes of the top `thrs` genes.

    Falls back to returning all gene indexes (unsorted) when `thrs` exceeds
    the number of genes or when the noise model cannot be fitted.
    """
    n_genes = data.shape[0]
    if thrs >= n_genes:
        if verbose:
            print ("Trying to select %i features but only %i genes available." %( thrs, data.shape[0]))
            print ("Skipping feature selection")
        return arange(n_genes)
    # detectability filter: at least 1 molecule in ~0.3% of the cells AND
    # at least 2 molecules in ~0.03% of the cells
    threeperK = int(ceil(3 * data.shape[1] / 1000.))
    zerotwoperK = int(floor(0.3 * data.shape[1] / 1000.))
    detected = (sum(data >= 1, 1) >= threeperK) & (sum(data >= 2, 1) >= zerotwoperK)
    ix_genes = arange(n_genes)[detected]
    # per-gene mean and coefficient of variation of the detected genes
    mu = data[ix_genes, :].mean(1)
    sigma = data[ix_genes, :].std(1, ddof=1)
    cv = sigma / mu
    try:
        score, mu_linspace, cv_fit, params = fit_CV(mu, cv, fit_method='SVR', verbose=verbose)
    except ImportError:
        print ("WARNING: Feature selection was skipped becouse scipy is required. Install scipy to run feature selection.")
        return arange(data.shape[0])
    # highest positive deviation from the fitted noise curve first
    return ix_genes[argsort(score)[::-1]][:thrs]
def usage_quick():
    """Print the one-line synopsis shown when backSPIN is invoked without arguments."""
    synopsis = '''usage: backSPIN [-hbv] [-i inputfile] [-o outputfolder] [-d int] [-f int] [-t int] [-s float] [-T int] [-S float] [-g int] [-c int] [-k float] [-r float]
manual: backSPIN -h
'''
    print (synopsis)
def usage():
    """Print the full command-line manual for the backSPIN tool."""
    help_text='''
backSPIN commandline tool
-------------------------
The options are as follows:
-i [inputfile]
--input=[inputfile]
Path of the cef formatted tab delimited file.
Rows should be genes and columns single cells/samples.
For further information on the cef format visit:
https://github.com/linnarsson-lab/ceftools
-o [outputfile]
--output=[outputfile]
The name of the file to which the output will be written
-d [int]
Depth/Number of levels: The number of nested splits that will be tried by the algorithm
-t [int]
Number of the iterations used in the preparatory SPIN.
Defaults to 10
-f [int]
Feature selection is performed before BackSPIN. Argument controls how many genes are seleceted.
Selection is based on expected noise (a curve fit to the CV-vs-mean plot).
-s [float]
Controls the decrease rate of the width parameter used in the preparatory SPIN.
Smaller values will increase the number of SPIN iterations and result in higher
precision in the first step but longer execution time.
Defaults to 0.1
-T [int]
Number of the iterations used for every width parameter.
Does not apply on the first run (use -t instead)
Defaults to 8
-S [float]
Controls the decrease rate of the width parameter.
Smaller values will increase the number of SPIN iterations and result in higher
precision but longer execution time.
Does not apply on the first run (use -s instead)
Defaults to 0.3
-g [int]
Minimal number of genes that a group must contain for splitting to be allowed.
Defaults to 2
-c [int]
Minimal number of cells that a group must contain for splitting to be allowed.
Defaults to 2
-k [float]
Minimum score that a breaking point has to reach to be suitable for splitting.
Defaults to 1.15
-r [float]
If the difference between the average expression of two groups is lower than threshold the algorythm
uses higly correlated genes to assign the gene to one of the two groups
Defaults to 0.2
-b [axisvalue]
Run normal SPIN instead of backSPIN.
Normal spin accepts the parameters -T -S
An axis value 0 to only sort genes (rows), 1 to only sort cells (columns) or 'both' for both
must be passed
-v
Verbose. Print to the stdoutput extra details of what is happening
'''
    print(help_text)
if __name__ == '__main__':
    # Command-line entry point: parse the options, load the CEF input file,
    # run backSPIN (or plain SPIN when -b is given) and write the sorted
    # CEF output file.
    print("")
    # default arguments (each flag noted next to its variable)
    input_path = None
    outfiles_path = None
    numLevels=2 # -d
    feature_fit = False # -f
    feature_genes = 2000
    first_run_iters=10 # -t
    first_run_step=0.1 # -s
    runs_iters=8 # -T
    runs_step=0.3 # -S
    split_limit_g=2 # -g
    split_limit_c=2 # -c
    stop_const = 1.15 # -k
    low_thrs=0.2 # -r
    normal_spin = False #-b
    normal_spin_axis = 'both'
    verbose=False # -v

    optlist, args = getopt.gnu_getopt(sys.argv[1:], "hvi:o:f:d:t:s:T:S:g:c:k:r:b:", ["help", "input=","output="])

    # called with no arguments at all: show the quick synopsis and exit
    if optlist== [] and args == []:
        usage_quick()
        sys.exit()
    for opt, a in optlist:
        if opt in ("-h", "--help"):
            usage()
            sys.exit()
        elif opt in ('-i', '--input'):
            input_path = a
        elif opt in ("-o", "--output"):
            outfiles_path = a
        elif opt == '-d':
            numLevels = int(a)
        elif opt == '-f':
            feature_fit = True
            if a != '':
                feature_genes = int(a)
        elif opt == '-t':
            first_run_iters = int(a)
        elif opt == '-s':
            first_run_step = float(a)
        elif opt == '-T':
            runs_iters = int(a)
        elif opt == '-S':
            runs_step = float(a)
        elif opt == '-g':
            split_limit_g = int(a)
        elif opt == '-c':
            split_limit_c = int(a)
        elif opt == '-k':
            stop_const = float(a)
        elif opt == '-r':
            low_thrs = float(a)
        elif opt == '-v':
            verbose = True
        elif opt == '-b':
            normal_spin = True
            # -b takes 0 (sort genes only), 1 (sort cells only) or 'both'
            if a != '':
                if a == 'both':
                    normal_spin_axis = a
                else:
                    normal_spin_axis = int(a)
        else:
            assert False, "%s option is not supported" % opt

    # input and output paths are mandatory
    if input_path == None:
        print ('No input file was provided.\nYou need to specify an input file\n(e.g. backSPIN -i path/to/your/file/foo.cef)\n')
        sys.exit()
    if outfiles_path == None:
        print ('No output file was provided.\nYou need to specify an output file\n(e.g. backSPIN -o path/to/your/file/bar.cef)\n')
        sys.exit()

    try:
        if verbose:
            print ('Loading file.')
        input_cef = CEF_obj()
        input_cef.readCEF(input_path)
        data = array(input_cef.matrix)

        if feature_fit:
            # keep only the most variable genes before clustering
            if verbose:
                print ("Performing feature selection")
            ix_features = feature_selection(data, feature_genes, verbose=verbose)
            if verbose:
                print ("Selected %i genes" % len(ix_features))
            data = data[ix_features, :]
            input_cef.matrix = data.tolist()
            input_cef.row_attr_values = atleast_2d( array( input_cef.row_attr_values ))[:,ix_features].tolist()
            input_cef.update()

        # log-transform and zero-center each gene before sorting
        data = log2(data+1)
        data = data - data.mean(1)[:,newaxis]
        if data.shape[0] <= 3 and data.shape[1] <= 3:
            print ('Input file is not correctly formatted.\n')
            sys.exit()
    except Exception as err:
        import traceback
        print ('There was an error')
        print (traceback.format_exc())
        print ('Error occurred in parsing the input file.')
        print ('Please check that your input file is a correctly formatted cef file.\n')
        sys.exit()

    if normal_spin == False:
        # full backSPIN run: biclustering with nested splits
        print ('backSPIN started\n----------------\n')
        print ('Input file:\n%s\n' % input_path)
        print ('Output file:\n%s\n' % outfiles_path)
        print ('numLevels: %i\nfirst_run_iters: %i\nfirst_run_step: %.3f\nruns_iters: %i\nruns_step: %.3f\nsplit_limit_g: %i\nsplit_limit_c: %i\nstop_const: %.3f\nlow_thrs: %.3f\n' % (numLevels, first_run_iters, first_run_step, runs_iters,\
            runs_step, split_limit_g, split_limit_c, stop_const, low_thrs))
        results = backSPIN(data, numLevels, first_run_iters, first_run_step, runs_iters, runs_step,\
            split_limit_g, split_limit_c, stop_const, low_thrs, verbose)
        sys.stdout.flush()
        print ('\nWriting output.\n')
        # copy headers/attributes reordered by the computed permutations,
        # and add one group-label attribute per depth level
        output_cef = CEF_obj()
        for h_name, h_val in zip( input_cef.header_names, input_cef.header_values):
            output_cef.add_header(h_name, h_val )
        for c_name, c_val in zip( input_cef.col_attr_names, input_cef.col_attr_values):
            output_cef.add_col_attr(c_name, array(c_val)[results.cells_order])
        for r_name, r_val in zip( input_cef.row_attr_names, input_cef.row_attr_values):
            output_cef.add_row_attr(r_name, array(r_val)[results.genes_order])
        for level, groups in enumerate( results.genes_gr_level.T ):
            output_cef.add_row_attr('Level_%i_group' % level, [int(el) for el in groups])
        for level, groups in enumerate( results.cells_gr_level.T ):
            output_cef.add_col_attr('Level_%i_group' % level, [int(el) for el in groups])
        output_cef.set_matrix(array(input_cef.matrix)[results.genes_order,:][:,results.cells_order])
        # write integers when the first and last matrix rows contain no floats
        if sum(type(i)==float for i in input_cef.matrix[0]) + sum(type(i)==float for i in input_cef.matrix[-1]) == 0:
            fmt = '%i'
        else:
            fmt ='%.6g'
        output_cef.writeCEF( outfiles_path, matrix_str_fmt=fmt )
    else:
        # plain SPIN run: only reorder rows and/or columns, no splitting
        print ('normal SPIN started\n----------------\n')
        print ('Input file:\n%s\n' % input_path)
        print ('Output file:\n%s\n' % outfiles_path)
        results = SPIN(data, widlist=runs_step, iters=runs_iters, axis=normal_spin_axis, verbose=verbose)
        print ('\nWriting output.\n')
        output_cef = CEF_obj()
        for h_name, h_val in zip( input_cef.header_names, input_cef.header_values):
            output_cef.add_header(h_name, h_val )
        # with axis='both', results is a (row_order, col_order) tuple;
        # otherwise it is a single permutation for the chosen axis
        if normal_spin_axis == 'both':
            for c_name, c_val in zip( input_cef.col_attr_names, input_cef.col_attr_values):
                output_cef.add_col_attr(c_name, array(c_val)[results[1]])
            for r_name, r_val in zip( input_cef.row_attr_names, input_cef.row_attr_values):
                output_cef.add_row_attr(r_name, array(r_val)[results[0]])
            output_cef.set_matrix(array(input_cef.matrix)[results[0],:][:,results[1]])
        if normal_spin_axis == 0:
            for r_name, r_val in zip( input_cef.row_attr_names, input_cef.row_attr_values):
                output_cef.add_row_attr(r_name, array(r_val)[results])
            output_cef.set_matrix(array(input_cef.matrix)[results,:])
        if normal_spin_axis == 1:
            for c_name, c_val in zip( input_cef.col_attr_names, input_cef.col_attr_values):
                output_cef.add_col_attr(c_name, array(c_val)[results])
            output_cef.set_matrix(array(input_cef.matrix)[:,results])
        output_cef.writeCEF( outfiles_path )
| linnarsson-lab/BackSPIN | backspinpy/backSPIN.py | Python | bsd-2-clause | 37,358 | [
"VisIt"
] | 2a15555e3ce55c79944921f232e19d79e06cc1d947e90f99581bd846195dbb36 |
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
from pyscf.pbc import gto, scf
# Two-atom carbon cell (diamond-like fcc lattice), coordinates in Bohr ('B').
# Very tight `precision` so that the rcut sweep below is the dominant error source.
cell = gto.M(atom='''
C 4.826006352031 3.412501814582 8.358888185226
C 0.689429478862 0.487500259226 1.194126883604
''',
             a='''
4.136576868, 0.000000000, 2.388253772
1.378858962, 3.900002074, 2.388253772
0.000000000, 0.000000000, 4.776507525
''',
             unit='B',
             precision=1e-14,
             basis='gth-tzv2p',
             pseudo='gth-lda',
             mesh=[15]*3,
             verbose=0)
class KnownValues(unittest.TestCase):
    """Verify that bas_rcut-derived cutoffs bound the lattice-sum error."""

    def test_rcut(self):
        """Sweep rcut over a range of precisions and compare the kinetic and
        overlap integrals against a high-accuracy reference computed with the
        default (tightest) cutoff."""
        kpts = cell.make_kpts([2,2,2])

        def intor(name):
            # Periodic one-electron integral at every k-point, as one ndarray.
            return numpy.asarray(cell.pbc_intor(name, hermi=1, kpts=kpts))

        t0 = intor('int1e_kin_sph')
        s0 = intor('int1e_ovlp_sph')
        for i in range(1, 10):
            prec = 1e-13 * 10**i
            # rcut must cover the most diffuse shell at the requested precision.
            cell.rcut = max(cell.bas_rcut(ib, prec) for ib in range(cell.nbas))
            t1 = intor('int1e_kin_sph')
            s1 = intor('int1e_ovlp_sph')
            #print prec, cell.rcut, abs(t1-t0).max(), abs(s1-s0).max()
            print(prec, 'error = ', abs(t1-t0).max(), abs(s1-s0).max())
            self.assertTrue(abs(t1-t0).max() < prec*1e-0)
            self.assertTrue(abs(s1-s0).max() < prec*1e-1)
if __name__ == '__main__':
    # Entry point: run the rcut consistency checks above.
    # Fixed message typo: "errorsin" -> "errors in".
    print("Test rcut and the errors in pbc.gto.cell")
    unittest.main()
| gkc1000/pyscf | pyscf/pbc/gto/test/test_rcut.py | Python | apache-2.0 | 1,980 | [
"PySCF"
] | f232f42a6b21ef2558a36fa31b300f8ed63d5c0e8a70e15891ea4ec39d9747f0 |
# coding=utf-8
from __future__ import unicode_literals
from .. import Provider as PersonProvider
class Provider(PersonProvider):
    """Portuguese (pt_PT) person-name provider.

    Supplies first names, surnames and full-name format patterns used by
    faker to generate realistic Portuguese names.
    """

    # Full-name templates.  The plain "{{first_name}} {{last_name}}" entry is
    # repeated to weight it more heavily: random_element picks uniformly, so
    # duplicates raise the probability of the simple two-part name.
    formats = (
        '{{first_name}} {{last_name}}',
        '{{first_name}} {{last_name}}',
        '{{first_name}} {{last_name}}',
        '{{first_name}} {{last_name}}',
        '{{first_name}} {{last_name}}',
        '{{first_name}} {{last_name}}',
        '{{first_name}} {{last_name}}',
        '{{first_name}} {{prefix}} {{last_name}}',
        '{{first_name}} {{last_name}}-{{last_name}}',
        '{{first_name}}-{{first_name}} {{last_name}}',
    )

    # Common Portuguese given names (mixed gender, alphabetical).
    first_names = (
        'Adriana', 'Afonso', 'Alex', 'Alexandra', 'Alexandre', 'Alice',
        'Alícia', 'Amélia', 'Ana', 'Andreia', 'André', 'Anita', 'António',
        'Ariana', 'Artur', 'Beatriz', 'Benedita', 'Benjamim', 'Bernardo',
        'Bianca', 'Brian', 'Bruna', 'Bruno', 'Bryan', 'Bárbara', 'Caetana',
        'Camila', 'Carlos', 'Carlota', 'Carminho', 'Carolina', 'Catarina',
        'Clara', 'Cláudio', 'Constança', 'Cristiano', 'César', 'Daniel',
        'Daniela', 'David', 'Denis', 'Diana', 'Diego', 'Dinis', 'Diogo',
        'Duarte', 'Débora', 'Edgar', 'Eduarda', 'Eduardo', 'Ema', 'Emanuel',
        'Emma', 'Emília', 'Enzo', 'Erica', 'Erika', 'Eva', 'Fabiana',
        'Fernando', 'Filipa', 'Filipe', 'Flor', 'Francisca', 'Francisco',
        'Frederico', 'Fábio', 'Gabriel', 'Gabriela', 'Gaspar', 'Gil', 'Gonçalo',
        'Guilherme', 'Gustavo', 'Helena', 'Henrique', 'Hugo', 'Iara', 'Igor',
        'Inês', 'Irina', 'Isaac', 'Isabel', 'Isabela', 'Ivan', 'Ivo', 'Jaime',
        'Joana', 'Joaquim', 'Joel', 'Jorge', 'José', 'João', 'Juliana',
        'Jéssica', 'Júlia', 'Kelly', 'Kevin', 'Kyara', 'Kévim', 'Lara',
        'Larissa', 'Laura', 'Leandro', 'Leonardo', 'Leonor', 'Letícia', 'Lia',
        'Lisandro', 'Lorena', 'Lourenço', 'Luana', 'Luca', 'Lucas', 'Luciana',
        'Luna', 'Luís', 'Luísa', 'Lúcia', 'Madalena', 'Mafalda', 'Manuel',
        'Mara', 'Marco', 'Marcos', 'Margarida', 'Maria', 'Mariana', 'Marta',
        'Martim', 'Mateus', 'Matias', 'Matilde', 'Mauro', 'Melissa', 'Mia',
        'Micael', 'Miguel', 'Miriam', 'Márcio', 'Mário', 'Mélanie', 'Naiara',
        'Nair', 'Nelson', 'Nicole', 'Noa', 'Noah', 'Nuno', 'Nádia', 'Núria',
        'Patrícia', 'Paulo', 'Pedro', 'Petra', 'Pilar', 'Rafael', 'Rafaela',
        'Raquel', 'Renata', 'Renato', 'Ricardo', 'Rita', 'Rodrigo', 'Rui',
        'Rúben', 'Salomé', 'Salvador', 'Samuel', 'Sandro', 'Santiago', 'Sara',
        'Sebastião', 'Simão', 'Sofia', 'Soraia', 'Sérgio', 'Tatiana', 'Teresa',
        'Tiago', 'Tomás', 'Tomé', 'Valentim', 'Valentina', 'Vasco', 'Vera',
        'Vicente', 'Victória', 'Violeta', 'Vitória', 'Vítor', 'William',
        'Wilson', 'Xavier', 'Yara', 'Yasmin', 'Álvaro', 'Ângela', 'Ângelo',
        'Érica', 'Íris',
    )

    # Common Portuguese surnames (alphabetical).
    last_names = (
        'Abreu', 'Almeida', 'Alves', 'Amaral', 'Amorim', 'Andrade', 'Anjos',
        'Antunes', 'Araújo', 'Assunção', 'Azevedo', 'Baptista', 'Barbosa',
        'Barros', 'Batista', 'Borges', 'Branco', 'Brito', 'Campos', 'Cardoso',
        'Carneiro', 'Carvalho', 'Castro', 'Coelho', 'Correia', 'Costa', 'Cruz',
        'Cunha', 'Domingues', 'Esteves', 'Faria', 'Fernandes', 'Ferreira',
        'Figueiredo', 'Fonseca', 'Freitas', 'Garcia', 'Gaspar', 'Gomes',
        'Gonçalves', 'Guerreiro', 'Henriques', 'Jesus', 'Leal', 'Leite', 'Lima',
        'Lopes', 'Loureiro', 'Lourenço', 'Macedo', 'Machado', 'Magalhães',
        'Maia', 'Marques', 'Martins', 'Matias', 'Matos', 'Melo', 'Mendes',
        'Miranda', 'Monteiro', 'Morais', 'Moreira', 'Mota', 'Moura',
        'Nascimento', 'Neto', 'Neves', 'Nogueira', 'Nunes', 'Oliveira',
        'Pacheco', 'Paiva', 'Pereira', 'Pinheiro', 'Pinho', 'Pinto', 'Pires',
        'Ramos', 'Reis', 'Ribeiro', 'Rocha', 'Rodrigues', 'Santos', 'Silva',
        'Simões', 'Soares', 'Sousa', 'Sá', 'Tavares', 'Teixeira', 'Torres',
        'Valente', 'Vaz', 'Vicente', 'Vieira',
    )

    # Surname connectives ("de/da/do" as in "João da Silva").
    prefixes = ('de', 'da', 'do')

    def prefix(self):
        """Return a random surname connective particle."""
        return self.random_element(self.prefixes)
| deanishe/alfred-fakeum | src/libs/faker/providers/person/pt_PT/__init__.py | Python | mit | 4,138 | [
"Brian"
] | 287bb585bf74ba03e5c48e59d6161e2f08b40e423195a424f637e25f787f043b |
import os
import sys
import time
import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.cuda.basic_ops import gpu_contiguous
# implementing Gaussian observation layer
# each output unit encodes P(o | s,a) ~ N(o | mu(s,a) , sigma(s,a) )
class GaussianObs(object):
    """Gaussian observation layer with diagonal covariance.

    Builds a symbolic Theano expression for the per-label log-likelihood
    log N(o | mu(s, a), diag(std(s, a)^2)) of the observed feature vectors,
    one column per batch element.
    """

    """ Initialize from xml definition node """
    def __init__(self,layer_def,inputs,inputs_shape,rs,clone_from=None):
        # layer_def : XML node carrying <numlabels>, <numactions>, <featdim>,
        #             <covariance> (only "diag" is supported, see assert).
        # inputs : [actions, features] pair of symbolic variables.
        # inputs_shape : shapes of the inputs; only the batch size is read here.
        # rs : unused in this constructor (kept for interface parity -- TODO confirm).
        # clone_from : optional layer to share mu/std parameters with.
        #the inputs are expected to be [actions,features]
        numLabels = int(layer_def.find("numlabels").text)
        numActions = int(layer_def.find("numactions").text)
        featdim = int(layer_def.find("featdim").text)
        covType = layer_def.find("covariance").text
        assert(covType == "diag")
        actions,feats = inputs
        #assert(feats.output_shape[0] == featdim)
        #initialize weights
        # NOTE(review): rng is created but never used below -- dead local?
        rng = np.random.RandomState(seed=int(time.time()))
        self.inputs = inputs
        _,batch_size = inputs_shape[0]
        #the parameters of this model are a matrix of size mu = n_in x numUnits, var= n_in x numUnits
        # initialize mean and variance
        if clone_from!=None:
            # Weight sharing: reuse the source layer's parameters directly.
            self.mu = clone_from.mu
            self.std = clone_from.std
        else:
            values = np.ones([numActions ,numLabels ,featdim ],dtype=theano.config.floatX)
            # NOTE(review): both shared variables are built from the *same*
            # `values` array with borrow=True, so mu and std may alias one
            # buffer until they are re-assigned -- confirm this is intended.
            self.mu = theano.shared(value=values, name='mu', borrow=True)
            self.std = theano.shared(value=values, name='std', borrow=True)
        #calculate output
        #input : featdim x batchsize
        #mu: numActions x numLabels x featdim
        #std: numActions x numLabels x featdim
        #batchsize x numLabels x featdim
        # Standardized residuals (o - mu) / std per label and batch element.
        mean_subtracted = (self.mu[actions,:,:] - feats.dimshuffle(1,'x',0))/ self.std[actions,:,:]# output: batchsize x numLabels x featdim
        # Quadratic term of the diagonal-Gaussian log-density.
        Xsq = -0.5 * (mean_subtracted * mean_subtracted).sum(axis=2)
        # Full log-density: normalization constant + log|Sigma|^(-1/2) + quadratic,
        # transposed to numLabels x batchsize.
        temp = -0.5*featdim*np.log(2.*np.pi)-T.sum(T.log(self.std[actions,:,:]),axis=2).dimshuffle(1,0)+Xsq.dimshuffle(1,0)
        """
        mean_subbed = (means[actions_np,:,:] - x.T[:,np.newaxis,:]) / stds[actions_np,:,:]
        Xsq = -0.5 * (mean_subbed*mean_subbed).sum(axis=2)
        coeff = -0.5 * 16384 * np.log(2 * np.pi) - np.sum(np.log(stds[actions_np,:,:]),axis=2).T + Xsq.T
        """
        #self.output = T.exp(temp / 1000.)
        # Output is the raw log-likelihood (not exponentiated).
        self.output = temp
        # parameters of the model
        self.inputs_shape = inputs_shape
        self.output_shape = [numLabels,batch_size]
        self.params = [self.mu,self.std]
        #self.params = []
| mohsenmalmir/DeepLearningStack | DeepLearningStack/RL/GaussianObs.py | Python | mit | 2,677 | [
"Gaussian"
] | 449b055d2335934162a44b012a4931ce350002fb440e7154a694f31252dcf92e |
import numpy as np
try:
import netCDF4 as netCDF
except:
import netCDF3 as netCDF
import pyroms
def remap(src_array, remap_file, src_grad1=None, src_grad2=None, \
          src_grad3=None, spval=1e37, verbose=False):
    '''
    remap based on addresses and weights computed in a setup phase

    src_array  : 2-D or 3-D source field (levels on the first axis for 3-D).
    remap_file : SCRIP-format netCDF file holding addresses and weights.
    src_grad1/2/3 : optional source gradients; supplying src_grad1 switches
                    to second-order (conservative) or bicubic remapping.
    spval      : fill value applied where the destination mask is 0.
    Returns a masked destination array shaped from dst_grid_dims
    (with a leading level axis for 3-D input).
    '''
    # get info from remap_file
    data = netCDF.Dataset(remap_file, 'r')
    title = data.title
    map_method = data.map_method
    normalization = data.normalization
    src_grid_name = data.source_grid
    dst_grid_name = data.dest_grid
    src_grid_size = len(data.dimensions['src_grid_size'])
    dst_grid_size = len(data.dimensions['dst_grid_size'])
    num_links = len(data.dimensions['num_links'])
    src_grid_dims = data.variables['src_grid_dims']
    dst_grid_dims = data.variables['dst_grid_dims']
    # get weights and addresses from remap_file
    map_wts = data.variables['remap_matrix'][:]
    dst_add = data.variables['dst_address'][:]
    src_add = data.variables['src_address'][:]
    # get destination mask
    dst_mask = data.variables['dst_grid_imask'][:]
    # remap from src grid to dst grid
    # Order of accuracy is inferred from whether gradients were supplied.
    if src_grad1 is not None:
        iorder = 2
    else:
        iorder = 1
    if verbose is True:
        print 'Reading remapping: ', title
        print 'From file: ', remap_file
        print ' '
        print 'Remapping between:'
        print src_grid_name
        print 'and'
        print dst_grid_name
        print 'Remapping method: ', map_method
    ndim = len(src_array.squeeze().shape)
    if (ndim == 2):
        # --- single horizontal slab ---
        tmp_dst_array = np.zeros((dst_grid_size))
        tmp_src_array = src_array.flatten()
        if iorder == 1:
            # first order remapping
            # insure that map_wts is a (num_links,4) array
            # (the Fortran kernel always expects 4 weight columns)
            tmp_map_wts = np.zeros((num_links,4))
            tmp_map_wts[:,0] = map_wts[:,0].copy()
            map_wts = tmp_map_wts
            pyroms.remapping.scrip.remap(tmp_dst_array, map_wts, \
                      dst_add, src_add, tmp_src_array)
        if iorder == 2:
            # second order remapping
            if map_method == 'conservative':
                # insure that map_wts is a (num_links,4) array
                tmp_map_wts = np.zeros((num_links,4))
                tmp_map_wts[:,0:2] = map_wts[:,0:2].copy()
                map_wts = tmp_map_wts
                tmp_src_grad1 = src_grad1.flatten()
                tmp_src_grad2 = src_grad2.flatten()
                pyroms.remapping.scrip.remap(tmp_dst_array, map_wts, \
                          dst_add, src_add, tmp_src_array, \
                          tmp_src_grad1, tmp_src_grad2)
            elif map_method == 'bicubic':
                # bicubic needs all three gradient fields
                tmp_src_grad1 = src_grad1.flatten()
                tmp_src_grad2 = src_grad2.flatten()
                tmp_src_grad3 = src_grad3.flatten()
                pyroms.remapping.scrip.remap(tmp_dst_array, map_wts, \
                          dst_add, src_add, tmp_src_array, \
                          tmp_src_grad1, tmp_src_grad2, \
                          tmp_src_grad3)
            else:
                raise ValueError, 'Unknow method'
        # mask dst_array
        idx = np.where(dst_mask == 0)
        tmp_dst_array[idx] = spval
        tmp_dst_array = np.ma.masked_values(tmp_dst_array, spval)
        # reshape
        dst_array = np.reshape(tmp_dst_array, (dst_grid_dims[1], \
                           dst_grid_dims[0]))
    elif (ndim == 3):
        # --- level-by-level remap of a 3-D field ---
        nlev = src_array.shape[0]
        dst_array = np.zeros((nlev, dst_grid_dims[1], dst_grid_dims[0]))
        # loop over vertical level
        for k in range(nlev):
            tmp_src_array = src_array[k,:,:].flatten()
            tmp_dst_array = np.zeros((dst_grid_size))
            if iorder == 1:
                # first order remapping
                # insure that map_wts is a (num_links,4) array
                tmp_map_wts = np.zeros((num_links,4))
                tmp_map_wts[:,0] = map_wts[:,0].copy()
                map_wts = tmp_map_wts
                pyroms.remapping.scrip.remap(tmp_dst_array, map_wts, \
                          dst_add, src_add, tmp_src_array)
            if iorder == 2:
                # second order remapping
                if map_method == 'conservative':
                    tmp_src_grad1 = src_grad1.flatten()
                    tmp_src_grad2 = src_grad2.flatten()
                    pyroms.remapping.scrip.remap(tmp_dst_array, map_wts, \
                              dst_add, src_add, tmp_src_array, \
                              tmp_src_grad1, tmp_src_grad2)
                elif map_method == 'bicubic':
                    tmp_src_grad1 = src_grad1.flatten()
                    tmp_src_grad2 = src_grad2.flatten()
                    tmp_src_grad3 = src_grad3.flatten()
                    pyroms.remapping.scrip.remap(tmp_dst_array, map_wts, \
                              dst_add, src_add, tmp_src_array, \
                              tmp_src_grad1, tmp_src_grad2, \
                              tmp_src_grad3)
                else:
                    raise ValueError, 'Unknow method'
            # mask dst_array
            idx = np.where(dst_mask == 0)
            tmp_dst_array[idx] = spval
            tmp_dst_array = np.ma.masked_values(tmp_dst_array, spval)
            # reshape
            dst_array[k,:,:] = np.reshape(tmp_dst_array, (dst_grid_dims[1], \
                                   dst_grid_dims[0]))
    else:
        raise ValueError, 'src_array must have two or three dimensions'
    # close data file
    data.close()
    return dst_array
| dcherian/pyroms | pyroms/pyroms/remapping/remap.py | Python | bsd-3-clause | 5,885 | [
"NetCDF"
] | f8699319188f276e692a132c31fe864500318174ebe892ce41de124acb494d0e |
"""This module contains the "Viz" objects
These objects represent the backend of all the visualizations that
Caravel can render.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import hashlib
import logging
import uuid
import zlib
from collections import OrderedDict, defaultdict
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
from flask import request
from flask_babel import lazy_gettext as _
from markdown import markdown
import simplejson as json
from six import string_types, PY3
from werkzeug.datastructures import ImmutableMultiDict, MultiDict
from werkzeug.urls import Href
from dateutil import relativedelta as rdelta
from caravel import app, utils, cache
from caravel.forms import FormFactory
from caravel.utils import flasher
config = app.config
class BaseViz(object):
    """All visualizations derive this base class"""
    viz_type = None
    verbose_name = "Base Viz"
    credits = ""
    is_timeseries = False
    # Form layout: sections of field names rendered in the explore view.
    fieldsets = ({
        'label': None,
        'fields': (
            'metrics', 'groupby',
        )
    },)
    form_overrides = {}

    def __init__(self, datasource, form_data, slice_=None):
        """Bind the viz to a datasource and validate/normalize the form data.

        :param datasource: table or druid datasource the viz queries
        :param form_data: request form fields (ImmutableMultiDict or dict)
        :param slice_: optional saved Slice this viz was loaded from
        """
        self.orig_form_data = form_data
        if not datasource:
            raise Exception("Viz is missing a datasource")
        self.datasource = datasource
        self.request = request
        self.viz_type = form_data.get("viz_type")
        self.slice = slice_
        # TODO refactor all form related logic out of here and into forms.py
        ff = FormFactory(self)
        form_class = ff.get_form()
        defaults = form_class().data.copy()
        previous_viz_type = form_data.get('previous_viz_type')
        if isinstance(form_data, ImmutableMultiDict):
            form = form_class(form_data)
        else:
            form = form_class(**form_data)
        data = form.data.copy()
        if not form.validate():
            # Surface validation errors via flash messages, but only for
            # interactive (non-json/async) requests.
            for k, v in form.errors.items():
                if not data.get('json') and not data.get('async'):
                    flasher("{}: {}".format(k, " ".join(v)), 'danger')
        if previous_viz_type != self.viz_type:
            # Viz type changed: only carry over fields the new form knows about.
            data = {
                k: form.data[k]
                for k in form_data.keys()
                if k in form.data}
        defaults.update(data)
        self.form_data = defaults
        self.query = ""
        self.form_data['previous_viz_type'] = self.viz_type
        self.token = self.form_data.get(
            'token', 'token_' + uuid.uuid4().hex[:8])
        self.metrics = self.form_data.get('metrics') or []
        self.groupby = self.form_data.get('groupby') or []
        self.reassignments()

    @classmethod
    def flat_form_fields(cls):
        """Return all field names from ``fieldsets`` as one flat tuple,
        unnesting tuple/list groups and dropping None placeholders."""
        l = set()
        for d in cls.fieldsets:
            for obj in d['fields']:
                if obj and isinstance(obj, (tuple, list)):
                    l |= {a for a in obj if a}
                elif obj:
                    l.add(obj)
        return tuple(l)

    def reassignments(self):
        # Hook for subclasses to rewrite form_data before querying.
        pass

    def get_url(self, for_cache_key=False, **kwargs):
        """Returns the URL for the viz

        :param for_cache_key: when getting the url as the identifier to hash
            for the cache key
        :type for_cache_key: boolean
        """
        d = self.orig_form_data.copy()
        if 'json' in d:
            del d['json']
        if 'action' in d:
            del d['action']
        d.update(kwargs)
        # Remove unchecked checkboxes because HTML is weird like that
        od = MultiDict()
        for key in sorted(d.keys()):
            if d[key] is False:
                del d[key]
            else:
                # Preserve repeated keys when the source is a MultiDict.
                if isinstance(d, MultiDict):
                    v = d.getlist(key)
                else:
                    v = d.get(key)
                if not isinstance(v, list):
                    v = [v]
                for item in v:
                    od.add(key, item)
        href = Href(
            '/caravel/explore/{self.datasource.type}/'
            '{self.datasource.id}/'.format(**locals()))
        if for_cache_key and 'force' in od:
            # 'force' must not change the cache identity of the payload.
            del od['force']
        return href(od)

    def get_df(self, query_obj=None):
        """Returns a pandas dataframe based on the query object"""
        if not query_obj:
            query_obj = self.query_obj()
        self.error_msg = ""
        self.results = None
        timestamp_format = None
        if self.datasource.type == 'table':
            dttm_col = self.datasource.get_col(query_obj['granularity'])
            if dttm_col:
                timestamp_format = dttm_col.python_date_format
        # The datasource here can be different backend but the interface is common
        self.results = self.datasource.query(**query_obj)
        self.query = self.results.query
        df = self.results.df
        # Transform the timestamp we received from database to pandas supported
        # datetime format. If no python_date_format is specified, the pattern will
        # be considered as the default ISO date format
        # If the datetime format is unix, the parse will use the corresponding
        # parsing logic.
        if df is None or df.empty:
            raise Exception("No data, review your incantations!")
        else:
            if 'timestamp' in df.columns:
                if timestamp_format == "epoch_s":
                    df.timestamp = pd.to_datetime(
                        df.timestamp, utc=False, unit="s")
                elif timestamp_format == "epoch_ms":
                    df.timestamp = pd.to_datetime(
                        df.timestamp, utc=False, unit="ms")
                else:
                    df.timestamp = pd.to_datetime(
                        df.timestamp, utc=False, format=timestamp_format)
                if self.datasource.offset:
                    # Apply the datasource's fixed timezone offset (hours).
                    df.timestamp += timedelta(hours=self.datasource.offset)
        df.replace([np.inf, -np.inf], np.nan)
        df = df.fillna(0)
        return df

    @property
    def form(self):
        return self.form_class(**self.form_data)

    @property
    def form_class(self):
        return FormFactory(self).get_form()

    def query_filters(self, is_having_filter=False):
        """Processes the filters for the query

        Collects up to 9 numbered flt_*/having_* triplets from the form plus
        dashboard-level 'extra_filters', returned as (col, op, value) tuples.
        """
        form_data = self.form_data
        # Building filters
        filters = []
        field_prefix = 'flt' if not is_having_filter else 'having'
        for i in range(1, 10):
            col = form_data.get(field_prefix + "_col_" + str(i))
            op = form_data.get(field_prefix + "_op_" + str(i))
            eq = form_data.get(field_prefix + "_eq_" + str(i))
            if col and op and eq is not None:
                filters.append((col, op, eq))
        # Extra filters (coming from dashboard)
        extra_filters = form_data.get('extra_filters')
        if extra_filters and not is_having_filter:
            extra_filters = json.loads(extra_filters)
            for slice_filters in extra_filters.values():
                for col, vals in slice_filters.items():
                    if col and vals:
                        if col in self.datasource.filterable_column_names:
                            filters += [(col, 'in', ",".join(vals))]
        return filters

    def query_obj(self):
        """Building a query object"""
        form_data = self.form_data
        groupby = form_data.get("groupby") or []
        metrics = form_data.get("metrics") or ['count']
        granularity = \
            form_data.get("granularity") or form_data.get("granularity_sqla")
        limit = int(form_data.get("limit", 0))
        row_limit = int(
            form_data.get("row_limit", config.get("ROW_LIMIT")))
        since = form_data.get("since", "1 year ago")
        from_dttm = utils.parse_human_datetime(since)
        if from_dttm > datetime.now():
            # Mirror a future 'since' back into the past by the same amount.
            from_dttm = datetime.now() - (from_dttm-datetime.now())
        until = form_data.get("until", "now")
        to_dttm = utils.parse_human_datetime(until)
        if from_dttm > to_dttm:
            flasher("The date range doesn't seem right.", "danger")
            from_dttm = to_dttm  # Making them identical to not raise
        # extras are used to query elements specific to a datasource type
        # for instance the extra where clause that applies only to Tables
        extras = {
            'where': form_data.get("where", ''),
            'having': form_data.get("having", ''),
            'having_druid': self.query_filters(True),
            'time_grain_sqla': form_data.get("time_grain_sqla", ''),
            'druid_time_origin': form_data.get("druid_time_origin", ''),
        }
        d = {
            'granularity': granularity,
            'from_dttm': from_dttm,
            'to_dttm': to_dttm,
            'is_timeseries': self.is_timeseries,
            'groupby': groupby,
            'metrics': metrics,
            'row_limit': row_limit,
            'filter': self.query_filters(),
            'timeseries_limit': limit,
            'extras': extras,
        }
        return d

    @property
    def cache_timeout(self):
        # Resolution order: slice -> datasource -> database -> global default.
        if self.slice and self.slice.cache_timeout:
            return self.slice.cache_timeout
        if self.datasource.cache_timeout:
            return self.datasource.cache_timeout
        if (
                hasattr(self.datasource, 'database') and
                self.datasource.database.cache_timeout):
            return self.datasource.database.cache_timeout
        return config.get("CACHE_DEFAULT_TIMEOUT")

    def get_json(self):
        """Handles caching around the json payload retrieval"""
        cache_key = self.cache_key
        payload = None
        if self.form_data.get('force') != 'true':
            payload = cache.get(cache_key)
        if payload:
            is_cached = True
            try:
                # Payloads are stored zlib-compressed (and utf-8 on py3).
                cached_data = zlib.decompress(payload)
                if PY3:
                    cached_data = cached_data.decode('utf-8')
                payload = json.loads(cached_data)
            except Exception as e:
                logging.error("Error reading cache")
                payload = None
            logging.info("Serving from cache")
        if not payload:
            is_cached = False
            cache_timeout = self.cache_timeout
            payload = {
                'cache_timeout': cache_timeout,
                'cache_key': cache_key,
                'csv_endpoint': self.csv_endpoint,
                'data': self.get_data(),
                'form_data': self.form_data,
                'json_endpoint': self.json_endpoint,
                'query': self.query,
                'standalone_endpoint': self.standalone_endpoint,
            }
            payload['cached_dttm'] = datetime.now().isoformat().split('.')[0]
            logging.info("Caching for the next {} seconds".format(
                cache_timeout))
            try:
                data = self.json_dumps(payload)
                if PY3:
                    data = bytes(data, 'utf-8')
                cache.set(
                    cache_key,
                    zlib.compress(data),
                    timeout=cache_timeout)
            except Exception as e:
                # cache.set call can fail if the backend is down or if
                # the key is too large or whatever other reasons
                logging.warning("Could not cache key {}".format(cache_key))
                logging.exception(e)
                cache.delete(cache_key)
        payload['is_cached'] = is_cached
        return self.json_dumps(payload)

    def json_dumps(self, obj):
        """Used by get_json, can be overridden to use specific switches"""
        return json.dumps(obj, default=utils.json_int_dttm_ser, ignore_nan=True)

    @property
    def data(self):
        """This is the data object serialized to the js layer"""
        content = {
            'csv_endpoint': self.csv_endpoint,
            'form_data': self.form_data,
            'json_endpoint': self.json_endpoint,
            'standalone_endpoint': self.standalone_endpoint,
            'token': self.token,
            'viz_name': self.viz_type,
            'column_formats': {
                m.metric_name: m.d3format
                for m in self.datasource.metrics
                if m.d3format
            },
        }
        return content

    def get_csv(self):
        # Export the query result; keep the index only when it is meaningful.
        df = self.get_df()
        include_index = not isinstance(df.index, pd.RangeIndex)
        return df.to_csv(index=include_index, encoding="utf-8")

    def get_data(self):
        # Subclasses shape the dataframe into their chart payload here.
        return []

    @property
    def json_endpoint(self):
        return self.get_url(json="true")

    @property
    def cache_key(self):
        # Cache identity = md5 of the canonical URL (force flag excluded).
        url = self.get_url(for_cache_key=True, json="true", force="false")
        return hashlib.md5(url.encode('utf-8')).hexdigest()

    @property
    def csv_endpoint(self):
        return self.get_url(csv="true")

    @property
    def standalone_endpoint(self):
        return self.get_url(standalone="true")

    @property
    def json_data(self):
        return json.dumps(self.data)
class TableViz(BaseViz):
    """A basic html table that is sortable and searchable"""
    viz_type = "table"
    verbose_name = _("Table View")
    credits = 'a <a href="https://github.com/airbnb/caravel">Caravel</a> original'
    fieldsets = ({
        'label': _("GROUP BY"),
        'description': _('Use this section if you want a query that aggregates'),
        'fields': ('groupby', 'metrics')
    }, {
        'label': _("NOT GROUPED BY"),
        'description': _('Use this section if you want to query atomic rows'),
        'fields': ('all_columns', 'order_by_cols'),
    }, {
        'label': _("Options"),
        'fields': (
            'table_timestamp_format',
            'row_limit',
            ('include_search', None),
        )
    })
    form_overrides = ({
        'metrics': {
            'default': [],
        },
    })
    is_timeseries = False

    def query_obj(self):
        """Build the query; aggregate mode and raw-columns mode are exclusive."""
        d = super(TableViz, self).query_obj()
        fd = self.form_data
        if fd.get('all_columns') and (fd.get('groupby') or fd.get('metrics')):
            raise Exception(
                "Choose either fields to [Group By] and [Metrics] or "
                "[Columns], not both")
        if fd.get('all_columns'):
            # Raw-row mode: select the listed columns with no aggregation.
            d['columns'] = fd.get('all_columns')
            d['groupby'] = []
        # order_by_cols entries are JSON-encoded (col, asc) pairs.
        d['orderby'] = [json.loads(t) for t in fd.get('order_by_cols', [])]
        return d

    def get_df(self, query_obj=None):
        df = super(TableViz, self).get_df(query_obj)
        if (
                self.form_data.get("granularity") == "all" and
                'timestamp' in df):
            # Time column is meaningless when granularity is 'all'.
            del df['timestamp']
        return df

    def get_data(self):
        # Payload: row records plus the ordered column list for the frontend.
        df = self.get_df()
        return dict(
            records=df.to_dict(orient="records"),
            columns=list(df.columns),
        )

    def json_dumps(self, obj):
        # ISO timestamps (not epoch ints) so the table renders readable dates.
        return json.dumps(obj, default=utils.json_iso_dttm_ser)
class PivotTableViz(BaseViz):
    """A pivot table view, define your rows, columns and metrics"""
    viz_type = "pivot_table"
    verbose_name = _("Pivot Table")
    credits = 'a <a href="https://github.com/airbnb/caravel">Caravel</a> original'
    is_timeseries = False
    fieldsets = ({
        'label': None,
        'fields': (
            'groupby',
            'columns',
            'metrics',
            'pandas_aggfunc',
        )
    },)

    def query_obj(self):
        """Validate the pivot inputs and query on the union of rows+columns."""
        d = super(PivotTableViz, self).query_obj()
        groupby = self.form_data.get('groupby')
        columns = self.form_data.get('columns')
        metrics = self.form_data.get('metrics')
        if not columns:
            columns = []
        if not groupby:
            groupby = []
        if not groupby:
            raise Exception("Please choose at least one \"Group by\" field ")
        if not metrics:
            raise Exception("Please choose at least one metric")
        if (
                any(v in groupby for v in columns) or
                any(v in columns for v in groupby)):
            raise Exception("groupby and columns can't overlap")
        # Pivoting happens client-side in get_df; the query groups by both.
        d['groupby'] = list(set(groupby) | set(columns))
        return d

    def get_df(self, query_obj=None):
        df = super(PivotTableViz, self).get_df(query_obj)
        if (
                self.form_data.get("granularity") == "all" and
                'timestamp' in df):
            del df['timestamp']
        # margins=True adds the "All" subtotal row/column.
        df = df.pivot_table(
            index=self.form_data.get('groupby'),
            columns=self.form_data.get('columns'),
            values=self.form_data.get('metrics'),
            aggfunc=self.form_data.get('pandas_aggfunc'),
            margins=True,
        )
        return df

    def get_data(self):
        # Rendered server-side to HTML with bootstrap table classes.
        return self.get_df().to_html(
            na_rep='',
            classes=(
                "dataframe table table-striped table-bordered "
                "table-condensed table-hover").split(" "))
class MarkupViz(BaseViz):
    """Free-form widget rendered from raw HTML or Markdown code."""
    viz_type = "markup"
    verbose_name = _("Markup")
    fieldsets = ({
        'label': None,
        'fields': ('markup_type', 'code')
    },)
    is_timeseries = False

    def rendered(self):
        """Render the user-supplied code according to its markup type.

        Returns the raw string for "html", the converted HTML for
        "markdown", and None for any other type.
        """
        kind = self.form_data.get("markup_type")
        code = self.form_data.get("code", '')
        if kind == "html":
            return code
        if kind == "markdown":
            return markdown(code)

    def get_data(self):
        # Payload is just the rendered HTML fragment.
        return {'html': self.rendered()}
class SeparatorViz(MarkupViz):
    """Use to create section headers in a dashboard, similar to `Markup`"""
    viz_type = "separator"
    verbose_name = _("Separator")
    # Pre-fill the markup editor with a section-header template; rendering
    # behavior is inherited unchanged from MarkupViz.
    form_overrides = {
        'code': {
            'default': (
                "####Section Title\n"
                "A paragraph describing the section"
                "of the dashboard, right before the separator line "
                "\n\n"
                "---------------"
            ),
        }
    }
class WordCloudViz(BaseViz):
    """Build a colorful word cloud

    Uses the nice library at:
    https://github.com/jasondavies/d3-cloud
    """
    viz_type = "word_cloud"
    verbose_name = _("Word Cloud")
    is_timeseries = False
    fieldsets = ({
        'label': None,
        'fields': (
            'series', 'metric', 'limit',
            ('size_from', 'size_to'),
            'rotation',
        )
    },)

    def query_obj(self):
        """A word cloud is one metric grouped by one series column."""
        query = super(WordCloudViz, self).query_obj()
        fd = self.form_data
        query['metrics'] = [fd.get('metric')]
        query['groupby'] = [fd.get('series')]
        return query

    def get_data(self):
        fd = self.form_data
        df = self.get_df()
        # Select series/metric in that order, then rename to the
        # text/size schema that the d3-cloud frontend expects.
        df = df[[fd.get('series'), fd.get('metric')]]
        df.columns = ['text', 'size']
        return df.to_dict(orient="records")
class TreemapViz(BaseViz):
    """Tree map visualisation for hierarchical data."""
    viz_type = "treemap"
    verbose_name = _("Treemap")
    credits = '<a href="https://d3js.org">d3.js</a>'
    is_timeseries = False
    fieldsets = ({
        'label': None,
        'fields': (
            'metrics',
            'groupby',
        ),
    }, {
        'label': _('Chart Options'),
        'fields': (
            'treemap_ratio',
            'number_format',
        )
    },)

    def get_df(self, query_obj=None):
        # Index by the groupby columns so _nest can recurse on index levels.
        df = super(TreemapViz, self).get_df(query_obj)
        df = df.set_index(self.form_data.get("groupby"))
        return df

    def _nest(self, metric, df):
        """Recursively convert a (multi-)indexed column into the nested
        {name, children|value} structure d3's treemap layout consumes."""
        nlevels = df.index.nlevels
        if nlevels == 1:
            # Leaf level: emit name/value pairs.
            result = [{"name": n, "value": v}
                      for n, v in zip(df.index, df[metric])]
        else:
            # Recurse one index level down per outer key.
            result = [{"name": l, "children": self._nest(metric, df.loc[l])}
                      for l in df.index.levels[0]]
        return result

    def get_data(self):
        # One top-level tree per metric.
        df = self.get_df()
        chart_data = [{"name": metric, "children": self._nest(metric, df)}
                      for metric in df.columns]
        return chart_data
class CalHeatmapViz(BaseViz):
    """Calendar heatmap."""
    viz_type = "cal_heatmap"
    # Fixed user-facing label typo: "Calender" -> "Calendar".
    verbose_name = _("Calendar Heatmap")
    credits = (
        '<a href=https://github.com/wa0x6e/cal-heatmap>cal-heatmap</a>')
    is_timeseries = True
    fieldsets = ({
        'label': None,
        'fields': (
            'metric',
            'domain_granularity',
            'subdomain_granularity',
        ),
    },)

    def get_df(self, query_obj=None):
        df = super(CalHeatmapViz, self).get_df(query_obj)
        return df

    def get_data(self):
        """Shape the timeseries into the cal-heatmap payload: a map of unix
        timestamps to values, plus the domain/subdomain layout and the number
        of domain units spanned by the [since, until] range."""
        df = self.get_df()
        form_data = self.form_data
        df.columns = ["timestamp", "metric"]
        # pandas Timestamp.value is in nanoseconds; cal-heatmap keys are
        # unix seconds (stringified).
        timestamps = {str(obj["timestamp"].value / 10**9):
                      obj.get("metric") for obj in df.to_dict("records")}
        start = utils.parse_human_datetime(form_data.get("since"))
        end = utils.parse_human_datetime(form_data.get("until"))
        domain = form_data.get("domain_granularity")
        diff_delta = rdelta.relativedelta(end, start)
        diff_secs = (end - start).total_seconds()
        # Count the number of domain units covered by the range (+1 so that
        # partially covered units at the edges are still drawn).
        if domain == "year":
            range_ = diff_delta.years + 1
        elif domain == "month":
            range_ = diff_delta.years * 12 + diff_delta.months + 1
        elif domain == "week":
            # 53 is the maximum number of ISO weeks in a year.
            range_ = diff_delta.years * 53 + diff_delta.weeks + 1
        elif domain == "day":
            range_ = diff_secs // (24*60*60) + 1
        else:
            # Default/fallback domain: hours.
            range_ = diff_secs // (60*60) + 1
        return {
            "timestamps": timestamps,
            "start": start,
            "domain": domain,
            "subdomain": form_data.get("subdomain_granularity"),
            "range": range_,
        }

    def query_obj(self):
        # A calendar heatmap is a single-metric timeseries.
        qry = super(CalHeatmapViz, self).query_obj()
        qry["metrics"] = [self.form_data["metric"]]
        return qry
class NVD3Viz(BaseViz):
    """Base class for all nvd3 vizs"""
    # Shared attribution/default flags for every NVD3-rendered chart;
    # subclasses override viz_type/verbose_name.
    credits = '<a href="http://nvd3.org/">NVD3.org</a>'
    viz_type = None
    verbose_name = "Base NVD3 Viz"
    is_timeseries = False
class BoxPlotViz(NVD3Viz):
    """Box plot viz from ND3"""
    viz_type = "box_plot"
    verbose_name = _("Box Plot")
    sort_series = False
    is_timeseries = False
    fieldsets = ({
        'label': None,
        'fields': (
            'metrics',
            'groupby', 'limit',
        ),
    }, {
        'label': _('Chart Options'),
        'fields': (
            'whisker_options',
        )
    },)

    def get_df(self, query_obj=None):
        """Aggregate each group into quartiles, whiskers and outliers.

        The whisker definition (Tukey, min/max, or percentile pair) is chosen
        via the 'whisker_options' form field; the matching whisker_high/low
        closures are then applied per group through a single groupby().agg().
        """
        form_data = self.form_data
        df = super(BoxPlotViz, self).get_df(query_obj)
        df = df.fillna(0)
        # conform to NVD3 names
        def Q1(series):  # need to be named functions - can't use lambdas
            return np.percentile(series, 25)
        def Q3(series):
            return np.percentile(series, 75)
        whisker_type = form_data.get('whisker_options')
        if whisker_type == "Tukey":
            def whisker_high(series):
                # highest point within Q3 + 1.5 * IQR
                upper_outer_lim = Q3(series) + 1.5 * (Q3(series) - Q1(series))
                series = series[series <= upper_outer_lim]
                return series[np.abs(series - upper_outer_lim).argmin()]
            def whisker_low(series):
                lower_outer_lim = Q1(series) - 1.5 * (Q3(series) - Q1(series))
                # find the closest value above the lower outer limit
                series = series[series >= lower_outer_lim]
                return series[np.abs(series - lower_outer_lim).argmin()]
        elif whisker_type == "Min/max (no outliers)":
            def whisker_high(series):
                return series.max()
            def whisker_low(series):
                return series.min()
        elif " percentiles" in whisker_type:
            # e.g. "5/95 percentiles" -> low=5, high=95
            low, high = whisker_type.replace(" percentiles", "").split("/")
            def whisker_high(series):
                return np.percentile(series, int(high))
            def whisker_low(series):
                return np.percentile(series, int(low))
        else:
            raise ValueError("Unknown whisker type: {}".format(whisker_type))
        def outliers(series):
            above = series[series > whisker_high(series)]
            below = series[series < whisker_low(series)]
            # pandas sometimes doesn't like getting lists back here
            return set(above.tolist() + below.tolist())
        aggregate = [Q1, np.median, Q3, whisker_high, whisker_low, outliers]
        df = df.groupby(form_data.get('groupby')).agg(aggregate)
        return df

    def to_series(self, df, classed='', title_suffix=''):
        """Flatten the aggregated frame into NVD3 box-plot series entries.

        Each row becomes one labeled box per metric; the aggregated column
        MultiIndex is (metric label, stat key), with 'median' renamed to 'Q2'.
        """
        label_sep = " - "
        chart_data = []
        for index_value, row in zip(df.index, df.to_dict(orient="records")):
            if isinstance(index_value, tuple):
                index_value = label_sep.join(index_value)
            boxes = defaultdict(dict)
            for (label, key), value in row.items():
                if key == "median":
                    key = "Q2"
                boxes[label][key] = value
            for label, box in boxes.items():
                if len(self.form_data.get("metrics")) > 1:
                    # need to render data labels with metrics
                    chart_label = label_sep.join([index_value, label])
                else:
                    chart_label = index_value
                chart_data.append({
                    "label": chart_label,
                    "values": box,
                })
        return chart_data

    def get_data(self):
        df = self.get_df()
        chart_data = self.to_series(df)
        return chart_data
class BubbleViz(NVD3Viz):
    """Based on the NVD3 bubble chart"""
    viz_type = "bubble"
    verbose_name = _("Bubble Chart")
    is_timeseries = False
    fieldsets = ({
        'label': None,
        'fields': (
            'series', 'entity',
            'x', 'y',
            'size', 'limit',
        )
    }, {
        'label': _('Chart Options'),
        'fields': (
            ('x_log_scale', 'y_log_scale'),
            ('show_legend', None),
            'max_bubble_size',
            ('x_axis_label', 'y_axis_label'),
        )
    },)

    def query_obj(self):
        """Group by series+entity and fetch the x/y/size metrics.

        Also stashes the chosen field names on self for get_df to use.
        """
        form_data = self.form_data
        d = super(BubbleViz, self).query_obj()
        # set() dedupes the case where series == entity.
        d['groupby'] = list({
            form_data.get('series'),
            form_data.get('entity')
        })
        self.x_metric = form_data.get('x')
        self.y_metric = form_data.get('y')
        self.z_metric = form_data.get('size')
        self.entity = form_data.get('entity')
        self.series = form_data.get('series')
        d['metrics'] = [
            self.z_metric,
            self.x_metric,
            self.y_metric,
        ]
        if not all(d['metrics'] + [self.entity, self.series]):
            raise Exception("Pick a metric for x, y and size")
        return d

    def get_df(self, query_obj=None):
        # Rename/duplicate columns into the x/y/size/shape/group schema the
        # NVD3 scatter/bubble frontend expects.
        df = super(BubbleViz, self).get_df(query_obj)
        df = df.fillna(0)
        df['x'] = df[[self.x_metric]]
        df['y'] = df[[self.y_metric]]
        df['size'] = df[[self.z_metric]]
        df['shape'] = 'circle'
        df['group'] = df[[self.series]]
        return df

    def get_data(self):
        # One NVD3 series ({key, values}) per distinct group.
        df = self.get_df()
        series = defaultdict(list)
        for row in df.to_dict(orient='records'):
            series[row['group']].append(row)
        chart_data = []
        for k, v in series.items():
            chart_data.append({
                'key': k,
                'values': v})
        return chart_data
class BigNumberViz(BaseViz):

    """Put emphasis on a single metric with this big number viz"""

    viz_type = "big_number"
    verbose_name = _("Big Number with Trendline")
    credits = 'a <a href="https://github.com/airbnb/caravel">Caravel</a> original'
    # A time series is needed to draw the trendline under the number.
    is_timeseries = True
    fieldsets = ({
        'label': None,
        'fields': (
            'metric',
            'compare_lag',
            'compare_suffix',
            'y_axis_format',
        )
    },)
    # Reuse the generic y_axis_format field as a plain number format.
    form_overrides = {
        'y_axis_format': {
            'label': _('Number format'),
        }
    }

    def reassignments(self):
        # Fall back to the legacy 'metrics' entry when no single
        # 'metric' is set (e.g. after switching from another viz type).
        metric = self.form_data.get('metric')
        if not metric:
            self.form_data['metric'] = self.orig_form_data.get('metrics')

    def query_obj(self):
        """Validate that a metric was picked and query only for it."""
        d = super(BigNumberViz, self).query_obj()
        metric = self.form_data.get('metric')
        if not metric:
            raise Exception("Pick a metric!")
        d['metrics'] = [self.form_data.get('metric')]
        self.form_data['metric'] = metric
        return d

    def get_data(self):
        """Return the chronologically sorted rows plus the comparison
        lag/suffix settings used by the frontend."""
        form_data = self.form_data
        df = self.get_df()
        # The first column is the timestamp; sort chronologically.
        df.sort_values(by=df.columns[0], inplace=True)
        compare_lag = form_data.get("compare_lag", "")
        # isdigit() guards against empty or non-numeric user input.
        compare_lag = int(compare_lag) if compare_lag and compare_lag.isdigit() else 0
        return {
            'data': df.values.tolist(),
            'compare_lag': compare_lag,
            'compare_suffix': form_data.get('compare_suffix', ''),
        }
class BigNumberTotalViz(BaseViz):

    """Put emphasis on a single metric with this big number viz"""

    viz_type = "big_number_total"
    verbose_name = _("Big Number")
    credits = 'a <a href="https://github.com/airbnb/caravel">Caravel</a> original'
    # Unlike BigNumberViz, no trendline: a plain aggregate total.
    is_timeseries = False
    fieldsets = ({
        'label': None,
        'fields': (
            'metric',
            'subheader',
            'y_axis_format',
        )
    },)
    # Reuse the generic y_axis_format field as a plain number format.
    form_overrides = {
        'y_axis_format': {
            'label': _('Number format'),
        }
    }

    def reassignments(self):
        # Fall back to the legacy 'metrics' entry when no single
        # 'metric' is set (e.g. after switching from another viz type).
        metric = self.form_data.get('metric')
        if not metric:
            self.form_data['metric'] = self.orig_form_data.get('metrics')

    def query_obj(self):
        """Validate that a metric was picked and query only for it."""
        d = super(BigNumberTotalViz, self).query_obj()
        metric = self.form_data.get('metric')
        if not metric:
            raise Exception("Pick a metric!")
        d['metrics'] = [self.form_data.get('metric')]
        self.form_data['metric'] = metric
        return d

    def get_data(self):
        """Return the rows (sorted by the first column) and the
        configured subheader text."""
        form_data = self.form_data
        df = self.get_df()
        df.sort_values(by=df.columns[0], inplace=True)
        return {
            'data': df.values.tolist(),
            'subheader': form_data.get('subheader', ''),
        }
class NVD3TimeSeriesViz(NVD3Viz):

    """A rich line chart component with tons of options"""

    viz_type = "line"
    verbose_name = _("Time Series - Line Chart")
    # Subclasses flip this to order series by their summed value.
    sort_series = False
    is_timeseries = True
    fieldsets = ({
        'label': None,
        'fields': (
            'metrics',
            'groupby', 'limit',
        ),
    }, {
        'label': _('Chart Options'),
        'fields': (
            ('show_brush', 'show_legend'),
            ('rich_tooltip', 'y_axis_zero'),
            ('y_log_scale', 'contribution'),
            ('line_interpolation', 'x_axis_showminmax'),
            ('x_axis_format', 'y_axis_format'),
            ('x_axis_label', 'y_axis_label'),
        ),
    }, {
        'label': _('Advanced Analytics'),
        'description': _(
            "This section contains options "
            "that allow for advanced analytical post processing "
            "of query results"),
        'fields': (
            ('rolling_type', 'rolling_periods'),
            'time_compare',
            'num_period_compare',
            None,
            ('resample_how', 'resample_rule',), 'resample_fillmethod'
        ),
    },)

    def get_df(self, query_obj=None):
        """Build the time-indexed frame and apply the post-processing
        pipeline in order: pivot, resample, series sort, contribution,
        period-over-period change, rolling window.
        """
        form_data = self.form_data
        df = super(NVD3TimeSeriesViz, self).get_df(query_obj)
        df = df.fillna(0)
        if form_data.get("granularity") == "all":
            raise Exception("Pick a time granularity for your time series")
        # One column per (metric, group...) combination, indexed by time.
        df = df.pivot_table(
            index="timestamp",
            columns=form_data.get('groupby'),
            values=form_data.get('metrics'))
        fm = form_data.get("resample_fillmethod")
        if not fm:
            fm = None
        how = form_data.get("resample_how")
        rule = form_data.get("resample_rule")
        if how and rule:
            # NOTE(review): resample(how=..., fill_method=...) is the
            # legacy pre-0.18 pandas API; removed in modern pandas.
            df = df.resample(rule, how=how, fill_method=fm)
            if not fm:
                df = df.fillna(0)
        if self.sort_series:
            # Order columns by their overall total, largest first.
            dfs = df.sum()
            dfs.sort_values(ascending=False, inplace=True)
            df = df[dfs.index]
        if form_data.get("contribution"):
            # Express each value as its share of the row total.
            dft = df.T
            df = (dft / dft.sum()).T
        num_period_compare = form_data.get("num_period_compare")
        if num_period_compare:
            # Percent change vs. the value N periods earlier; the first
            # N rows have no reference point and are dropped.
            num_period_compare = int(num_period_compare)
            df = (df / df.shift(num_period_compare)) - 1
            df = df[num_period_compare:]
        rolling_periods = form_data.get("rolling_periods")
        rolling_type = form_data.get("rolling_type")
        if rolling_type in ('mean', 'std', 'sum') and rolling_periods:
            # NOTE(review): pd.rolling_* are the deprecated pre-0.18
            # module-level rolling functions (now df.rolling(...)).
            if rolling_type == 'mean':
                df = pd.rolling_mean(df, int(rolling_periods), min_periods=0)
            elif rolling_type == 'std':
                df = pd.rolling_std(df, int(rolling_periods), min_periods=0)
            elif rolling_type == 'sum':
                df = pd.rolling_sum(df, int(rolling_periods), min_periods=0)
        elif rolling_type == 'cumsum':
            # Cumulative sum needs no window size.
            df = df.cumsum()
        return df

    def to_series(self, df, classed='', title_suffix=''):
        """Convert the pivoted frame into NVD3 series dicts.

        ``classed`` tags every series with a CSS class and
        ``title_suffix`` is appended to each series name (used to mark
        the time-shifted comparison series).
        """
        cols = []
        for col in df.columns:
            # Make empty/None column labels presentable.
            if col == '':
                cols.append('N/A')
            elif col is None:
                cols.append('NULL')
            else:
                cols.append(col)
        df.columns = cols
        series = df.to_dict('series')
        chart_data = []
        for name in df.T.index.tolist():
            ys = series[name]
            # Skip non-numeric columns (bool/int/uint/float/complex).
            if df[name].dtype.kind not in "biufc":
                continue
            df['timestamp'] = pd.to_datetime(df.index, utc=False)
            if isinstance(name, string_types):
                series_title = name
            else:
                # Tuple labels come from the (metric, group...) pivot.
                name = ["{}".format(s) for s in name]
                if len(self.form_data.get('metrics')) > 1:
                    series_title = ", ".join(name)
                else:
                    # Single metric: drop the redundant metric level.
                    series_title = ", ".join(name[1:])
            if title_suffix:
                series_title += title_suffix
            d = {
                "key": series_title,
                "classed": classed,
                "values": [
                    {'x': ds, 'y': ys[ds] if ds in ys else None}
                    for ds in df.timestamp
                ],
            }
            chart_data.append(d)
        return chart_data

    def get_data(self):
        """Return the chart series, optionally overlaid with the same
        query shifted back by the 'time_compare' delta."""
        df = self.get_df()
        chart_data = self.to_series(df)
        time_compare = self.form_data.get('time_compare')
        if time_compare:
            query_object = self.query_obj()
            delta = utils.parse_human_timedelta(time_compare)
            query_object['inner_from_dttm'] = query_object['from_dttm']
            query_object['inner_to_dttm'] = query_object['to_dttm']
            # Query an earlier window of the same width...
            query_object['from_dttm'] -= delta
            query_object['to_dttm'] -= delta
            df2 = self.get_df(query_object)
            # ...then shift its results forward so the two overlay.
            df2.index += delta
            chart_data += self.to_series(
                df2, classed='caravel', title_suffix="---")
            chart_data = sorted(chart_data, key=lambda x: x['key'])
        return chart_data
class NVD3TimeSeriesBarViz(NVD3TimeSeriesViz):

    """A bar chart where the x axis is time"""

    viz_type = "bar"
    # Bars read better when series are ordered by magnitude.
    sort_series = True
    verbose_name = _("Time Series - Bar Chart")
    # Reuse the parent's data and advanced-analytics sections; only the
    # chart-options section is specific to bars.
    fieldsets = [NVD3TimeSeriesViz.fieldsets[0]] + [{
        'label': _('Chart Options'),
        'fields': (
            ('show_brush', 'show_legend', 'show_bar_value'),
            ('rich_tooltip', 'y_axis_zero'),
            ('y_log_scale', 'contribution'),
            ('x_axis_format', 'y_axis_format'),
            ('line_interpolation', 'bar_stacked'),
            ('x_axis_showminmax', 'bottom_margin'),
            ('x_axis_label', 'y_axis_label'),
            ('reduce_x_ticks', 'show_controls'),
        ), }] + [NVD3TimeSeriesViz.fieldsets[2]]
class NVD3CompareTimeSeriesViz(NVD3TimeSeriesViz):

    """A line chart component where you can compare the % change over time"""

    # Inherits all behavior; only the viz type and title differ (the
    # percent-change rendering happens on the frontend).
    viz_type = 'compare'
    verbose_name = _("Time Series - Percent Change")
class NVD3TimeSeriesStackedViz(NVD3TimeSeriesViz):

    """A rich stack area chart"""

    viz_type = "area"
    verbose_name = _("Time Series - Stacked")
    # Stacked areas read better when series are ordered by magnitude.
    sort_series = True
    # Reuse the parent's data and advanced-analytics sections; only the
    # chart-options section is specific to stacked areas.
    fieldsets = [NVD3TimeSeriesViz.fieldsets[0]] + [{
        'label': _('Chart Options'),
        'fields': (
            ('show_brush', 'show_legend'),
            ('rich_tooltip', 'y_axis_zero'),
            ('y_log_scale', 'contribution'),
            ('x_axis_format', 'y_axis_format'),
            ('x_axis_showminmax', 'show_controls'),
            ('line_interpolation', 'stacked_style'),
        ), }] + [NVD3TimeSeriesViz.fieldsets[2]]
class DistributionPieViz(NVD3Viz):

    """Annoy visualization snobs with this controversial pie chart"""

    viz_type = "pie"
    verbose_name = _("Distribution - NVD3 - Pie Chart")
    is_timeseries = False
    fieldsets = ({
        'label': None,
        'fields': (
            'metrics', 'groupby',
            'limit',
            'pie_label_type',
            ('donut', 'show_legend'),
            'labels_outside',
        )
    },)

    def query_obj(self):
        """Pie charts are never time series; force the flag off."""
        query = super(DistributionPieViz, self).query_obj()
        query['is_timeseries'] = False
        return query

    def get_df(self, query_obj=None):
        """Pivot on the group-by columns, sorted by the first metric
        in descending order."""
        metric = self.metrics[0]
        df = super(DistributionPieViz, self).get_df(query_obj)
        df = df.pivot_table(index=self.groupby, values=[metric])
        df.sort_values(by=metric, ascending=False, inplace=True)
        return df

    def get_data(self):
        """Serialize the pivoted frame as [{'x': ..., 'y': ...}, ...]."""
        df = self.get_df().reset_index()
        df.columns = ['x', 'y']
        return df.to_dict(orient="records")
class HistogramViz(BaseViz):

    """Histogram"""

    viz_type = "histogram"
    verbose_name = _("Histogram")
    is_timeseries = False
    fieldsets = ({
        'label': None,
        'fields': (
            ('all_columns_x',),
            'row_limit',
        )
    }, {
        'label': _("Histogram Options"),
        'fields': (
            'link_length',
        )
    },)
    # 'link_length' is repurposed here to carry the bin count.
    form_overrides = {
        'all_columns_x': {
            'label': _('Numeric Column'),
            'description': _("Select the numeric column to draw the histogram"),
        },
        'link_length': {
            'label': _("No of Bins"),
            'description': _("Select number of bins for the histogram"),
            'default': 5
        }
    }

    def query_obj(self):
        """Returns the query object for this visualization"""
        d = super(HistogramViz, self).query_obj()
        d['row_limit'] = self.form_data.get('row_limit', int(config.get('ROW_LIMIT')))
        numeric_column = self.form_data.get('all_columns_x')
        if numeric_column is None:
            raise Exception("Must have one numeric column specified")
        d['columns'] = [numeric_column]
        return d

    def get_df(self, query_obj=None):
        """Returns a pandas dataframe based on the query object"""
        if not query_obj:
            query_obj = self.query_obj()
        self.results = self.datasource.query(**query_obj)
        self.query = self.results.query
        df = self.results.df
        if df is None or df.empty:
            raise Exception("No data, to build histogram")
        # BUG FIX: DataFrame.replace is not in-place; the result was
        # previously discarded, letting +/-inf leak into the chart.
        df = df.replace([np.inf, -np.inf], np.nan)
        df = df.fillna(0)
        return df

    def get_data(self):
        """Returns the chart data"""
        df = self.get_df()
        # The frontend bins the raw values itself; just hand it the
        # first (and only queried) column as a flat list.
        chart_data = df[df.columns[0]].values.tolist()
        return chart_data
class DistributionBarViz(DistributionPieViz):

    """A good old bar chart"""

    viz_type = "dist_bar"
    verbose_name = _("Distribution - Bar Chart")
    is_timeseries = False
    fieldsets = ({
        'label': _('Chart Options'),
        'fields': (
            'groupby',
            'columns',
            'metrics',
            'row_limit',
            ('show_legend', 'show_bar_value', 'bar_stacked'),
            ('y_axis_format', 'bottom_margin'),
            ('x_axis_label', 'y_axis_label'),
            ('reduce_x_ticks', 'contribution'),
            ('show_controls', None),
        )
    },)
    form_overrides = {
        'groupby': {
            'label': _('Series'),
        },
        'columns': {
            'label': _('Breakdowns'),
            'description': _("Defines how each series is broken down"),
        },
    }

    def query_obj(self):
        """Group by the union of the Series and Breakdowns columns."""
        d = super(DistributionPieViz, self).query_obj()  # noqa
        fd = self.form_data
        d['is_timeseries'] = False
        gb = fd.get('groupby') or []
        cols = fd.get('columns') or []
        # NOTE(review): a set is unordered, so the resulting group-by
        # column order is non-deterministic — confirm downstream code
        # does not rely on it.
        d['groupby'] = set(gb + cols)
        # A column shared by both lists collapses in the set; detect
        # the overlap by comparing cardinalities.
        if len(d['groupby']) < len(gb) + len(cols):
            raise Exception("Can't have overlap between Series and Breakdowns")
        if not self.metrics:
            raise Exception("Pick at least one metric")
        if not self.groupby:
            raise Exception("Pick at least one field for [Series]")
        return d

    def get_df(self, query_obj=None):
        """Pivot the result by Series x Breakdowns, with rows ordered
        by the first metric's per-series total (descending)."""
        df = super(DistributionPieViz, self).get_df(query_obj)  # noqa
        fd = self.form_data
        # Per-series totals, used below purely for row ordering.
        row = df.groupby(self.groupby).sum()[self.metrics[0]].copy()
        row.sort_values(ascending=False, inplace=True)
        columns = fd.get('columns') or []
        pt = df.pivot_table(
            index=self.groupby,
            columns=columns,
            values=self.metrics)
        if fd.get("contribution"):
            # Normalize each row so its breakdowns sum to 1.
            pt = pt.fillna(0)
            pt = pt.T
            pt = (pt / pt.sum()).T
        pt = pt.reindex(row.index)
        return pt

    def get_data(self):
        """Serialize each numeric pivot column as an NVD3 series."""
        df = self.get_df()
        chart_data = []
        for name, ys in df.iteritems():
            # Skip non-numeric columns (bool/int/uint/float/complex).
            if df[name].dtype.kind not in "biufc":
                continue
            if isinstance(name, string_types):
                series_title = name
            elif len(self.metrics) > 1:
                # Multi-metric: keep the metric name in the label.
                series_title = ", ".join(name)
            else:
                # Single metric: drop the redundant metric level.
                l = [str(s) for s in name[1:]]
                series_title = ", ".join(l)
            d = {
                "key": series_title,
                "values": [
                    {'x': i, 'y': v}
                    for i, v in ys.iteritems()]
            }
            chart_data.append(d)
        return chart_data
class SunburstViz(BaseViz):

    """A multi level sunburst chart"""

    viz_type = "sunburst"
    verbose_name = _("Sunburst")
    is_timeseries = False
    credits = (
        'Kerry Rodden '
        '@<a href="https://bl.ocks.org/kerryrodden/7090426">bl.ocks.org</a>')
    fieldsets = ({
        'label': None,
        'fields': (
            'groupby',
            'metric', 'secondary_metric',
            'row_limit',
        )
    },)
    form_overrides = {
        'metric': {
            'label': _('Primary Metric'),
            'description': _(
                "The primary metric is used to "
                "define the arc segment sizes"),
        },
        'secondary_metric': {
            'label': _('Secondary Metric'),
            'description': _(
                "This secondary metric is used to "
                "define the color as a ratio against the primary metric. "
                "If the two metrics match, color is mapped level groups"),
        },
        'groupby': {
            'label': _('Hierarchy'),
            'description': _("This defines the level of the hierarchy"),
        },
    }

    def get_df(self, query_obj=None):
        df = super(SunburstViz, self).get_df(query_obj)
        return df

    def get_data(self):
        """Return rows as [level1, ..., levelN, m1, m2] value lists."""
        df = self.get_df()
        # if m1 == m2 duplicate the metric column
        cols = self.form_data.get('groupby')
        metric = self.form_data.get('metric')
        secondary_metric = self.form_data.get('secondary_metric')
        if metric == secondary_metric:
            ndf = df
            # BUG FIX: the label list must be flat; the previous
            # [cols + ['m1', 'm2']] nesting produced a bogus MultiIndex.
            ndf.columns = cols + ['m1', 'm2']
        else:
            # BUG FIX: build a new list instead of `cols += [...]`,
            # which mutated form_data['groupby'] in place and grew it
            # on every call.
            cols = cols + [
                self.form_data['metric'], self.form_data['secondary_metric']]
            ndf = df[cols]
        return json.loads(ndf.to_json(orient="values"))  # TODO fix this nonsense

    def query_obj(self):
        """Query both the primary and the secondary metric."""
        qry = super(SunburstViz, self).query_obj()
        qry['metrics'] = [
            self.form_data['metric'], self.form_data['secondary_metric']]
        return qry
class SankeyViz(BaseViz):

    """A Sankey diagram that requires a parent-child dataset"""

    viz_type = "sankey"
    verbose_name = _("Sankey")
    is_timeseries = False
    credits = '<a href="https://www.npmjs.com/package/d3-sankey">d3-sankey on npm</a>'
    fieldsets = ({
        'label': None,
        'fields': (
            'groupby',
            'metric',
            'row_limit',
        )
    },)
    form_overrides = {
        'groupby': {
            'label': _('Source / Target'),
            'description': _("Choose a source and a target"),
        },
    }

    def query_obj(self):
        """Require exactly two group-by columns (source and target)."""
        qry = super(SankeyViz, self).query_obj()
        if len(qry['groupby']) != 2:
            raise Exception("Pick exactly 2 columns as [Source / Target]")
        qry['metrics'] = [
            self.form_data['metric']]
        return qry

    def get_data(self):
        """Return the edge records, refusing cyclic graphs (the Sankey
        layout requires an acyclic source->target structure)."""
        df = self.get_df()
        df.columns = ['source', 'target', 'value']
        recs = df.to_dict(orient='records')

        # Adjacency map: source -> set of targets.
        hierarchy = defaultdict(set)
        for row in recs:
            hierarchy[row['source']].add(row['target'])

        def find_cycle(g):
            """Whether there's a cycle in a directed graph"""
            path = set()

            def visit(vertex):
                # DFS along the current path; returns the offending
                # (vertex, neighbour) edge when the walk loops back
                # onto the path, otherwise falls through to None.
                # NOTE(review): recursion depth is bounded by the
                # longest path — very deep hierarchies could hit
                # Python's recursion limit.
                path.add(vertex)
                for neighbour in g.get(vertex, ()):
                    if neighbour in path or visit(neighbour):
                        return (vertex, neighbour)
                path.remove(vertex)

            for v in g:
                cycle = visit(v)
                if cycle:
                    return cycle

        cycle = find_cycle(hierarchy)
        if cycle:
            raise Exception(
                "There's a loop in your Sankey, please provide a tree. "
                "Here's a faulty link: {}".format(cycle))
        return recs
class DirectedForceViz(BaseViz):

    """An animated directed force layout graph visualization"""

    viz_type = "directed_force"
    verbose_name = _("Directed Force Layout")
    credits = 'd3noob @<a href="http://bl.ocks.org/d3noob/5141278">bl.ocks.org</a>'
    is_timeseries = False
    fieldsets = ({
        'label': None,
        'fields': (
            'groupby',
            'metric',
            'row_limit',
        )
    }, {
        'label': _('Force Layout'),
        'fields': (
            'link_length',
            'charge',
        )
    },)
    form_overrides = {
        'groupby': {
            'label': _('Source / Target'),
            'description': _("Choose a source and a target"),
        },
    }

    def query_obj(self):
        """Require exactly two group-by columns (source and target)."""
        fd = self.form_data
        qry = super(DirectedForceViz, self).query_obj()
        if len(fd['groupby']) != 2:
            raise Exception("Pick exactly 2 columns to 'Group By'")
        qry['metrics'] = [fd['metric']]
        return qry

    def get_data(self):
        """Relabel the columns and emit one record per graph edge."""
        df = self.get_df()
        df.columns = ['source', 'target', 'value']
        records = df.to_dict(orient='records')
        return records
class WorldMapViz(BaseViz):

    """A country centric world map"""

    viz_type = "world_map"
    verbose_name = _("World Map")
    is_timeseries = False
    credits = 'datamaps on <a href="https://www.npmjs.com/package/datamaps">npm</a>'
    fieldsets = ({
        'label': None,
        'fields': (
            'entity',
            'country_fieldtype',
            'metric',
        )
    }, {
        'label': _('Bubbles'),
        'fields': (
            ('show_bubbles', None),
            'secondary_metric',
            'max_bubble_size',
        )
    })
    form_overrides = {
        'entity': {
            'label': _('Country Field'),
            'description': _("3 letter code of the country"),
        },
        'metric': {
            'label': _('Metric for color'),
            'description': _("Metric that defines the color of the country"),
        },
        'secondary_metric': {
            'label': _('Bubble size'),
            'description': _("Metric that defines the size of the bubble"),
        },
    }

    def query_obj(self):
        """Query color and bubble metrics, grouped by the country field."""
        qry = super(WorldMapViz, self).query_obj()
        qry['metrics'] = [
            self.form_data['metric'], self.form_data['secondary_metric']]
        qry['groupby'] = [self.form_data['entity']]
        return qry

    def get_data(self):
        """Return one record per country with m1/m2 metrics and, when
        the country code resolves, its cca3 code, name and lat/lng."""
        from caravel.data import countries
        df = self.get_df()
        cols = [self.form_data.get('entity')]
        metric = self.form_data.get('metric')
        secondary_metric = self.form_data.get('secondary_metric')
        if metric == secondary_metric:
            # df[metric] is a DataFrame here because the query listed
            # the same metric twice, yielding duplicate column names.
            # BUG FIX: .copy() so the m1/m2 assignments below operate
            # on an independent frame instead of a pandas slice view
            # (SettingWithCopy — the writes were not guaranteed to
            # stick).
            ndf = df[cols].copy()
            ndf['m1'] = df[metric].iloc[:, 0]
            ndf['m2'] = ndf['m1']
        else:
            cols += [metric, secondary_metric]
            ndf = df[cols]
        df = ndf
        df.columns = ['country', 'm1', 'm2']
        d = df.to_dict(orient='records')
        for row in d:
            country = None
            if isinstance(row['country'], string_types):
                # Resolve the user-supplied code (cca2/cca3/name/...)
                # against the bundled country table.
                country = countries.get(
                    self.form_data.get('country_fieldtype'), row['country'])
            if country:
                row['country'] = country['cca3']
                row['latitude'] = country['lat']
                row['longitude'] = country['lng']
                row['name'] = country['name']
            else:
                # Unresolvable code: keep the row but mark it unknown.
                row['country'] = "XXX"
        return d
class FilterBoxViz(BaseViz):

    """A multi filter, multi-choice filter box to make dashboards interactive"""

    viz_type = "filter_box"
    verbose_name = _("Filters")
    is_timeseries = False
    credits = 'a <a href="https://github.com/airbnb/caravel">Caravel</a> original'
    fieldsets = ({
        'label': None,
        'fields': (
            'groupby',
            'metric',
        )
    },)
    form_overrides = {
        'groupby': {
            'label': _('Filter fields'),
            'description': _("The fields you want to filter on"),
        },
    }

    def query_obj(self):
        """Require at least one filter field; query a single metric."""
        qry = super(FilterBoxViz, self).query_obj()
        groupby = self.form_data['groupby']
        if len(groupby) < 1:
            raise Exception("Pick at least one filter field")
        qry['metrics'] = [
            self.form_data['metric']]
        return qry

    def get_data(self):
        """Run one query per filter field and collect its choices,
        keyed by field name."""
        qry = self.query_obj()
        filters = list(qry['groupby'])
        d = {}
        for flt in filters:
            # Re-aim the same query at a single field at a time.
            qry['groupby'] = [flt]
            df = super(FilterBoxViz, self).get_df(qry)
            d[flt] = [
                {
                    'id': row[0],
                    'text': row[0],
                    'filter': flt,
                    'metric': row[1],
                }
                for row in df.itertuples(index=False)
            ]
        return d
class IFrameViz(BaseViz):

    """You can squeeze just about anything in this iFrame component"""

    viz_type = "iframe"
    verbose_name = _("iFrame")
    credits = 'a <a href="https://github.com/airbnb/caravel">Caravel</a> original'
    is_timeseries = False
    # Pure markup viz: the only input is the URL to embed.
    fieldsets = ({
        'label': None,
        'fields': ('url',)
    },)
class ParallelCoordinatesViz(BaseViz):

    """Interactive parallel coordinate implementation

    Uses this amazing javascript library
    https://github.com/syntagmatic/parallel-coordinates
    """

    viz_type = "para"
    verbose_name = _("Parallel Coordinates")
    credits = (
        '<a href="https://syntagmatic.github.io/parallel-coordinates/">'
        'Syntagmatic\'s library</a>')
    is_timeseries = False
    fieldsets = ({
        'label': None,
        'fields': (
            'series',
            'metrics',
            'secondary_metric',
            'limit',
            ('show_datatable', 'include_series'),
        )
    },)

    def query_obj(self):
        """Query the selected metrics (plus the secondary metric, when
        set) grouped by the series column."""
        d = super(ParallelCoordinatesViz, self).query_obj()
        fd = self.form_data
        # Copy so the append below never mutates form_data['metrics'].
        d['metrics'] = copy.copy(fd.get('metrics'))
        second = fd.get('secondary_metric')
        # BUG FIX: only append when the secondary metric is actually
        # set; previously a missing value appended None to the metrics
        # list and corrupted the query.
        if second and second not in d['metrics']:
            d['metrics'] += [second]
        d['groupby'] = [fd.get('series')]
        return d

    def get_data(self):
        """Return the raw rows as a list of records."""
        df = self.get_df()
        return df.to_dict(orient="records")
class HeatmapViz(BaseViz):

    """A nice heatmap visualization that support high density through canvas"""

    viz_type = "heatmap"
    verbose_name = _("Heatmap")
    is_timeseries = False
    credits = (
        'inspired from mbostock @<a href="http://bl.ocks.org/mbostock/3074470">'
        'bl.ocks.org</a>')
    fieldsets = ({
        'label': None,
        'fields': (
            'all_columns_x',
            'all_columns_y',
            'metric',
        )
    }, {
        'label': _('Heatmap Options'),
        'fields': (
            'linear_color_scheme',
            ('xscale_interval', 'yscale_interval'),
            'canvas_image_rendering',
            'normalize_across',
        )
    },)

    def query_obj(self):
        """Query one metric grouped by the x and y columns."""
        d = super(HeatmapViz, self).query_obj()
        fd = self.form_data
        d['metrics'] = [fd.get('metric')]
        d['groupby'] = [fd.get('all_columns_x'), fd.get('all_columns_y')]
        return d

    def get_data(self):
        """Return [{'x', 'y', 'v', 'perc'}] records where 'perc' is the
        min-max normalized value over the chosen scope (whole heatmap
        or per x/y group)."""
        df = self.get_df()
        fd = self.form_data
        x = fd.get('all_columns_x')
        y = fd.get('all_columns_y')
        v = fd.get('metric')
        if x == y:
            # x and y are the same column, so the frame already has
            # exactly three (duplicate-named) columns; rename by position.
            df.columns = ['x', 'y', 'v']
        else:
            df = df[[x, y, v]]
            df.columns = ['x', 'y', 'v']
        norm = fd.get('normalize_across')
        overall = False
        if norm == 'heatmap':
            overall = True
        else:
            # Normalize within each x (or y) group instead.
            gb = df.groupby(norm, group_keys=False)
            if len(gb) <= 1:
                # A single group degenerates to whole-heatmap scaling.
                overall = True
            else:
                df['perc'] = (
                    gb.apply(
                        lambda x: (x.v - x.v.min()) / (x.v.max() - x.v.min()))
                )
        if overall:
            v = df.v
            min_ = v.min()
            df['perc'] = (v - min_) / (v.max() - min_)
        return df.to_dict(orient="records")
class HorizonViz(NVD3TimeSeriesViz):

    """Horizon chart

    https://www.npmjs.com/package/d3-horizon-chart
    """

    viz_type = "horizon"
    verbose_name = _("Horizon Charts")
    credits = (
        '<a href="https://www.npmjs.com/package/d3-horizon-chart">'
        'd3-horizon-chart</a>')
    # Reuse the time-series data section; only the chart options differ.
    fieldsets = [NVD3TimeSeriesViz.fieldsets[0]] + [{
        'label': _('Chart Options'),
        'fields': (
            ('series_height', 'horizon_color_scale'),
        ), }]
class MapboxViz(BaseViz):

    """Rich maps made with Mapbox"""

    viz_type = "mapbox"
    verbose_name = _("Mapbox")
    is_timeseries = False
    credits = (
        '<a href=https://www.mapbox.com/mapbox-gl-js/api/>Mapbox GL JS</a>')
    fieldsets = ({
        'label': None,
        'fields': (
            ('all_columns_x', 'all_columns_y'),
            'clustering_radius',
            'row_limit',
            'groupby',
            'render_while_dragging',
        )
    }, {
        'label': _('Points'),
        'fields': (
            'point_radius',
            'point_radius_unit',
        )
    }, {
        'label': _('Labelling'),
        'fields': (
            'mapbox_label',
            'pandas_aggfunc',
        )
    }, {
        'label': _('Visual Tweaks'),
        'fields': (
            'mapbox_style',
            'global_opacity',
            'mapbox_color',
        )
    }, {
        'label': _('Viewport'),
        'fields': (
            'viewport_longitude',
            'viewport_latitude',
            'viewport_zoom',
        )
    },)
    form_overrides = {
        'all_columns_x': {
            'label': _('Longitude'),
            'description': _("Column containing longitude data"),
        },
        'all_columns_y': {
            'label': _('Latitude'),
            'description': _("Column containing latitude data"),
        },
        'pandas_aggfunc': {
            'label': _('Cluster label aggregator'),
            'description': _(
                "Aggregate function applied to the list of points "
                "in each cluster to produce the cluster label."),
        },
        'rich_tooltip': {
            'label': _('Tooltip'),
            'description': _(
                "Show a tooltip when hovering over points and clusters "
                "describing the label"),
        },
        'groupby': {
            'description': _(
                "One or many fields to group by. If grouping, latitude "
                "and longitude columns must be present."),
        },
    }

    def query_obj(self):
        """Validate the lon/lat/label/radius column choices.

        Without a group-by the raw columns are queried directly; with a
        group-by every referenced column must be part of it.
        """
        d = super(MapboxViz, self).query_obj()
        fd = self.form_data
        label_col = fd.get('mapbox_label')
        if not fd.get('groupby'):
            d['columns'] = [fd.get('all_columns_x'), fd.get('all_columns_y')]
            if label_col and len(label_col) >= 1:
                # 'count' only makes sense as an aggregate label.
                if label_col[0] == "count":
                    raise Exception(
                        "Must have a [Group By] column to have 'count' as the [Label]")
                d['columns'].append(label_col[0])
            if fd.get('point_radius') != 'Auto':
                d['columns'].append(fd.get('point_radius'))
            # Dedupe (e.g. when the label reuses the lon/lat column).
            d['columns'] = list(set(d['columns']))
        else:
            # Ensuring columns chosen are all in group by
            if (label_col and len(label_col) >= 1 and
                    label_col[0] != "count" and
                    label_col[0] not in fd.get('groupby')):
                raise Exception(
                    "Choice of [Label] must be present in [Group By]")
            if (fd.get("point_radius") != "Auto" and
                    fd.get("point_radius") not in fd.get('groupby')):
                raise Exception(
                    "Choice of [Point Radius] must be present in [Group By]")
            if (fd.get('all_columns_x') not in fd.get('groupby') or
                    fd.get('all_columns_y') not in fd.get('groupby')):
                raise Exception(
                    "[Longitude] and [Latitude] columns must be present in [Group By]")
        return d

    def get_data(self):
        """Build a geoJSON FeatureCollection of points plus the render
        settings the Mapbox frontend needs."""
        df = self.get_df()
        fd = self.form_data
        label_col = fd.get('mapbox_label')
        custom_metric = label_col and len(label_col) >= 1
        metric_col = [None] * len(df.index)
        if custom_metric:
            # The label may reuse the lon/lat column or any other one.
            if label_col[0] == fd.get('all_columns_x'):
                metric_col = df[fd.get('all_columns_x')]
            elif label_col[0] == fd.get('all_columns_y'):
                metric_col = df[fd.get('all_columns_y')]
            else:
                metric_col = df[label_col[0]]
        point_radius_col = (
            [None] * len(df.index)
            if fd.get("point_radius") == "Auto"
            else df[fd.get("point_radius")])
        # using geoJSON formatting
        geo_json = {
            "type": "FeatureCollection",
            "features": [
                {
                    "type": "Feature",
                    "properties": {
                        "metric": metric,
                        "radius": point_radius,
                    },
                    "geometry": {
                        "type": "Point",
                        "coordinates": [lon, lat],
                    }
                }
                for lon, lat, metric, point_radius
                in zip(
                    df[fd.get('all_columns_x')],
                    df[fd.get('all_columns_y')],
                    metric_col, point_radius_col)
            ]
        }
        return {
            "geoJSON": geo_json,
            "customMetric": custom_metric,
            "mapboxApiKey": config.get('MAPBOX_API_KEY'),
            "mapStyle": fd.get("mapbox_style"),
            "aggregatorName": fd.get("pandas_aggfunc"),
            "clusteringRadius": fd.get("clustering_radius"),
            "pointRadiusUnit": fd.get("point_radius_unit"),
            "globalOpacity": fd.get("global_opacity"),
            "viewportLongitude": fd.get("viewport_longitude"),
            "viewportLatitude": fd.get("viewport_latitude"),
            "viewportZoom": fd.get("viewport_zoom"),
            "renderWhileDragging": fd.get("render_while_dragging"),
            "tooltip": fd.get("rich_tooltip"),
            "color": fd.get("mapbox_color"),
        }
# Canonical list of every visualization class shipped with Caravel.
viz_types_list = [
    TableViz,
    PivotTableViz,
    NVD3TimeSeriesViz,
    NVD3CompareTimeSeriesViz,
    NVD3TimeSeriesStackedViz,
    NVD3TimeSeriesBarViz,
    DistributionBarViz,
    DistributionPieViz,
    BubbleViz,
    MarkupViz,
    WordCloudViz,
    BigNumberViz,
    BigNumberTotalViz,
    SunburstViz,
    DirectedForceViz,
    SankeyViz,
    WorldMapViz,
    FilterBoxViz,
    IFrameViz,
    ParallelCoordinatesViz,
    HeatmapViz,
    BoxPlotViz,
    TreemapViz,
    CalHeatmapViz,
    HorizonViz,
    MapboxViz,
    HistogramViz,
    SeparatorViz,
]

# viz_type -> class registry, preserving the order above and honoring
# the deployment's VIZ_TYPE_BLACKLIST configuration.
viz_types = OrderedDict([(v.viz_type, v) for v in viz_types_list
                         if v.viz_type not in config.get('VIZ_TYPE_BLACKLIST')])
| georgeke/caravel | caravel/viz.py | Python | apache-2.0 | 62,304 | [
"VisIt"
] | 140fe23acfd242848c0d2cde5a17773403bafe413894ccea84fe981c4e9d3b2d |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.