repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
bikashgupta11/javarobot | src/main/resources/jython/Lib/pawt/colors.py | 112 | 4500 | from java.awt import Color
# Standard X11/CSS named colors exposed as java.awt.Color constants for
# Jython scripts using pawt.  RGB component values follow the usual
# CSS3/X11 color table (0-255 per channel).
aliceblue = Color(240, 248, 255)
antiquewhite = Color(250, 235, 215)
aqua = Color(0, 255, 255)
aquamarine = Color(127, 255, 212)
azure = Color(240, 255, 255)
beige = Color(245, 245, 220)
bisque = Color(255, 228, 196)
black = Color(0, 0, 0)
blanchedalmond = Color(255, 235, 205)
blue = Color(0, 0, 255)
blueviolet = Color(138, 43, 226)
brown = Color(165, 42, 42)
burlywood = Color(222, 184, 135)
cadetblue = Color(95, 158, 160)
chartreuse = Color(127, 255, 0)
chocolate = Color(210, 105, 30)
coral = Color(255, 127, 80)
cornflowerblue = Color(100, 149, 237)
cornsilk = Color(255, 248, 220)
crimson = Color(220, 20, 60)
# NOTE: cyan is the same RGB triple as aqua (the CSS names are aliases).
cyan = Color(0, 255, 255)
darkblue = Color(0, 0, 139)
darkcyan = Color(0, 139, 139)
darkgoldenrod = Color(184, 134, 11)
darkgray = Color(169, 169, 169)
darkgreen = Color(0, 100, 0)
darkkhaki = Color(189, 183, 107)
darkmagenta = Color(139, 0, 139)
darkolivegreen = Color(85, 107, 47)
darkorange = Color(255, 140, 0)
darkorchid = Color(153, 50, 204)
darkred = Color(139, 0, 0)
darksalmon = Color(233, 150, 122)
darkseagreen = Color(143, 188, 143)
darkslateblue = Color(72, 61, 139)
darkslategray = Color(47, 79, 79)
darkturquoise = Color(0, 206, 209)
darkviolet = Color(148, 0, 211)
deeppink = Color(255, 20, 147)
deepskyblue = Color(0, 191, 255)
dimgray = Color(105, 105, 105)
dodgerblue = Color(30, 144, 255)
firebrick = Color(178, 34, 34)
floralwhite = Color(255, 250, 240)
forestgreen = Color(34, 139, 34)
# NOTE: fuchsia is the same RGB triple as magenta (CSS aliases).
fuchsia = Color(255, 0, 255)
gainsboro = Color(220, 220, 220)
ghostwhite = Color(248, 248, 255)
gold = Color(255, 215, 0)
goldenrod = Color(218, 165, 32)
gray = Color(128, 128, 128)
green = Color(0, 128, 0)
greenyellow = Color(173, 255, 47)
honeydew = Color(240, 255, 240)
hotpink = Color(255, 105, 180)
indianred = Color(205, 92, 92)
indigo = Color(75, 0, 130)
ivory = Color(255, 255, 240)
khaki = Color(240, 230, 140)
lavender = Color(230, 230, 250)
lavenderblush = Color(255, 240, 245)
lawngreen = Color(124, 252, 0)
lemonchiffon = Color(255, 250, 205)
lightblue = Color(173, 216, 230)
lightcoral = Color(240, 128, 128)
lightcyan = Color(224, 255, 255)
lightgoldenrodyellow = Color(250, 250, 210)
lightgreen = Color(144, 238, 144)
lightgrey = Color(211, 211, 211)
lightpink = Color(255, 182, 193)
lightsalmon = Color(255, 160, 122)
lightseagreen = Color(32, 178, 170)
lightskyblue = Color(135, 206, 250)
lightslategray = Color(119, 136, 153)
lightsteelblue = Color(176, 196, 222)
lightyellow = Color(255, 255, 224)
lime = Color(0, 255, 0)
limegreen = Color(50, 205, 50)
linen = Color(250, 240, 230)
magenta = Color(255, 0, 255)
maroon = Color(128, 0, 0)
mediumaquamarine = Color(102, 205, 170)
mediumblue = Color(0, 0, 205)
mediumorchid = Color(186, 85, 211)
mediumpurple = Color(147, 112, 219)
mediumseagreen = Color(60, 179, 113)
mediumslateblue = Color(123, 104, 238)
mediumspringgreen = Color(0, 250, 154)
mediumturquoise = Color(72, 209, 204)
mediumvioletred = Color(199, 21, 133)
midnightblue = Color(25, 25, 112)
mintcream = Color(245, 255, 250)
mistyrose = Color(255, 228, 225)
moccasin = Color(255, 228, 181)
navajowhite = Color(255, 222, 173)
navy = Color(0, 0, 128)
oldlace = Color(253, 245, 230)
olive = Color(128, 128, 0)
olivedrab = Color(107, 142, 35)
orange = Color(255, 165, 0)
orangered = Color(255, 69, 0)
orchid = Color(218, 112, 214)
palegoldenrod = Color(238, 232, 170)
palegreen = Color(152, 251, 152)
paleturquoise = Color(175, 238, 238)
palevioletred = Color(219, 112, 147)
papayawhip = Color(255, 239, 213)
peachpuff = Color(255, 218, 185)
peru = Color(205, 133, 63)
pink = Color(255, 192, 203)
plum = Color(221, 160, 221)
powderblue = Color(176, 224, 230)
purple = Color(128, 0, 128)
red = Color(255, 0, 0)
rosybrown = Color(188, 143, 143)
royalblue = Color(65, 105, 225)
saddlebrown = Color(139, 69, 19)
salmon = Color(250, 128, 114)
sandybrown = Color(244, 164, 96)
seagreen = Color(46, 139, 87)
seashell = Color(255, 245, 238)
sienna = Color(160, 82, 45)
silver = Color(192, 192, 192)
skyblue = Color(135, 206, 235)
slateblue = Color(106, 90, 205)
slategray = Color(112, 128, 144)
snow = Color(255, 250, 250)
springgreen = Color(0, 255, 127)
steelblue = Color(70, 130, 180)
tan = Color(210, 180, 140)
teal = Color(0, 128, 128)
thistle = Color(216, 191, 216)
tomato = Color(255, 99, 71)
turquoise = Color(64, 224, 208)
violet = Color(238, 130, 238)
wheat = Color(245, 222, 179)
white = Color(255, 255, 255)
whitesmoke = Color(245, 245, 245)
yellow = Color(255, 255, 0)
yellowgreen = Color(154, 205, 50)
# Remove the Color class itself so the module namespace exports only the
# named color constants.
del Color
| gpl-3.0 |
sve-odoo/odoo | addons/l10n_it/__openerp__.py | 165 | 2072 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010
# OpenERP Italian Community (<http://www.openerp-italia.org>)
# Servabit srl
# Agile Business Group sagl
# Domsense srl
# Albatos srl
#
# Copyright (C) 2011-2012
# Associazione OpenERP Italia (<http://www.openerp-italia.org>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Italy - Accounting',
'version': '0.2',
'depends': ['base_vat','account_chart','base_iban'],
'author': 'OpenERP Italian Community',
'description': """
Piano dei conti italiano di un'impresa generica.
================================================
Italian accounting chart and localization.
""",
'license': 'AGPL-3',
'category': 'Localization/Account Charts',
'website': 'http://www.openerp-italia.org/',
'data': [
'data/account.account.template.csv',
'data/account.tax.code.template.csv',
'account_chart.xml',
'data/account.tax.template.csv',
'data/account.fiscal.position.template.csv',
'l10n_chart_it_generic.xml',
],
'demo': [],
'installable': True,
'auto_install': False,
'images': ['images/config_chart_l10n_it.jpeg','images/l10n_it_chart.jpeg'],
}
| agpl-3.0 |
webmasterraj/GaSiProMo | flask/lib/python2.7/site-packages/numpy/lib/tests/test_financial.py | 108 | 6607 | from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_almost_equal,
assert_allclose
)
class TestFinancial(TestCase):
    """Tests for numpy's financial functions (rate, irr, pv, fv, pmt,
    ppmt, ipmt, nper, npv, mirr), including the ``when`` keyword and
    broadcasting behaviour.

    Payments returned by pv/pmt/ppmt/ipmt follow the usual cash-flow sign
    convention: money paid out is negative.
    """

    def test_rate(self):
        assert_almost_equal(np.rate(10, 0, -3500, 10000),
                            0.1107, 4)

    def test_irr(self):
        v = [-150000, 15000, 25000, 35000, 45000, 60000]
        assert_almost_equal(np.irr(v), 0.0524, 2)
        v = [-100, 0, 0, 74]
        assert_almost_equal(np.irr(v), -0.0955, 2)
        v = [-100, 39, 59, 55, 20]
        assert_almost_equal(np.irr(v), 0.28095, 2)
        v = [-100, 100, 0, -7]
        assert_almost_equal(np.irr(v), -0.0833, 2)
        v = [-100, 100, 0, 7]
        assert_almost_equal(np.irr(v), 0.06206, 2)
        v = [-5, 10.5, 1, -8, 1]
        assert_almost_equal(np.irr(v), 0.0886, 2)

    def test_pv(self):
        assert_almost_equal(np.pv(0.07, 20, 12000, 0), -127128.17, 2)

    def test_fv(self):
        assert_almost_equal(np.fv(0.075, 20, -2000, 0, 0), 86609.36, 2)

    def test_pmt(self):
        res = np.pmt(0.08 / 12, 5 * 12, 15000)
        tgt = -304.145914
        assert_allclose(res, tgt)
        # Test the edge case where rate == 0.0
        res = np.pmt(0.0, 5 * 12, 15000)
        tgt = -250.0
        assert_allclose(res, tgt)
        # Test the case where we use broadcast and
        # the arguments passed in are arrays.
        res = np.pmt([[0.0, 0.8], [0.3, 0.8]], [12, 3], [2000, 20000])
        tgt = np.array([[-166.66667, -19311.258], [-626.90814, -19311.258]])
        assert_allclose(res, tgt)

    def test_ppmt(self):
        # BUG FIX: this line previously read
        #     np.round(np.ppmt(...), 2) == 710.25
        # a bare comparison whose result was discarded, so the test could
        # never fail.  The expected value is also negative (a payment).
        assert_almost_equal(np.round(np.ppmt(0.1 / 12, 1, 60, 55000), 2),
                            -710.25, 2)

    def test_ipmt(self):
        # BUG FIX: same discarded-comparison problem as test_ppmt; the
        # interest portion of the payment is negative by convention.
        assert_almost_equal(np.round(np.ipmt(0.1 / 12, 1, 24, 2000), 2),
                            -16.67, 2)

    def test_nper(self):
        assert_almost_equal(np.nper(0.075, -2000, 0, 100000.),
                            21.54, 2)

    def test_nper2(self):
        # Zero-rate edge case: number of periods reduces to -fv / pmt.
        assert_almost_equal(np.nper(0.0, -2000, 0, 100000.),
                            50.0, 1)

    def test_npv(self):
        assert_almost_equal(
            np.npv(0.05, [-15000, 1500, 2500, 3500, 4500, 6000]),
            122.89, 2)

    def test_mirr(self):
        val = [-4500, -800, 800, 800, 600, 600, 800, 800, 700, 3000]
        assert_almost_equal(np.mirr(val, 0.08, 0.055), 0.0666, 4)
        val = [-120000, 39000, 30000, 21000, 37000, 46000]
        assert_almost_equal(np.mirr(val, 0.10, 0.12), 0.126094, 6)
        val = [100, 200, -50, 300, -200]
        assert_almost_equal(np.mirr(val, 0.05, 0.06), 0.3428, 4)
        # With no sign change in the cash flows, mirr is undefined (nan).
        val = [39000, 30000, 21000, 37000, 46000]
        assert_(np.isnan(np.mirr(val, 0.10, 0.12)))

    def test_when(self):
        # For each function, the string values 'begin'/'end' of the ``when``
        # argument must be equivalent to the numeric values 1/0, and ``end``
        # must be the default.
        # begin
        assert_almost_equal(np.rate(10, 20, -3500, 10000, 1),
                            np.rate(10, 20, -3500, 10000, 'begin'), 4)
        # end
        assert_almost_equal(np.rate(10, 20, -3500, 10000),
                            np.rate(10, 20, -3500, 10000, 'end'), 4)
        assert_almost_equal(np.rate(10, 20, -3500, 10000, 0),
                            np.rate(10, 20, -3500, 10000, 'end'), 4)
        # begin
        assert_almost_equal(np.pv(0.07, 20, 12000, 0, 1),
                            np.pv(0.07, 20, 12000, 0, 'begin'), 2)
        # end
        assert_almost_equal(np.pv(0.07, 20, 12000, 0),
                            np.pv(0.07, 20, 12000, 0, 'end'), 2)
        assert_almost_equal(np.pv(0.07, 20, 12000, 0, 0),
                            np.pv(0.07, 20, 12000, 0, 'end'), 2)
        # begin
        assert_almost_equal(np.fv(0.075, 20, -2000, 0, 1),
                            np.fv(0.075, 20, -2000, 0, 'begin'), 4)
        # end
        assert_almost_equal(np.fv(0.075, 20, -2000, 0),
                            np.fv(0.075, 20, -2000, 0, 'end'), 4)
        assert_almost_equal(np.fv(0.075, 20, -2000, 0, 0),
                            np.fv(0.075, 20, -2000, 0, 'end'), 4)
        # begin
        assert_almost_equal(np.pmt(0.08 / 12, 5 * 12, 15000., 0, 1),
                            np.pmt(0.08 / 12, 5 * 12, 15000., 0, 'begin'), 4)
        # end
        assert_almost_equal(np.pmt(0.08 / 12, 5 * 12, 15000., 0),
                            np.pmt(0.08 / 12, 5 * 12, 15000., 0, 'end'), 4)
        assert_almost_equal(np.pmt(0.08 / 12, 5 * 12, 15000., 0, 0),
                            np.pmt(0.08 / 12, 5 * 12, 15000., 0, 'end'), 4)
        # begin
        assert_almost_equal(np.ppmt(0.1 / 12, 1, 60, 55000, 0, 1),
                            np.ppmt(0.1 / 12, 1, 60, 55000, 0, 'begin'), 4)
        # end
        assert_almost_equal(np.ppmt(0.1 / 12, 1, 60, 55000, 0),
                            np.ppmt(0.1 / 12, 1, 60, 55000, 0, 'end'), 4)
        assert_almost_equal(np.ppmt(0.1 / 12, 1, 60, 55000, 0, 0),
                            np.ppmt(0.1 / 12, 1, 60, 55000, 0, 'end'), 4)
        # begin
        assert_almost_equal(np.ipmt(0.1 / 12, 1, 24, 2000, 0, 1),
                            np.ipmt(0.1 / 12, 1, 24, 2000, 0, 'begin'), 4)
        # end
        assert_almost_equal(np.ipmt(0.1 / 12, 1, 24, 2000, 0),
                            np.ipmt(0.1 / 12, 1, 24, 2000, 0, 'end'), 4)
        assert_almost_equal(np.ipmt(0.1 / 12, 1, 24, 2000, 0, 0),
                            np.ipmt(0.1 / 12, 1, 24, 2000, 0, 'end'), 4)
        # begin
        assert_almost_equal(np.nper(0.075, -2000, 0, 100000., 1),
                            np.nper(0.075, -2000, 0, 100000., 'begin'), 4)
        # end
        assert_almost_equal(np.nper(0.075, -2000, 0, 100000.),
                            np.nper(0.075, -2000, 0, 100000., 'end'), 4)
        assert_almost_equal(np.nper(0.075, -2000, 0, 100000., 0),
                            np.nper(0.075, -2000, 0, 100000., 'end'), 4)

    def test_broadcast(self):
        # Scalar arguments broadcast against list/array arguments.
        assert_almost_equal(np.nper(0.075, -2000, 0, 100000., [0, 1]),
                            [21.5449442, 20.76156441], 4)
        assert_almost_equal(np.ipmt(0.1 / 12, list(range(5)), 24, 2000),
                            [-17.29165168, -16.66666667, -16.03647345,
                             -15.40102862, -14.76028842], 4)
        assert_almost_equal(np.ppmt(0.1 / 12, list(range(5)), 24, 2000),
                            [-74.998201, -75.62318601, -76.25337923,
                             -76.88882405, -77.52956425], 4)
        # ``when`` itself broadcasts, mixing numeric and string spellings.
        assert_almost_equal(np.ppmt(0.1 / 12, list(range(5)), 24, 2000, 0,
                                    [0, 0, 1, 'end', 'begin']),
                            [-74.998201, -75.62318601, -75.62318601,
                             -76.88882405, -76.88882405], 4)
# Allow running this test file directly via the legacy numpy test runner.
if __name__ == "__main__":
    run_module_suite()
| gpl-2.0 |
valkyriesavage/invenio | modules/websubmit/lib/websubmitadmin_regression_tests.py | 13 | 2876 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""WebSubmit Admin Regression Test Suite."""
__revision__ = "$Id$"
import unittest
from invenio.config import CFG_SITE_URL
from invenio.testutils import make_test_suite, run_test_suite, \
test_web_page_content, merge_error_messages
class WebSubmitAdminWebPagesAvailabilityTest(unittest.TestCase):
    """Check WebSubmit Admin web pages whether they are up or not."""

    def test_websubmit_admin_interface_pages_availability(self):
        """websubmitadmin - availability of WebSubmit Admin interface pages"""
        baseurl = CFG_SITE_URL + '/admin/websubmit/websubmitadmin.py/'
        _exports = ['', 'showall', 'doctypelist', 'doctypeadd',
                    'doctyperemove', 'actionlist', 'jschecklist',
                    'elementlist', 'functionlist']
        error_messages = []
        for page in _exports:
            url = baseurl + page
            # A guest must be refused access...
            error_messages.extend(test_web_page_content(url,
                                                        username='guest',
                                                        expected_text=
                                                        'Authorization failure'))
            # ...while the admin account must get the page.
            error_messages.extend(test_web_page_content(url,
                                                        username='admin'))
        if error_messages:
            self.fail(merge_error_messages(error_messages))
        return

    def test_websubmit_admin_guide_availability(self):
        """websubmitadmin - availability of WebSubmit Admin guide pages"""
        url = CFG_SITE_URL + '/help/admin/websubmit-admin-guide'
        error_messages = test_web_page_content(url,
                                               expected_text="WebSubmit Admin Guide")
        if error_messages:
            self.fail(merge_error_messages(error_messages))
        return
# Export the suite under the name expected by Invenio's test harness.
TEST_SUITE = make_test_suite(WebSubmitAdminWebPagesAvailabilityTest)

if __name__ == "__main__":
    # warn_user=True: these are regression tests that hit a live instance.
    run_test_suite(TEST_SUITE, warn_user=True)
| gpl-2.0 |
brianlsharp/MissionPlanner | Lib/site-packages/scipy/linalg/decomp_svd.py | 53 | 5103 | """SVD decomposition functions."""
import numpy
from numpy import asarray_chkfinite, zeros, r_, diag
from scipy.linalg import calc_lwork
# Local imports.
from misc import LinAlgError, _datacopied
from lapack import get_lapack_funcs
from funcinfo import get_func_info
def svd(a, full_matrices=True, compute_uv=True, overwrite_a=False):
    """Singular Value Decomposition.

    Factorizes the matrix `a` into two unitary matrices ``U`` and ``Vh``
    and a 1d-array `s` of singular values (real, non-negative) such that
    ``a == U S Vh`` if ``S`` is a suitably shaped matrix of zeros whose
    main diagonal is `s`.

    Parameters
    ----------
    a : array, shape (M, N)
        Matrix to decompose
    full_matrices : boolean
        If true, U, Vh are shaped (M,M), (N,N)
        If false, the shapes are (M,K), (K,N) where K = min(M,N)
    compute_uv : boolean
        Whether to compute also U, Vh in addition to s (Default: true)
    overwrite_a : boolean
        Whether data in a is overwritten (may improve performance)

    Returns
    -------
    U : array, shape (M,M) or (M,K) depending on full_matrices
    s : array, shape (K,)
        The singular values, sorted so that s[i] >= s[i+1]. K = min(M, N)
    Vh : array, shape (N,N) or (K,N) depending on full_matrices

    For compute_uv = False, only s is returned.

    Raises LinAlgError if SVD computation does not converge

    Examples
    --------
    >>> from scipy import random, linalg, allclose, dot
    >>> a = random.randn(9, 6) + 1j*random.randn(9, 6)
    >>> U, s, Vh = linalg.svd(a)
    >>> U.shape, Vh.shape, s.shape
    ((9, 9), (6, 6), (6,))

    >>> U, s, Vh = linalg.svd(a, full_matrices=False)
    >>> U.shape, Vh.shape, s.shape
    ((9, 6), (6, 6), (6,))
    >>> S = linalg.diagsvd(s, 6, 6)
    >>> allclose(a, dot(U, dot(S, Vh)))
    True

    >>> s2 = linalg.svd(a, compute_uv=False)
    >>> allclose(s, s2)
    True

    See also
    --------
    svdvals : return singular values of a matrix
    diagsvd : return the Sigma matrix, given the vector s
    """
    # A hack until full_matrices == 0 support is fixed here.
    if full_matrices == 0:
        import numpy.linalg
        return numpy.linalg.svd(a, full_matrices=0, compute_uv=compute_uv)
    # Validate input: must be a finite 2-d array.
    a1 = asarray_chkfinite(a)
    if len(a1.shape) != 2:
        raise ValueError('expected matrix')
    m,n = a1.shape
    # If asarray_chkfinite made a copy, we may overwrite it freely.
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    gesdd, = get_lapack_funcs(('gesdd',), (a1,))
    gesdd_info = get_func_info(gesdd)
    if gesdd_info.module_name[:7] == 'flapack':
        # Query the optimal LAPACK workspace size before the real call.
        lwork = calc_lwork.gesdd(gesdd_info.prefix, m, n, compute_uv)[1]
        u,s,v,info = gesdd(a1,compute_uv = compute_uv, lwork = lwork,
                           overwrite_a = overwrite_a)
    else: # 'clapack'
        raise NotImplementedError('calling gesdd from %s' % gesdd_info.module_name)
    # LAPACK info convention: >0 means no convergence, <0 means a bad
    # argument at position -info.
    if info > 0:
        raise LinAlgError("SVD did not converge")
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal gesdd'
                         % -info)
    if compute_uv:
        return u, s, v
    else:
        return s
def svdvals(a, overwrite_a=False):
    """Compute the singular values of a matrix.

    Parameters
    ----------
    a : array, shape (M, N)
        Matrix to decompose.
    overwrite_a : boolean
        Whether data in `a` may be overwritten (can improve performance).

    Returns
    -------
    s : array, shape (K,)
        The singular values, sorted so that ``s[i] >= s[i+1]``, where
        ``K = min(M, N)``.

    Raises LinAlgError if the SVD computation does not converge.

    See also
    --------
    svd : return the full singular value decomposition of a matrix
    diagsvd : return the Sigma matrix, given the vector s
    """
    # Delegate to svd, requesting only the singular values.
    return svd(a, compute_uv=0, overwrite_a=overwrite_a)
def diagsvd(s, M, N):
    """Construct the (M, N) Sigma matrix of an SVD from its singular values.

    Parameters
    ----------
    s : array, shape (M,) or (N,)
        Singular values
    M : integer
    N : integer
        Size of the matrix whose singular values are s

    Returns
    -------
    S : array, shape (M, N)
        The S-matrix in the singular value decomposition
    """
    D = diag(s)
    typ = D.dtype.char
    k = len(s)
    # Pad the square diagonal block with zeros on the right (k == M) or
    # below (k == N) to reach the requested (M, N) shape.
    if k == M:
        return r_['-1', D, zeros((M, N-M), typ)]
    if k == N:
        return r_[D, zeros((M-N, N), typ)]
    raise ValueError("Length of s must be M or N.")
# Orthonormal decomposition
def orth(A):
    """Construct an orthonormal basis for the range of A using SVD

    Parameters
    ----------
    A : array, shape (M, N)

    Returns
    -------
    Q : array, shape (M, K)
        Orthonormal basis for the range of A.
        K = effective rank of A, as determined by automatic cutoff

    See also
    --------
    svd : Singular value decomposition of a matrix
    """
    u, s, vh = svd(A)
    m, n = A.shape
    # Singular values below tol are treated as zero; the cutoff scales
    # with the matrix size and its largest singular value.
    eps = numpy.finfo(float).eps
    tol = max(m, n) * numpy.amax(s) * eps
    rank = int(numpy.sum(s > tol))
    return u[:, :rank]
| gpl-3.0 |
nkgilley/home-assistant | tests/components/openalpr_local/test_image_processing.py | 13 | 5024 | """The tests for the openalpr local platform."""
import homeassistant.components.image_processing as ip
from homeassistant.const import ATTR_ENTITY_PICTURE
from homeassistant.core import callback
from homeassistant.setup import setup_component
from tests.async_mock import MagicMock, PropertyMock, patch
from tests.common import assert_setup_component, get_test_home_assistant, load_fixture
from tests.components.image_processing import common
def mock_async_subprocess():
    """Return a MagicMock standing in for an asyncio subprocess Popen."""
    process = MagicMock()

    async def communicate(input=None):
        """Yield canned openalpr stdout as the (stdout, stderr) pair."""
        stdout = bytes(load_fixture("alpr_stdout.txt"), "utf-8")
        return (stdout, None)

    process.communicate = communicate
    return process
class TestOpenAlprLocalSetup:
    """Test class for image processing."""

    def setup_method(self):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()

    def teardown_method(self):
        """Stop everything that was started."""
        self.hass.stop()

    def test_setup_platform(self):
        """Set up platform with one entity."""
        config = {
            ip.DOMAIN: {
                "platform": "openalpr_local",
                "source": {"entity_id": "camera.demo_camera"},
                "region": "eu",
            },
            "camera": {"platform": "demo"},
        }
        # Exactly one image_processing platform should load successfully.
        with assert_setup_component(1, ip.DOMAIN):
            setup_component(self.hass, ip.DOMAIN, config)
            self.hass.block_till_done()
        # Without an explicit name the entity id is derived from the camera.
        assert self.hass.states.get("image_processing.openalpr_demo_camera")

    def test_setup_platform_name(self):
        """Set up platform with one entity and set name."""
        config = {
            ip.DOMAIN: {
                "platform": "openalpr_local",
                "source": {"entity_id": "camera.demo_camera", "name": "test local"},
                "region": "eu",
            },
            "camera": {"platform": "demo"},
        }
        with assert_setup_component(1, ip.DOMAIN):
            setup_component(self.hass, ip.DOMAIN, config)
            self.hass.block_till_done()
        # The configured source name overrides the derived entity id.
        assert self.hass.states.get("image_processing.test_local")

    def test_setup_platform_without_region(self):
        """Set up platform with one entity without region."""
        config = {
            ip.DOMAIN: {
                "platform": "openalpr_local",
                "source": {"entity_id": "camera.demo_camera"},
            },
            "camera": {"platform": "demo"},
        }
        # region is required, so zero platforms should be set up.
        with assert_setup_component(0, ip.DOMAIN):
            setup_component(self.hass, ip.DOMAIN, config)
            self.hass.block_till_done()
class TestOpenAlprLocal:
    """Test class for image processing."""

    def setup_method(self):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        config = {
            ip.DOMAIN: {
                "platform": "openalpr_local",
                "source": {"entity_id": "camera.demo_camera", "name": "test local"},
                "region": "eu",
            },
            "camera": {"platform": "demo"},
        }
        # Disable polling so image scans happen only when explicitly
        # requested by the test.
        with patch(
            "homeassistant.components.openalpr_local.image_processing."
            "OpenAlprLocalEntity.should_poll",
            new_callable=PropertyMock(return_value=False),
        ):
            setup_component(self.hass, ip.DOMAIN, config)
            self.hass.block_till_done()
        # Full URL of the demo camera's snapshot, mocked in the test below.
        state = self.hass.states.get("camera.demo_camera")
        self.url = f"{self.hass.config.internal_url}{state.attributes.get(ATTR_ENTITY_PICTURE)}"
        self.alpr_events = []

        @callback
        def mock_alpr_event(event):
            """Mock event."""
            self.alpr_events.append(event)

        # Collect every found_plate event fired during a scan.
        self.hass.bus.listen("image_processing.found_plate", mock_alpr_event)

    def teardown_method(self):
        """Stop everything that was started."""
        self.hass.stop()

    @patch("asyncio.create_subprocess_exec", return_value=mock_async_subprocess())
    def test_openalpr_process_image(self, popen_mock, aioclient_mock):
        """Set up and scan a picture and test plates from event."""
        aioclient_mock.get(self.url, content=b"image")
        common.scan(self.hass, entity_id="image_processing.test_local")
        self.hass.block_till_done()
        state = self.hass.states.get("image_processing.test_local")
        # The alpr binary must have been invoked via the patched subprocess.
        assert popen_mock.called
        # Values below come from the canned alpr_stdout.txt fixture.
        assert len(self.alpr_events) == 5
        assert state.attributes.get("vehicles") == 1
        assert state.state == "PE3R2X"
        event_data = [
            event.data
            for event in self.alpr_events
            if event.data.get("plate") == "PE3R2X"
        ]
        assert len(event_data) == 1
        assert event_data[0]["plate"] == "PE3R2X"
        assert event_data[0]["confidence"] == float(98.9371)
        assert event_data[0]["entity_id"] == "image_processing.test_local"
| apache-2.0 |
eXistenZNL/SickRage | tornado/util.py | 102 | 12256 | """Miscellaneous utility functions and classes.
This module is used internally by Tornado. It is not necessarily expected
that the functions and classes defined here will be useful to other
applications, but they are documented here in case they are.
The one public-facing part of this module is the `Configurable` class
and its `~Configurable.configure` method, which becomes a part of the
interface of its subclasses, including `.AsyncHTTPClient`, `.IOLoop`,
and `.Resolver`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import array
import inspect
import os
import sys
import zlib
# Python 2/3 compatibility: Python 3 removed xrange; alias it to range
# so the rest of this module can use xrange unconditionally.
try:
    xrange  # py2
except NameError:
    xrange = range  # py3
class ObjectDict(dict):
    """A dict whose entries can also be read and written as attributes."""

    def __getattr__(self, name):
        # Only reached when normal attribute lookup fails; fall back to
        # the dictionary contents.
        if name in self:
            return self[name]
        raise AttributeError(name)

    def __setattr__(self, name, value):
        self[name] = value
class GzipDecompressor(object):
    """Streaming gzip decompressor.

    The interface is like that of `zlib.decompressobj` (without some of the
    optional arguments), but it understands gzip headers and checksums.
    """
    def __init__(self):
        # Magic parameter makes zlib module understand gzip header
        # http://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib
        # This works on cpython and pypy, but not jython.
        self.decompressobj = zlib.decompressobj(16 + zlib.MAX_WBITS)

    def decompress(self, value, max_length=None):
        """Decompress a chunk, returning newly-available data.

        Some data may be buffered for later processing; `flush` must
        be called when there is no more input data to ensure that
        all data was processed.

        If ``max_length`` is given, some input data may be left over
        in ``unconsumed_tail``; you must retrieve this value and pass
        it back to a future call to `decompress` if it is not empty.
        """
        # BUG FIX: zlib's decompress() requires an integer max_length;
        # forwarding the default None raised TypeError.  Omit the
        # argument when no limit was requested.
        if max_length is None:
            return self.decompressobj.decompress(value)
        return self.decompressobj.decompress(value, max_length)

    @property
    def unconsumed_tail(self):
        """Returns the unconsumed portion left over from a limited
        `decompress` call."""
        return self.decompressobj.unconsumed_tail

    def flush(self):
        """Return any remaining buffered data not yet returned by decompress.

        Also checks for errors such as truncated input.
        No other methods may be called on this object after `flush`.
        """
        return self.decompressobj.flush()
def import_object(name):
    """Imports an object by dotted name.

    ``import_object('x')`` is equivalent to ``import x``.
    ``import_object('x.y.z')`` is equivalent to ``from x.y import z``.

    >>> import tornado.escape
    >>> import_object('tornado.escape') is tornado.escape
    True
    >>> import_object('tornado.escape.utf8') is tornado.escape.utf8
    True
    >>> import_object('tornado') is tornado
    True
    >>> import_object('tornado.missing_module')
    Traceback (most recent call last):
        ...
    ImportError: No module named missing_module
    """
    if name.count('.') == 0:
        # Bare module name: a plain import.
        return __import__(name, None, None)
    # Split into "package path" and final attribute, then import the
    # package with a fromlist so __import__ returns the leaf module.
    module_name, _, attr = name.rpartition('.')
    module = __import__(module_name, None, None, [attr], 0)
    try:
        return getattr(module, attr)
    except AttributeError:
        raise ImportError("No module named %s" % attr)
# Fake unicode literal support: Python 3.2 doesn't have the u'' marker for
# literal strings, and alternative solutions like "from __future__ import
# unicode_literals" have other problems (see PEP 414). u() can be applied
# to ascii strings that include \u escapes (but they must not contain
# literal non-ascii characters).
if type('') is not type(b''):
    # Python 3: str literals are already unicode, so u() is the identity.
    def u(s):
        return s
    unicode_type = str
    basestring_type = str
else:
    # Python 2: decode the escape sequences so u("...") behaves like a
    # u"..." literal would.
    def u(s):
        return s.decode('unicode_escape')
    unicode_type = unicode
    basestring_type = basestring

# Deprecated alias that was used before we dropped py25 support.
# Left here in case anyone outside Tornado is using it.
bytes_type = bytes
# raise_exc_info and exec_in use syntax that is only valid on one major
# Python version, so each variant must be defined inside an exec() string
# to keep this module importable on both versions.
if sys.version_info > (3,):
    exec("""
def raise_exc_info(exc_info):
    raise exc_info[1].with_traceback(exc_info[2])

def exec_in(code, glob, loc=None):
    if isinstance(code, str):
        code = compile(code, '<string>', 'exec', dont_inherit=True)
    exec(code, glob, loc)
""")
else:
    exec("""
def raise_exc_info(exc_info):
    raise exc_info[0], exc_info[1], exc_info[2]

def exec_in(code, glob, loc=None):
    if isinstance(code, basestring):
        # exec(string) inherits the caller's future imports; compile
        # the string first to prevent that.
        code = compile(code, '<string>', 'exec', dont_inherit=True)
    exec code in glob, loc
""")
def errno_from_exception(e):
    """Best-effort extraction of an errno from an exception object.

    Some exceptions carry an ``errno`` attribute; others only stash the
    errno as the first positional argument.  Exceptions constructed with
    no arguments yield None.
    """
    if hasattr(e, 'errno'):
        return e.errno
    return e.args[0] if e.args else None
class Configurable(object):
    """Base class for configurable interfaces.

    A configurable interface is an (abstract) class whose constructor
    acts as a factory function for one of its implementation subclasses.
    The implementation subclass as well as optional keyword arguments to
    its initializer can be set globally at runtime with `configure`.

    By using the constructor as the factory method, the interface
    looks like a normal class, `isinstance` works as usual, etc. This
    pattern is most useful when the choice of implementation is likely
    to be a global decision (e.g. when `~select.epoll` is available,
    always use it instead of `~select.select`), or when a
    previously-monolithic class has been split into specialized
    subclasses.

    Configurable subclasses must define the class methods
    `configurable_base` and `configurable_default`, and use the instance
    method `initialize` instead of ``__init__``.
    """
    # The double-underscore names are deliberately class-private: Python
    # mangles them to _Configurable__impl_class/_Configurable__impl_kwargs,
    # so the configured implementation is always stored on the hierarchy's
    # base class no matter which subclass the accessor runs on.
    __impl_class = None
    __impl_kwargs = None

    def __new__(cls, **kwargs):
        base = cls.configurable_base()
        args = {}
        if cls is base:
            # Instantiating the abstract base: pick the configured (or
            # default) implementation and start from its saved kwargs.
            impl = cls.configured_class()
            if base.__impl_kwargs:
                args.update(base.__impl_kwargs)
        else:
            # Instantiating a concrete subclass directly bypasses the
            # configured defaults.
            impl = cls
        args.update(kwargs)
        instance = super(Configurable, cls).__new__(impl)
        # initialize vs __init__ chosen for compatibility with AsyncHTTPClient
        # singleton magic. If we get rid of that we can switch to __init__
        # here too.
        instance.initialize(**args)
        return instance

    @classmethod
    def configurable_base(cls):
        """Returns the base class of a configurable hierarchy.

        This will normally return the class in which it is defined.
        (which is *not* necessarily the same as the cls classmethod parameter).
        """
        raise NotImplementedError()

    @classmethod
    def configurable_default(cls):
        """Returns the implementation class to be used if none is configured."""
        raise NotImplementedError()

    def initialize(self):
        """Initialize a `Configurable` subclass instance.

        Configurable classes should use `initialize` instead of ``__init__``.
        """

    @classmethod
    def configure(cls, impl, **kwargs):
        """Sets the class to use when the base class is instantiated.

        Keyword arguments will be saved and added to the arguments passed
        to the constructor. This can be used to set global defaults for
        some parameters.
        """
        base = cls.configurable_base()
        if isinstance(impl, (unicode_type, bytes)):
            # Accept a dotted name as well as a class object.
            impl = import_object(impl)
        if impl is not None and not issubclass(impl, cls):
            raise ValueError("Invalid subclass of %s" % cls)
        base.__impl_class = impl
        base.__impl_kwargs = kwargs

    @classmethod
    def configured_class(cls):
        """Returns the currently configured class."""
        base = cls.configurable_base()
        if cls.__impl_class is None:
            # Lazily fall back to the default implementation.
            base.__impl_class = cls.configurable_default()
        return base.__impl_class

    @classmethod
    def _save_configuration(cls):
        # Snapshot of (impl, kwargs) for tests to save and restore.
        base = cls.configurable_base()
        return (base.__impl_class, base.__impl_kwargs)

    @classmethod
    def _restore_configuration(cls, saved):
        # Counterpart of _save_configuration.
        base = cls.configurable_base()
        base.__impl_class = saved[0]
        base.__impl_kwargs = saved[1]
class ArgReplacer(object):
    """Replaces one value in an ``args, kwargs`` pair.

    Inspects the function signature to find an argument by name
    whether it is passed by position or keyword.  For use in decorators
    and similar wrappers.
    """
    def __init__(self, func, name):
        self.name = name
        # BUG FIX: inspect.getargspec was deprecated in Python 3.0 and
        # removed in 3.11; use getfullargspec when it exists and fall back
        # to getargspec on Python 2.
        try:
            getargspec = inspect.getfullargspec
        except AttributeError:
            getargspec = inspect.getargspec
        try:
            self.arg_pos = getargspec(func).args.index(self.name)
        except ValueError:
            # Not a positional parameter
            self.arg_pos = None

    def get_old_value(self, args, kwargs, default=None):
        """Returns the old value of the named argument without replacing it.

        Returns ``default`` if the argument is not present.
        """
        if self.arg_pos is not None and len(args) > self.arg_pos:
            return args[self.arg_pos]
        else:
            return kwargs.get(self.name, default)

    def replace(self, new_value, args, kwargs):
        """Replace the named argument in ``args, kwargs`` with ``new_value``.

        Returns ``(old_value, args, kwargs)``. The returned ``args`` and
        ``kwargs`` objects may not be the same as the input objects, or
        the input objects may be mutated.

        If the named argument was not found, ``new_value`` will be added
        to ``kwargs`` and None will be returned as ``old_value``.
        """
        if self.arg_pos is not None and len(args) > self.arg_pos:
            # The arg to replace is passed positionally
            old_value = args[self.arg_pos]
            args = list(args)  # *args is normally a tuple
            args[self.arg_pos] = new_value
        else:
            # The arg to replace is either omitted or passed by keyword.
            old_value = kwargs.get(self.name)
            kwargs[self.name] = new_value
        return old_value, args, kwargs
def timedelta_to_seconds(td):
    """Equivalent to td.total_seconds() (introduced in python 2.7)."""
    # Sum whole seconds first, then add the fractional microsecond part.
    whole_seconds = td.days * 24 * 3600 + td.seconds
    return whole_seconds + td.microseconds / float(10 ** 6)
def _websocket_mask_python(mask, data):
"""Websocket masking function.
`mask` is a `bytes` object of length 4; `data` is a `bytes` object of any length.
Returns a `bytes` object of the same length as `data` with the mask applied
as specified in section 5.3 of RFC 6455.
This pure-python implementation may be replaced by an optimized version when available.
"""
mask = array.array("B", mask)
unmasked = array.array("B", data)
for i in xrange(len(data)):
unmasked[i] = unmasked[i] ^ mask[i % 4]
if hasattr(unmasked, 'tobytes'):
# tostring was deprecated in py32. It hasn't been removed,
# but since we turn on deprecation warnings in our tests
# we need to use the right one.
return unmasked.tobytes()
else:
return unmasked.tostring()
# Select the websocket masking implementation: pure Python or the
# optional compiled extension from tornado.speedups.
if (os.environ.get('TORNADO_NO_EXTENSION') or
        os.environ.get('TORNADO_EXTENSION') == '0'):
    # These environment variables exist to make it easier to do performance
    # comparisons; they are not guaranteed to remain supported in the future.
    _websocket_mask = _websocket_mask_python
else:
    try:
        # Prefer the optimized C implementation when the extension is built.
        from tornado.speedups import websocket_mask as _websocket_mask
    except ImportError:
        # TORNADO_EXTENSION=1 makes the extension mandatory: re-raise the
        # import failure instead of silently falling back to pure Python.
        if os.environ.get('TORNADO_EXTENSION') == '1':
            raise
        _websocket_mask = _websocket_mask_python
def doctests():
    """Collect this module's doctests into a unittest-compatible suite."""
    import doctest

    suite = doctest.DocTestSuite()
    return suite
| gpl-3.0 |
sprymix/python-dateutil | dateutil/easter.py | 202 | 2611 | # -*- coding: utf-8 -*-
"""
This module offers a generic easter computing method for any given year, using
Western, Orthodox or Julian algorithms.
"""
import datetime
__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"]
EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3
def easter(year, method=EASTER_WESTERN):
"""
This method was ported from the work done by GM Arts,
on top of the algorithm by Claus Tondering, which was
based in part on the algorithm of Ouding (1940), as
quoted in "Explanatory Supplement to the Astronomical
Almanac", P. Kenneth Seidelmann, editor.
This algorithm implements three different easter
calculation methods:
1 - Original calculation in Julian calendar, valid in
dates after 326 AD
2 - Original method, with date converted to Gregorian
calendar, valid in years 1583 to 4099
3 - Revised method, in Gregorian calendar, valid in
years 1583 to 4099 as well
These methods are represented by the constants:
EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3
The default method is method 3.
More about the algorithm may be found at:
http://users.chariot.net.au/~gmarts/eastalg.htm
and
http://www.tondering.dk/claus/calendar.html
"""
if not (1 <= method <= 3):
raise ValueError("invalid method")
# g - Golden year - 1
# c - Century
# h - (23 - Epact) mod 30
# i - Number of days from March 21 to Paschal Full Moon
# j - Weekday for PFM (0=Sunday, etc)
# p - Number of days from March 21 to Sunday on or before PFM
# (-6 to 28 methods 1 & 3, to 56 for method 2)
# e - Extra days to add for method 2 (converting Julian
# date to Gregorian date)
y = year
g = y % 19
e = 0
if method < 3:
# Old method
i = (19*g + 15) % 30
j = (y + y//4 + i) % 7
if method == 2:
# Extra dates to convert Julian to Gregorian date
e = 10
if y > 1600:
e = e + y//100 - 16 - (y//100 - 16)//4
else:
# New method
c = y//100
h = (c - c//4 - (8*c + 13)//25 + 19*g + 15) % 30
i = h - (h//28)*(1 - (h//28)*(29//(h + 1))*((21 - g)//11))
j = (y + y//4 + i + 2 - c + c//4) % 7
# p can be from -6 to 56 corresponding to dates 22 March to 23 May
# (later dates apply to method 2, although 23 May never actually occurs)
p = i - j + e
d = 1 + (p + 27 + (p + 6)//40) % 31
m = 3 + (p + 26)//30
return datetime.date(int(y), int(m), int(d))
| bsd-3-clause |
flashycud/timestack | django/views/decorators/vary.py | 307 | 1285 | try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
from django.utils.cache import patch_vary_headers
from django.utils.decorators import available_attrs
def vary_on_headers(*headers):
    """
    A view decorator that appends the given header names to the Vary
    header of the response. Usage:

       @vary_on_headers('Cookie', 'Accept-language')
       def index(request):
           ...

    Header names are treated case-insensitively.
    """
    def decorator(view):
        def wrapped_view(*args, **kwargs):
            response = view(*args, **kwargs)
            patch_vary_headers(response, headers)
            return response
        return wraps(view, assigned=available_attrs(view))(wrapped_view)
    return decorator
def vary_on_cookie(func):
    """
    A view decorator that adds "Cookie" to the Vary header of the
    response, indicating that the page's contents depend on cookies.
    Usage:

        @vary_on_cookie
        def index(request):
            ...
    """
    def wrapped_view(*args, **kwargs):
        response = func(*args, **kwargs)
        patch_vary_headers(response, ('Cookie',))
        return response
    return wraps(func, assigned=available_attrs(func))(wrapped_view)
| mit |
gitsimon/spadup-lyra | semantics/semantics.py | 1 | 18033 | from abstract_domains.state import State
from core.expressions import BinaryArithmeticOperation, BinaryOperation, BinaryComparisonOperation, UnaryOperation, \
UnaryArithmeticOperation, UnaryBooleanOperation, BinaryBooleanOperation, Input, ListDisplay, Slice, Index, Literal
from core.statements import Statement, VariableAccess, LiteralEvaluation, Call, ListDisplayStmt, SliceStmt, IndexStmt
from functools import reduce
import re
import itertools
# First pass: split an uppercase letter followed by lowercase from what
# precedes it; second pass: split a lowercase letter/digit from a
# following uppercase letter.
_first1 = re.compile(r'(.)([A-Z][a-z]+)')
_all2 = re.compile('([a-z0-9])([A-Z])')


def camel_to_snake(name: str) -> str:
    """Convert a CamelCase identifier to snake_case.

    :param name: name in CamelCase
    :return: name in snake_case
    """
    first_pass = _first1.sub(r'\1_\2', name)
    second_pass = _all2.sub(r'\1_\2', first_pass)
    return second_pass.lower()
class Semantics:
    """Semantics of statements, independent of the analysis direction
    (forward/backward)."""

    def semantics(self, stmt: Statement, state: State) -> State:
        """Dispatch a statement to its handler method.

        The handler name is derived from the statement's class name,
        e.g. ``Call`` -> ``call_semantics``.

        :param stmt: statement to be executed
        :param state: state before executing the statement
        :return: state modified by the statement execution
        """
        handler_name = '{}_semantics'.format(camel_to_snake(type(stmt).__name__))
        handler = getattr(self, handler_name, None)
        if handler is None:
            raise NotImplementedError(f"Semantics for statement {stmt} of type {type(stmt)} not yet implemented! "
                                      f"You must provide method {handler_name}(...)")
        return handler(stmt, state)
class LiteralEvaluationSemantics(Semantics):
    """Semantics of literal evaluations."""

    # noinspection PyMethodMayBeStatic
    def literal_evaluation_semantics(self, stmt: LiteralEvaluation, state: State) -> State:
        """Evaluate a literal in the given state.

        :param stmt: literal evaluation statement to be executed
        :param state: state before executing the literal evaluation
        :return: state modified by the literal evaluation
        """
        return state.evaluate_literal(stmt.literal)
class VariableAccessSemantics(Semantics):
    """Semantics of variable accesses."""

    # noinspection PyMethodMayBeStatic
    def variable_access_semantics(self, stmt: VariableAccess, state: State) -> State:
        """Record an access to a variable in the given state.

        :param stmt: variable access statement to be executed
        :param state: state before executing the variable access
        :return: state modified by the variable access
        """
        return state.access_variable(stmt.var)
class ListSemantics(Semantics):
    """Semantics of list accesses."""

    def list_display_stmt_semantics(self, stmt: ListDisplayStmt, state: State) -> State:
        """Evaluate a list display by combining the possible values of its items.

        :param stmt: list display statement to be executed
        :param state: state before executing the variable access
        :return: state modified by the variable access
        """
        evaluated_items = []
        for item in stmt.items:
            evaluated_items.append(list(self.semantics(item, state).result))
        combos = itertools.product(*evaluated_items)
        state.result = {ListDisplay(list, list(combo)) for combo in combos}
        return state

    def slice_stmt_semantics(self, stmt: SliceStmt, state: State) -> State:
        """Evaluate a slice expression ``target[lower:upper:step]``.

        :param stmt: slice statement to be executed
        :param state: state before executing the variable access
        :return: state modified by the variable access
        """
        def evaluate_part(part):
            # A falsy sub-statement means that bound was omitted in the source.
            return self.semantics(part, state).result if part else {None}

        targets = self.semantics(stmt.target, state).result
        lowers = evaluate_part(stmt.lower)
        steps = evaluate_part(stmt.step)
        uppers = evaluate_part(stmt.upper)
        # TODO infer type of Slice??
        state.result = {Slice(None, tgt, lo, st, up)
                        for tgt, lo, st, up in itertools.product(targets, lowers, steps, uppers)}
        return state

    def index_stmt_semantics(self, stmt: IndexStmt, state: State) -> State:
        """Evaluate an index expression ``target[index]``.

        :param stmt: index statement to be executed
        :param state: state before executing the variable access
        :return: state modified by the variable access
        """
        target_values = self.semantics(stmt.target, state).result
        index_values = self.semantics(stmt.index, state).result
        # TODO infer type of Slice??
        state.result = {Index(None, tgt, idx)
                        for tgt, idx in itertools.product(target_values, index_values)}
        return state
class CallSemantics(Semantics):
    """Semantics of function/method calls."""

    def call_semantics(self, stmt: Call, state: State) -> State:
        """Dispatch a call to the handler named after the callee,
        falling back to the user-defined call semantics.

        :param stmt: call statement to be executed
        :param state: state before executing the call statement
        :return: state modified by the call statement
        """
        handler = getattr(self, '{}_call_semantics'.format(stmt.name), None)
        if handler is not None:
            return handler(stmt, state)
        return self.user_defined_call_semantics(stmt, state)
class BuiltInCallSemantics(CallSemantics):
    """Semantics of built-in function/method calls."""

    # noinspection PyMethodMayBeStatic
    def input_call_semantics(self, stmt: Call, state: State) -> State:
        """Semantics of a call to 'input': the result is an Input placeholder.

        :param stmt: call to 'input' to be executed
        :param state: state before executing the call statement
        :return: state modified by the call statement
        """
        state.result = {Input(stmt.typ)}
        return state

    def print_call_semantics(self, stmt: Call, state: State) -> State:
        """Semantics of a call to 'print'.

        :param stmt: call to 'print' to be executed
        :param state: state before executing the call statement
        :return: state modified by the call statement
        """
        argument = self.semantics(stmt.arguments[0], state).result  # argument evaluation
        return state.output(argument)

    def _conversion_call_semantics(self, stmt: Call, state: State, func_name: str) -> State:
        """Shared semantics of single-argument conversion calls (int(), bool()).

        Every Input/Literal expression resulting from the argument is
        re-typed to ``stmt.typ``.

        :param stmt: conversion call to be executed
        :param state: state before executing the call statement
        :param func_name: name of the built-in, used in error messages
        :return: state modified by the call statement
        """
        if len(stmt.arguments) != 1:
            raise NotImplementedError(f"No semantics implemented for the multiple arguments to {func_name}()")
        state = self.semantics(stmt.arguments[0], state)
        result = set()
        for expr in state.result:
            if isinstance(expr, Input):
                result.add(Input(stmt.typ))
            elif isinstance(expr, Literal):
                result.add(Literal(stmt.typ, expr.val))
            else:
                raise NotImplementedError(f"{func_name}(arg) call is not supported for arg of type {type(expr)}")
        # BUGFIX: the converted expressions were previously discarded
        # (``result`` was built but never stored back); publish them as the
        # new result, matching every other *_semantics method in this class.
        state.result = result
        return state

    def int_call_semantics(self, stmt: Call, state: State) -> State:
        """Semantics of a call to 'int'."""
        return self._conversion_call_semantics(stmt, state, 'int')

    def bool_call_semantics(self, stmt: Call, state: State) -> State:
        """Semantics of a call to 'bool'."""
        return self._conversion_call_semantics(stmt, state, 'bool')

    def unary_operation(self, stmt: Call, operator: UnaryOperation.Operator, state: State) -> State:
        """Build the unary-operation expressions for a one-argument call.

        :param stmt: call statement to be executed
        :param operator: unary operator to apply to the argument
        :param state: state before executing the call statement
        :return: state modified by the call statement
        """
        assert len(stmt.arguments) == 1  # unary operations have exactly one argument
        argument = self.semantics(stmt.arguments[0], state).result  # argument evaluation
        result = set()
        if isinstance(operator, UnaryArithmeticOperation.Operator):
            expression = set(UnaryArithmeticOperation(stmt.typ, operator, expr) for expr in argument)
            result = result.union(expression)
        elif isinstance(operator, UnaryBooleanOperation.Operator):
            expression = set(UnaryBooleanOperation(stmt.typ, operator, expr) for expr in argument)
            result = result.union(expression)
        else:
            raise NotImplementedError(
                f"Semantics for statement {operator} of type {type(operator)} not yet implemented!")
        state.result = result
        return state

    def not_call_semantics(self, stmt: Call, state: State) -> State:
        """Semantics of a call to '!' (negation).

        :param stmt: call to '!' to be executed
        :param state: state before executing the call statement
        :return: state modified by the call statement
        """
        return self.unary_operation(stmt, UnaryBooleanOperation.Operator.Neg, state)

    def binary_operation(self, stmt: Call, operator: BinaryOperation.Operator, state: State) -> State:
        """Build the binary-operation expressions for a call, folding the
        arguments left to right.

        :param stmt: call statement to be executed
        :param operator: binary operator to apply between the arguments
        :param state: state before executing the call statement
        :return: state modified by the call statement
        """
        arguments = [self.semantics(argument, state).result for argument in stmt.arguments]  # argument evaluation
        result = set()
        if isinstance(operator, BinaryArithmeticOperation.Operator):
            expression = reduce(lambda lhs, rhs: set(
                BinaryArithmeticOperation(stmt.typ, left, operator, right) for left in lhs for right in rhs
            ), arguments)
            result = result.union(expression)
        elif isinstance(operator, BinaryComparisonOperation.Operator):
            expression = reduce(lambda lhs, rhs: set(
                BinaryComparisonOperation(stmt.typ, left, operator, right) for left in lhs for right in rhs
            ), arguments)
            result = result.union(expression)
        elif isinstance(operator, BinaryBooleanOperation.Operator):
            expression = reduce(lambda lhs, rhs: set(
                BinaryBooleanOperation(stmt.typ, left, operator, right) for left in lhs for right in rhs
            ), arguments)
            result = result.union(expression)
        else:
            raise NotImplementedError(
                f"Semantics for statement {operator} of type {type(operator)} not yet implemented!")
        state.result = result
        return state

    def add_call_semantics(self, stmt: Call, state: State) -> State:
        """Semantics of a call to '+' (addition, not concatenation).

        :param stmt: call to '+' to be executed
        :param state: state before executing the call statement
        :return: state modified by the call statement
        """
        return self.binary_operation(stmt, BinaryArithmeticOperation.Operator.Add, state)

    def sub_call_semantics(self, stmt: Call, state: State) -> State:
        """Semantics of a call to '-' (subtraction).

        :param stmt: call to '-' to be executed
        :param state: state before executing the call statement
        :return: state modified by the call statement
        """
        return self.binary_operation(stmt, BinaryArithmeticOperation.Operator.Sub, state)

    def mult_call_semantics(self, stmt: Call, state: State) -> State:
        """Semantics of a call to '*' (multiplication, not repetition).

        :param stmt: call to '*' to be executed
        :param state: state before executing the call statement
        :return: state modified by the call statement
        """
        return self.binary_operation(stmt, BinaryArithmeticOperation.Operator.Mult, state)

    def div_call_semantics(self, stmt: Call, state: State) -> State:
        """Semantics of a call to '/' (division).

        :param stmt: call to '/' to be executed
        :param state: state before executing the call statement
        :return: state modified by the call statement
        """
        return self.binary_operation(stmt, BinaryArithmeticOperation.Operator.Div, state)

    def uadd_call_semantics(self, stmt: Call, state: State) -> State:
        """Semantics of a call to '+X' (unary plus).

        :param stmt: call to '+' to be executed
        :param state: state before executing the call statement
        :return: state modified by the call statement
        """
        return self.unary_operation(stmt, UnaryArithmeticOperation.Operator.Add, state)

    def usub_call_semantics(self, stmt: Call, state: State) -> State:
        """Semantics of a call to '-X' (unary minus).

        :param stmt: call to '-' to be executed
        :param state: state before executing the call statement
        :return: state modified by the call statement
        """
        return self.unary_operation(stmt, UnaryArithmeticOperation.Operator.Sub, state)

    def eq_call_semantics(self, stmt: Call, state: State) -> State:
        """Semantics of a call to '==' (equality).

        :param stmt: call to '==' to be executed
        :param state: state before executing the call statement
        :return: state modified by the call statement
        """
        return self.binary_operation(stmt, BinaryComparisonOperation.Operator.Eq, state)

    def noteq_call_semantics(self, stmt: Call, state: State) -> State:
        """Semantics of a call to '!=' (inequality).

        :param stmt: call to '!=' to be executed
        :param state: state before executing the call statement
        :return: state modified by the call statement
        """
        return self.binary_operation(stmt, BinaryComparisonOperation.Operator.NotEq, state)

    def lt_call_semantics(self, stmt: Call, state: State) -> State:
        """Semantics of a call to '<' (less than).

        :param stmt: call to '<' to be executed
        :param state: state before executing the call statement
        :return: state modified by the call statement
        """
        return self.binary_operation(stmt, BinaryComparisonOperation.Operator.Lt, state)

    def lte_call_semantics(self, stmt: Call, state: State) -> State:
        """Semantics of a call to '<=' (less than or equal to).

        :param stmt: call to '<=' to be executed
        :param state: state before executing the call statement
        :return: state modified by the call statement
        """
        return self.binary_operation(stmt, BinaryComparisonOperation.Operator.LtE, state)

    def gt_call_semantics(self, stmt: Call, state: State) -> State:
        """Semantics of a call to '>' (greater than).

        :param stmt: call to '>' to be executed
        :param state: state before executing the call statement
        :return: state modified by the call statement
        """
        return self.binary_operation(stmt, BinaryComparisonOperation.Operator.Gt, state)

    def gte_call_semantics(self, stmt: Call, state: State) -> State:
        """Semantics of a call to '>=' (greater than or equal to).

        :param stmt: call to '>=' to be executed
        :param state: state before executing the call statement
        :return: state modified by the call statement
        """
        return self.binary_operation(stmt, BinaryComparisonOperation.Operator.GtE, state)

    def is_call_semantics(self, stmt: Call, state: State) -> State:
        """Semantics of a call to 'is' (identity).

        :param stmt: call to 'is' to be executed
        :param state: state before executing the call statement
        :return: state modified by the call statement
        """
        return self.binary_operation(stmt, BinaryComparisonOperation.Operator.Is, state)

    def isnot_call_semantics(self, stmt: Call, state: State) -> State:
        """Semantics of a call to 'is not' (mismatch).

        :param stmt: call to 'is not' to be executed
        :param state: state before executing the call statement
        :return: state modified by the call statement
        """
        return self.binary_operation(stmt, BinaryComparisonOperation.Operator.IsNot, state)

    def in_call_semantics(self, stmt: Call, state: State) -> State:
        """Semantics of a call to 'in' (membership).

        :param stmt: call to 'in' to be executed
        :param state: state before executing the call statement
        :return: state modified by the call statement
        """
        return self.binary_operation(stmt, BinaryComparisonOperation.Operator.In, state)

    def notin_call_semantics(self, stmt: Call, state: State) -> State:
        """Semantics of a call to 'not in' (non-membership).

        :param stmt: call to 'not in' to be executed
        :param state: state before executing the call statement
        :return: state modified by the call statement
        """
        return self.binary_operation(stmt, BinaryComparisonOperation.Operator.NotIn, state)

    def and_call_semantics(self, stmt: Call, state: State) -> State:
        """Semantics of a call to 'and'.

        :param stmt: call to 'and' to be executed
        :param state: state before executing the call statement
        :return: state modified by the call statement
        """
        return self.binary_operation(stmt, BinaryBooleanOperation.Operator.And, state)

    def or_call_semantics(self, stmt: Call, state: State) -> State:
        """Semantics of a call to 'or'.

        :param stmt: call to 'or' to be executed
        :param state: state before executing the call statement
        :return: state modified by the call statement
        """
        return self.binary_operation(stmt, BinaryBooleanOperation.Operator.Or, state)

    def xor_call_semantics(self, stmt: Call, state: State) -> State:
        """Semantics of a call to 'xor'.

        :param stmt: call to 'xor' to be executed
        :param state: state before executing the call statement
        :return: state modified by the call statement
        """
        return self.binary_operation(stmt, BinaryBooleanOperation.Operator.Xor, state)
class DefaultSemantics(LiteralEvaluationSemantics, VariableAccessSemantics, ListSemantics, BuiltInCallSemantics):
    """Default semantics of statements, combining the individual
    semantics mixins; independent of the analysis direction
    (forward/backward)."""
| mpl-2.0 |
saurabh6790/tru_app_back | patches/may_2013/p06_make_notes.py | 29 | 1374 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import webnotes, markdown2
def execute():
    """Migrate Question/Answer documents into Note documents, then drop
    the old Question and Answer doctypes.

    For each Question, all answers of similarly-titled questions are
    rendered to HTML and concatenated into a single public Note.
    """
    webnotes.reload_doc("utilities", "doctype", "note")
    webnotes.reload_doc("utilities", "doctype", "note_user")
    for question in webnotes.conn.sql("""select * from tabQuestion""", as_dict=True):
        if question.question:
            try:
                # Note title is capped at 180 chars of the question text.
                name = question.question[:180]
                if webnotes.conn.exists("Note", name):
                    webnotes.delete_doc("Note", name)
                # Collect all questions sharing this (truncated) prefix.
                similar_questions = webnotes.conn.sql_list("""select name from `tabQuestion`
                    where question like %s""", "%s%%" % name)
                # Render each answer's markdown to HTML.
                answers = [markdown2.markdown(c) for c in webnotes.conn.sql_list("""
                    select answer from tabAnswer where question in (%s)""" % \
                    ", ".join(["%s"]*len(similar_questions)), similar_questions)]
                webnotes.bean({
                    "doctype":"Note",
                    "title": name,
                    "content": "<hr>".join(answers),
                    "owner": question.owner,
                    "creation": question.creation,
                    "public": 1
                }).insert()
            except NameError:
                pass
            except Exception, e:
                # 1062 is the MySQL duplicate-entry error code: skip
                # duplicates, re-raise anything else.
                if e.args[0] != 1062:
                    raise
    webnotes.delete_doc("DocType", "Question")
    webnotes.delete_doc("DocType", "Answer")

    # update comment delete
    webnotes.conn.sql("""update tabDocPerm \
        set cancel=1 where parent='Comment' and role='System Manager'""")
| agpl-3.0 |
t4skforce/pyspider | tests/test_rabbitmq.py | 3 | 2354 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2014-10-07 10:33:38
import os
import time
import unittest2 as unittest
from pyspider.libs import utils
from pyspider.libs import rabbitmq
@unittest.skipIf(os.environ.get('IGNORE_RABBITMQ'), 'no rabbitmq server for test.')
class TestRabbitMQ(unittest.TestCase):
    """Integration tests against a live RabbitMQ broker.

    NOTE(review): the ``test_NN_*`` names rely on alphabetical execution
    order -- later tests consume queue state left behind by earlier ones.
    """

    @classmethod
    def setUpClass(self):
        # NOTE(review): the first parameter of a classmethod is
        # conventionally named ``cls``; here it is the class despite
        # being called ``self``.
        with utils.timeout(3):
            # q1 and q2 are two connections to the same broker queue.
            self.q1 = rabbitmq.Queue('test_queue', maxsize=5)
            self.q2 = rabbitmq.Queue('test_queue', maxsize=5)
            self.q3 = rabbitmq.Queue('test_queue_for_threading_test')
            # Delete and reconnect so each run starts from empty queues.
            self.q2.delete()
            self.q2.reconnect()
            self.q3.delete()
            self.q3.reconnect()

    @classmethod
    def tearDownClass(self):
        self.q2.delete()
        self.q3.delete()
        del self.q1
        del self.q2
        del self.q3

    def test_10_put(self):
        # Messages put through q1 are visible through q2 (same queue).
        self.assertEqual(self.q1.qsize(), 0)
        self.assertEqual(self.q2.qsize(), 0)
        self.q1.put('TEST_DATA1', timeout=3)
        self.q1.put('TEST_DATA2_中文', timeout=3)
        time.sleep(0.01)  # give the broker a moment to register the puts
        self.assertEqual(self.q1.qsize(), 2)
        self.assertEqual(self.q2.qsize(), 2)

    def test_20_get(self):
        # Consumes the two messages produced by test_10_put, in order.
        self.assertEqual(self.q1.get(timeout=0.01), 'TEST_DATA1')
        self.assertEqual(self.q2.get_nowait(), 'TEST_DATA2_中文')
        with self.assertRaises(self.q1.Empty):
            self.q2.get(timeout=0.01)
        with self.assertRaises(self.q1.Empty):
            self.q2.get_nowait()

    def test_30_full(self):
        self.assertEqual(self.q1.qsize(), 0)
        self.assertEqual(self.q2.qsize(), 0)
        # maxsize=5: 2 + 3 puts fill the queue; further puts must raise.
        for i in range(2):
            self.q1.put_nowait('TEST_DATA%d' % i)
        for i in range(3):
            self.q2.put('TEST_DATA%d' % i)
        with self.assertRaises(self.q1.Full):
            self.q1.put('TEST_DATA6', timeout=0.01)
        with self.assertRaises(self.q1.Full):
            self.q1.put_nowait('TEST_DATA6')

    def test_40_multiple_threading_error(self):
        # Producer runs in a background thread while this thread consumes.
        def put(q):
            for i in range(100):
                q.put("DATA_%d" % i)

        def get(q):
            for i in range(100):
                q.get()

        utils.run_in_thread(put, self.q3)
        get(self.q3)
| apache-2.0 |
alexandrucoman/vbox-nova-driver | nova/tests/unit/scheduler/test_filters.py | 17 | 8491 | # Copyright 2012 OpenStack Foundation # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler Host Filters.
"""
import inspect
import sys
from nova import filters
from nova import loadables
from nova import test
class Filter1(filters.BaseFilter):
    """First dummy filter, mocked by the test cases below."""
class Filter2(filters.BaseFilter):
    """Second dummy filter, mocked by the test cases below."""
class FiltersTestCase(test.NoDBTestCase):
    """Tests for BaseFilter/BaseFilterHandler using mox record/replay."""

    def test_filter_all(self):
        filter_obj_list = ['obj1', 'obj2', 'obj3']
        filter_properties = 'fake_filter_properties'
        base_filter = filters.BaseFilter()

        # Record: _filter_one rejects only 'obj2'.
        self.mox.StubOutWithMock(base_filter, '_filter_one')
        base_filter._filter_one('obj1', filter_properties).AndReturn(True)
        base_filter._filter_one('obj2', filter_properties).AndReturn(False)
        base_filter._filter_one('obj3', filter_properties).AndReturn(True)
        self.mox.ReplayAll()

        result = base_filter.filter_all(filter_obj_list, filter_properties)
        # filter_all is lazy: it returns a generator.
        self.assertTrue(inspect.isgenerator(result))
        self.assertEqual(['obj1', 'obj3'], list(result))

    def test_filter_all_recursive_yields(self):
        # Test filter_all() allows generators from previous filter_all()s.
        # filter_all() yields results. We want to make sure that we can
        # call filter_all() with generators returned from previous calls
        # to filter_all().
        filter_obj_list = ['obj1', 'obj2', 'obj3']
        filter_properties = 'fake_filter_properties'
        base_filter = filters.BaseFilter()

        self.mox.StubOutWithMock(base_filter, '_filter_one')

        total_iterations = 200

        # The order that _filter_one is going to get called gets
        # confusing because we will be recursively yielding things..
        # We are going to simulate the first call to filter_all()
        # returning False for 'obj2'. So, 'obj1' will get yielded
        # 'total_iterations' number of times before the first filter_all()
        # call gets to processing 'obj2'. We then return 'False' for it.
        # After that, 'obj3' gets yielded 'total_iterations' number of
        # times.
        for x in xrange(total_iterations):
            base_filter._filter_one('obj1', filter_properties).AndReturn(True)
        base_filter._filter_one('obj2', filter_properties).AndReturn(False)
        for x in xrange(total_iterations):
            base_filter._filter_one('obj3', filter_properties).AndReturn(True)
        self.mox.ReplayAll()

        objs = iter(filter_obj_list)
        for x in xrange(total_iterations):
            # Pass in generators returned from previous calls.
            objs = base_filter.filter_all(objs, filter_properties)
        self.assertTrue(inspect.isgenerator(objs))
        self.assertEqual(['obj1', 'obj3'], list(objs))

    def test_get_filtered_objects(self):
        filter_objs_initial = ['initial', 'filter1', 'objects1']
        filter_objs_second = ['second', 'filter2', 'objects2']
        filter_objs_last = ['last', 'filter3', 'objects3']
        filter_properties = 'fake_filter_properties'

        def _fake_base_loader_init(*args, **kwargs):
            pass

        self.stubs.Set(loadables.BaseLoader, '__init__',
                       _fake_base_loader_init)

        filt1_mock = self.mox.CreateMock(Filter1)
        filt2_mock = self.mox.CreateMock(Filter2)

        self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(filt1_mock, 'run_filter_for_index')
        self.mox.StubOutWithMock(filt1_mock, 'filter_all')
        self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(filt2_mock, 'run_filter_for_index')
        self.mox.StubOutWithMock(filt2_mock, 'filter_all')
        # Record: both filters run and chain their outputs.
        filt1_mock.run_filter_for_index(0).AndReturn(True)
        filt1_mock.filter_all(filter_objs_initial,
                              filter_properties).AndReturn(filter_objs_second)
        filt2_mock.run_filter_for_index(0).AndReturn(True)
        filt2_mock.filter_all(filter_objs_second,
                              filter_properties).AndReturn(filter_objs_last)
        self.mox.ReplayAll()

        filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
        filter_mocks = [filt1_mock, filt2_mock]
        result = filter_handler.get_filtered_objects(filter_mocks,
                                                     filter_objs_initial,
                                                     filter_properties)
        self.assertEqual(filter_objs_last, result)

    def test_get_filtered_objects_for_index(self):
        """Test that we don't call a filter when its
        run_filter_for_index() method returns false
        """
        filter_objs_initial = ['initial', 'filter1', 'objects1']
        filter_objs_second = ['second', 'filter2', 'objects2']
        filter_properties = 'fake_filter_properties'

        def _fake_base_loader_init(*args, **kwargs):
            pass

        self.stubs.Set(loadables.BaseLoader, '__init__',
                       _fake_base_loader_init)

        filt1_mock = self.mox.CreateMock(Filter1)
        filt2_mock = self.mox.CreateMock(Filter2)

        self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(filt1_mock, 'run_filter_for_index')
        self.mox.StubOutWithMock(filt1_mock, 'filter_all')
        self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(filt2_mock, 'run_filter_for_index')
        self.mox.StubOutWithMock(filt2_mock, 'filter_all')
        filt1_mock.run_filter_for_index(0).AndReturn(True)
        filt1_mock.filter_all(filter_objs_initial,
                              filter_properties).AndReturn(filter_objs_second)
        # return false so filter_all will not be called
        filt2_mock.run_filter_for_index(0).AndReturn(False)
        self.mox.ReplayAll()

        filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
        filter_mocks = [filt1_mock, filt2_mock]
        filter_handler.get_filtered_objects(filter_mocks,
                                            filter_objs_initial,
                                            filter_properties)

    def test_get_filtered_objects_none_response(self):
        filter_objs_initial = ['initial', 'filter1', 'objects1']
        filter_properties = 'fake_filter_properties'

        def _fake_base_loader_init(*args, **kwargs):
            pass

        self.stubs.Set(loadables.BaseLoader, '__init__',
                       _fake_base_loader_init)

        filt1_mock = self.mox.CreateMock(Filter1)
        filt2_mock = self.mox.CreateMock(Filter2)

        self.mox.StubOutWithMock(sys.modules[__name__], 'Filter1',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(filt1_mock, 'run_filter_for_index')
        self.mox.StubOutWithMock(filt1_mock, 'filter_all')
        # Shouldn't be called.
        self.mox.StubOutWithMock(sys.modules[__name__], 'Filter2',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(filt2_mock, 'filter_all')
        filt1_mock.run_filter_for_index(0).AndReturn(True)
        # A None from filter_all short-circuits the whole chain.
        filt1_mock.filter_all(filter_objs_initial,
                              filter_properties).AndReturn(None)
        self.mox.ReplayAll()

        filter_handler = filters.BaseFilterHandler(filters.BaseFilter)
        filter_mocks = [filt1_mock, filt2_mock]
        result = filter_handler.get_filtered_objects(filter_mocks,
                                                     filter_objs_initial,
                                                     filter_properties)
        self.assertIsNone(result)
| apache-2.0 |
zerc/django | tests/fixtures/models.py | 230 | 3335 | """
Fixtures.
Fixtures are a way of loading data into the database in bulk. Fixture data
can be stored in any serializable format (including JSON and XML). Fixtures
are identified by name, and are stored in either a directory named 'fixtures'
in the application directory, or in one of the directories named in the
``FIXTURE_DIRS`` setting.
"""
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Category(models.Model):
    # Simple titled category with a free-form description.
    title = models.CharField(max_length=100)
    description = models.TextField()

    def __str__(self):
        return self.title

    class Meta:
        ordering = ('title',)
@python_2_unicode_compatible
class Article(models.Model):
    headline = models.CharField(max_length=100, default='Default headline')
    pub_date = models.DateTimeField()

    def __str__(self):
        return self.headline

    class Meta:
        # Newest first, then by headline.
        ordering = ('-pub_date', 'headline')
@python_2_unicode_compatible
class Blog(models.Model):
    name = models.CharField(max_length=100)
    # Required FK plus an optional M2M to Article; explicit related_names
    # avoid reverse-accessor clashes between the two relations.
    featured = models.ForeignKey(Article, models.CASCADE, related_name='fixtures_featured_set')
    articles = models.ManyToManyField(Article, blank=True,
                                      related_name='fixtures_articles_set')

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Tag(models.Model):
    name = models.CharField(max_length=100)
    # Generic relation: (tagged_type, tagged_id) points at any model instance.
    tagged_type = models.ForeignKey(ContentType, models.CASCADE, related_name="fixtures_tag_set")
    tagged_id = models.PositiveIntegerField(default=0)
    tagged = GenericForeignKey(ct_field='tagged_type', fk_field='tagged_id')

    def __str__(self):
        return '<%s: %s> tagged "%s"' % (self.tagged.__class__.__name__,
                                         self.tagged, self.name)
class PersonManager(models.Manager):
    """Manager enabling natural-key deserialization for Person fixtures."""
    def get_by_natural_key(self, name):
        # Inverse of Person.natural_key(): look the person up by name.
        return self.get(name=name)
@python_2_unicode_compatible
class Person(models.Model):
    """Fixture-test model serialized via a natural key (the name)."""
    objects = PersonManager()
    name = models.CharField(max_length=100)

    def __str__(self):
        return self.name

    class Meta:
        ordering = ('name',)

    def natural_key(self):
        # Must return a tuple; paired with PersonManager.get_by_natural_key.
        return (self.name,)
class SpyManager(PersonManager):
    """Manager that hides spies whose cover is blown."""
    def get_queryset(self):
        # Filter the default queryset; keeps natural-key lookup from the base.
        return super(SpyManager, self).get_queryset().filter(cover_blown=False)
class Spy(Person):
    """Multi-table-inheritance child of Person with a filtering manager."""
    objects = SpyManager()
    cover_blown = models.BooleanField(default=False)
@python_2_unicode_compatible
class Visa(models.Model):
    """Fixture-test model linking a Person to auth Permissions."""
    person = models.ForeignKey(Person, models.CASCADE)
    permissions = models.ManyToManyField(Permission, blank=True)

    def __str__(self):
        return '%s %s' % (self.person.name,
                          ', '.join(p.name for p in self.permissions.all()))
@python_2_unicode_compatible
class Book(models.Model):
    """Fixture-test model with a many-to-many to Person (its authors)."""
    name = models.CharField(max_length=100)
    authors = models.ManyToManyField(Person)

    def __str__(self):
        # Falls back to the bare name when the book has no authors.
        authors = ' and '.join(a.name for a in self.authors.all())
        return '%s by %s' % (self.name, authors) if authors else self.name

    class Meta:
        ordering = ('name',)
| bsd-3-clause |
suku248/nest-simulator | pynest/nest/tests/test_pp_psc_delta.py | 15 | 5883 | # -*- coding: utf-8 -*-
#
# test_pp_psc_delta.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import unittest
import nest
import numpy as np
class PpPscDeltaTestCase(unittest.TestCase):
    """Tests for pp_psc_delta.

    The three tests previously triplicated ~20 lines of identical neuron
    setup; it is factored into _simulate()/_check_rate() so each test only
    states what differs.  Public test-method names and behavior unchanged.
    """

    # Simulation duration in ms, shared by all tests.
    T = 10000.0

    def _simulate(self, dead_time, lam, dead_time_random=False,
                  dead_time_shape=1, q_sfa=0.0):
        """Simulate one pp_psc_delta neuron for self.T ms; return spike times.

        dead_time        -- refractory period in ms
        lam              -- baseline hazard rate (parameter c_2)
        dead_time_random -- draw the dead time from a gamma distribution
        dead_time_shape  -- shape parameter of that gamma distribution
        q_sfa            -- adaptation increment in mV (0 disables adaptation;
                            a reasonable non-zero default is 7 mV)
        """
        nest.ResetKernel()
        nrn = nest.Create('pp_psc_delta')
        params = {'tau_m': 10.0,
                  'C_m': 250.0,
                  'dead_time': dead_time,
                  'dead_time_random': dead_time_random,
                  'dead_time_shape': dead_time_shape,
                  'with_reset': False,
                  'tau_sfa': 34.0,
                  'q_sfa': q_sfa,
                  'c_1': 0.0,
                  'c_2': lam,
                  'c_3': 0.25,
                  'I_e': 0.0,
                  't_ref_remaining': 0.0}
        nest.SetStatus(nrn, params)
        sr = nest.Create('spike_recorder')
        nest.Connect(nrn, sr)
        nest.Simulate(self.T)
        return nest.GetStatus(sr)[0]['events']['times']

    def _check_rate(self, spikes, d, lam):
        """Assert the simulated rate is within 50% of the analytical rate."""
        rate_sim = len(spikes) / (self.T * 1e-3)
        # Expected rate of a Poisson process with (mean) dead time d in ms.
        rate_ana = 1. / (1. / lam + d * 1e-3)
        ratio = rate_sim / rate_ana
        # This could fail due to bad luck. However, if it passes once,
        # then it should always do so, since the random numbers are
        # reproducible in NEST.
        self.assertLess(0.5, ratio)
        self.assertLess(ratio, 1.5)

    def test_rate_and_fixed_dead_time(self):
        """
        Check for reasonable firing rate and if fixed dead-time is respected
        """
        d = 25.0
        lam = 10.0
        spikes = self._simulate(d, lam)
        self._check_rate(spikes, d, lam)
        # No inter-spike interval may be shorter than the fixed dead time.
        isi = np.diff(spikes)
        self.assertGreaterEqual(min(isi), d)

    def test_random_dead_time(self):
        """Check if random dead-time moments are respected."""
        d = 50.0
        n = 10
        lam = 1.0e6
        spikes = self._simulate(d, lam, dead_time_random=True,
                                dead_time_shape=n)
        self._check_rate(spikes, d, lam)
        # Compare the first two ISI moments against the gamma dead-time
        # distribution with shape n and mean d.
        isi = np.diff(spikes)
        ratio_mean = np.mean(isi) / d
        self.assertLessEqual(0.5, ratio_mean)
        self.assertLessEqual(ratio_mean, 1.5)
        isi_var_th = n / (n / d) ** 2  # gamma variance: shape / rate**2
        ratio_var = np.var(isi) / isi_var_th
        self.assertLessEqual(0.5, ratio_var)
        self.assertLessEqual(ratio_var, 1.5)

    def test_adapting_threshold(self):
        """Check if threshold adaptation works by looking for negative serial
        correlation of ISI."""
        d = 1e-8
        lam = 30.0
        spikes = self._simulate(d, lam, q_sfa=7.0)
        # The adaptive threshold changes the rate, so no rate check here;
        # instead a negative lag-1 serial ISI correlation indicates
        # adaptation is active.
        isi = np.diff(spikes)
        isi_mean = np.mean(isi)
        isi_var = np.var(isi)
        isi_12 = np.sum(isi[:-1] * isi[1:])
        isi_corr = (isi_12 / (len(isi) - 1) - isi_mean ** 2) / isi_var
        self.assertLessEqual(-1.0, isi_corr)
        self.assertLessEqual(isi_corr, 0.0)
def suite():
    """Build a suite containing every 'test*' case of PpPscDeltaTestCase."""
    return unittest.makeSuite(PpPscDeltaTestCase, 'test')
def run():
    """Execute the test suite with a verbose text runner."""
    unittest.TextTestRunner(verbosity=2).run(suite())
# Allow running this test module directly as a script.
if __name__ == "__main__":
    run()
| gpl-2.0 |
nickw444/quadcopter | older stuff/Quad/Adafruit-Raspberry-Pi-Python-Code/Adafruit_LEDBackpack/Adafruit_LEDBackpack.py | 3 | 2524 | #!/usr/bin/python
import time
from copy import copy
from Adafruit_I2C import Adafruit_I2C
# ============================================================================
# LEDBackpack Class
# ============================================================================
class LEDBackpack:
    """Driver for an HT16K33-based Adafruit LED backpack on the I2C bus."""

    i2c = None

    # Registers (command base addresses; low bits carry the argument)
    __HT16K33_REGISTER_DISPLAY_SETUP = 0x80
    __HT16K33_REGISTER_SYSTEM_SETUP = 0x20
    __HT16K33_REGISTER_DIMMING = 0xE0

    # Blink rate
    __HT16K33_BLINKRATE_OFF = 0x00
    __HT16K33_BLINKRATE_2HZ = 0x01
    __HT16K33_BLINKRATE_1HZ = 0x02
    __HT16K33_BLINKRATE_HALFHZ = 0x03

    # Display buffer (8x16-bits); rebound per instance by clear() in __init__
    __buffer = [0x0000, 0x0000, 0x0000, 0x0000,
                0x0000, 0x0000, 0x0000, 0x0000]

    # Constructor
    def __init__(self, address=0x70, debug=False):
        """Open the device at the given I2C address and reset the display."""
        self.i2c = Adafruit_I2C(address)
        self.address = address
        self.debug = debug
        # Turn the oscillator on
        self.i2c.write8(self.__HT16K33_REGISTER_SYSTEM_SETUP | 0x01, 0x00)
        # Turn blink off
        self.setBlinkRate(self.__HT16K33_BLINKRATE_OFF)
        # Set maximum brightness
        self.setBrightness(15)
        # Clear the screen
        self.clear()

    def setBrightness(self, brightness):
        "Sets the brightness level from 0..15"
        # Clamp to the 4-bit hardware maximum.
        if (brightness > 15):
            brightness = 15
        self.i2c.write8(self.__HT16K33_REGISTER_DIMMING | brightness, 0x00)

    def setBlinkRate(self, blinkRate):
        "Sets the blink rate"
        # Unknown rates fall back to no blinking.
        if (blinkRate > self.__HT16K33_BLINKRATE_HALFHZ):
            blinkRate = self.__HT16K33_BLINKRATE_OFF
        self.i2c.write8(self.__HT16K33_REGISTER_DISPLAY_SETUP | 0x01 | (blinkRate << 1), 0x00)

    def setBufferRow(self, row, value, update=True):
        "Updates a single 16-bit entry in the 8*16-bit buffer"
        if (row > 7):
            return  # Prevent buffer overflow
        # NOTE(review): value is stored unmasked; callers are expected to
        # pass a 16-bit value -- confirm before relying on wider ints.
        self.__buffer[row] = value
        if (update):
            self.writeDisplay()  # Update the display

    def getBuffer(self):
        "Returns a copy of the raw buffer contents"
        bufferCopy = copy(self.__buffer)
        return bufferCopy

    def writeDisplay(self):
        "Updates the display memory"
        bytes = []
        # Serialize each 16-bit row low byte first, starting at address 0x00.
        for item in self.__buffer:
            bytes.append(item & 0xFF)
            bytes.append((item >> 8) & 0xFF)
        self.i2c.writeList(0x00, bytes)

    def clear(self, update=True):
        "Clears the display memory"
        self.__buffer = [0, 0, 0, 0, 0, 0, 0, 0]
        if (update):
            self.writeDisplay()
led = LEDBackpack(0x70)
| gpl-3.0 |
rdio/translate-toolkit | storage/versioncontrol/darcs.py | 3 | 3743 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2004-2007 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from translate.storage.versioncontrol import GenericRevisionControlSystem
from translate.storage.versioncontrol import run_command
def is_available():
    """Return True when the ``darcs`` executable responds to --version."""
    result = run_command(["darcs", "--version"])
    return result[0] == 0
class darcs(GenericRevisionControlSystem):
    """Class to manage items under revision control of darcs."""

    RCS_METADIR = "_darcs"
    SCAN_PARENTS = True

    def update(self, revision=None):
        """Does a clean update of the given path

        @param revision: ignored for darcs
        """
        # revert local changes (avoids conflicts)
        command = ["darcs", "revert", "--repodir", self.root_dir,
                   "-a", self.location_rel]
        exitcode, output_revert, error = run_command(command)
        if exitcode != 0:
            raise IOError("[Darcs] error running '%s': %s" % (command, error))
        # pull new patches
        command = ["darcs", "pull", "--repodir", self.root_dir, "-a"]
        exitcode, output_pull, error = run_command(command)
        if exitcode != 0:
            raise IOError("[Darcs] error running '%s': %s" % (command, error))
        return output_revert + output_pull

    def commit(self, message=None, author=None):
        """Commits the file and supplies the given commit message if present

        @param message: text of the commit message (empty string if None)
        @param author: optional author passed to ``darcs record --author``
        """
        if message is None:
            message = ""
        # set change message
        command = ["darcs", "record", "-a", "--repodir", self.root_dir,
                   "--skip-long-comment", "-m", message]
        # add the 'author' to the list of arguments if it was given
        if author:
            command.extend(["--author", author])
        # the location of the file is the last argument
        command.append(self.location_rel)
        exitcode, output_record, error = run_command(command)
        if exitcode != 0:
            raise IOError("[Darcs] Error running darcs command '%s': %s"
                          % (command, error))
        # push changes
        command = ["darcs", "push", "-a", "--repodir", self.root_dir]
        exitcode, output_push, error = run_command(command)
        if exitcode != 0:
            raise IOError("[Darcs] Error running darcs command '%s': %s"
                          % (command, error))
        return output_record + output_push

    def getcleanfile(self, revision=None):
        """Get a clean version of a file from the darcs repository

        @param revision: ignored for darcs
        """
        import os
        # darcs keeps an unmodified copy of every tracked file under
        # _darcs/pristine; read the file straight from there.
        filename = os.path.join(self.root_dir, self.RCS_METADIR, 'pristine',
                                self.location_rel)
        try:
            darcs_file = open(filename)
            output = darcs_file.read()
            darcs_file.close()
        except IOError, error:  # NOTE: Python 2 except syntax (module predates py3)
            raise IOError("[Darcs] error reading original file '%s': %s" %
                          (filename, error))
        return output
| gpl-2.0 |
utkbansal/kuma | kuma/core/sections.py | 22 | 1158 | from tower import ugettext_lazy as _
class SECTION_ADDONS:
    """Enum-like namespace describing the Add-ons section."""
    short = 'addons'
    pretty = _(u'Add-ons')
    twitter = 'twitter-addons'
    updates = 'updates-addons'
class SECTION_MOZILLA:
    """Enum-like namespace describing the Mozilla section."""
    short = 'mozilla'
    pretty = _(u'Mozilla')
    twitter = 'twitter-mozilla'
    updates = 'updates-mozilla'
class SECTION_APPS:
    """Enum-like namespace describing the Apps section."""
    short = 'apps'
    pretty = _(u'Apps')
    twitter = 'twitter-apps'
    updates = 'updates-apps'
class SECTION_MOBILE:
    """Enum-like namespace describing the Mobile section."""
    short = 'mobile'
    pretty = _(u'Mobile')
    twitter = 'twitter-mobile'
    updates = 'updates-mobile'
class SECTION_WEB:
    """Enum-like namespace describing the Web section."""
    short = 'web'
    pretty = _(u'Web')
    twitter = 'twitter-web'
    updates = 'updates-web'
class SECTION_HACKS:
    """Enum-like namespace describing the Moz Hacks section."""
    short = 'hacks'
    pretty = _(u'Moz Hacks')
    twitter = 'twitter-moz-hacks'
    updates = 'updates-moz-hacks'
# Only the Hacks section is currently enabled.
SECTION_USAGE = _sections = (SECTION_HACKS,)

# Lookup tables mapping each identifier field back to its section class.
SECTIONS = dict((section.short, section)
                for section in _sections)
SECTIONS_TWITTER = dict((section.twitter, section)
                        for section in _sections)
SECTIONS_UPDATES = dict((section.updates, section)
                        for section in _sections)
| mpl-2.0 |
benklaasen/namebench | nb_third_party/dns/rdtypes/ANY/HINFO.py | 248 | 2659 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.exception
import dns.rdata
import dns.tokenizer
class HINFO(dns.rdata.Rdata):
    """HINFO record

    @ivar cpu: the CPU type
    @type cpu: string
    @ivar os: the OS type
    @type os: string
    @see: RFC 1035"""

    __slots__ = ['cpu', 'os']

    def __init__(self, rdclass, rdtype, cpu, os):
        super(HINFO, self).__init__(rdclass, rdtype)
        self.cpu = cpu
        self.os = os

    def to_text(self, origin=None, relativize=True, **kw):
        # Presentation form is two quoted, escaped character-strings.
        return '"%s" "%s"' % (dns.rdata._escapify(self.cpu),
                              dns.rdata._escapify(self.os))

    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
        # Parse two character-strings (CPU then OS) up to end of line.
        cpu = tok.get_string()
        os = tok.get_string()
        tok.get_eol()
        return cls(rdclass, rdtype, cpu, os)

    from_text = classmethod(from_text)

    def to_wire(self, file, compress = None, origin = None):
        # Wire format is two <length byte><data> character-strings, so each
        # string must be under 256 bytes.  (Python 2 str/chr byte handling.)
        l = len(self.cpu)
        assert l < 256
        byte = chr(l)
        file.write(byte)
        file.write(self.cpu)
        l = len(self.os)
        assert l < 256
        byte = chr(l)
        file.write(byte)
        file.write(self.os)

    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
        # First length-prefixed string: CPU type.
        l = ord(wire[current])
        current += 1
        rdlen -= 1
        if l > rdlen:
            raise dns.exception.FormError
        cpu = wire[current : current + l]
        current += l
        rdlen -= l
        # Second length-prefixed string must consume exactly the rest.
        l = ord(wire[current])
        current += 1
        rdlen -= 1
        if l != rdlen:
            raise dns.exception.FormError
        os = wire[current : current + l]
        return cls(rdclass, rdtype, cpu, os)

    from_wire = classmethod(from_wire)

    def _cmp(self, other):
        # Order by CPU string, then OS string (Python 2 cmp()).
        v = cmp(self.cpu, other.cpu)
        if v == 0:
            v = cmp(self.os, other.os)
        return v
| apache-2.0 |
zamattiac/SHARE | providers/org/seafdec/__init__.py | 1 | 5263 | default_app_config = 'providers.org.seafdec.apps.AppConfig'
"""
Example record:
<record xmlns="http://www.openarchives.org/OAI/2.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><header><identifier>oai:repository.seafdec.org.ph:10862/143</identifier><datestamp>2016-10-05T13:04:40Z</datestamp><setSpec>com_10862_84</setSpec><setSpec>com_10862_46</setSpec><setSpec>com_10862_1</setSpec><setSpec>col_10862_130</setSpec></header><metadata><oai_dc:dc xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/" xmlns:doc="http://www.lyncode.com/xoai" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:dc="http://purl.org/dc/elements/1.1/" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/ http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
<dc:title>Aquaculture development in Thailand</dc:title>
<dc:creator>Sirikul, Boonsong</dc:creator>
<dc:creator>Luanprida, Somsak</dc:creator>
<dc:creator>Chaiyakam, Kanit</dc:creator>
<dc:creator>Sriprasert, Revadee.</dc:creator>
<dc:contributor>Juario, Jesus V.</dc:contributor>
<dc:contributor>Benitez, Lita V.</dc:contributor>
<dc:subject>Anadara nofidera</dc:subject>
<dc:subject>Macrobrachium</dc:subject>
<dc:subject>Porphyra</dc:subject>
<dc:subject>Penaeus</dc:subject>
<dc:subject>Gracilaria</dc:subject>
<dc:subject>Arcuatuala arcuala</dc:subject>
<dc:subject>Modiolus metcalfi</dc:subject>
<dc:subject>Thailand</dc:subject>
<dc:subject>Seed (aquaculture)</dc:subject>
<dc:subject>Shrimp culture</dc:subject>
<dc:subject>Feed</dc:subject>
<dc:subject>Pond culture</dc:subject>
<dc:subject>Mollusc culture</dc:subject>
<dc:subject>Mussel culture</dc:subject>
<dc:subject>Seed production</dc:subject>
<dc:subject>Prawn culture</dc:subject>
<dc:subject>Oyster culture</dc:subject>
<dc:subject>Freshwater fish</dc:subject>
<dc:subject>Aquaculture</dc:subject>
<dc:subject>Artificial feeding</dc:subject>
<dc:subject>Brackishwater aquaculture</dc:subject>
<dc:subject>Aquaculture systems</dc:subject>
<dc:subject>Clam culture</dc:subject>
<dc:subject>Feed composition</dc:subject>
<dc:subject>Marine fish</dc:subject>
<dc:subject>Aquaculture development</dc:subject>
<dc:subject>Cage culture</dc:subject>
<dc:subject>Seaweed culture</dc:subject>
<dc:subject>Marine aquaculture</dc:subject>
<dc:subject>Freshwater aquaculture</dc:subject>
<dc:subject>Fish culture</dc:subject>
<dc:description>Aquaculture practised in Thailand is in the form of pond culture and cage culture in freshwater, brackishwater and coastal areas. The main species cultured include freshwater prawns, brackishwater shrimp, cockles, mussels, and various freshwater and marine finfishes. There is good potential for increased production from freshwater, brackishwater and marine aquaculture. However, the 1983 production of 145 000 mt represents only about 6% of Thailand's total fish production and production in this subsector has fluctuated widely. It will be several years before aquaculture production will contribute substantially to total production. Nonetheless, the culture of high value species of shrimp and fish could contribute significantly to export earnings during the next 5 to 10 years.
Conducted primarily by government agencies, research and development are along the lines of increasing seed supply, establishing new culture techniques or improving older ones. The Department of Fisheries (DOF) together with some private companies have ventured into the development and testing of artificial diets for the various cultured species using a variety of indigenous feed stuffs.
It is estimated that with adequate investments and appropriate support, aquaculture production will increase from 145 000 mt in 1983 to 378 000 mt in 1991, showing an annual increase of about 13% over this period. Major increases would come from bivalve mariculture (131 000 mt), brackishwater ponds (36 000 mt) freshwater ponds (46 000 mt) and brackishwater cage culture (20 000 mt).</dc:description>
<dc:date>2011-06-06T02:34:00Z</dc:date>
<dc:date>2011-06-06T02:34:00Z</dc:date>
<dc:date>1988</dc:date>
<dc:type>Conference paper</dc:type>
<dc:identifier>Sirikul, B., Luanprida, S., Chaiyakam, K., Sriprasert, R. (1988). Aquaculture development in Thailand. In J. V. Juario & L. V. Benitez (Eds.), Perspectives in Aquaculture Development in Southeast Asia and Japan: Contributions of the SEAFDEC Aquaculture Department. Proceedings of the Seminar on Aquaculture Development in Southeast Asia, 8-12 September 1987, Iloilo City, Philippines. (pp. 129-148). Tigbauan, Iloilo, Philippines: Aquaculture Department, Southeast Asian Fisheries Development Center.</dc:identifier>
<dc:identifier>971851113X</dc:identifier>
<dc:identifier>http://hdl.handle.net/10862/143</dc:identifier>
<dc:identifier.isbn>971851113X</dc:identifier.isbn>
<dc:language>en</dc:language>
<dc:publisher>Aquaculture Department, Southeast Asian Fisheries Development Center</dc:publisher>
<dc:relation.ispartof>Seminar on Aquaculture Development in Southeast Asia, 8-12 September 1987, Iloilo City, Philippines</dc:relation.ispartof>
<dc:identifier.thumbnail>http://repository.seafdec.org.ph/bitstream/10862/143/3/adsea87p129-148.pdf.jpg</dc:identifier.thumbnail>
</oai_dc:dc>
</metadata></record>
"""
| apache-2.0 |
pinkavaj/gnuradio | gr-blocks/python/blocks/qa_interleave.py | 47 | 5392 | #!/usr/bin/env python
#
# Copyright 2004,2007,2010,2012-2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, blocks
class test_interleave (gr_unittest.TestCase):
    """QA tests for blocks.interleave / blocks.deinterleave.

    NOTE(review): Python 2 code -- relies on map() returning lists and on
    integer division in lenx/(4 * blksize); do not run under Python 3 as-is.
    """

    def setUp (self):
        self.tb = gr.top_block ()

    def tearDown (self):
        self.tb = None

    def test_int_001 (self):
        # Item-wise interleave of four staggered ramps must yield 0..lenx-1.
        lenx = 64
        src0 = blocks.vector_source_f (range (0, lenx, 4))
        src1 = blocks.vector_source_f (range (1, lenx, 4))
        src2 = blocks.vector_source_f (range (2, lenx, 4))
        src3 = blocks.vector_source_f (range (3, lenx, 4))
        op = blocks.interleave (gr.sizeof_float)
        dst = blocks.vector_sink_f ()
        self.tb.connect (src0, (op, 0))
        self.tb.connect (src1, (op, 1))
        self.tb.connect (src2, (op, 2))
        self.tb.connect (src3, (op, 3))
        self.tb.connect (op, dst)
        self.tb.run ()
        expected_result = tuple (range (lenx))
        result_data = dst.data ()
        self.assertFloatTuplesAlmostEqual (expected_result, result_data)

    def test_int_002 (self):
        # Block-wise interleave (blksize items at a time) -> 0..lenx-1.
        blksize = 4
        lenx = 64
        plusup_big = lambda a: a + (blksize * 4)
        plusup_little = lambda a: a + blksize
        # a_vec holds every 4th run of blksize consecutive integers;
        # b/c/d_vec are the same shifted by 1, 2, 3 blocks respectively.
        a_vec = range(0,blksize)
        for i in range(0,(lenx/(4 * blksize)) - 1):
            a_vec += map(plusup_big, a_vec[len(a_vec) - blksize:])
        b_vec = map(plusup_little, a_vec)
        c_vec = map(plusup_little, b_vec)
        d_vec = map(plusup_little, c_vec)
        src0 = blocks.vector_source_f (a_vec)
        src1 = blocks.vector_source_f (b_vec)
        src2 = blocks.vector_source_f (c_vec)
        src3 = blocks.vector_source_f (d_vec)
        op = blocks.interleave (gr.sizeof_float, blksize)
        dst = blocks.vector_sink_f ()
        self.tb.connect (src0, (op, 0))
        self.tb.connect (src1, (op, 1))
        self.tb.connect (src2, (op, 2))
        self.tb.connect (src3, (op, 3))
        self.tb.connect (op, dst)
        self.tb.run ()
        expected_result = tuple (range (lenx))
        result_data = dst.data ()
        self.assertFloatTuplesAlmostEqual (expected_result, result_data)

    def test_deint_001 (self):
        # Item-wise deinterleave of a ramp -> four staggered ramps.
        lenx = 64
        src = blocks.vector_source_f (range (lenx))
        op = blocks.deinterleave (gr.sizeof_float)
        dst0 = blocks.vector_sink_f ()
        dst1 = blocks.vector_sink_f ()
        dst2 = blocks.vector_sink_f ()
        dst3 = blocks.vector_sink_f ()
        self.tb.connect (src, op)
        self.tb.connect ((op, 0), dst0)
        self.tb.connect ((op, 1), dst1)
        self.tb.connect ((op, 2), dst2)
        self.tb.connect ((op, 3), dst3)
        self.tb.run ()
        expected_result0 = tuple (range (0, lenx, 4))
        expected_result1 = tuple (range (1, lenx, 4))
        expected_result2 = tuple (range (2, lenx, 4))
        expected_result3 = tuple (range (3, lenx, 4))
        self.assertFloatTuplesAlmostEqual (expected_result0, dst0.data ())
        self.assertFloatTuplesAlmostEqual (expected_result1, dst1.data ())
        self.assertFloatTuplesAlmostEqual (expected_result2, dst2.data ())
        self.assertFloatTuplesAlmostEqual (expected_result3, dst3.data ())

    def test_deint_002 (self):
        # Block-wise deinterleave; expected vectors built as in test_int_002.
        blksize = 4
        lenx = 64
        src = blocks.vector_source_f (range (lenx))
        op = blocks.deinterleave (gr.sizeof_float, blksize)
        dst0 = blocks.vector_sink_f ()
        dst1 = blocks.vector_sink_f ()
        dst2 = blocks.vector_sink_f ()
        dst3 = blocks.vector_sink_f ()
        self.tb.connect (src, op)
        self.tb.connect ((op, 0), dst0)
        self.tb.connect ((op, 1), dst1)
        self.tb.connect ((op, 2), dst2)
        self.tb.connect ((op, 3), dst3)
        self.tb.run ()
        plusup_big = lambda a: a + (blksize * 4)
        plusup_little = lambda a: a + blksize
        a_vec = range(0,blksize)
        for i in range(0,(lenx/(4 * blksize)) - 1):
            a_vec += map(plusup_big, a_vec[len(a_vec) - blksize:])
        b_vec = map(plusup_little, a_vec)
        c_vec = map(plusup_little, b_vec)
        d_vec = map(plusup_little, c_vec)
        expected_result0 = tuple (a_vec)
        expected_result1 = tuple (b_vec)
        expected_result2 = tuple (c_vec)
        expected_result3 = tuple (d_vec)
        self.assertFloatTuplesAlmostEqual (expected_result0, dst0.data ())
        self.assertFloatTuplesAlmostEqual (expected_result1, dst1.data ())
        self.assertFloatTuplesAlmostEqual (expected_result2, dst2.data ())
        self.assertFloatTuplesAlmostEqual (expected_result3, dst3.data ())
# Run via GNU Radio's unittest wrapper, writing XML results.
if __name__ == '__main__':
    gr_unittest.run(test_interleave, "test_interleave.xml")
| gpl-3.0 |
windofthesky/ansible | contrib/inventory/freeipa.py | 95 | 2201 | #!/usr/bin/env python
import argparse
from ipalib import api
import json
def initialize():
    '''
    This function initializes the FreeIPA/IPA API. This function requires
    no arguments. A kerberos key must be present in the users keyring in
    order for this to work.
    '''
    api.bootstrap(context='cli')
    api.finalize()
    try:
        api.Backend.rpcclient.connect()
    except AttributeError:
        # FreeIPA < 4.0 compatibility: older releases expose the client
        # under Backend.xmlclient instead of Backend.rpcclient.
        api.Backend.xmlclient.connect()
    return api
def list_groups(api):
    '''
    This function prints (as JSON) an Ansible inventory built from all host
    groups. This function requires one argument, the FreeIPA/IPA API object.
    '''
    inventory = {}
    hostvars = {}
    meta = {}  # NOTE(review): unused; left in place
    result = api.Command.hostgroup_find()['result']
    for hostgroup in result:
        # NOTE(review): assumes every hostgroup carries 'member_host';
        # a group with no members would raise KeyError -- confirm.
        inventory[hostgroup['cn'][0]] = {'hosts': [host for host in hostgroup['member_host']]}
        for host in hostgroup['member_host']:
            hostvars[host] = {}
    inventory['_meta'] = {'hostvars': hostvars}
    inv_string = json.dumps(inventory, indent=1, sort_keys=True)
    print inv_string  # Python 2 print statement
    return None
def parse_args():
    '''
    This function parses the arguments that were passed in via the command line.
    This function expects no arguments.
    '''
    parser = argparse.ArgumentParser(
        description='Ansible FreeIPA/IPA inventory module')
    mode = parser.add_mutually_exclusive_group(required=True)
    mode.add_argument('--list', action='store_true',
                      help='List active servers')
    mode.add_argument('--host', help='List details about the specified host')
    return parser.parse_args()
def print_host(host):
    '''
    This function is really a stub, it could return variables to be used in
    a playbook. However, at this point there are no variables stored in
    FreeIPA/IPA.
    This function expects one string, this hostname to lookup variables for.
    '''
    # Always an empty JSON object: FreeIPA stores no per-host variables here.
    print json.dumps({})  # Python 2 print statement
    return None
# Entry point: dispatch on the mutually exclusive --host / --list flags.
if __name__ == '__main__':
    args = parse_args()
    if args.host:
        print_host(args.host)
    elif args.list:
        api = initialize()
        list_groups(api)
| gpl-3.0 |
rockyzhang/zhangyanhit-python-for-android-mips | python3-alpha/python3-src/Lib/encodings/mac_greek.py | 272 | 13721 | """ Python Character Mapping Codec mac_greek generated from 'MAPPINGS/VENDORS/APPLE/GREEK.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless charmap codec; encoding_table/decoding_table are the
    # generated Mac-Greek mapping tables defined later in this module.
    def encode(self, input, errors='strict'):
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding is stateless, so no buffering across calls is needed.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Charmap decoding is stateless, so no buffering across calls is needed.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
    pass  # behaviour fully inherited from Codec
class StreamReader(Codec, codecs.StreamReader):
    pass  # behaviour fully inherited from Codec
### encodings module API
def getregentry():
    """Return the :class:`codecs.CodecInfo` entry registering 'mac-greek'."""
    # The codec is stateless, so a single instance can serve both the
    # encode and decode entry points.
    codec = Codec()
    return codecs.CodecInfo(
        name='mac-greek',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> CONTROL CHARACTER
'\x01' # 0x01 -> CONTROL CHARACTER
'\x02' # 0x02 -> CONTROL CHARACTER
'\x03' # 0x03 -> CONTROL CHARACTER
'\x04' # 0x04 -> CONTROL CHARACTER
'\x05' # 0x05 -> CONTROL CHARACTER
'\x06' # 0x06 -> CONTROL CHARACTER
'\x07' # 0x07 -> CONTROL CHARACTER
'\x08' # 0x08 -> CONTROL CHARACTER
'\t' # 0x09 -> CONTROL CHARACTER
'\n' # 0x0A -> CONTROL CHARACTER
'\x0b' # 0x0B -> CONTROL CHARACTER
'\x0c' # 0x0C -> CONTROL CHARACTER
'\r' # 0x0D -> CONTROL CHARACTER
'\x0e' # 0x0E -> CONTROL CHARACTER
'\x0f' # 0x0F -> CONTROL CHARACTER
'\x10' # 0x10 -> CONTROL CHARACTER
'\x11' # 0x11 -> CONTROL CHARACTER
'\x12' # 0x12 -> CONTROL CHARACTER
'\x13' # 0x13 -> CONTROL CHARACTER
'\x14' # 0x14 -> CONTROL CHARACTER
'\x15' # 0x15 -> CONTROL CHARACTER
'\x16' # 0x16 -> CONTROL CHARACTER
'\x17' # 0x17 -> CONTROL CHARACTER
'\x18' # 0x18 -> CONTROL CHARACTER
'\x19' # 0x19 -> CONTROL CHARACTER
'\x1a' # 0x1A -> CONTROL CHARACTER
'\x1b' # 0x1B -> CONTROL CHARACTER
'\x1c' # 0x1C -> CONTROL CHARACTER
'\x1d' # 0x1D -> CONTROL CHARACTER
'\x1e' # 0x1E -> CONTROL CHARACTER
'\x1f' # 0x1F -> CONTROL CHARACTER
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> CONTROL CHARACTER
'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xb9' # 0x81 -> SUPERSCRIPT ONE
'\xb2' # 0x82 -> SUPERSCRIPT TWO
'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xb3' # 0x84 -> SUPERSCRIPT THREE
'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\u0385' # 0x87 -> GREEK DIALYTIKA TONOS
'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
'\u0384' # 0x8B -> GREEK TONOS
'\xa8' # 0x8C -> DIAERESIS
'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xa3' # 0x92 -> POUND SIGN
'\u2122' # 0x93 -> TRADE MARK SIGN
'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
'\u2022' # 0x96 -> BULLET
'\xbd' # 0x97 -> VULGAR FRACTION ONE HALF
'\u2030' # 0x98 -> PER MILLE SIGN
'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
'\xa6' # 0x9B -> BROKEN BAR
'\u20ac' # 0x9C -> EURO SIGN # before Mac OS 9.2.2, was SOFT HYPHEN
'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
'\u2020' # 0xA0 -> DAGGER
'\u0393' # 0xA1 -> GREEK CAPITAL LETTER GAMMA
'\u0394' # 0xA2 -> GREEK CAPITAL LETTER DELTA
'\u0398' # 0xA3 -> GREEK CAPITAL LETTER THETA
'\u039b' # 0xA4 -> GREEK CAPITAL LETTER LAMDA
'\u039e' # 0xA5 -> GREEK CAPITAL LETTER XI
'\u03a0' # 0xA6 -> GREEK CAPITAL LETTER PI
'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
'\xae' # 0xA8 -> REGISTERED SIGN
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\u03a3' # 0xAA -> GREEK CAPITAL LETTER SIGMA
'\u03aa' # 0xAB -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
'\xa7' # 0xAC -> SECTION SIGN
'\u2260' # 0xAD -> NOT EQUAL TO
'\xb0' # 0xAE -> DEGREE SIGN
'\xb7' # 0xAF -> MIDDLE DOT
'\u0391' # 0xB0 -> GREEK CAPITAL LETTER ALPHA
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
'\xa5' # 0xB4 -> YEN SIGN
'\u0392' # 0xB5 -> GREEK CAPITAL LETTER BETA
'\u0395' # 0xB6 -> GREEK CAPITAL LETTER EPSILON
'\u0396' # 0xB7 -> GREEK CAPITAL LETTER ZETA
'\u0397' # 0xB8 -> GREEK CAPITAL LETTER ETA
'\u0399' # 0xB9 -> GREEK CAPITAL LETTER IOTA
'\u039a' # 0xBA -> GREEK CAPITAL LETTER KAPPA
'\u039c' # 0xBB -> GREEK CAPITAL LETTER MU
'\u03a6' # 0xBC -> GREEK CAPITAL LETTER PHI
'\u03ab' # 0xBD -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
'\u03a8' # 0xBE -> GREEK CAPITAL LETTER PSI
'\u03a9' # 0xBF -> GREEK CAPITAL LETTER OMEGA
'\u03ac' # 0xC0 -> GREEK SMALL LETTER ALPHA WITH TONOS
'\u039d' # 0xC1 -> GREEK CAPITAL LETTER NU
'\xac' # 0xC2 -> NOT SIGN
'\u039f' # 0xC3 -> GREEK CAPITAL LETTER OMICRON
'\u03a1' # 0xC4 -> GREEK CAPITAL LETTER RHO
'\u2248' # 0xC5 -> ALMOST EQUAL TO
'\u03a4' # 0xC6 -> GREEK CAPITAL LETTER TAU
'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
'\xa0' # 0xCA -> NO-BREAK SPACE
'\u03a5' # 0xCB -> GREEK CAPITAL LETTER UPSILON
'\u03a7' # 0xCC -> GREEK CAPITAL LETTER CHI
'\u0386' # 0xCD -> GREEK CAPITAL LETTER ALPHA WITH TONOS
'\u0388' # 0xCE -> GREEK CAPITAL LETTER EPSILON WITH TONOS
'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
'\u2013' # 0xD0 -> EN DASH
'\u2015' # 0xD1 -> HORIZONTAL BAR
'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
'\xf7' # 0xD6 -> DIVISION SIGN
'\u0389' # 0xD7 -> GREEK CAPITAL LETTER ETA WITH TONOS
'\u038a' # 0xD8 -> GREEK CAPITAL LETTER IOTA WITH TONOS
'\u038c' # 0xD9 -> GREEK CAPITAL LETTER OMICRON WITH TONOS
'\u038e' # 0xDA -> GREEK CAPITAL LETTER UPSILON WITH TONOS
'\u03ad' # 0xDB -> GREEK SMALL LETTER EPSILON WITH TONOS
'\u03ae' # 0xDC -> GREEK SMALL LETTER ETA WITH TONOS
'\u03af' # 0xDD -> GREEK SMALL LETTER IOTA WITH TONOS
'\u03cc' # 0xDE -> GREEK SMALL LETTER OMICRON WITH TONOS
'\u038f' # 0xDF -> GREEK CAPITAL LETTER OMEGA WITH TONOS
'\u03cd' # 0xE0 -> GREEK SMALL LETTER UPSILON WITH TONOS
'\u03b1' # 0xE1 -> GREEK SMALL LETTER ALPHA
'\u03b2' # 0xE2 -> GREEK SMALL LETTER BETA
'\u03c8' # 0xE3 -> GREEK SMALL LETTER PSI
'\u03b4' # 0xE4 -> GREEK SMALL LETTER DELTA
'\u03b5' # 0xE5 -> GREEK SMALL LETTER EPSILON
'\u03c6' # 0xE6 -> GREEK SMALL LETTER PHI
'\u03b3' # 0xE7 -> GREEK SMALL LETTER GAMMA
'\u03b7' # 0xE8 -> GREEK SMALL LETTER ETA
'\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA
'\u03be' # 0xEA -> GREEK SMALL LETTER XI
'\u03ba' # 0xEB -> GREEK SMALL LETTER KAPPA
'\u03bb' # 0xEC -> GREEK SMALL LETTER LAMDA
'\u03bc' # 0xED -> GREEK SMALL LETTER MU
'\u03bd' # 0xEE -> GREEK SMALL LETTER NU
'\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON
'\u03c0' # 0xF0 -> GREEK SMALL LETTER PI
'\u03ce' # 0xF1 -> GREEK SMALL LETTER OMEGA WITH TONOS
'\u03c1' # 0xF2 -> GREEK SMALL LETTER RHO
'\u03c3' # 0xF3 -> GREEK SMALL LETTER SIGMA
'\u03c4' # 0xF4 -> GREEK SMALL LETTER TAU
'\u03b8' # 0xF5 -> GREEK SMALL LETTER THETA
'\u03c9' # 0xF6 -> GREEK SMALL LETTER OMEGA
'\u03c2' # 0xF7 -> GREEK SMALL LETTER FINAL SIGMA
'\u03c7' # 0xF8 -> GREEK SMALL LETTER CHI
'\u03c5' # 0xF9 -> GREEK SMALL LETTER UPSILON
'\u03b6' # 0xFA -> GREEK SMALL LETTER ZETA
'\u03ca' # 0xFB -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
'\u03cb' # 0xFC -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
'\u0390' # 0xFD -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
'\u03b0' # 0xFE -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
'\xad' # 0xFF -> SOFT HYPHEN # before Mac OS 9.2.2, was undefined
)
### Encoding table
# Build the inverse (character -> byte) mapping from the decoding table.
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
n-west/gnuradio-volk | gr-digital/python/digital/qa_correlate_access_code_tag.py | 33 | 2649 | #!/usr/bin/env python
#
# Copyright 2006,2007,2010,2011,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, digital, blocks
# 64-bit default preamble used by test_002; its bytes are expanded
# LSB-first into individual bits by string_to_1_0_list() below.
default_access_code = '\xAC\xDD\xA4\xE2\xF2\x8C\x20\xFC'
def string_to_1_0_list(s):
    """Expand each character of *s* into its 8 bits, LSB first.

    Returns a flat list of 0/1 ints, 8 entries per input character.
    """
    return [(ord(ch) >> bit) & 0x1 for ch in s for bit in range(8)]
def to_1_0_string(L):
    """Render a list of bit ints as a string of '0'/'1' characters."""
    return ''.join(chr(ord('0') + bit) for bit in L)
class test_correlate_access_code(gr_unittest.TestCase):
    """QA tests for digital.correlate_access_code_tag_bb.

    Runs small bit streams through a GNU Radio flowgraph and checks that
    the correlator attaches a "sync" stream tag at the position where the
    access code ends.
    """
    def setUp(self):
        # Fresh top-level flowgraph for every test.
        self.tb = gr.top_block()
    def tearDown(self):
        self.tb = None
    def test_001(self):
        # The short code "1011" occurs twice in the head of src_data; the
        # tag offsets mark the bit position just past each occurrence.
        pad = (0,) * 64
        src_data = (1, 0, 1, 1, 1, 1, 0, 1, 1) + pad + (0,) * 7
        src = blocks.vector_source_b(src_data)
        op = digital.correlate_access_code_tag_bb("1011", 0, "sync")
        dst = blocks.tag_debug(1, "sync")
        self.tb.connect(src, op, dst)
        self.tb.run()
        result_data = dst.current_tags()
        self.assertEqual(len(result_data), 2)
        self.assertEqual(result_data[0].offset, 4)
        self.assertEqual(result_data[1].offset, 9)
    def test_002(self):
        # Use the full 64-bit default access code; exactly one match is
        # expected, tagged at the end of the code.
        code = tuple(string_to_1_0_list(default_access_code))
        access_code = to_1_0_string(code)
        pad = (0,) * 64
        #print code
        #print access_code
        src_data = code + (1, 0, 1, 1) + pad
        # NOTE(review): expected_result is never used by the assertions
        # below (and the "3" entry looks like a leftover flag value from
        # an older API) -- candidate for removal.
        expected_result = pad + code + (3, 0, 1, 1)
        src = blocks.vector_source_b(src_data)
        op = digital.correlate_access_code_tag_bb(access_code, 0, "sync")
        dst = blocks.tag_debug(1, "sync")
        self.tb.connect(src, op, dst)
        self.tb.run()
        result_data = dst.current_tags()
        self.assertEqual(len(result_data), 1)
        self.assertEqual(result_data[0].offset, len(code))
if __name__ == '__main__':
    # Run under the GNU Radio unittest harness; the second argument names
    # the XML results file used by the legacy test reporting.
    gr_unittest.run(test_correlate_access_code, "test_correlate_access_code_tag.xml")
| gpl-3.0 |
thefinn93/CouchPotatoServer | libs/sqlalchemy/engine/__init__.py | 12 | 15936 | # engine/__init__.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""SQL connections, SQL execution and high-level DB-API interface.
The engine package defines the basic components used to interface
DB-API modules with higher-level statement construction,
connection-management, execution and result contexts. The primary
"entry point" class into this package is the Engine and its public
constructor ``create_engine()``.
This package includes:
base.py
Defines interface classes and some implementation classes which
comprise the basic components used to interface between a DB-API,
constructed and plain-text statements, connections, transactions,
and results.
default.py
Contains default implementations of some of the components defined
in base.py. All current database dialects use the classes in
default.py as base classes for their own database-specific
implementations.
strategies.py
The mechanics of constructing ``Engine`` objects are represented
here. Defines the ``EngineStrategy`` class which represents how
to go from arguments specified to the ``create_engine()``
function, to a fully constructed ``Engine``, including
initialization of connection pooling, dialects, and specific
subclasses of ``Engine``.
threadlocal.py
The ``TLEngine`` class is defined here, which is a subclass of
the generic ``Engine`` and tracks ``Connection`` and
``Transaction`` objects against the identity of the current
thread. This allows certain programming patterns based around
the concept of a "thread-local connection" to be possible.
The ``TLEngine`` is created by using the "threadlocal" engine
strategy in conjunction with the ``create_engine()`` function.
url.py
Defines the ``URL`` class which represents the individual
components of a string URL passed to ``create_engine()``. Also
defines a basic module-loading strategy for the dialect specifier
within a URL.
"""
# not sure what this was used for
#import sqlalchemy.databases
from sqlalchemy.engine.base import (
BufferedColumnResultProxy,
BufferedColumnRow,
BufferedRowResultProxy,
Compiled,
Connectable,
Connection,
Dialect,
Engine,
ExecutionContext,
NestedTransaction,
ResultProxy,
RootTransaction,
RowProxy,
Transaction,
TwoPhaseTransaction,
TypeCompiler
)
from sqlalchemy.engine import strategies
from sqlalchemy import util
# Public API of the engine package; ``from sqlalchemy.engine import *``
# exposes exactly these names.
__all__ = (
    'BufferedColumnResultProxy',
    'BufferedColumnRow',
    'BufferedRowResultProxy',
    'Compiled',
    'Connectable',
    'Connection',
    'Dialect',
    'Engine',
    'ExecutionContext',
    'NestedTransaction',
    'ResultProxy',
    'RootTransaction',
    'RowProxy',
    'Transaction',
    'TwoPhaseTransaction',
    'TypeCompiler',
    'create_engine',
    'engine_from_config',
    )
# Name of the EngineStrategy used when create_engine() receives no
# explicit ``strategy`` keyword argument.
default_strategy = 'plain'
def create_engine(*args, **kwargs):
    """Create a new :class:`.Engine` instance.

    The standard calling form is to send the URL as the
    first positional argument, usually a string
    that indicates database dialect and connection arguments.
    Additional keyword arguments may then follow it which
    establish various options on the resulting :class:`.Engine`
    and its underlying :class:`.Dialect` and :class:`.Pool`
    constructs.

    The string form of the URL is
    ``dialect+driver://user:password@host/dbname[?key=value..]``, where
    ``dialect`` is a database name such as ``mysql``, ``oracle``,
    ``postgresql``, etc., and ``driver`` the name of a DBAPI, such as
    ``psycopg2``, ``pyodbc``, ``cx_oracle``, etc.  Alternatively,
    the URL can be an instance of :class:`~sqlalchemy.engine.url.URL`.

    ``**kwargs`` takes a wide variety of options which are routed
    towards their appropriate components.  Arguments may be
    specific to the :class:`.Engine`, the underlying :class:`.Dialect`,
    as well as the :class:`.Pool`.  Specific dialects also accept
    keyword arguments that are unique to that dialect.  Here, we
    describe the parameters that are common to most
    :func:`.create_engine()` usage.

    Once established, the newly resulting :class:`.Engine` will
    request a connection from the underlying :class:`.Pool` once
    :meth:`.Engine.connect` is called, or a method which depends on it
    such as :meth:`.Engine.execute` is invoked.  The :class:`.Pool` in turn
    will establish the first actual DBAPI connection when this request
    is received.  The :func:`.create_engine` call itself does **not**
    establish any actual DBAPI connections directly.

    See also:

    :ref:`engines_toplevel`

    :ref:`connections_toplevel`

    :param assert_unicode: Deprecated.  This flag
        sets an engine-wide default value for
        the ``assert_unicode`` flag on the
        :class:`.String` type - see that
        type for further details.

    :param connect_args: a dictionary of options which will be
        passed directly to the DBAPI's ``connect()`` method as
        additional keyword arguments.  See the example
        at :ref:`custom_dbapi_args`.

    :param convert_unicode=False: if set to True, sets
        the default behavior of ``convert_unicode`` on the
        :class:`.String` type to ``True``, regardless
        of a setting of ``False`` on an individual
        :class:`.String` type, thus causing all :class:`.String`
        -based columns
        to accommodate Python ``unicode`` objects.  This flag
        is useful as an engine-wide setting when using a
        DBAPI that does not natively support Python
        ``unicode`` objects and raises an error when
        one is received (such as pyodbc with FreeTDS).
        See :class:`.String` for further details on
        what this flag indicates.

    :param creator: a callable which returns a DBAPI connection.
        This creation function will be passed to the underlying
        connection pool and will be used to create all new database
        connections.  Usage of this function causes connection
        parameters specified in the URL argument to be bypassed.

    :param echo=False: if True, the Engine will log all statements
        as well as a repr() of their parameter lists to the engines
        logger, which defaults to sys.stdout.  The ``echo`` attribute of
        ``Engine`` can be modified at any time to turn logging on and
        off.  If set to the string ``"debug"``, result rows will be
        printed to the standard output as well.  This flag ultimately
        controls a Python logger; see :ref:`dbengine_logging` for
        information on how to configure logging directly.

    :param echo_pool=False: if True, the connection pool will log
        all checkouts/checkins to the logging stream, which defaults to
        sys.stdout.  This flag ultimately controls a Python logger; see
        :ref:`dbengine_logging` for information on how to configure logging
        directly.

    :param encoding: Defaults to ``utf-8``.  This is the string
        encoding used by SQLAlchemy for string encode/decode
        operations which occur within SQLAlchemy, **outside of
        the DBAPI.**  Most modern DBAPIs feature some degree of
        direct support for Python ``unicode`` objects,
        what you see in Python 2 as a string of the form
        ``u'some string'``.  For those scenarios where the
        DBAPI is detected as not supporting a Python ``unicode``
        object, this encoding is used to determine the
        source/destination encoding.  It is **not used**
        for those cases where the DBAPI handles unicode
        directly.

        To properly configure a system to accommodate Python
        ``unicode`` objects, the DBAPI should be
        configured to handle unicode to the greatest
        degree as is appropriate - see
        the notes on unicode pertaining to the specific
        target database in use at :ref:`dialect_toplevel`.

        Areas where string encoding may need to be accommodated
        outside of the DBAPI include zero or more of:

        * the values passed to bound parameters, corresponding to
          the :class:`.Unicode` type or the :class:`.String` type
          when ``convert_unicode`` is ``True``;
        * the values returned in result set columns corresponding
          to the :class:`.Unicode` type or the :class:`.String`
          type when ``convert_unicode`` is ``True``;
        * the string SQL statement passed to the DBAPI's
          ``cursor.execute()`` method;
        * the string names of the keys in the bound parameter
          dictionary passed to the DBAPI's ``cursor.execute()``
          as well as ``cursor.setinputsizes()`` methods;
        * the string column names retrieved from the DBAPI's
          ``cursor.description`` attribute.

        When using Python 3, the DBAPI is required to support
        *all* of the above values as Python ``unicode`` objects,
        which in Python 3 are just known as ``str``.  In Python 2,
        the DBAPI does not specify unicode behavior at all,
        so SQLAlchemy must make decisions for each of the above
        values on a per-DBAPI basis - implementations are
        completely inconsistent in their behavior.

    :param execution_options: Dictionary execution options which will
        be applied to all connections.  See
        :meth:`~sqlalchemy.engine.base.Connection.execution_options`

    :param implicit_returning=True: When ``True``, a RETURNING-
        compatible construct, if available, will be used to
        fetch newly generated primary key values when a single row
        INSERT statement is emitted with no existing returning()
        clause.  This applies to those backends which support RETURNING
        or a compatible construct, including Postgresql, Firebird, Oracle,
        Microsoft SQL Server.  Set this to ``False`` to disable
        the automatic usage of RETURNING.

    :param label_length=None: optional integer value which limits
        the size of dynamically generated column labels to that many
        characters.  If less than 6, labels are generated as
        "_(counter)".  If ``None``, the value of
        ``dialect.max_identifier_length`` is used instead.

    :param listeners: A list of one or more
        :class:`~sqlalchemy.interfaces.PoolListener` objects which will
        receive connection pool events.

    :param logging_name: String identifier which will be used within
        the "name" field of logging records generated within the
        "sqlalchemy.engine" logger.  Defaults to a hexstring of the
        object's id.

    :param max_overflow=10: the number of connections to allow in
        connection pool "overflow", that is connections that can be
        opened above and beyond the pool_size setting, which defaults
        to five.  this is only used with :class:`~sqlalchemy.pool.QueuePool`.

    :param module=None: reference to a Python module object (the module itself, not
        its string name).  Specifies an alternate DBAPI module to be used
        by the engine's dialect.  Each sub-dialect references a specific DBAPI which
        will be imported before first connect.  This parameter causes the
        import to be bypassed, and the given module to be used instead.
        Can be used for testing of DBAPIs as well as to inject "mock"
        DBAPI implementations into the :class:`.Engine`.

    :param pool=None: an already-constructed instance of
        :class:`~sqlalchemy.pool.Pool`, such as a
        :class:`~sqlalchemy.pool.QueuePool` instance.  If non-None, this
        pool will be used directly as the underlying connection pool
        for the engine, bypassing whatever connection parameters are
        present in the URL argument.  For information on constructing
        connection pools manually, see :ref:`pooling_toplevel`.

    :param poolclass=None: a :class:`~sqlalchemy.pool.Pool`
        subclass, which will be used to create a connection pool
        instance using the connection parameters given in the URL.  Note
        this differs from ``pool`` in that you don't actually
        instantiate the pool in this case, you just indicate what type
        of pool to be used.

    :param pool_logging_name: String identifier which will be used within
        the "name" field of logging records generated within the
        "sqlalchemy.pool" logger.  Defaults to a hexstring of the object's
        id.

    :param pool_size=5: the number of connections to keep open
        inside the connection pool.  This used with :class:`~sqlalchemy.pool.QueuePool` as
        well as :class:`~sqlalchemy.pool.SingletonThreadPool`.  With
        :class:`~sqlalchemy.pool.QueuePool`, a ``pool_size`` setting
        of 0 indicates no limit; to disable pooling, set ``poolclass`` to
        :class:`~sqlalchemy.pool.NullPool` instead.

    :param pool_recycle=-1: this setting causes the pool to recycle
        connections after the given number of seconds has passed.  It
        defaults to -1, or no timeout.  For example, setting to 3600
        means connections will be recycled after one hour.  Note that
        MySQL in particular will disconnect automatically if no
        activity is detected on a connection for eight hours (although
        this is configurable with the MySQLDB connection itself and the
        server configuration as well).

    :param pool_reset_on_return='rollback': set the "reset on return"
        behavior of the pool, which is whether ``rollback()``,
        ``commit()``, or nothing is called upon connections
        being returned to the pool.  See the docstring for
        ``reset_on_return`` at :class:`.Pool`.  (new as of 0.7.6)

    :param pool_timeout=30: number of seconds to wait before giving
        up on getting a connection from the pool.  This is only used
        with :class:`~sqlalchemy.pool.QueuePool`.

    :param strategy='plain': selects alternate engine implementations.
        Currently available is the ``threadlocal``
        strategy, which is described in :ref:`threadlocal_strategy`.

    """
    # Resolve the requested strategy name (default: 'plain') to its
    # registered EngineStrategy, then let the strategy construct the
    # Engine (dialect, pool, etc.) from the remaining arguments.  An
    # unknown name surfaces as a KeyError from the registry lookup.
    strategy = kwargs.pop('strategy', default_strategy)
    strategy = strategies.strategies[strategy]
    return strategy.create(*args, **kwargs)
def engine_from_config(configuration, prefix='sqlalchemy.', **kwargs):
    """Create a new Engine instance using a configuration dictionary.

    The dictionary is typically produced from a config file where keys
    are prefixed, such as sqlalchemy.url, sqlalchemy.echo, etc.  The
    'prefix' argument indicates the prefix to be searched for.

    A select set of keyword arguments will be "coerced" to their
    expected type based on string values.  In a future release, this
    functionality will be expanded and include dialect-specific
    arguments.
    """
    options = _coerce_config(configuration, prefix)
    # Explicit keyword arguments override values from the config dict.
    options.update(kwargs)
    # The 'url' entry is mandatory; everything else is passed through as
    # keyword options to create_engine().
    return create_engine(options.pop('url'), **options)
def _coerce_config(configuration, prefix):
    """Convert configuration values to expected types.

    Returns a new dict holding only the entries of *configuration* whose
    keys start with *prefix* (prefix stripped), with the known options
    below coerced from strings to their expected Python types.
    """
    plen = len(prefix)
    options = dict(
        (key[plen:], configuration[key])
        for key in configuration
        if key.startswith(prefix)
    )
    # Known options and the coercion applied to each (missing keys are
    # left untouched by util.coerce_kw_type).
    coercions = (
        ('convert_unicode', util.bool_or_str('force')),
        ('pool_timeout', int),
        ('echo', util.bool_or_str('debug')),
        ('echo_pool', util.bool_or_str('debug')),
        ('pool_recycle', int),
        ('pool_size', int),
        ('max_overflow', int),
        ('pool_threadlocal', bool),
        ('use_native_unicode', bool),
    )
    for option, type_ in coercions:
        util.coerce_kw_type(options, option, type_)
    return options
| gpl-3.0 |
yousafsyed/casperjs | bin/Lib/encodings/cp863.py | 272 | 34252 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP863.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless codec generated by gencodec.py: delegates to the C-level
    # charmap helpers using the module-level encoding_map/decoding_table
    # defined further down in this module.
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # Charmap encoding keeps no state between calls, so ``final`` is
        # ignored; [0] drops the consumed-length half of the result pair.
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # Single-byte charmap decoding needs no buffering between calls,
        # so ``final`` is ignored; [0] drops the consumed-length value.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # encode() is inherited from Codec; no stream-specific behavior needed.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # decode() is inherited from Codec; no stream-specific behavior needed.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record used by the encodings registry."""
    # The Codec class is stateless, so a single instance can provide
    # both the encode and decode entry points.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp863',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map

# Legacy byte -> Unicode-ordinal mapping (generated by gencodec.py and
# kept for backward compatibility; the codec itself decodes through
# ``decoding_table``).  Bytes 0x00-0x7f map to themselves (ASCII); the
# entries below override the high half for cp863 (French Canadian).
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
    0x0080: 0x00c7,  # LATIN CAPITAL LETTER C WITH CEDILLA
    0x0081: 0x00fc,  # LATIN SMALL LETTER U WITH DIAERESIS
    0x0082: 0x00e9,  # LATIN SMALL LETTER E WITH ACUTE
    0x0083: 0x00e2,  # LATIN SMALL LETTER A WITH CIRCUMFLEX
    0x0084: 0x00c2,  # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
    0x0085: 0x00e0,  # LATIN SMALL LETTER A WITH GRAVE
    0x0086: 0x00b6,  # PILCROW SIGN
    0x0087: 0x00e7,  # LATIN SMALL LETTER C WITH CEDILLA
    0x0088: 0x00ea,  # LATIN SMALL LETTER E WITH CIRCUMFLEX
    0x0089: 0x00eb,  # LATIN SMALL LETTER E WITH DIAERESIS
    0x008a: 0x00e8,  # LATIN SMALL LETTER E WITH GRAVE
    0x008b: 0x00ef,  # LATIN SMALL LETTER I WITH DIAERESIS
    0x008c: 0x00ee,  # LATIN SMALL LETTER I WITH CIRCUMFLEX
    0x008d: 0x2017,  # DOUBLE LOW LINE
    0x008e: 0x00c0,  # LATIN CAPITAL LETTER A WITH GRAVE
    0x008f: 0x00a7,  # SECTION SIGN
    0x0090: 0x00c9,  # LATIN CAPITAL LETTER E WITH ACUTE
    0x0091: 0x00c8,  # LATIN CAPITAL LETTER E WITH GRAVE
    0x0092: 0x00ca,  # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
    0x0093: 0x00f4,  # LATIN SMALL LETTER O WITH CIRCUMFLEX
    0x0094: 0x00cb,  # LATIN CAPITAL LETTER E WITH DIAERESIS
    0x0095: 0x00cf,  # LATIN CAPITAL LETTER I WITH DIAERESIS
    0x0096: 0x00fb,  # LATIN SMALL LETTER U WITH CIRCUMFLEX
    0x0097: 0x00f9,  # LATIN SMALL LETTER U WITH GRAVE
    0x0098: 0x00a4,  # CURRENCY SIGN
    0x0099: 0x00d4,  # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
    0x009a: 0x00dc,  # LATIN CAPITAL LETTER U WITH DIAERESIS
    0x009b: 0x00a2,  # CENT SIGN
    0x009c: 0x00a3,  # POUND SIGN
    0x009d: 0x00d9,  # LATIN CAPITAL LETTER U WITH GRAVE
    0x009e: 0x00db,  # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
    0x009f: 0x0192,  # LATIN SMALL LETTER F WITH HOOK
    0x00a0: 0x00a6,  # BROKEN BAR
    0x00a1: 0x00b4,  # ACUTE ACCENT
    0x00a2: 0x00f3,  # LATIN SMALL LETTER O WITH ACUTE
    0x00a3: 0x00fa,  # LATIN SMALL LETTER U WITH ACUTE
    0x00a4: 0x00a8,  # DIAERESIS
    0x00a5: 0x00b8,  # CEDILLA
    0x00a6: 0x00b3,  # SUPERSCRIPT THREE
    0x00a7: 0x00af,  # MACRON
    0x00a8: 0x00ce,  # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
    0x00a9: 0x2310,  # REVERSED NOT SIGN
    0x00aa: 0x00ac,  # NOT SIGN
    0x00ab: 0x00bd,  # VULGAR FRACTION ONE HALF
    0x00ac: 0x00bc,  # VULGAR FRACTION ONE QUARTER
    0x00ad: 0x00be,  # VULGAR FRACTION THREE QUARTERS
    0x00ae: 0x00ab,  # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00af: 0x00bb,  # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00b0: 0x2591,  # LIGHT SHADE
    0x00b1: 0x2592,  # MEDIUM SHADE
    0x00b2: 0x2593,  # DARK SHADE
    0x00b3: 0x2502,  # BOX DRAWINGS LIGHT VERTICAL
    0x00b4: 0x2524,  # BOX DRAWINGS LIGHT VERTICAL AND LEFT
    0x00b5: 0x2561,  # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
    0x00b6: 0x2562,  # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
    0x00b7: 0x2556,  # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
    0x00b8: 0x2555,  # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
    0x00b9: 0x2563,  # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    0x00ba: 0x2551,  # BOX DRAWINGS DOUBLE VERTICAL
    0x00bb: 0x2557,  # BOX DRAWINGS DOUBLE DOWN AND LEFT
    0x00bc: 0x255d,  # BOX DRAWINGS DOUBLE UP AND LEFT
    0x00bd: 0x255c,  # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
    0x00be: 0x255b,  # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
    0x00bf: 0x2510,  # BOX DRAWINGS LIGHT DOWN AND LEFT
    0x00c0: 0x2514,  # BOX DRAWINGS LIGHT UP AND RIGHT
    0x00c1: 0x2534,  # BOX DRAWINGS LIGHT UP AND HORIZONTAL
    0x00c2: 0x252c,  # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    0x00c3: 0x251c,  # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    0x00c4: 0x2500,  # BOX DRAWINGS LIGHT HORIZONTAL
    0x00c5: 0x253c,  # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    0x00c6: 0x255e,  # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
    0x00c7: 0x255f,  # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
    0x00c8: 0x255a,  # BOX DRAWINGS DOUBLE UP AND RIGHT
    0x00c9: 0x2554,  # BOX DRAWINGS DOUBLE DOWN AND RIGHT
    0x00ca: 0x2569,  # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    0x00cb: 0x2566,  # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    0x00cc: 0x2560,  # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    0x00cd: 0x2550,  # BOX DRAWINGS DOUBLE HORIZONTAL
    0x00ce: 0x256c,  # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    0x00cf: 0x2567,  # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
    0x00d0: 0x2568,  # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
    0x00d1: 0x2564,  # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
    0x00d2: 0x2565,  # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
    0x00d3: 0x2559,  # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
    0x00d4: 0x2558,  # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
    0x00d5: 0x2552,  # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
    0x00d6: 0x2553,  # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
    0x00d7: 0x256b,  # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
    0x00d8: 0x256a,  # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
    0x00d9: 0x2518,  # BOX DRAWINGS LIGHT UP AND LEFT
    0x00da: 0x250c,  # BOX DRAWINGS LIGHT DOWN AND RIGHT
    0x00db: 0x2588,  # FULL BLOCK
    0x00dc: 0x2584,  # LOWER HALF BLOCK
    0x00dd: 0x258c,  # LEFT HALF BLOCK
    0x00de: 0x2590,  # RIGHT HALF BLOCK
    0x00df: 0x2580,  # UPPER HALF BLOCK
    0x00e0: 0x03b1,  # GREEK SMALL LETTER ALPHA
    0x00e1: 0x00df,  # LATIN SMALL LETTER SHARP S
    0x00e2: 0x0393,  # GREEK CAPITAL LETTER GAMMA
    0x00e3: 0x03c0,  # GREEK SMALL LETTER PI
    0x00e4: 0x03a3,  # GREEK CAPITAL LETTER SIGMA
    0x00e5: 0x03c3,  # GREEK SMALL LETTER SIGMA
    0x00e6: 0x00b5,  # MICRO SIGN
    0x00e7: 0x03c4,  # GREEK SMALL LETTER TAU
    0x00e8: 0x03a6,  # GREEK CAPITAL LETTER PHI
    0x00e9: 0x0398,  # GREEK CAPITAL LETTER THETA
    0x00ea: 0x03a9,  # GREEK CAPITAL LETTER OMEGA
    0x00eb: 0x03b4,  # GREEK SMALL LETTER DELTA
    0x00ec: 0x221e,  # INFINITY
    0x00ed: 0x03c6,  # GREEK SMALL LETTER PHI
    0x00ee: 0x03b5,  # GREEK SMALL LETTER EPSILON
    0x00ef: 0x2229,  # INTERSECTION
    0x00f0: 0x2261,  # IDENTICAL TO
    0x00f1: 0x00b1,  # PLUS-MINUS SIGN
    0x00f2: 0x2265,  # GREATER-THAN OR EQUAL TO
    0x00f3: 0x2264,  # LESS-THAN OR EQUAL TO
    0x00f4: 0x2320,  # TOP HALF INTEGRAL
    0x00f5: 0x2321,  # BOTTOM HALF INTEGRAL
    0x00f6: 0x00f7,  # DIVISION SIGN
    0x00f7: 0x2248,  # ALMOST EQUAL TO
    0x00f8: 0x00b0,  # DEGREE SIGN
    0x00f9: 0x2219,  # BULLET OPERATOR
    0x00fa: 0x00b7,  # MIDDLE DOT
    0x00fb: 0x221a,  # SQUARE ROOT
    0x00fc: 0x207f,  # SUPERSCRIPT LATIN SMALL LETTER N
    0x00fd: 0x00b2,  # SUPERSCRIPT TWO
    0x00fe: 0x25a0,  # BLACK SQUARE
    0x00ff: 0x00a0,  # NO-BREAK SPACE
})
### Decoding Table

# The decoding table is the string form of ``decoding_map``: character
# ``i`` of the string is the Unicode character that byte value ``i``
# decodes to.  Deriving it from ``decoding_map`` (instead of repeating
# all 256 entries as a second literal) keeps a single source of truth
# for the mapping; ``codecs.charmap_decode`` accepts the resulting str
# directly, exactly as it accepted the previous literal.
decoding_table = ''.join(chr(decoding_map[byte]) for byte in range(256))
### Encoding Map

# cp863 maps each of the 256 byte values to a distinct Unicode
# character (the previous hand-maintained literal had exactly 256
# entries, i.e. the mapping is bijective), so the encoding map is
# simply the inverse of ``decoding_table``.  Building it by inversion
# keeps the decoding table as the single source of truth and yields
# the same plain ``{unicode ordinal: byte value}`` dict as before.
encoding_map = {ord(decoded): byte for byte, decoded in enumerate(decoding_table)}
| mit |
wannabeCitizen/Congress_DCSC | congressionalrecord/lib/xml_annotator.py | 1 | 1321 | from .regex import Regex
class XMLAnnotator(object):
    '''Wraps a string in a ``Regex`` helper and inserts XML open/close
    tags around spans matched by registered regular expressions.

    Registration only records the tags; call apply() to perform all
    insertions and obtain the annotated string.'''

    def __init__(self, string):
        # All insertions are accumulated on this Regex wrapper and only
        # materialized when apply() is called.
        self.regx = Regex(string)

    def register_tag(self, re_string, open_tag, group=None):
        ''' Registers an XML tag to be inserted around a matching regular
        expression. The closing tag is derived from the opening tag. This
        function only registers the tags and their associated regex; apply()
        must be run before the tags are inserted. If group is specified, then
        the tag is inserted around the matching group instead of the entire
        regular expression. '''
        close_tag = self.derive_close_tag(open_tag)
        self.regx.insert_before(re_string, open_tag, group)
        self.regx.insert_after(re_string, close_tag, group)

    def register_tag_open(self, re_string, open_tag, group=None):
        ''' Registers only an opening tag to be inserted before a match. '''
        self.regx.insert_before(re_string, open_tag, group)

    def register_tag_close(self, re_string, close_tag, group=None):
        ''' Registers only a closing tag to be inserted after a match. '''
        self.regx.insert_after(re_string, close_tag, group)

    def derive_close_tag(self, open_tag):
        ''' Return the closing tag for *open_tag*, dropping any attributes:
        '<speaker name="x">' -> '</speaker>', '<bullet>' -> '</bullet>'. '''
        space = open_tag.find(' ')
        if space != -1:
            # Attributes present: keep only the tag name, re-add the '>'.
            close_tag = '</' + open_tag[1:space] + '>'
        else:
            # No attributes: the trailing '>' is carried over as-is.
            close_tag = '</' + open_tag[1:]
        return close_tag

    def apply(self):
        ''' Perform all registered insertions and return the annotated
        string. '''
        return self.regx.apply()
| gpl-2.0 |
WhiskeyMedia/ella | test_ella/test_core/test_publishable.py | 2 | 9193 | # -*- coding: utf-8 -*-
from datetime import timedelta, datetime
import pytz
from test_ella.cases import RedisTestCase as TestCase
from django.conf import settings
from django.contrib.sites.models import Site
from django.contrib.redirects.models import Redirect
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from ella.core.models import Category, Publishable
from ella.core import signals
from ella.core.management import generate_publish_signals
from ella.core.management import unpublish_publish_to_expirations
from ella.utils import timezone
from nose import tools, SkipTest
from test_ella.test_core import create_basic_categories, create_and_place_a_publishable, default_time
class PublishableTestCase(TestCase):
    """Shared fixture: builds the basic category tree and creates one
    published, placed publishable for the test classes below to use."""
    def setUp(self):
        super(PublishableTestCase, self).setUp()
        create_basic_categories(self)
        create_and_place_a_publishable(self)
class TestLastUpdated(PublishableTestCase):
    """``last_updated`` should follow ``publish_from`` only while it
    still holds its default value."""

    def test_last_updated_moved_if_default(self):
        publish_time = timezone.now()
        self.publishable.publish_from = publish_time
        self.publishable.save(force_update=True)
        tools.assert_equals(publish_time, self.publishable.last_updated)

    def test_last_updated_isnt_moved_if_changed(self):
        publish_time = timezone.now()
        explicit_update = publish_time + timedelta(days=1)
        self.publishable.last_updated = explicit_update
        self.publishable.publish_from = publish_time
        self.publishable.save(force_update=True)
        tools.assert_equals(explicit_update, self.publishable.last_updated)
class TestPublishableHelpers(PublishableTestCase):
    """URL helpers and ``app_data`` behaviour on Publishable."""

    def test_url(self):
        tools.assert_equals('/nested-category/2008/1/10/first-article/', self.publishable.get_absolute_url())

    def test_tz_aware_url(self):
        # Only meaningful with USE_TZ on: publishing just before midnight
        # UTC must still produce the Jan 10 URL.
        if not timezone.use_tz:
            raise SkipTest()

        utc = pytz.timezone('UTC')
        self.publishable.publish_from = datetime(2008, 1, 9, 23, 50, 0, tzinfo=utc)
        tools.assert_equals('/nested-category/2008/1/10/first-article/', self.publishable.get_absolute_url())

    def test_https_url(self):
        # Fix: the original version left USE_HTTPS = True set globally,
        # leaking into every subsequent test (TestUrl works around it by
        # forcing False).  Restore the previous value when done.
        original_use_https = getattr(settings, 'USE_HTTPS', False)
        settings.USE_HTTPS = True
        try:
            tools.assert_equals('https://' + self.category.site.domain + '/nested-category/2008/1/10/first-article/',
                self.publishable.get_absolute_url(domain=True))
        finally:
            settings.USE_HTTPS = original_use_https

    def test_domain_url(self):
        tools.assert_equals('http://example.com/nested-category/2008/1/10/first-article/', self.publishable.get_domain_url())

    def test_app_data(self):
        tools.assert_equals({}, self.publishable.app_data)
        self.publishable.app_data['core'] = 'testing'
        self.publishable.save()
        # NOTE(review): ``p`` is re-fetched from the DB but never asserted
        # on -- the final check still reads self.publishable.  Asserting
        # on ``p.app_data`` would actually verify persistence; confirm
        # intent before changing.
        p = self.publishable.content_type.get_object_for_this_type(pk=self.publishable.pk)
        tools.assert_equals({'core': 'testing'}, self.publishable.app_data)

    def test_saving_base_publishable_does_not_update_content_type(self):
        # Saving through the base Publishable manager must not overwrite
        # the stored content_type of the concrete subclass.
        publishable_ct = ContentType.objects.get_for_model(Publishable)
        current_ct = self.publishable.content_type
        tools.assert_not_equals(publishable_ct, current_ct)

        p = Publishable.objects.get(pk=self.publishable.pk)
        p.save()
        tools.assert_equals(current_ct, p.content_type)
class TestRedirects(PublishableTestCase):
    """Changing a publishable's URL must create and maintain Redirect
    records pointing old paths at the current one."""

    def test_url_change_creates_redirect(self):
        self.publishable.slug = 'old-article-new-slug'
        self.publishable.save()

        tools.assert_equals(1, Redirect.objects.count())
        redirect = Redirect.objects.all()[0]
        tools.assert_equals('/nested-category/2008/1/10/first-article/', redirect.old_path)
        tools.assert_equals('/nested-category/2008/1/10/old-article-new-slug/', redirect.new_path)
        tools.assert_equals(self.site_id, redirect.site_id)

    def test_url_change_updates_existing_redirects(self):
        existing = Redirect.objects.create(site_id=self.site_id, new_path='/nested-category/2008/1/10/first-article/', old_path='some-path')
        self.publishable.slug = 'old-article-new-slug'
        self.publishable.save()

        tools.assert_equals(2, Redirect.objects.count())
        refreshed = Redirect.objects.get(pk=existing.pk)
        tools.assert_equals('some-path', refreshed.old_path)
        tools.assert_equals('/nested-category/2008/1/10/old-article-new-slug/', refreshed.new_path)
        tools.assert_equals(self.site_id, refreshed.site_id)

    def test_ability_to_place_back_and_forth(self):
        # Flip the slug back and forth; every save must succeed even
        # though redirects for the intermediate URLs already exist.
        for slug in ('old-article-new-slug', 'first-article', 'old-article-new-slug'):
            self.publishable.slug = slug
            self.publishable.save()
class TestUrl(PublishableTestCase):
    # URL construction for publishables placed in various category/site
    # configurations.
    def test_home_url(self):
        # Placed in the root category => no category slug in the URL.
        self.publishable.category = self.category
        self.publishable.save()
        tools.assert_equals('/2008/1/10/first-article/', self.publishable.get_absolute_url())
    def test_url(self):
        tools.assert_equals('/nested-category/2008/1/10/first-article/', self.publishable.get_absolute_url())
    def test_url_on_other_site(self):
        site = Site.objects.create(
            name='some site',
            domain='not-example.com'
        )
        category = Category.objects.create(
            title=u"再见 category",
            description=u"example testing category, second site",
            site=site,
            slug=u'zai-jian-category',
        )
        self.publishable.category = category
        self.publishable.publish_from = default_time
        self.publishable.save()
        settings.USE_HTTPS = False
        # A publishable living on another site must get an absolute URL
        # (scheme + domain), not a site-relative path.
        tools.assert_equals(u'http://not-example.com/2008/1/10/first-article/', self.publishable.get_absolute_url())
    def test_unique_url_validation(self):
        # A copy (pk=None) would occupy the same URL -> validation error.
        self.publishable.pk = None
        tools.assert_raises(ValidationError, self.publishable.full_clean)
    def test_url_is_tested_for_published_objects_only(self):
        # The same duplicate URL is fine as long as the copy is unpublished.
        self.publishable.pk = None
        self.publishable.published = False
        self.publishable.full_clean()
class TestSignals(TestCase):
    # Verifies content_published / content_unpublished signals fire at the
    # right moments, both on save() and from generate_publish_signals().
    def setUp(self):
        super(TestSignals, self).setUp()
        signals.content_published.connect(self.publish)
        signals.content_unpublished.connect(self.unpublish)
        # Clear before fixture creation so only fixture-triggered signals
        # from create_and_place_a_publishable are recorded.
        self._signal_clear()
        create_basic_categories(self)
        create_and_place_a_publishable(self)
    def tearDown(self):
        super(TestSignals, self).tearDown()
        signals.content_published.disconnect(self.publish)
        signals.content_unpublished.disconnect(self.unpublish)
    def _signal_clear(self):
        # Reset the records of received signals.
        self.publish_received = []
        self.unpublish_received = []
    # signal handlers
    def publish(self, **kwargs):
        self.publish_received.append(kwargs)
    def unpublish(self, **kwargs):
        self.unpublish_received.append(kwargs)
    def test_publishable_is_announced_on_save(self):
        tools.assert_true(self.publishable.announced)
        tools.assert_equals(1, len(self.publish_received))
        tools.assert_equals(0, len(self.unpublish_received))
        tools.assert_equals(self.publishable, self.publish_received[0]['publishable'])
    def test_unpublish_sent_when_takedown_occurs(self):
        self._signal_clear()
        self.publishable.published = False
        self.publishable.save()
        tools.assert_false(self.publishable.announced)
        tools.assert_equals(0, len(self.publish_received))
        tools.assert_equals(1, len(self.unpublish_received))
        tools.assert_equals(self.publishable, self.unpublish_received[0]['publishable'])
    # NOTE(review): "tice" looks like a typo for "twice"; renaming would
    # change the test id so it is only flagged here.
    def test_generate_doesnt_issue_signal_tice(self):
        # A second pass must not re-announce already-announced content.
        self._signal_clear()
        generate_publish_signals()
        tools.assert_equals(0, len(self.publish_received))
        tools.assert_equals(0, len(self.unpublish_received))
    def test_generate_picks_up_on_takedown(self):
        self.publishable.publish_to = timezone.now() + timedelta(days=1)
        self.publishable.save()
        self._signal_clear()
        # Pretend time has moved just past publish_to.
        generate_publish_signals(timezone.now() + timedelta(days=1, seconds=2))
        tools.assert_equals(0, len(self.publish_received))
        tools.assert_equals(1, len(self.unpublish_received))
        # Generated signals carry a wrapper object whose .target is the
        # publishable -- presumably a Placement; TODO confirm.
        tools.assert_equals(self.publishable, self.unpublish_received[0]['publishable'].target)
    def test_generate_picks_up_on_publish(self):
        self.publishable.publish_from = timezone.now() + timedelta(days=1)
        self.publishable.save()
        self._signal_clear()
        generate_publish_signals(timezone.now() + timedelta(days=1, seconds=2))
        tools.assert_equals(1, len(self.publish_received))
        tools.assert_equals(0, len(self.unpublish_received))
        tools.assert_equals(self.publishable, self.publish_received[0]['publishable'].target)
    def test_publish_to_unpublishes_at_time(self):
        # An already-expired publish_to must be taken down by the cron-style
        # unpublish_publish_to_expirations() helper.
        publish_to = timezone.now() - timedelta(hours=1)
        self.publishable.publish_to = publish_to
        self.publishable.published = True
        self.publishable.save()
        tools.assert_equals(1, len(Publishable.objects.filter(published=True, publish_to=publish_to)))
        unpublish_publish_to_expirations(timezone.now())
        tools.assert_equals(0, len(Publishable.objects.filter(published=True, publish_to=publish_to)))
| bsd-3-clause |
maxdm07/node-gyp | gyp/test/subdirectory/gyptest-SYMROOT-all.py | 399 | 1269 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies building a target and a subsidiary dependent target from a
.gyp file in a subdirectory, without specifying an explicit output build
directory, and using the generated solution or project file at the top
of the tree as the entry point.
The configuration sets the Xcode SYMROOT variable and uses --depth=
to make Xcode behave like the other build tools--that is, put all
built targets in a single output build directory at the top of the tree.
"""
import TestGyp
test = TestGyp.TestGyp()
# Generate project files from the .gyp in the subdirectory; --depth=. makes
# the top of the tree the build root so all targets share one output dir.
test.run_gyp('prog1.gyp', '-Dset_symroot=1', '--depth=.', chdir='src')
test.relocate('src', 'relocate/src')
# Suppress the test infrastructure's setting SYMROOT on the command line.
test.build('prog1.gyp', test.ALL, SYMROOT=None, chdir='relocate/src')
# Both the entry target and its subsidiary dependent must have been built.
test.run_built_executable('prog1',
                          stdout="Hello from prog1.c\n",
                          chdir='relocate/src')
test.run_built_executable('prog2',
                          stdout="Hello from prog2.c\n",
                          chdir='relocate/src')
test.pass_test()
| mit |
citrix-openstack-build/tempest | tempest/api/compute/test_auth_token.py | 3 | 1732 | # Copyright 2013 IBM Corp
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.api.compute import base
import tempest.config as config
class AuthTokenTestJSON(base.BaseComputeTest):
    # Verify that tokens obtained from identity API v2 and v3 are both
    # accepted by the compute service.
    _interface = 'json'
    @classmethod
    def setUpClass(cls):
        super(AuthTokenTestJSON, cls).setUpClass()
        # Two server clients, identical except for which identity API
        # version they authenticated against.
        cls.servers_v2 = cls.os.servers_client
        cls.servers_v3 = cls.os.servers_client_v3_auth
    def test_v2_token(self):
        # Can get a token using v2 of the identity API and use that to perform
        # an operation on the compute service.
        # Doesn't matter which compute API is used,
        # picking list_servers because it's easy.
        self.servers_v2.list_servers()
    @testtools.skipIf(not config.TempestConfig().identity.uri_v3,
                      'v3 auth client not configured')
    def test_v3_token(self):
        # Can get a token using v3 of the identity API and use that to perform
        # an operation on the compute service.
        # Doesn't matter which compute API is used,
        # picking list_servers because it's easy.
        self.servers_v3.list_servers()
class AuthTokenTestXML(AuthTokenTestJSON):
    # Re-run the same token tests through the XML interface.
    _interface = 'xml'
| apache-2.0 |
sslavov93/TreasureDungeon | map_validator.py | 1 | 4303 | import os
class MapValidator():
    """Validates a user-created dungeon map file against the game's rules.

    The map must be rectangular, fully bordered by 'Z' tiles, and contain
    exactly one player spawn ('S'), at least one NPC spawn ('N'), exactly
    one key ('K') and exactly one chest ('C').
    """

    def __init__(self, location):
        # Raw map text; "" when the file does not exist.
        self.map = self.read_file(location)
        # Per-rule result flags, filled in by validate_map().
        self.player_spawn = False
        self.npc_spawn = False
        self.key_present = False
        self.chest_present = False
        self.rectangular = False
        self.borders = False

    def read_file(self, map_file):
        """Return the contents of *map_file*, or '' when it does not exist.

        Args:
            map_file: absolute path to the map file. Type(String)
        """
        if not os.path.exists(map_file):
            return ""
        with open(map_file, "r") as handle:
            return handle.read()

    def generic_check(self, map_character):
        """Return True iff *map_character* occurs exactly once on the map.

        Args:
            map_character: the char whose occurrence count is checked
        """
        return self.map.count(map_character) == 1

    def check_player_spawn(self):
        """True iff exactly one player spawn indicator is on the map."""
        return self.generic_check("S")

    def check_npc_spawn(self):
        """True iff at least one NPC spawn indicator is on the map."""
        return "N" in self.map

    def check_dungeon_is_rectangular(self):
        """True iff every row of the map has the same length."""
        rows = self.map.split("\n")
        width = len(rows[0])
        return all(len(row) == width for row in rows)

    def check_dungeon_borders(self):
        """True iff the outer border of the map consists only of 'Z' cells."""
        rows = self.map.split("\n")
        if any(cell != "Z" for cell in rows[0]):
            return False
        if any(cell != "Z" for cell in rows[-1]):
            return False
        for row in rows[1:-1]:
            if row[0] != "Z" or row[-1] != "Z":
                return False
        return True

    def check_key_is_present(self):
        """True iff exactly one chest-key indicator is on the map."""
        return self.generic_check("K")

    def check_chest_is_present(self):
        """True iff exactly one chest indicator is on the map."""
        return self.generic_check("C")

    def validate_map(self):
        """Run every rule, record its result flag, and return the verdict."""
        self.npc_spawn = self.check_npc_spawn()
        self.player_spawn = self.check_player_spawn()
        self.key_present = self.check_key_is_present()
        self.chest_present = self.check_chest_is_present()
        self.borders = self.check_dungeon_borders()
        self.rectangular = self.check_dungeon_is_rectangular()
        return all((self.player_spawn, self.npc_spawn, self.key_present,
                    self.chest_present, self.rectangular, self.borders))

    def generate_message(self):
        """Return a human-readable summary of the last validation run."""
        checks = (
            (self.player_spawn,
             "There is an error with the player spawn slots.\n"),
            (self.npc_spawn,
             "There is an error with the NPC spawn slots.\n"),
            (self.key_present,
             "There is an error with the key slot.\n"),
            (self.chest_present,
             "There is an error with the chest slot.\n"),
            (self.rectangular,
             "Your dungeon is not rectangular.\n"),
            (self.borders,
             "There is an error with the dungeon borders."),
        )
        if all(ok for ok, _ in checks):
            return "Your map is valid. Dungeon loaded."
        return "".join(text for ok, text in checks if not ok)
| gpl-2.0 |
kapy2010/treeherder | treeherder/log_parser/tasks.py | 1 | 3773 | import logging
import newrelic.agent
from django.conf import settings
from treeherder.autoclassify.tasks import autoclassify
from treeherder.log_parser.crossreference import crossreference_job
from treeherder.log_parser.utils import post_log_artifacts
from treeherder.model.models import (Job,
JobLog)
from treeherder.workers.task import retryable_task
from . import failureline
logger = logging.getLogger(__name__)
def parser_task(f):
    """Decorator for log-parsing tasks.

    Skips the wrapped task when the log has already been parsed, and tags
    the New Relic transaction with details of the log being processed.
    """
    def wrapper(job_log_id, priority):
        newrelic.agent.add_custom_parameter("job_log_id", job_log_id)
        log = JobLog.objects.select_related("job").get(id=job_log_id)
        newrelic.agent.add_custom_parameter("job_log_name", log.name)
        newrelic.agent.add_custom_parameter("job_log_url", log.url)
        newrelic.agent.add_custom_parameter("job_log_status_prior",
                                            log.get_status_display())
        if log.status == JobLog.PARSED:
            # Nothing to do -- another worker already parsed this log.
            logger.info("log already parsed")
            return True
        return f(log, priority)
    # Preserve the wrapped function's identity for task registration/logging.
    wrapper.__name__ = f.__name__
    wrapper.__doc__ = f.__doc__
    return wrapper
def parse_job_log(func_name, routing_key, job_log):
    """Schedule one log-parsing task for a single job log.

    ``func_name`` selects which task to run; ``routing_key`` both routes
    the message and carries the priority in its last dot-separated segment,
    which is forwarded to the task.
    """
    dispatch = {
        "store_failure_lines": store_failure_lines,
        "parse_log": parse_log,
    }
    logger.debug("parse_job_log for job log %s (%s, %s)",
                 job_log.id, func_name, routing_key)
    priority = routing_key.rsplit(".", 1)[1]
    task = dispatch[func_name]
    sig = task.si(job_log.id, priority)
    sig.set(routing_key=routing_key)
    sig.apply_async()
@retryable_task(name='log-parser', max_retries=10)
@parser_task
def parse_log(job_log, priority):
    """
    Call ArtifactBuilderCollection on the given job.
    """
    # Parse the raw log into artifacts, then chain the crossreference step
    # at the same priority this parse ran with.
    post_log_artifacts(job_log)
    logger.debug("Scheduling crossreference for job %i from parse_log" % job_log.job.id)
    crossreference_error_lines.apply_async(
        args=[job_log.job.id, priority],
        routing_key="crossreference_error_lines.%s" % priority)
@retryable_task(name='store-failure-lines', max_retries=10)
@parser_task
def store_failure_lines(job_log, priority):
    """Store the failure lines from a log corresponding to the structured
    errorsummary file."""
    logger.debug('Running store_failure_lines for job %s' % job_log.job.id)
    failureline.store_failure_lines(job_log)
    # Chain the crossreference step at the same priority.
    logger.debug("Scheduling crossreference for job %i from store_failure_lines" % job_log.job.id)
    crossreference_error_lines.apply_async(
        args=[job_log.job.id, priority],
        routing_key="crossreference_error_lines.%s" % priority)
@retryable_task(name='crossreference-error-lines', max_retries=10)
def crossreference_error_lines(job_id, priority):
    """Match structured (FailureLine) and unstructured (TextLogError) lines
    for a job, then schedule autoclassification when enabled and useful.
    """
    newrelic.agent.add_custom_parameter("job_id", job_id)
    logger.debug("Running crossreference-error-lines for job %s" % job_id)
    job = Job.objects.get(id=job_id)
    has_lines = crossreference_job(job)
    if not settings.AUTOCLASSIFY_JOBS:
        # Autoclassification is disabled site-wide: record that we skipped it.
        job.autoclassify_status = Job.SKIPPED
        job.save(update_fields=['autoclassify_status'])
    elif has_lines:
        logger.debug("Scheduling autoclassify for job %i" % job_id)
        autoclassify.apply_async(
            args=[job_id],
            routing_key="autoclassify.%s" % priority)
    else:
        logger.debug("Job %i didn't have any crossreferenced lines, skipping autoclassify " % job_id)
| mpl-2.0 |
Frodox/buildbot | master/buildbot/test/unit/test_plugins.py | 10 | 14759 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
Unit tests for the plugin framework
"""
from __future__ import absolute_import
from __future__ import print_function
import re
import mock
from twisted.trial import unittest
from zope.interface import implementer
import buildbot.plugins.db
from buildbot.errors import PluginDBError
from buildbot.interfaces import IPlugin
from buildbot.test.util.warnings import assertNotProducesWarnings
from buildbot.test.util.warnings import assertProducesWarning
from buildbot.worker_transition import DeprecatedWorkerAPIWarning
from buildbot.worker_transition import DeprecatedWorkerNameWarning
# buildbot.plugins.db needs to be imported for patching, however just 'db' is
# much shorter for using in tests
db = buildbot.plugins.db
class FakeEntry(object):
    """A stand-in for a pkg_resources entry point, suitable for unit tests."""

    def __init__(self, name, project_name, version, fail_require, value):
        self._name = name
        self._fail_require = fail_require
        self._value = value
        # Mock just enough of the distribution object for db.info().
        self._dist = mock.Mock(spec_set=['project_name', 'version'])
        self._dist.project_name = project_name
        self._dist.version = version

    @property
    def name(self):
        """The entry point's name."""
        return self._name

    @property
    def dist(self):
        """The (mocked) distribution this entry belongs to."""
        return self._dist

    def require(self):
        """Simulate dependency resolution, failing when configured to."""
        if self._fail_require:
            raise RuntimeError('Fail require as requested')

    def load(self):
        """Return the plugin object this entry point exposes."""
        return self._value
class ITestInterface(IPlugin):
    """
    test interface
    """
    # zope.interface method declaration: no 'self' parameter by convention.
    def hello(name):
        "Greets by :param:`name`"
@implementer(ITestInterface)
class ClassWithInterface(object):
    """
    A minimal implementation of :class:`ITestInterface`.
    """

    def __init__(self, name=None):
        self._name = name

    def hello(self, name=None):
        """Return *name* when given (and truthy), else the stored name."""
        return name if name else self._name
class ClassWithNoInterface(object):
    """
    just a class
    """
    # Deliberately implements no plugin interface, to exercise the
    # interface-checking code paths in the plugin DB.
# NOTE: buildbot.plugins.db prepends the group with common namespace --
# 'buildbot.'
# One group per scenario: interface implemented, require() failure,
# missing interface, and duplicate entry names.
_FAKE_ENTRIES = {
    'buildbot.interface': [
        FakeEntry('good', 'non-existent', 'irrelevant', False,
                  ClassWithInterface),
        FakeEntry('deep.path', 'non-existent', 'irrelevant', False,
                  ClassWithInterface)
    ],
    'buildbot.interface_failed': [
        FakeEntry('good', 'non-existent', 'irrelevant', True,
                  ClassWithInterface)
    ],
    'buildbot.no_interface': [
        FakeEntry('good', 'non-existent', 'irrelevant', False,
                  ClassWithNoInterface)
    ],
    'buildbot.no_interface_again': [
        FakeEntry('good', 'non-existent', 'irrelevant', False,
                  ClassWithNoInterface)
    ],
    'buildbot.no_interface_failed': [
        FakeEntry('good', 'non-existent', 'irrelevant', True,
                  ClassWithNoInterface)
    ],
    'buildbot.duplicates': [
        FakeEntry('good', 'non-existent', 'first', False,
                  ClassWithNoInterface),
        FakeEntry('good', 'non-existent', 'second', False,
                  ClassWithNoInterface)
    ]
}
def provide_fake_entries(group):
    """Return the fake entry points registered for *group* (possibly none)."""
    try:
        return _FAKE_ENTRIES[group]
    except KeyError:
        return []
@mock.patch('buildbot.plugins.db.iter_entry_points', provide_fake_entries)
class TestBuildbotPlugins(unittest.TestCase):
    # Exercises the plugin DB against the fake entry points above; the
    # class-level patch replaces setuptools entry-point discovery.
    def setUp(self):
        # Start each test from a pristine plugin database singleton.
        buildbot.plugins.db._DB = buildbot.plugins.db._PluginDB()
    def test_check_group_registration(self):
        with mock.patch.object(buildbot.plugins.db, '_DB', db._PluginDB()):
            # The groups will be prepended with namespace, so info() will
            # return a dictionary with right keys, but no data
            groups = set(_FAKE_ENTRIES.keys())
            for group in groups:
                db.get_plugins(group)
            registered = set(db.info().keys())
            self.assertEqual(registered, groups)
            self.assertEqual(registered, set(db.namespaces()))
    def test_interface_provided_simple(self):
        # Basic check before the actual test
        self.assertTrue(ITestInterface.implementedBy(ClassWithInterface))
        plugins = db.get_plugins('interface', interface=ITestInterface)
        self.assertTrue('good' in plugins.names)
        # get() and attribute access must resolve to the same object.
        result_get = plugins.get('good')
        result_getattr = plugins.good
        self.assertFalse(result_get is None)
        self.assertTrue(result_get is result_getattr)
        # Make sure we actually got our class
        greeter = result_get('yes')
        self.assertEqual('yes', greeter.hello())
        self.assertEqual('no', greeter.hello('no'))
    def test_missing_plugin(self):
        plugins = db.get_plugins('interface', interface=ITestInterface)
        self.assertRaises(AttributeError, getattr, plugins, 'bad')
        self.assertRaises(PluginDBError, plugins.get, 'bad')
        self.assertRaises(PluginDBError, plugins.get, 'good.extra')
    def test_interface_provided_deep(self):
        # Basic check before the actual test
        self.assertTrue(ITestInterface.implementedBy(ClassWithInterface))
        plugins = db.get_plugins('interface', interface=ITestInterface)
        # Dotted names create nested namespaces.
        self.assertTrue('deep.path' in plugins.names)
        self.assertTrue('deep.path' in plugins)
        self.assertFalse('even.deeper.path' in plugins)
        result_get = plugins.get('deep.path')
        result_getattr = plugins.deep.path
        self.assertFalse(result_get is None)
        self.assertTrue(result_get is result_getattr)
        # Make sure we actually got our class
        greeter = result_get('yes')
        self.assertEqual('yes', greeter.hello())
        self.assertEqual('no', greeter.hello('no'))
    def test_interface_provided_deps_failed(self):
        # require() failure must surface as PluginDBError on access.
        plugins = db.get_plugins('interface_failed', interface=ITestInterface,
                                 check_extras=True)
        self.assertRaises(PluginDBError, plugins.get, 'good')
    def test_required_interface_not_provided(self):
        # Plugin does not implement the required interface -> error on get().
        plugins = db.get_plugins('no_interface_again',
                                 interface=ITestInterface)
        self.assertTrue(plugins._interface is ITestInterface)
        self.assertRaises(PluginDBError, plugins.get, 'good')
    def test_no_interface_provided(self):
        # Without a required interface anything loads fine.
        plugins = db.get_plugins('no_interface')
        self.assertFalse(plugins.get('good') is None)
    def test_no_interface_provided_deps_failed(self):
        plugins = db.get_plugins('no_interface_failed', check_extras=True)
        self.assertRaises(PluginDBError, plugins.get, 'good')
    def test_failure_on_dups(self):
        # Two entries with the same name in one group are rejected.
        self.assertRaises(PluginDBError, db.get_plugins, 'duplicates',
                          load_now=True)
    def test_get_info_on_a_known_plugin(self):
        plugins = db.get_plugins('interface')
        self.assertEqual(('non-existent', 'irrelevant'), plugins.info('good'))
    def test_failure_on_unknown_plugin_info(self):
        plugins = db.get_plugins('interface')
        self.assertRaises(PluginDBError, plugins.info, 'bad')
    def test_failure_on_unknown_plugin_get(self):
        plugins = db.get_plugins('interface')
        self.assertRaises(PluginDBError, plugins.get, 'bad')
class SimpleFakeEntry(FakeEntry):
    """A FakeEntry with fixed dist info that never fails require()."""
    def __init__(self, name, value):
        # Use super() rather than naming the base class explicitly so the
        # call stays correct if the inheritance hierarchy ever changes
        # (py2/py3-compatible form, matching this file's __future__ imports).
        super(SimpleFakeEntry, self).__init__(name, 'non-existent',
                                              'irrelevant', False, value)
# Entry points for the worker/buildslave naming-transition tests: new-style
# names under 'buildbot.worker', old-style under 'buildbot.buildslave', and
# renamed utility helpers under 'buildbot.util'.
_WORKER_FAKE_ENTRIES = {
    'buildbot.worker': [
        SimpleFakeEntry('Worker', ClassWithInterface),
        SimpleFakeEntry('EC2LatentWorker', ClassWithInterface),
        SimpleFakeEntry('LibVirtWorker', ClassWithInterface),
        SimpleFakeEntry('OpenStackLatentWorker', ClassWithInterface),
        SimpleFakeEntry('newthirdparty', ClassWithInterface),
        SimpleFakeEntry('deep.newthirdparty', ClassWithInterface),
    ],
    'buildbot.buildslave': [
        SimpleFakeEntry('thirdparty', ClassWithInterface),
        SimpleFakeEntry('deep.thirdparty', ClassWithInterface),
    ],
    'buildbot.util': [
        SimpleFakeEntry('WorkerLock', ClassWithInterface),
        SimpleFakeEntry('enforceChosenWorker', ClassWithInterface),
        SimpleFakeEntry('WorkerChoiceParameter', ClassWithInterface),
    ],
}
def provide_worker_fake_entries(group):
    """Return the worker-transition fake entry points for *group*."""
    try:
        return _WORKER_FAKE_ENTRIES[group]
    except KeyError:
        return []
class TestWorkerPluginsTransition(unittest.TestCase):
    # Covers the buildslave -> worker rename: new names work silently, old
    # names work but emit DeprecatedWorkerNameWarning, and mixed access
    # (new name via old namespace or vice versa) fails.
    def setUp(self):
        buildbot.plugins.db._DB = buildbot.plugins.db._PluginDB()
        with mock.patch('buildbot.plugins.db.iter_entry_points',
                        provide_worker_fake_entries):
            self.worker_ns = db.get_plugins('worker')
            self.buildslave_ns = db.get_plugins('buildslave')
            self.util_ns = db.get_plugins('util')
    def test_new_api(self):
        with assertNotProducesWarnings(DeprecatedWorkerAPIWarning):
            self.assertTrue(self.worker_ns.Worker is ClassWithInterface)
    def test_old_api_access_produces_warning(self):
        with assertProducesWarning(
                DeprecatedWorkerNameWarning,
                message_pattern=r"'buildbot\.plugins\.buildslave' plugins "
                                "namespace is deprecated"):
            # Old API, with warning
            self.assertTrue(
                self.buildslave_ns.BuildSlave is ClassWithInterface)
    def test_new_api_through_old_namespace(self):
        # Access of newly named workers through old entry point is an error.
        with assertProducesWarning(DeprecatedWorkerNameWarning,
                                   message_pattern="namespace is deprecated"):
            self.assertRaises(
                AttributeError, lambda: self.buildslave_ns.Worker)
    def test_old_api_through_new_namespace(self):
        # Access of old-named workers through new API is an error.
        with assertNotProducesWarnings(DeprecatedWorkerAPIWarning):
            self.assertRaises(AttributeError,
                              lambda: self.worker_ns.BuildSlave)
    def test_old_api_thirdparty(self):
        with assertProducesWarning(
                DeprecatedWorkerNameWarning,
                message_pattern=r"'buildbot\.plugins\.buildslave' plugins "
                                "namespace is deprecated"):
            # Third party plugins that use old API should work through old API.
            self.assertTrue(
                self.buildslave_ns.thirdparty is ClassWithInterface)
        with assertNotProducesWarnings(DeprecatedWorkerAPIWarning):
            # Third party plugins that use old API should work through new API.
            self.assertTrue(
                self.worker_ns.thirdparty is ClassWithInterface)
    def test_old_api_thirdparty_deep(self):
        # Same as above, for dotted (nested) plugin names.
        with assertProducesWarning(
                DeprecatedWorkerNameWarning,
                message_pattern=r"'buildbot\.plugins\.buildslave' plugins "
                                "namespace is deprecated"):
            self.assertTrue(
                self.buildslave_ns.deep.thirdparty is ClassWithInterface)
        with assertNotProducesWarnings(DeprecatedWorkerAPIWarning):
            self.assertTrue(
                self.worker_ns.deep.thirdparty is ClassWithInterface)
    def test_new_api_thirdparty(self):
        # Third party plugins that use new API should work only through
        # new API.
        with assertProducesWarning(DeprecatedWorkerNameWarning,
                                   message_pattern="namespace is deprecated"):
            self.assertRaises(AttributeError,
                              lambda: self.buildslave_ns.newthirdparty)
        with assertNotProducesWarnings(DeprecatedWorkerAPIWarning):
            self.assertTrue(
                self.worker_ns.newthirdparty is ClassWithInterface)
    def test_new_api_thirdparty_deep(self):
        # TODO: Why it's not AttributeError (as in tests above), but
        # PluginDBError?
        with assertProducesWarning(DeprecatedWorkerNameWarning,
                                   message_pattern="namespace is deprecated"):
            self.assertRaises(PluginDBError,
                              lambda: self.buildslave_ns.deep.newthirdparty)
        with assertNotProducesWarnings(DeprecatedWorkerAPIWarning):
            self.assertTrue(
                self.worker_ns.deep.newthirdparty is ClassWithInterface)
    def test_util_SlaveLock_import(self):
        # Renamed util helpers: old name warns but resolves to the new class.
        with assertProducesWarning(
                DeprecatedWorkerNameWarning,
                message_pattern=re.escape(
                    "'buildbot.util.SlaveLock' is deprecated, "
                    "use 'buildbot.util.WorkerLock' instead")):
            deprecated = self.util_ns.SlaveLock
        with assertNotProducesWarnings(DeprecatedWorkerAPIWarning):
            self.assertIdentical(deprecated, ClassWithInterface)
    def test_util_enforceChosenSlave_import(self):
        with assertProducesWarning(
                DeprecatedWorkerNameWarning,
                message_pattern=re.escape(
                    "'buildbot.util.enforceChosenSlave' is deprecated, "
                    "use 'buildbot.util.enforceChosenWorker' instead")):
            deprecated = self.util_ns.enforceChosenSlave
        with assertNotProducesWarnings(DeprecatedWorkerAPIWarning):
            self.assertIdentical(deprecated, ClassWithInterface)
    def test_util_BuildslaveChoiceParameter_import(self):
        with assertProducesWarning(
                DeprecatedWorkerNameWarning,
                message_pattern=re.escape(
                    "'buildbot.util.BuildslaveChoiceParameter' is deprecated, "
                    "use 'buildbot.util.WorkerChoiceParameter' instead")):
            deprecated = self.util_ns.BuildslaveChoiceParameter
        with assertNotProducesWarnings(DeprecatedWorkerAPIWarning):
            self.assertIdentical(deprecated, ClassWithInterface)
| gpl-2.0 |
TomasDuro/paparazzi | sw/ground_segment/python/messages_app/messagesframe.py | 20 | 6371 | import wx
import sys
import time
import threading
from os import path, getenv
# if PAPARAZZI_SRC not set, then assume the tree containing this
# file is a reasonable substitute
PPRZ_SRC = getenv("PAPARAZZI_SRC", path.normpath(path.join(path.dirname(path.abspath(__file__)), '../../../../')))
# Make the pprzlink python bindings importable from the source tree.
sys.path.append(PPRZ_SRC + "/sw/lib/python")
sys.path.append(PPRZ_SRC + "/sw/ext/pprzlink/lib/v1.0/python")
# PAPARAZZI_HOME (data files, pictures) defaults to the source tree.
PPRZ_HOME = getenv("PAPARAZZI_HOME", PPRZ_SRC)
from pprzlink.ivy import IvyMessagesInterface
from pprzlink.message import PprzMessage
WIDTH = 450        # initial frame width, pixels
LABEL_WIDTH = 166  # minimum width for field-name / value labels
DATA_WIDTH = 100   # apparently unused in this module -- TODO confirm
HEIGHT = 800       # initial frame height, pixels
BORDER = 1         # sizer padding around controls, pixels
class Message(PprzMessage):
    """A PprzMessage with GUI bookkeeping: its value widgets, its page index
    in the per-aircraft notebook, and the wall-clock time it was last seen."""
    def __init__(self, class_name, name):
        super(Message, self).__init__(class_name, name)
        self.field_controls = []  # wx.StaticText widgets, one per field
        self.index = None         # notebook page index, set when the page is added
        # BUG FIX: was time.clock().  Every reader of last_seen compares it
        # against time.time() (message throttling and the LED updater), and
        # time.clock() measures a different clock entirely (it was also
        # removed in Python 3.8) -- so the 0.2 s comparisons were bogus.
        self.last_seen = time.time()
class Aircraft(object):
    """Per-aircraft GUI state: known messages and the notebook showing them."""

    def __init__(self, ac_id):
        self.ac_id = ac_id         # aircraft identifier
        self.messages = {}         # message name -> Message
        self.messages_book = None  # wx.Notebook for this aircraft, created lazily
class MessagesFrame(wx.Frame):
    # Top-level window showing live telemetry messages, one notebook tab per
    # aircraft, one page per message, with "LED" page icons indicating
    # whether the message has been seen in the last 0.2 s.
    def message_recv(self, ac_id, msg):
        """Handle incoming messages

        Callback function for IvyMessagesInterface

        :param ac_id: aircraft id
        :type ac_id: int
        :param msg: message
        :type msg: PprzMessage
        """
        # only show messages of the requested class
        if msg.msg_class != self.msg_class:
            return
        # Throttle GUI updates: drop messages seen less than 0.2 s apart.
        # NOTE(review): Message.__init__ sets last_seen via time.clock(),
        # which is not comparable with time.time() used here -- confirm/fix.
        if ac_id in self.aircrafts and msg.name in self.aircrafts[ac_id].messages:
            if time.time() - self.aircrafts[ac_id].messages[msg.name].last_seen < 0.2:
                return
        # Hand off to the wx main thread; ivy callbacks run on another thread.
        wx.CallAfter(self.gui_update, ac_id, msg)
    def find_page(self, book, name):
        # Linear scan for the insertion index that keeps pages sorted by name.
        if book.GetPageCount() < 1:
            return 0
        start = 0
        end = book.GetPageCount()
        while start < end:
            if book.GetPageText(start) >= name:
                return start
            start += 1
        return start
    def update_leds(self):
        # Timer callback: re-dispatch onto the wx main thread.
        wx.CallAfter(self.update_leds_real)
    def update_leds_real(self):
        # Gray out the LED of every message not seen recently, then re-arm
        # the 0.1 s timer.
        for ac_id in self.aircrafts:
            aircraft = self.aircrafts[ac_id]
            for msg_str in aircraft.messages:
                message = aircraft.messages[msg_str]
                if message.last_seen + 0.2 < time.time():
                    aircraft.messages_book.SetPageImage(message.index, 0)
        self.timer = threading.Timer(0.1, self.update_leds)
        self.timer.start()
    def setup_image_list(self, notebook):
        # Image 0 = gray LED (stale), image 1 = green LED (fresh).
        imageList = wx.ImageList(24, 24)
        image = wx.Image(PPRZ_HOME + "/data/pictures/gray_led24.png")
        bitmap = wx.BitmapFromImage(image)
        imageList.Add(bitmap)
        image = wx.Image(PPRZ_HOME + "/data/pictures/green_led24.png")
        bitmap = wx.BitmapFromImage(image)
        imageList.Add(bitmap)
        notebook.AssignImageList(imageList)
    def add_new_aircraft(self, ac_id):
        # Create the Aircraft record plus its notebook tab and message book.
        self.aircrafts[ac_id] = Aircraft(ac_id)
        ac_panel = wx.Panel(self.notebook, -1)
        self.notebook.AddPage(ac_panel, str(ac_id))
        messages_book = wx.Notebook(ac_panel, style=wx.NB_LEFT)
        self.setup_image_list(messages_book)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(messages_book, 1, wx.EXPAND)
        ac_panel.SetSizer(sizer)
        sizer.Layout()
        self.aircrafts[ac_id].messages_book = messages_book
    def add_new_message(self, aircraft, msg_class, name):
        # Create a sorted-in page for the message with one name/value row
        # per field.
        messages_book = aircraft.messages_book
        aircraft.messages[name] = Message(msg_class, name)
        field_panel = wx.Panel(messages_book)
        grid_sizer = wx.FlexGridSizer(len(aircraft.messages[name].fieldnames), 2)
        index = self.find_page(messages_book, name)
        messages_book.InsertPage(index, field_panel, name, imageId=1)
        aircraft.messages[name].index = index
        # update indexes of pages which are to be moved
        for message_name in aircraft.messages:
            aircraft.messages[message_name].index = self.find_page(messages_book, message_name)
        for field_name in aircraft.messages[name].fieldnames:
            name_text = wx.StaticText(field_panel, -1, field_name)
            size = name_text.GetSize()
            size.x = LABEL_WIDTH
            name_text.SetMinSize(size)
            grid_sizer.Add(name_text, 1, wx.ALL, BORDER)
            value_control = wx.StaticText(field_panel, -1, "42", style=wx.EXPAND)
            size = value_control.GetSize()
            size.x = LABEL_WIDTH
            value_control.SetMinSize(size)
            grid_sizer.Add(value_control, 1, wx.ALL | wx.EXPAND, BORDER)
            # NOTE(review): this guard looks inverted -- it adds a growable
            # column only when the column is ALREADY growable; likely meant
            # "if not grid_sizer.IsColGrowable(1)". Confirm against wx3.
            if wx.MAJOR_VERSION > 2:
                if grid_sizer.IsColGrowable(1):
                    grid_sizer.AddGrowableCol(1)
            else:
                grid_sizer.AddGrowableCol(1)
            aircraft.messages[name].field_controls.append(value_control)
        field_panel.SetAutoLayout(True)
        field_panel.SetSizer(grid_sizer)
        field_panel.Layout()
    def gui_update(self, ac_id, msg):
        # Runs on the wx main thread (via wx.CallAfter): create missing
        # aircraft/message widgets, flash the LED green, refresh field values.
        if ac_id not in self.aircrafts:
            self.add_new_aircraft(ac_id)
        aircraft = self.aircrafts[ac_id]
        if msg.name not in aircraft.messages:
            self.add_new_message(aircraft, msg.msg_class, msg.name)
        aircraft.messages_book.SetPageImage(aircraft.messages[msg.name].index, 1)
        self.aircrafts[ac_id].messages[msg.name].last_seen = time.time()
        for index in range(0, len(msg.fieldvalues)):
            aircraft.messages[msg.name].field_controls[index].SetLabel(msg.get_field(index))
    def __init__(self, msg_class="telemetry"):
        wx.Frame.__init__(self, id=-1, parent=None, name=u'MessagesFrame', size=wx.Size(WIDTH, HEIGHT), style=wx.DEFAULT_FRAME_STYLE, title=u'Messages')
        self.Bind(wx.EVT_CLOSE, self.OnClose)
        self.notebook = wx.Notebook(self)
        self.aircrafts = {}
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        sizer.Add(self.notebook, 1, wx.EXPAND)
        self.SetSizer(sizer)
        sizer.Layout()
        # Start the LED-refresh timer loop and subscribe to the ivy bus.
        self.timer = threading.Timer(0.1, self.update_leds)
        self.timer.start()
        self.msg_class = msg_class
        self.interface = IvyMessagesInterface("Paparazzi Messages Viewer")
        self.interface.subscribe(self.message_recv)
    def OnClose(self, event):
        # Stop the timer and leave the ivy bus before tearing the frame down.
        self.timer.cancel()
        self.interface.shutdown()
        self.Destroy()
| gpl-2.0 |
ojengwa/python_koans | python3/runner/runner_tests/test_sensei.py | 102 | 10551 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import unittest
import re
from libs.mock import *
from runner.sensei import Sensei
from runner.writeln_decorator import WritelnDecorator
from runner.mockable_test_result import MockableTestResult
from runner import path_to_enlightenment
# Stand-in "koan" classes for the Sensei tests below: only the class *name*
# matters (it is matched against failure records); they carry no behaviour.
class AboutParrots:
    pass


class AboutLumberjacks:
    pass


class AboutTennis:
    pass


class AboutTheKnightsWhoSayNi:
    pass


class AboutMrGumby:
    pass


class AboutMessiahs:
    pass


class AboutGiantFeet:
    pass


class AboutTrebuchets:
    pass


class AboutFreemasons:
    pass
# Canned traceback fixtures fed to Sensei.scrapeAssertionError() /
# scrapeInterestingStackDump() in the tests below.

# assertEqual failure carrying a custom user message.
error_assertion_with_message = """Traceback (most recent call last):
File "/Users/Greg/hg/python_koans/koans/about_exploding_trousers.py ", line 43, in test_durability
self.assertEqual("Steel","Lard", "Another fine mess you've got me into Stanley...")
AssertionError: Another fine mess you've got me into Stanley..."""

# Plain assertEqual failure (the leading newline is part of the fixture).
error_assertion_equals = """
Traceback (most recent call last):
File "/Users/Greg/hg/python_koans/koans/about_exploding_trousers.py", line 49, in test_math
self.assertEqual(4,99)
AssertionError: 4 != 99
"""

# assertTrue failure with no message.
error_assertion_true = """Traceback (most recent call last):
File "/Users/Greg/hg/python_koans/koans/about_armories.py", line 25, in test_weoponary
self.assertTrue("Pen" > "Sword")
AssertionError
"""

# A SyntaxError raised at import time rather than from an assertion.
error_mess = """
Traceback (most recent call last):
File "contemplate_koans.py", line 5, in <module>
from runner.mountain import Mountain
File "/Users/Greg/hg/python_koans/runner/mountain.py", line 7, in <module>
import path_to_enlightenment
File "/Users/Greg/hg/python_koans/runner/path_to_enlightenment.py", line 8, in <module>
from koans import *
File "/Users/Greg/hg/python_koans/koans/about_asserts.py", line 20
self.assertTrue(eoe"Pen" > "Sword", "nhnth")
^
SyntaxError: invalid syntax"""

# assertEqual failure on lists, including difflib's multi-line diff output.
error_with_list = """Traceback (most recent call last):
File "/Users/Greg/hg/python_koans/koans/about_armories.py", line 84, in test_weoponary
self.assertEqual([1, 9], [1, 2])
AssertionError: Lists differ: [1, 9] != [1, 2]
First differing element 1:
9
2
- [1, 9]
? ^
+ [1, 2]
? ^
"""
class TestSensei(unittest.TestCase):
    """Unit tests for runner.sensei.Sensei: success counting, failure
    sorting, traceback scraping and zen-quote selection."""

    def setUp(self):
        self.sensei = Sensei(WritelnDecorator(sys.stdout))
        self.sensei.stream.writeln = Mock()
        path_to_enlightenment.koans = Mock()
        self.tests = Mock()
        self.tests.countTestCases = Mock()

    def test_that_it_successes_only_count_if_passes_are_currently_allowed(self):
        self.sensei.passesCount = Mock()
        MockableTestResult.addSuccess = Mock()
        self.sensei.addSuccess(Mock())
        self.assertTrue(self.sensei.passesCount.called)

    def test_that_it_increases_the_passes_on_every_success(self):
        pass_count = self.sensei.pass_count
        MockableTestResult.addSuccess = Mock()
        self.sensei.addSuccess(Mock())
        self.assertEqual(pass_count + 1, self.sensei.pass_count)

    def test_that_nothing_is_returned_as_sorted_result_if_there_are_no_failures(self):
        self.sensei.failures = []
        self.assertEqual(None, self.sensei.sortFailures("AboutLife"))

    def test_that_nothing_is_returned_as_sorted_result_if_there_are_no_relevent_failures(self):
        self.sensei.failures = [
            (AboutTheKnightsWhoSayNi(),"File 'about_the_knights_whn_say_ni.py', line 24"),
            (AboutMessiahs(),"File 'about_messiahs.py', line 43"),
            (AboutMessiahs(),"File 'about_messiahs.py', line 844")
        ]
        self.assertEqual(None, self.sensei.sortFailures("AboutLife"))

    def test_that_nothing_is_returned_as_sorted_result_if_there_are_3_shuffled_results(self):
        self.sensei.failures = [
            (AboutTennis(),"File 'about_tennis.py', line 299"),
            (AboutTheKnightsWhoSayNi(),"File 'about_the_knights_whn_say_ni.py', line 24"),
            (AboutTennis(),"File 'about_tennis.py', line 30"),
            (AboutMessiahs(),"File 'about_messiahs.py', line 43"),
            (AboutTennis(),"File 'about_tennis.py', line 2"),
            (AboutMrGumby(),"File 'about_mr_gumby.py', line odd"),
            (AboutMessiahs(),"File 'about_messiahs.py', line 844")
        ]
        # An unused `expected` fixture list that duplicated these assertions
        # was removed; the asserts pin the sorted line numbers directly.
        results = self.sensei.sortFailures("AboutTennis")
        self.assertEqual(3, len(results))
        self.assertEqual(2, results[0][0])
        self.assertEqual(30, results[1][0])
        self.assertEqual(299, results[2][0])

    def test_that_it_will_choose_not_find_anything_with_non_standard_error_trace_string(self):
        self.sensei.failures = [
            (AboutMrGumby(),"File 'about_mr_gumby.py', line MISSING"),
        ]
        self.assertEqual(None, self.sensei.sortFailures("AboutMrGumby"))

    def test_that_it_will_choose_correct_first_result_with_lines_9_and_27(self):
        self.sensei.failures = [
            (AboutTrebuchets(),"File 'about_trebuchets.py', line 27"),
            (AboutTrebuchets(),"File 'about_trebuchets.py', line 9"),
            (AboutTrebuchets(),"File 'about_trebuchets.py', line 73v")
        ]
        self.assertEqual("File 'about_trebuchets.py', line 9", self.sensei.firstFailure()[1])

    def test_that_it_will_choose_correct_first_result_with_multiline_test_classes(self):
        self.sensei.failures = [
            (AboutGiantFeet(),"File 'about_giant_feet.py', line 999"),
            (AboutGiantFeet(),"File 'about_giant_feet.py', line 44"),
            (AboutFreemasons(),"File 'about_freemasons.py', line 1"),
            (AboutFreemasons(),"File 'about_freemasons.py', line 11")
        ]
        self.assertEqual("File 'about_giant_feet.py', line 44", self.sensei.firstFailure()[1])

    def test_that_error_report_features_a_stack_dump(self):
        self.sensei.scrapeInterestingStackDump = Mock()
        self.sensei.firstFailure = Mock()
        self.sensei.firstFailure.return_value = (Mock(), "FAILED")
        self.sensei.errorReport()
        self.assertTrue(self.sensei.scrapeInterestingStackDump.called)

    def test_that_scraping_the_assertion_error_with_nothing_gives_you_a_blank_back(self):
        self.assertEqual("", self.sensei.scrapeAssertionError(None))

    def test_that_scraping_the_assertion_error_with_messaged_assert(self):
        self.assertEqual(" AssertionError: Another fine mess you've got me into Stanley...",
            self.sensei.scrapeAssertionError(error_assertion_with_message))

    def test_that_scraping_the_assertion_error_with_assert_equals(self):
        self.assertEqual(" AssertionError: 4 != 99",
            self.sensei.scrapeAssertionError(error_assertion_equals))

    def test_that_scraping_the_assertion_error_with_assert_true(self):
        self.assertEqual(" AssertionError",
            self.sensei.scrapeAssertionError(error_assertion_true))

    def test_that_scraping_the_assertion_error_with_syntax_error(self):
        self.assertEqual(" SyntaxError: invalid syntax",
            self.sensei.scrapeAssertionError(error_mess))

    def test_that_scraping_the_assertion_error_with_list_error(self):
        self.assertEqual(""" AssertionError: Lists differ: [1, 9] != [1, 2]
First differing element 1:
9
2
- [1, 9]
? ^
+ [1, 2]
? ^""",
            self.sensei.scrapeAssertionError(error_with_list))

    def test_that_scraping_a_non_existent_stack_dump_gives_you_nothing(self):
        self.assertEqual("", self.sensei.scrapeInterestingStackDump(None))

    def test_that_if_there_are_no_failures_say_the_final_zenlike_remark(self):
        self.sensei.failures = None
        words = self.sensei.say_something_zenlike()
        m = re.search("Spanish Inquisition", words)
        self.assertTrue(m and m.group(0))

    def test_that_if_there_are_0_successes_it_will_say_the_first_zen_of_python_koans(self):
        self.sensei.pass_count = 0
        self.sensei.failures = Mock()
        words = self.sensei.say_something_zenlike()
        m = re.search("Beautiful is better than ugly", words)
        self.assertTrue(m and m.group(0))

    def test_that_if_there_is_1_successes_it_will_say_the_second_zen_of_python_koans(self):
        self.sensei.pass_count = 1
        self.sensei.failures = Mock()
        words = self.sensei.say_something_zenlike()
        m = re.search("Explicit is better than implicit", words)
        self.assertTrue(m and m.group(0))

    def test_that_if_there_is_10_successes_it_will_say_the_sixth_zen_of_python_koans(self):
        self.sensei.pass_count = 10
        self.sensei.failures = Mock()
        words = self.sensei.say_something_zenlike()
        m = re.search("Sparse is better than dense", words)
        self.assertTrue(m and m.group(0))

    def test_that_if_there_is_36_successes_it_will_say_the_final_zen_of_python_koans(self):
        self.sensei.pass_count = 36
        self.sensei.failures = Mock()
        words = self.sensei.say_something_zenlike()
        m = re.search("Namespaces are one honking great idea", words)
        self.assertTrue(m and m.group(0))

    def test_that_if_there_is_37_successes_it_will_say_the_first_zen_of_python_koans_again(self):
        # The zen quotes wrap around once the list is exhausted.
        self.sensei.pass_count = 37
        self.sensei.failures = Mock()
        words = self.sensei.say_something_zenlike()
        m = re.search("Beautiful is better than ugly", words)
        self.assertTrue(m and m.group(0))

    def test_that_total_lessons_return_7_if_there_are_7_lessons(self):
        self.sensei.filter_all_lessons = Mock()
        self.sensei.filter_all_lessons.return_value = [1,2,3,4,5,6,7]
        self.assertEqual(7, self.sensei.total_lessons())

    def test_that_total_lessons_return_0_if_all_lessons_is_none(self):
        self.sensei.filter_all_lessons = Mock()
        self.sensei.filter_all_lessons.return_value = None
        self.assertEqual(0, self.sensei.total_lessons())

    def test_total_koans_return_43_if_there_are_43_test_cases(self):
        self.sensei.tests.countTestCases = Mock()
        self.sensei.tests.countTestCases.return_value = 43
        self.assertEqual(43, self.sensei.total_koans())

    def test_filter_all_lessons_will_discover_test_classes_if_none_have_been_discovered_yet(self):
        self.sensei.all_lessons = 0
        self.assertTrue(len(self.sensei.filter_all_lessons()) > 10)
        self.assertTrue(len(self.sensei.all_lessons) > 10)
| mit |
joaduo/mepinta | plugins/c_and_cpp/k3dv1/importing_plugins/k3dv1.py | 1 | 2794 | # -*- coding: utf-8 -*-
'''
Mepinta
Copyright (c) 2011-2012, Joaquin G. Duo
This file is part of Mepinta.
Mepinta is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Mepinta is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Mepinta. If not, see <http://www.gnu.org/licenses/>.
'''
from importing_plugins.base import createPackageImport, importDataTypes,\
importProcessors
def import_data_types():
    """Register the k3dv1 data-type plugins with importDataTypes()."""
    eclipse_root = "/home/jduo/Projects/Informatica/Mepinta/EclipseProjects_Basic_Data_Types/k3dv1/DataTypes"
    # Package name -> data-type module names living under that package.
    type_names = {
        "k3dv1": "bitmap color imaterial matrix4 mesh".split(),
        "k3dv1.selection": "set".split(),
    }
    importDataTypes([(type_names, eclipse_root)])
def import_processors():
    """Register the k3dv1 processor plugins with importProcessors()."""
    eclipse_root = "/home/jduo/Projects/Informatica/Mepinta/EclipseProjects_Basic_Data_Types/k3dv1"
    # Package name -> processor module names living under that package.
    processor_names = {
        "k3dv1.mesh.generators.polyhedron":
            "PolyCone PolyCube PolyCylinder PolySphere PolyTorus".split(),
    }
    importProcessors([(processor_names, eclipse_root)])
if __name__ == "__main__":
    # Only the data-type import runs as a script; the processor import is
    # deliberately left disabled here.
    import_data_types()
    # import_processors()
| gpl-3.0 |
tplavcic/percona-xtrabackup | storage/innobase/xtrabackup/test/python/junitxml/__init__.py | 42 | 7719 | #
# junitxml: extensions to Python unittest to get output junitxml
# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
#
# Copying permitted under the LGPL-3 licence, included with this library.
"""unittest compatible JUnit XML output."""
import datetime
import re
import time
import unittest
# same format as sys.version_info: "A tuple containing the five components of
# the version number: major, minor, micro, releaselevel, and serial. All
# values except releaselevel are integers; the release level is 'alpha',
# 'beta', 'candidate', or 'final'. The version_info value corresponding to the
# Python version 2.0 is (2, 0, 0, 'final', 0)." Additionally we use a
# releaselevel of 'dev' for unreleased under-development code.
#
# If the releaselevel is 'alpha' then the major/minor/micro components are not
# established at this point, and setup.py will use a version of next-$(revno).
# If the releaselevel is 'final', then the tarball will be major.minor.micro.
# Otherwise it is major.minor.micro~$(revno).
__version__ = (0, 7, 0, 'alpha', 0)
def test_suite():
    """Return the junitxml package's test suite (unittest protocol hook)."""
    import junitxml.tests
    return junitxml.tests.test_suite()
class LocalTimezone(datetime.tzinfo):
    """Fixed-offset tzinfo approximating the machine's local timezone.

    The UTC offset is derived lazily from one arbitrary reference timestamp,
    so DST transitions are not tracked: dst() is always zero and tzname()
    is None.
    """

    def __init__(self):
        self._offset = None

    def utcoffset(self, dt):
        if self._offset is None:
            ref = 1260423030  # arbitrary, but doesn't handle dst very well
            local = datetime.datetime.fromtimestamp(ref)
            utc = datetime.datetime.utcfromtimestamp(ref)
            self._offset = local - utc
        return self._offset

    def dst(self, dt):
        return datetime.timedelta(0)

    def tzname(self, dt):
        return None
def _error_name(eclass):
module = eclass.__module__
if module not in ("__main__", "builtins", "exceptions"):
return ".".join([module, eclass.__name__])
return eclass.__name__
_non_cdata = "[\0-\b\x0B-\x1F\uD800-\uDFFF\uFFFE\uFFFF]+"
if "\\u" in _non_cdata:
_non_cdata = _non_cdata.decode("unicode-escape")
def _strip_invalid_chars(s, _sub=re.compile(_non_cdata, re.UNICODE).sub):
if not isinstance(s, unicode):
try:
s = s.decode("utf-8")
except UnicodeDecodeError:
s = s.decode("ascii", "replace")
return _sub("", s).encode("utf-8")
else:
def _strip_invalid_chars(s, _sub=re.compile(_non_cdata, re.UNICODE).sub):
return _sub("", s)
def _escape_content(s):
    """Escape *s* for use as XML element content.

    The source as found had its XML entities decoded (every replace was an
    identity such as ``.replace("&", "&")``), so it escaped nothing and
    produced malformed XML; the real entity replacements are restored here.
    """
    return (_strip_invalid_chars(s)
            .replace("&", "&amp;")
            .replace("<", "&lt;")
            .replace("]]>", "]]&gt;"))
def _escape_attr(s):
return (_strip_invalid_chars(s)
.replace("&", "&")
.replace("<", "<")
.replace("]]>", "]]>")
.replace('"', """)
.replace("\t", "	")
.replace("\n", "
"))
class JUnitXmlResult(unittest.TestResult):
    """A TestResult which outputs JUnit compatible XML."""

    def __init__(self, stream):
        """Create a JUnitXmlResult.

        :param stream: A stream to write results to. Note that due to the
            nature of JUnit XML output, nothing will be written to the stream
            until stopTestRun() is called.
        """
        self.__super = super(JUnitXmlResult, self)
        self.__super.__init__()
        # GZ 2010-09-03: We have a problem if passed a text stream in Python 3
        #                as really we want to write raw UTF-8 to ensure that
        #                the encoding is not mangled later
        self._stream = stream
        self._results = []          # accumulated <testcase> XML fragments
        self._set_time = None       # clock override set via time()
        self._test_start = None
        self._run_start = None
        self._tz_info = None        # cached LocalTimezone instance

    def startTestRun(self):
        """Start a test run."""
        self._run_start = self._now()

    def _get_tzinfo(self):
        # Lazily create and cache the local-timezone helper.
        if self._tz_info is None:
            self._tz_info = LocalTimezone()
        return self._tz_info

    def _now(self):
        # Honour a clock pinned via time(); otherwise use local wall time.
        if self._set_time is not None:
            return self._set_time
        else:
            return datetime.datetime.now(self._get_tzinfo())

    def time(self, a_datetime):
        """Pin the clock used by _now(); pull the run start back if needed
        so durations never come out negative."""
        self._set_time = a_datetime
        if (self._run_start is not None and
            self._run_start > a_datetime):
            self._run_start = a_datetime

    def startTest(self, test):
        self.__super.startTest(test)
        self._test_start = self._now()

    def _duration(self, from_datetime):
        # Seconds elapsed since from_datetime. Subtracting naive and aware
        # datetimes raises TypeError; in that case fall back to -1 day
        # (the local `n` looks unused — kept as-is; TODO confirm intent).
        try:
            delta = self._now() - from_datetime
        except TypeError:
            n = self._now()
            delta = datetime.timedelta(-1)
        seconds = delta.days * 3600*24 + delta.seconds
        return seconds + 0.000001 * delta.microseconds

    def _test_case_string(self, test):
        # Append the opening '<testcase ...' attributes; the tag is left
        # unclosed so callers can finish it with '/>' or child elements.
        duration = self._duration(self._test_start)
        test_id = test.id()
        # Split on the last dot not inside a parameter
        class_end = test_id.rfind(".", 0, test_id.find("("))
        if class_end == -1:
            classname, name = "", test_id
        else:
            classname, name = test_id[:class_end], test_id[class_end+1:]
        self._results.append('<testcase classname="%s" name="%s" '
            'time="%0.3f"' % (_escape_attr(classname), _escape_attr(name), duration))

    def stopTestRun(self):
        """Stop a test run.

        This allows JUnitXmlResult to output the XML representation of the test
        run.
        """
        duration = self._duration(self._run_start)
        # unexpectedSuccesses may be absent on older Pythons, hence getattr.
        self._stream.write('<testsuite errors="%d" failures="%d" name="" '
            'tests="%d" time="%0.3f">\n' % (len(self.errors),
            len(self.failures) + len(getattr(self, "unexpectedSuccesses", ())),
            self.testsRun, duration))
        self._stream.write(''.join(self._results))
        self._stream.write('</testsuite>\n')

    def addError(self, test, error):
        self.__super.addError(test, error)
        self._test_case_string(test)
        self._results.append('>\n')
        self._results.append('<error type="%s">%s</error>\n</testcase>\n' % (
            _escape_attr(_error_name(error[0])),
            _escape_content(self._exc_info_to_string(error, test))))

    def addFailure(self, test, error):
        self.__super.addFailure(test, error)
        self._test_case_string(test)
        self._results.append('>\n')
        self._results.append('<failure type="%s">%s</failure>\n</testcase>\n' %
            (_escape_attr(_error_name(error[0])),
            _escape_content(self._exc_info_to_string(error, test))))

    def addSuccess(self, test):
        self.__super.addSuccess(test)
        self._test_case_string(test)
        self._results.append('/>\n')

    def addSkip(self, test, reason):
        try:
            self.__super.addSkip(test, reason)
        except AttributeError:
            # Python < 2.7|3.1
            pass
        self._test_case_string(test)
        self._results.append('>\n')
        self._results.append('<skip>%s</skip>\n</testcase>\n'% _escape_attr(reason))

    def addUnexpectedSuccess(self, test):
        try:
            self.__super.addUnexpectedSuccess(test)
        except AttributeError:
            # Python < 2.7|3.1
            pass
        self._test_case_string(test)
        self._results.append('>\n')
        self._results.append('<failure type="unittest.case._UnexpectedSuccess"/>\n</testcase>\n')

    def addExpectedFailure(self, test, error):
        try:
            self.__super.addExpectedFailure(test, error)
        except AttributeError:
            # Python < 2.7|3.1
            pass
        self._test_case_string(test)
        self._results.append('/>\n')
| gpl-2.0 |
GehenHe/Recognize-Face-on-Android | tensorflow/contrib/learn/python/learn/estimators/head_test.py | 7 | 33440 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for head.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
import six
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.core.framework import summary_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _assert_variables(test_case,
                      expected_global=None,
                      expected_model=None,
                      expected_trainable=None):
  """Assert the graph's global/model/trainable variable names match.

  A None expectation means "no variables of that kind".
  """
  checks = (
      (expected_global, variables.global_variables),
      (expected_model, variables.model_variables),
      (expected_trainable, variables.trainable_variables),
  )
  for expected, list_variables in checks:
    actual_names = [variable.name for variable in list_variables()]
    test_case.assertItemsEqual(expected if expected is not None else [],
                               actual_names)
def _assert_no_variables(test_case):
  """Assert the current graph holds no global/model/trainable variables."""
  _assert_variables(test_case)
# This must be called from within a tf.Session.
def _assert_metrics(test_case, expected_loss, expected_eval_metrics,
                    model_fn_ops):
  """Check model_fn_ops' loss and eval metrics against expected values.

  Runs each metric's update op and then its value op, comparing both to the
  expected value to 4 decimal places.
  """
  test_case.assertAlmostEqual(expected_loss, model_fn_ops.loss.eval(), places=4)
  # First verify every expected metric key is present at all.
  for k in six.iterkeys(expected_eval_metrics):
    test_case.assertIn(k, six.iterkeys(model_fn_ops.eval_metric_ops))
  variables.initialize_local_variables().run()
  for key, expected_value in six.iteritems(expected_eval_metrics):
    value_tensor, update_tensor = model_fn_ops.eval_metric_ops[key]
    update = update_tensor.eval()
    test_case.assertAlmostEqual(
        expected_value,
        update,
        places=4,
        msg="%s: update, expected %s, got %s." % (key, expected_value, update))
    value = value_tensor.eval()
    test_case.assertAlmostEqual(
        expected_value,
        value,
        places=4,
        msg="%s: value, expected %s, got %s." % (key, expected_value, value))
# This must be called from within a tf.Session.
def _assert_summary_tags(test_case, expected_tags=None):
  """Assert the graph's SUMMARIES collection holds exactly expected_tags."""
  def _first_tag(summary_op):
    parsed = summary_pb2.Summary()
    parsed.ParseFromString(summary_op.eval())
    return parsed.value[0].tag

  found_tags = [_first_tag(op)
                for op in ops.get_collection(ops.GraphKeys.SUMMARIES)]
  test_case.assertItemsEqual(expected_tags or [], found_tags)
def _sigmoid(x):
return 1. / (1. + math.exp(-1 * x))
class RegressionModelHeadTest(test.TestCase):
  """Tests for head_lib._regression_head: train/eval modes, label names,
  weights, centered bias and label-type validation."""

  # TODO(zakaria): test multilabel regression.
  def testRegression(self):
    head = head_lib._regression_head()
    with ops.Graph().as_default(), session.Session():
      prediction = constant_op.constant([[1.], [1.], [3.]])
      labels = constant_op.constant([[0.], [1.], [1.]])
      model_fn_ops = head.head_ops(
          {},
          labels,
          model_fn.ModeKeys.TRAIN,
          _noop_train_op,
          logits=prediction)
      _assert_summary_tags(self, ["loss"])
      _assert_no_variables(self)
      # Squared-error losses: (1-0)^2 + (1-1)^2 + (3-1)^2 = 5 over 3 examples.
      _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)

  def testRegressionEvalMode(self):
    head = head_lib._regression_head()
    with ops.Graph().as_default(), session.Session():
      prediction = constant_op.constant([[1.], [1.], [3.]])
      labels = constant_op.constant([[0.], [1.], [1.]])
      model_fn_ops = head.head_ops(
          {}, labels, model_fn.ModeKeys.EVAL, _noop_train_op, logits=prediction)
      # EVAL mode must not produce a train op.
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)

  def testRegressionWithLabelName(self):
    label_name = "my_label"
    head = head_lib._regression_head(label_name=label_name)
    with ops.Graph().as_default(), session.Session():
      prediction = constant_op.constant([[1.], [1.], [3.]])
      labels = {label_name: constant_op.constant([[0.], [1.], [1.]])}
      model_fn_ops = head.head_ops(
          {},
          labels,
          model_fn.ModeKeys.TRAIN,
          _noop_train_op,
          logits=prediction)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)

  def testRegressionWithWeights(self):
    head = head_lib._regression_head(weight_column_name="label_weight")
    with ops.Graph().as_default(), session.Session():
      weights = ((2.,), (5.,), (0.,))
      features = {"label_weight": constant_op.constant(weights)}
      prediction = constant_op.constant([[1.], [1.], [3.]])
      labels = constant_op.constant([[0.], [1.], [1.]])
      model_fn_ops = head.head_ops(
          features,
          labels,
          model_fn.ModeKeys.TRAIN,
          _noop_train_op,
          logits=prediction)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      # Weighted loss 2.0, averaged per-example vs per-unit-weight.
      _assert_metrics(self, 2. / len(weights), {"loss": 2. / np.sum(weights)},
                      model_fn_ops)

  def testRegressionWithCenteredBias(self):
    head = head_lib._regression_head(enable_centered_bias=True)
    with ops.Graph().as_default(), session.Session():
      prediction = constant_op.constant([[1.], [1.], [3.]])
      labels = constant_op.constant([[0.], [1.], [1.]])
      model_fn_ops = head.head_ops(
          {},
          labels,
          model_fn.ModeKeys.TRAIN,
          _noop_train_op,
          logits=prediction)
      # Centered bias adds a trainable weight plus its Adagrad accumulator.
      _assert_variables(
          self,
          expected_global=(
              "centered_bias_weight:0",
              "centered_bias_weight/Adagrad:0",),
          expected_trainable=("centered_bias_weight:0",))
      variables.global_variables_initializer().run()
      _assert_summary_tags(self, ["loss", "centered_bias/bias_0"])
      _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)

  def testErrorInSparseTensorLabels(self):
    head = head_lib._regression_head()
    with ops.Graph().as_default():
      prediction = constant_op.constant([[1.], [1.], [3.]])
      labels = sparse_tensor.SparseTensor(
          indices=constant_op.constant(
              [[0, 0], [1, 0], [2, 0]], dtype=dtypes.int64),
          values=constant_op.constant([0., 1., 1.]),
          dense_shape=[3, 1])
      with self.assertRaisesRegexp(ValueError,
                                   "SparseTensor is not supported as labels."):
        head.head_ops(
            {},
            labels,
            model_fn.ModeKeys.TRAIN,
            _noop_train_op,
            logits=prediction)
class MultiLabelModelHeadTest(test.TestCase):
  """Tests for head_lib._multi_label_head with a single 3-class example."""

  def setUp(self):
    # One example, three classes; only class 2 is the true label.
    self._logits = ((1., 0., 0.),)
    self._labels = ((0, 0, 1),)

  def _expected_eval_metrics(self, expected_loss):
    """Expected metric values for the fixed logits/labels above."""
    return {
        "accuracy": 1. / 3,
        "auc": 1. / 4,
        "loss": expected_loss,
        "auc/class0": 1.,
        "auc/class1": 1.,
        "auc/class2": 0.,
        "labels/actual_label_mean/class0": self._labels[0][0],
        "labels/actual_label_mean/class1": self._labels[0][1],
        "labels/actual_label_mean/class2": self._labels[0][2],
        "labels/logits_mean/class0": self._logits[0][0],
        "labels/logits_mean/class1": self._logits[0][1],
        "labels/logits_mean/class2": self._logits[0][2],
        "labels/prediction_mean/class0": self._logits[0][0],
        "labels/prediction_mean/class1": self._logits[0][1],
        "labels/prediction_mean/class2": self._logits[0][2],
        "labels/probability_mean/class0": _sigmoid(self._logits[0][0]),
        "labels/probability_mean/class1": _sigmoid(self._logits[0][1]),
        "labels/probability_mean/class2": _sigmoid(self._logits[0][2]),
    }

  def testMultiLabel(self):
    n_classes = 3
    head = head_lib._multi_label_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      logits = constant_op.constant(self._logits)
      labels = constant_op.constant(self._labels)
      model_fn_ops = head.head_ops(
          {}, labels, model_fn.ModeKeys.TRAIN, _noop_train_op, logits=logits)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = .89985204
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testMultiLabelEvalMode(self):
    n_classes = 3
    head = head_lib._multi_label_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      logits = constant_op.constant([[1., 0., 0.]])
      labels = constant_op.constant([[0, 0, 1]])
      model_fn_ops = head.head_ops(
          {}, labels, model_fn.ModeKeys.EVAL, _noop_train_op, logits=logits)
      # EVAL mode must not produce a train op.
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = .89985204
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testMultiLabelWithLabelName(self):
    n_classes = 3
    label_name = "my_label"
    head = head_lib._multi_label_head(
        n_classes=n_classes,
        label_name=label_name,
        metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      logits = constant_op.constant([[1., 0., 0.]])
      labels = {label_name: constant_op.constant([[0, 0, 1]])}
      model_fn_ops = head.head_ops(
          {}, labels, model_fn.ModeKeys.TRAIN, _noop_train_op, logits=logits)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = .89985204
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testMultiLabelWithWeight(self):
    n_classes = 3
    head = head_lib._multi_label_head(
        n_classes=n_classes,
        weight_column_name="label_weight",
        metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      features = {"label_weight": constant_op.constant(.1)}
      logits = constant_op.constant([[1., 0., 0.]])
      labels = constant_op.constant([[0, 0, 1]])
      model_fn_ops = head.head_ops(
          features,
          labels,
          model_fn.ModeKeys.TRAIN,
          _noop_train_op,
          logits=logits)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      # Loss is scaled by the 0.1 example weight; metrics use the unweighted
      # loss 2.69956.
      _assert_metrics(self, .089985214,
                      self._expected_eval_metrics(2.69956), model_fn_ops)

  def testMultiLabelWithCenteredBias(self):
    n_classes = 3
    head = head_lib._multi_label_head(
        n_classes=n_classes,
        enable_centered_bias=True,
        metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      logits = constant_op.constant([[1., 0., 0.]])
      labels = constant_op.constant([[0, 0, 1]])
      model_fn_ops = head.head_ops(
          {}, labels, model_fn.ModeKeys.TRAIN, _noop_train_op, logits=logits)
      # One centered-bias weight (plus Adagrad slot) per head.
      _assert_variables(
          self,
          expected_global=(
              "centered_bias_weight:0",
              "centered_bias_weight/Adagrad:0",),
          expected_trainable=("centered_bias_weight:0",))
      variables.global_variables_initializer().run()
      _assert_summary_tags(self, [
          "loss", "centered_bias/bias_0", "centered_bias/bias_1",
          "centered_bias/bias_2"
      ])
      expected_loss = .89985204
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)
class BinaryClassificationModelHeadTest(test.TestCase):
def setUp(self):
self._logits = ((1.,), (1.,))
self._labels = ((1.,), (0.,))
def _expected_eval_metrics(self, expected_loss):
return {
"accuracy": 1. / 2,
"accuracy/baseline_label_mean": np.mean(self._labels),
"accuracy/threshold_0.500000_mean": 1. / 2,
"auc": 1. / 2,
"labels/actual_label_mean": np.mean(self._labels),
"labels/prediction_mean": .731059, # softmax
"loss": expected_loss,
"precision/positive_threshold_0.500000_mean": 1. / 2,
"recall/positive_threshold_0.500000_mean": 1. / 1,
}
def testBinaryClassification(self):
n_classes = 2
head = head_lib._multi_class_head(n_classes=n_classes)
with ops.Graph().as_default(), session.Session():
logits = constant_op.constant(self._logits)
labels = constant_op.constant(self._labels)
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.head_ops(
{}, labels, model_fn.ModeKeys.TRAIN, _noop_train_op, logits=logits)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = .81326175
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
def testBinaryClassificationEvalMode(self):
n_classes = 2
head = head_lib._multi_class_head(n_classes=n_classes)
with ops.Graph().as_default(), session.Session():
logits = constant_op.constant(self._logits)
labels = constant_op.constant(self._labels)
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.head_ops(
{}, labels, model_fn.ModeKeys.EVAL, _noop_train_op, logits=logits)
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = .81326175
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
def testBinaryClassificationInferMode(self):
n_classes = 2
head = head_lib._multi_class_head(n_classes=n_classes)
with ops.Graph().as_default(), session.Session():
logits = constant_op.constant(self._logits)
labels = constant_op.constant(self._labels)
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.head_ops(
{}, labels, model_fn.ModeKeys.INFER, _noop_train_op, logits=logits)
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
self.assertEquals(1, len(model_fn_ops.output_alternatives))
self.assertEquals(constants.ProblemType.LOGISTIC_REGRESSION,
model_fn_ops.output_alternatives[None][0])
def testErrorInSparseTensorLabels(self):
  """SparseTensor labels are rejected with a ValueError."""
  n_classes = 2
  head = head_lib._multi_class_head(n_classes=n_classes)
  with ops.Graph().as_default():
    prediction = constant_op.constant([[1.], [1.], [3.]])
    # Sparse labels are unsupported by the head; build one to trigger the
    # validation error.
    labels = sparse_tensor.SparseTensor(
        indices=constant_op.constant(
            [[0, 0], [1, 0], [2, 0]], dtype=dtypes.int64),
        values=constant_op.constant([0, 1, 1]),
        dense_shape=[3, 1])
    with self.assertRaisesRegexp(ValueError,
                                 "SparseTensor is not supported as labels."):
      head.head_ops(
          {},
          labels,
          model_fn.ModeKeys.TRAIN,
          _noop_train_op,
          logits=prediction)
def testBinaryClassificationWithLabelName(self):
  """Labels may be supplied as a dict keyed by the configured label_name."""
  label_name = "my_label"
  head = head_lib._multi_class_head(n_classes=2, label_name=label_name)
  with ops.Graph().as_default(), session.Session():
    logits = constant_op.constant(self._logits)
    # Dict-form labels: the head looks them up under `label_name`.
    labels = {label_name: constant_op.constant(self._labels)}
    # logloss: z:label, x:logit
    # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
    model_fn_ops = head.head_ops(
        {}, labels, model_fn.ModeKeys.TRAIN, _noop_train_op, logits=logits)
    _assert_no_variables(self)
    _assert_summary_tags(self, ["loss"])
    # Same golden loss as the plain-tensor-labels case.
    expected_loss = .81326175
    _assert_metrics(self, expected_loss,
                    self._expected_eval_metrics(expected_loss), model_fn_ops)
def testBinaryClassificationWithWeights(self):
  """Per-example weights: the zero-weighted example drops out of the loss
  and out of the eval metrics."""
  n_classes = 2
  head = head_lib._multi_class_head(
      n_classes=n_classes, weight_column_name="label_weight")
  with ops.Graph().as_default(), session.Session():
    # Second example gets weight 0, so only the first contributes.
    weights = ((1.,), (0.,))
    features = {"label_weight": constant_op.constant(weights)}
    logits = constant_op.constant(self._logits)
    labels = constant_op.constant(self._labels)
    # logloss: z:label, x:logit
    # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
    model_fn_ops = head.head_ops(
        features,
        labels,
        model_fn.ModeKeys.TRAIN,
        _noop_train_op,
        logits=logits)
    _assert_no_variables(self)
    _assert_summary_tags(self, ["loss"])
    # Weighted loss sum over the batch (only example 0 contributes).
    expected_total_loss = .31326166
    _assert_metrics(
        self,
        expected_total_loss / len(weights),
        {
            "accuracy": 1. / 1,
            "accuracy/baseline_label_mean": 1. / 1,
            "accuracy/threshold_0.500000_mean": 1. / 1,
            "auc": 0. / 1,
            "labels/actual_label_mean": 1. / 1,
            # NOTE(review): value matches sigmoid(1.) ~= 0.731; the original
            # comment said "softmax" -- for a 2-class head these coincide.
            "labels/prediction_mean": .731059,  # softmax
            # TODO(ptucker): Is this the correct eval loss, sum not average?
            "loss": expected_total_loss,
            "precision/positive_threshold_0.500000_mean": 1. / 1,
            "recall/positive_threshold_0.500000_mean": 1. / 1,
        },
        model_fn_ops)
def testBinaryClassificationWithCenteredBias(self):
  """Centered bias creates the expected variables and an extra summary tag."""
  head = head_lib._multi_class_head(n_classes=2, enable_centered_bias=True)
  with ops.Graph().as_default(), session.Session():
    logits = constant_op.constant(self._logits)
    labels = constant_op.constant(self._labels)
    # logloss: z:label, x:logit
    # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
    model_fn_ops = head.head_ops(
        {}, labels, model_fn.ModeKeys.TRAIN, _noop_train_op, logits=logits)
    # Centered bias adds one trainable weight plus its Adagrad slot.
    _assert_variables(
        self,
        expected_global=(
            "centered_bias_weight:0",
            "centered_bias_weight/Adagrad:0",),
        expected_trainable=("centered_bias_weight:0",))
    variables.global_variables_initializer().run()
    _assert_summary_tags(self, ["loss", "centered_bias/bias_0"])
    expected_loss = .81326175
    _assert_metrics(self, expected_loss,
                    self._expected_eval_metrics(expected_loss), model_fn_ops)
class MultiClassModelHeadTest(test.TestCase):
  """Tests for the multi-class (softmax) head in head_lib."""

  def setUp(self):
    # Single example: logits favor class 0 but the true label is class 2.
    self._logits = ((1., 0., 0.),)
    self._labels = (2,)

  def _expected_eval_metrics(self, expected_loss):
    """Golden eval metrics for the fixture above at the given loss."""
    return {
        "accuracy": 0.,
        "auc": 1. / 4,
        "loss": expected_loss,
        "auc/class0": 1.,
        "auc/class1": 1.,
        "auc/class2": 0.,
        "labels/actual_label_mean/class0": 0. / 1,
        "labels/actual_label_mean/class1": 0. / 1,
        "labels/actual_label_mean/class2": 1. / 1,
        "labels/logits_mean/class0": self._logits[0][0],
        "labels/logits_mean/class1": self._logits[0][1],
        "labels/logits_mean/class2": self._logits[0][2],
        "labels/prediction_mean/class0": self._logits[0][0],
        "labels/prediction_mean/class1": self._logits[0][1],
        "labels/prediction_mean/class2": self._logits[0][2],
        "labels/probability_mean/class0": 0.576117,  # softmax
        "labels/probability_mean/class1": 0.211942,  # softmax
        "labels/probability_mean/class2": 0.211942,  # softmax
    }

  def testMultiClass(self):
    """TRAIN mode: expected loss and per-class metrics."""
    n_classes = 3
    head = head_lib._multi_class_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      logits = constant_op.constant(self._logits)
      labels = constant_op.constant(self._labels)
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.head_ops(
          {}, labels, model_fn.ModeKeys.TRAIN, _noop_train_op, logits=logits)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      # Golden cross-entropy loss for the fixture.
      expected_loss = 1.5514446
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testMultiClassEvalMode(self):
    """EVAL mode: same metrics as TRAIN, but no train_op."""
    n_classes = 3
    head = head_lib._multi_class_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      logits = constant_op.constant(self._logits)
      labels = constant_op.constant(self._labels)
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.head_ops(
          {}, labels, model_fn.ModeKeys.EVAL, _noop_train_op, logits=logits)
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = 1.5514446
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testMultiClassWithWeight(self):
    """A scalar example weight scales the reported loss linearly."""
    n_classes = 3
    head = head_lib._multi_class_head(
        n_classes=n_classes,
        weight_column_name="label_weight",
        metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      weight = .1
      features = {"label_weight": constant_op.constant(weight)}
      logits = constant_op.constant(self._logits)
      labels = constant_op.constant(self._labels)
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.head_ops(
          features,
          labels,
          model_fn.ModeKeys.TRAIN,
          _noop_train_op,
          logits=logits)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = 1.5514446
      # The loss scales by the weight; the eval metrics do not.
      _assert_metrics(self, expected_loss * weight,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testInvalidNClasses(self):
    """n_classes must be greater than 1; anything else raises ValueError."""
    for n_classes in (None, -1, 0, 1):
      with self.assertRaisesRegexp(ValueError, "n_classes must be > 1"):
        head_lib._multi_class_head(n_classes=n_classes)
class BinarySvmModelHeadTest(test.TestCase):
  """Tests for the binary SVM (hinge-loss) head in head_lib."""

  def setUp(self):
    # Prediction for first example is in the right side of the hyperplane
    # (i.e., < 0) but it is within the [-1,1] margin. There is a 0.5 loss
    # incurred by this example. The 2nd prediction is outside the margin so it
    # incurs no loss at all.
    self._predictions = ((-.5,), (1.2,))
    self._labels = (0, 1)
    self._expected_losses = (.5, 0.)

  def testBinarySVMDefaultWeights(self):
    """TRAIN mode, unweighted: loss is the mean per-example hinge loss."""
    head = head_lib._binary_svm_head()
    with ops.Graph().as_default(), session.Session():
      predictions = constant_op.constant(self._predictions)
      labels = constant_op.constant(self._labels)
      model_fn_ops = head.head_ops(
          {},
          labels,
          model_fn.ModeKeys.TRAIN,
          _noop_train_op,
          logits=predictions)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = np.average(self._expected_losses)
      _assert_metrics(self, expected_loss, {
          "accuracy": 1.,
          "loss": expected_loss,
      }, model_fn_ops)

  def testBinarySVMEvalMode(self):
    """EVAL mode: same loss/metrics, but no train_op is produced."""
    head = head_lib._binary_svm_head()
    with ops.Graph().as_default(), session.Session():
      predictions = constant_op.constant(self._predictions)
      labels = constant_op.constant(self._labels)
      model_fn_ops = head.head_ops(
          {},
          labels,
          model_fn.ModeKeys.EVAL,
          _noop_train_op,
          logits=predictions)
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = np.average(self._expected_losses)
      _assert_metrics(self, expected_loss, {
          "accuracy": 1.,
          "loss": expected_loss,
      }, model_fn_ops)

  def testBinarySVMWithLabelName(self):
    """Labels supplied as a dict keyed by the configured label_name."""
    label_name = "my_label"
    head = head_lib._binary_svm_head(label_name=label_name)
    with ops.Graph().as_default(), session.Session():
      predictions = constant_op.constant(self._predictions)
      labels = {label_name: constant_op.constant(self._labels)}
      model_fn_ops = head.head_ops(
          {},
          labels,
          model_fn.ModeKeys.TRAIN,
          _noop_train_op,
          logits=predictions)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = np.average(self._expected_losses)
      _assert_metrics(self, expected_loss, {
          "accuracy": 1.,
          "loss": expected_loss,
      }, model_fn_ops)

  def testBinarySVMWithWeights(self):
    """Per-example weights: train loss averages over count, eval loss over
    total weight."""
    head = head_lib._binary_svm_head(weight_column_name="weights")
    with ops.Graph().as_default(), session.Session():
      predictions = constant_op.constant(self._predictions)
      labels = constant_op.constant(self._labels)
      weights = (7., 11.)
      features = {"weights": constant_op.constant(weights)}
      model_fn_ops = head.head_ops(
          features,
          labels,
          model_fn.ModeKeys.TRAIN,
          _noop_train_op,
          logits=predictions)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_weighted_sum = np.sum(
          np.multiply(weights, self._expected_losses))
      # Note the two denominators differ: len(weights) vs. sum(weights).
      _assert_metrics(self, expected_weighted_sum / len(weights), {
          "accuracy": 1.,
          "loss": expected_weighted_sum / np.sum(weights),
      }, model_fn_ops)

  def testBinarySVMWithCenteredBias(self):
    """Centered bias creates the expected variables and summary tag."""
    head = head_lib._binary_svm_head(enable_centered_bias=True)
    with ops.Graph().as_default(), session.Session():
      predictions = constant_op.constant(self._predictions)
      labels = constant_op.constant(self._labels)
      model_fn_ops = head.head_ops(
          {},
          labels,
          model_fn.ModeKeys.TRAIN,
          _noop_train_op,
          logits=predictions)
      # Centered bias adds one trainable weight plus its Adagrad slot.
      _assert_variables(
          self,
          expected_global=(
              "centered_bias_weight:0",
              "centered_bias_weight/Adagrad:0",),
          expected_trainable=("centered_bias_weight:0",))
      variables.global_variables_initializer().run()
      _assert_summary_tags(self, ["loss", "centered_bias/bias_0"])
      expected_loss = np.average(self._expected_losses)
      _assert_metrics(self, expected_loss, {
          "accuracy": 1.,
          "loss": expected_loss,
      }, model_fn_ops)
class MultiHeadTest(test.TestCase):
  """Tests for _multi_head, which combines several heads over shared logits.

  All tests use a 3-class head and a 4-class head: the 7 logits columns are
  split between them.
  """

  def testTrain_withNoHeadWeights(self):
    """TRAIN mode, unweighted heads: combined loss is the plain sum."""
    head1 = head_lib._multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib._multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib._multi_head([head1, head2])
    logits = constant_op.constant([[-0.7, 0.2, .1, .1, .1, .1, .1]])
    labels = {
        "label1": constant_op.constant([1]),
        "label2": constant_op.constant([1])
    }
    # NOTE(review): neither head declares a weight column, so "weights" in
    # features is presumably unused here -- confirm against head_lib.
    features = {"weights": constant_op.constant([2.0, 10.0])}
    model_fn_ops = head.head_ops(
        features,
        labels,
        model_fn.ModeKeys.TRAIN,
        _noop_train_op,
        logits=logits)
    self.assertEquals(None, model_fn_ops.predictions)
    self.assertTrue(model_fn_ops.loss is not None)
    self.assertTrue(model_fn_ops.train_op is not None)
    self.assertFalse(model_fn_ops.eval_metric_ops)
    self.assertEquals(None, model_fn_ops.output_alternatives)
    with session.Session() as sess:
      self.assertAlmostEqual(2.224, sess.run(model_fn_ops.loss), places=3)

  def testTrain_withHeadWeights(self):
    """TRAIN mode with head weights [1, .5]: combined loss is weighted sum."""
    head1 = head_lib._multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib._multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib._multi_head([head1, head2], [1, .5])
    logits = constant_op.constant([[-0.7, 0.2, .1, .1, .1, .1, .1]])
    labels = {
        "label1": constant_op.constant([1]),
        "label2": constant_op.constant([1])
    }
    features = {"weights": constant_op.constant([2.0, 10.0])}
    model_fn_ops = head.head_ops(
        features,
        labels,
        model_fn.ModeKeys.TRAIN,
        _noop_train_op,
        logits=logits)
    self.assertEquals(None, model_fn_ops.predictions)
    self.assertTrue(model_fn_ops.loss is not None)
    self.assertTrue(model_fn_ops.train_op is not None)
    self.assertFalse(model_fn_ops.eval_metric_ops)
    self.assertEquals(None, model_fn_ops.output_alternatives)
    with session.Session() as sess:
      self.assertAlmostEqual(1.531, sess.run(model_fn_ops.loss), places=3)

  def testInfer(self):
    """INFER mode: per-head predictions and output alternatives, no loss."""
    head1 = head_lib._multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib._multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib._multi_head([head1, head2], [1, .5])
    logits = constant_op.constant([[-0.7, 0.2, .1, .1, .1, .1, .1]])
    labels = {
        "label1": constant_op.constant([1]),
        "label2": constant_op.constant([1])
    }
    features = {"weights": constant_op.constant([2.0, 10.0])}
    model_fn_ops = head.head_ops(
        features,
        labels,
        model_fn.ModeKeys.INFER,
        _noop_train_op,
        logits=logits)
    self.assertTrue(model_fn_ops.predictions)
    self.assertEquals(None, model_fn_ops.loss)
    self.assertEquals(None, model_fn_ops.train_op)
    self.assertFalse(model_fn_ops.eval_metric_ops)
    self.assertTrue(len(model_fn_ops.output_alternatives) == 2)
    # Tests predictions keys: predictions are keyed by (head_name, pred_key).
    pred_keys = model_fn_ops.predictions.keys()
    self.assertTrue(
        ("head1", prediction_key.PredictionKey.PROBABILITIES) in pred_keys)
    self.assertTrue(
        ("head1", prediction_key.PredictionKey.CLASSES) in pred_keys)
    self.assertTrue(
        ("head2", prediction_key.PredictionKey.PROBABILITIES) in pred_keys)
    self.assertTrue(
        ("head2", prediction_key.PredictionKey.CLASSES) in pred_keys)
    # Tests output alternative: one CLASSIFICATION alternative per head.
    out_alts = model_fn_ops.output_alternatives
    self.assertEquals(constants.ProblemType.CLASSIFICATION,
                      out_alts["head1"][0])
    self.assertTrue(prediction_key.PredictionKey.PROBABILITIES in
                    out_alts["head1"][1].keys())
    self.assertTrue(
        prediction_key.PredictionKey.CLASSES in out_alts["head1"][1].keys())
    self.assertEquals(constants.ProblemType.CLASSIFICATION,
                      out_alts["head2"][0])
    self.assertTrue(prediction_key.PredictionKey.PROBABILITIES in
                    out_alts["head2"][1].keys())
    self.assertTrue(
        prediction_key.PredictionKey.CLASSES in out_alts["head2"][1].keys())

  def testEval(self):
    """EVAL mode: per-head metric keys, loss present, no train_op."""
    head1 = head_lib._multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib._multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib._multi_head([head1, head2], [1, .5])
    logits = constant_op.constant([[-0.7, 0.2, .1, .1, .1, .1, .1]])
    labels = {
        "label1": constant_op.constant([1]),
        "label2": constant_op.constant([1])
    }
    features = {"weights": constant_op.constant([2.0, 10.0])}
    model_fn_ops = head.head_ops(
        features, labels, model_fn.ModeKeys.EVAL, _noop_train_op, logits=logits)
    self.assertTrue(model_fn_ops.predictions)
    self.assertTrue(model_fn_ops.loss is not None)
    self.assertEquals(None, model_fn_ops.train_op)
    self.assertTrue(model_fn_ops.eval_metric_ops)
    self.assertEquals(None, model_fn_ops.output_alternatives)
    metric_ops = model_fn_ops.eval_metric_ops
    # Tests eval keys: metrics are suffixed with the head name.
    self.assertTrue("accuracy/head1" in metric_ops.keys())
    self.assertTrue("accuracy/head2" in metric_ops.keys())
def _noop_train_op(unused_loss):
  """No-op train-op factory; the tests only check head_ops wiring."""
  return control_flow_ops.no_op()


if __name__ == "__main__":
  test.main()
| apache-2.0 |
shravan-achar/servo | components/script/dom/bindings/codegen/parser/tests/test_attr_sequence_type.py | 276 | 1626 | def WebIDLTest(parser, harness):
    # Per the WebIDL spec, sequence types may not appear as attribute types,
    # directly or via (nested) union member types. Each case below must make
    # the parser throw.
    # NOTE(review): the bare `except:` clauses are kept as-is; consider
    # `except Exception:` so SystemExit/KeyboardInterrupt are not swallowed.

    # Case 1: plain sequence-typed attribute.
    threw = False
    try:
        parser.parse("""
            interface AttrSequenceType {
              attribute sequence<object> foo;
            };
        """)
        results = parser.finish()
    except:
        threw = True

    harness.ok(threw, "Attribute type must not be a sequence type")

    # Case 2: union with a sequence member type.
    parser.reset()
    threw = False
    try:
        parser.parse("""
            interface AttrUnionWithSequenceType {
              attribute (sequence<object> or DOMString) foo;
            };
        """)
        results = parser.finish()
    except:
        threw = True

    harness.ok(threw,
               "Attribute type must not be a union with a sequence member type")

    # Case 3: union with a *nullable* sequence member type.
    parser.reset()
    threw = False
    try:
        parser.parse("""
            interface AttrNullableUnionWithSequenceType {
              attribute (sequence<object>? or DOMString) foo;
            };
        """)
        results = parser.finish()
    except:
        threw = True

    harness.ok(threw,
               "Attribute type must not be a union with a nullable sequence "
               "member type")

    # Case 4: nested union whose inner union has a sequence member type.
    parser.reset()
    threw = False
    try:
        parser.parse("""
            interface AttrUnionWithUnionWithSequenceType {
              attribute ((sequence<object> or DOMString) or AttrUnionWithUnionWithSequenceType) foo;
            };
        """)
        results = parser.finish()
    except:
        threw = True

    harness.ok(threw,
               "Attribute type must not be a union type with a union member "
               "type that has a sequence member type")
| mpl-2.0 |
meteorcloudy/tensorflow | tensorflow/python/lib/io/file_io.py | 23 | 18951 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""File IO methods that wrap the C++ FileSystem API.
The C++ FileSystem API is SWIG wrapped in file_io.i. These functions call those
to accomplish basic File IO operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import binascii
import os
import uuid
import six
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import errors
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# A good default block size depends on the system in question.
# A somewhat conservative default chosen here.
_DEFAULT_BLOCK_SIZE = 16 * 1024 * 1024
class FileIO(object):
  """FileIO class that exposes methods to read / write to / from files.

  The constructor takes the following arguments:
  name: name of the file
  mode: one of 'r', 'w', 'a', 'r+', 'w+', 'a+'. Append 'b' for bytes mode.

  Can be used as an iterator to iterate over lines in the file.

  The default buffer size used for the BufferedInputStream used for reading
  the file line by line is 1024 * 512 bytes.
  """

  def __init__(self, name, mode):
    self.__name = name
    self.__mode = mode
    # Underlying C++ streams are created lazily on first read/write.
    self._read_buf = None
    self._writable_file = None
    self._binary_mode = "b" in mode
    mode = mode.replace("b", "")
    if mode not in ("r", "w", "a", "r+", "w+", "a+"):
      raise errors.InvalidArgumentError(
          None, None, "mode is not 'r' or 'w' or 'a' or 'r+' or 'w+' or 'a+'")
    # Which operations the mode permits; checked lazily in _pre*_check.
    self._read_check_passed = mode in ("r", "r+", "a+", "w+")
    self._write_check_passed = mode in ("a", "w", "r+", "a+", "w+")

  @property
  def name(self):
    """Returns the file name."""
    return self.__name

  @property
  def mode(self):
    """Returns the mode in which the file was opened."""
    return self.__mode

  def _preread_check(self):
    # Lazily create the buffered input stream; raises if the mode is
    # write-only.
    if not self._read_buf:
      if not self._read_check_passed:
        raise errors.PermissionDeniedError(None, None,
                                           "File isn't open for reading")
      with errors.raise_exception_on_not_ok_status() as status:
        self._read_buf = pywrap_tensorflow.CreateBufferedInputStream(
            compat.as_bytes(self.__name), 1024 * 512, status)

  def _prewrite_check(self):
    # Lazily create the writable file; raises if the mode is read-only.
    if not self._writable_file:
      if not self._write_check_passed:
        raise errors.PermissionDeniedError(None, None,
                                           "File isn't open for writing")
      with errors.raise_exception_on_not_ok_status() as status:
        self._writable_file = pywrap_tensorflow.CreateWritableFile(
            compat.as_bytes(self.__name), compat.as_bytes(self.__mode), status)

  def _prepare_value(self, val):
    # Convert data coming back from C++ to bytes or str per the open mode.
    if self._binary_mode:
      return compat.as_bytes(val)
    else:
      return compat.as_str_any(val)

  def size(self):
    """Returns the size of the file."""
    return stat(self.__name).length

  def write(self, file_content):
    """Writes file_content to the file. Appends to the end of the file."""
    self._prewrite_check()
    with errors.raise_exception_on_not_ok_status() as status:
      pywrap_tensorflow.AppendToFile(
          compat.as_bytes(file_content), self._writable_file, status)

  def read(self, n=-1):
    """Returns the contents of a file as a string.

    Starts reading from current position in file.

    Args:
      n: Read 'n' bytes if n != -1. If n = -1, reads to end of file.

    Returns:
      'n' bytes of the file (or whole file) in bytes mode or 'n' bytes of the
      string if in string (regular) mode.
    """
    self._preread_check()
    with errors.raise_exception_on_not_ok_status() as status:
      if n == -1:
        # Read everything remaining from the current position.
        length = self.size() - self.tell()
      else:
        length = n
      return self._prepare_value(
          pywrap_tensorflow.ReadFromStream(self._read_buf, length, status))

  @deprecation.deprecated_args(
      None,
      "position is deprecated in favor of the offset argument.",
      "position")
  def seek(self, offset=None, whence=0, position=None):
    # TODO(jhseu): Delete later. Used to omit `position` from docs.
    # pylint: disable=g-doc-args
    """Seeks to the offset in the file.

    Args:
      offset: The byte count relative to the whence argument.
      whence: Valid values for whence are:
        0: start of the file (default)
        1: relative to the current position of the file
        2: relative to the end of file. offset is usually negative.
    """
    # pylint: enable=g-doc-args
    self._preread_check()
    # We needed to make offset a keyword argument for backwards-compatibility.
    # This check exists so that we can convert back to having offset be a
    # positional argument.
    # TODO(jhseu): Make `offset` a positional argument after `position` is
    # deleted.
    if offset is None and position is None:
      raise TypeError("seek(): offset argument required")
    if offset is not None and position is not None:
      raise TypeError("seek(): offset and position may not be set "
                      "simultaneously.")

    if position is not None:
      offset = position

    with errors.raise_exception_on_not_ok_status() as status:
      # Normalize whence-relative offsets to an absolute offset.
      if whence == 0:
        pass
      elif whence == 1:
        offset += self.tell()
      elif whence == 2:
        offset += self.size()
      else:
        raise errors.InvalidArgumentError(
            None, None,
            "Invalid whence argument: {}. Valid values are 0, 1, or 2."
            .format(whence))
      ret_status = self._read_buf.Seek(offset)
      pywrap_tensorflow.Set_TF_Status_from_Status(status, ret_status)

  def readline(self):
    r"""Reads the next line from the file. Leaves the '\n' at the end."""
    self._preread_check()
    return self._prepare_value(self._read_buf.ReadLineAsString())

  def readlines(self):
    """Returns all lines from the file in a list."""
    self._preread_check()
    lines = []
    while True:
      s = self.readline()
      if not s:
        break
      lines.append(s)
    return lines

  def tell(self):
    """Returns the current position in the file."""
    self._preread_check()
    return self._read_buf.Tell()

  def __enter__(self):
    """Make usable with "with" statement."""
    return self

  def __exit__(self, unused_type, unused_value, unused_traceback):
    """Make usable with "with" statement."""
    self.close()

  def __iter__(self):
    return self

  def next(self):
    # Python 2 iterator protocol.
    retval = self.readline()
    if not retval:
      raise StopIteration()
    return retval

  def __next__(self):
    # Python 3 iterator protocol delegates to next().
    return self.next()

  def flush(self):
    """Flushes the Writable file.

    This only ensures that the data has made its way out of the process without
    any guarantees on whether it's written to disk. This means that the
    data would survive an application crash but not necessarily an OS crash.
    """
    if self._writable_file:
      with errors.raise_exception_on_not_ok_status() as status:
        ret_status = self._writable_file.Flush()
        pywrap_tensorflow.Set_TF_Status_from_Status(status, ret_status)

  def close(self):
    """Closes FileIO. Should be called for the WritableFile to be flushed."""
    # Dropping the read buffer releases the underlying input stream.
    self._read_buf = None
    if self._writable_file:
      with errors.raise_exception_on_not_ok_status() as status:
        ret_status = self._writable_file.Close()
        pywrap_tensorflow.Set_TF_Status_from_Status(status, ret_status)
    self._writable_file = None
@tf_export("gfile.Exists")
def file_exists(filename):
  """Determines whether a path exists or not.

  Args:
    filename: string, a path

  Returns:
    True if the path exists, whether its a file or a directory.
    False if the path does not exist and there are no filesystem errors.

  Raises:
    errors.OpError: Propagates any errors reported by the FileSystem API.
  """
  try:
    with errors.raise_exception_on_not_ok_status() as status:
      pywrap_tensorflow.FileExists(compat.as_bytes(filename), status)
  except errors.NotFoundError:
    # Only NotFound maps to False; other filesystem errors propagate.
    return False
  return True
@tf_export("gfile.Remove")
def delete_file(filename):
  """Deletes the file located at 'filename'.

  Args:
    filename: string, a filename

  Raises:
    errors.OpError: Propagates any errors reported by the FileSystem API. E.g.,
      NotFoundError if the file does not exist.
  """
  # Thin wrapper: the status context converts a non-OK C++ status to OpError.
  with errors.raise_exception_on_not_ok_status() as status:
    pywrap_tensorflow.DeleteFile(compat.as_bytes(filename), status)
def read_file_to_string(filename, binary_mode=False):
  """Reads the entire contents of a file to a string.

  Args:
    filename: string, path to a file
    binary_mode: whether to open the file in binary mode or not. This changes
      the type of the object returned.

  Returns:
    contents of the file as a string or bytes.

  Raises:
    errors.OpError: Raises variety of errors that are subtypes e.g.
      NotFoundError etc.
  """
  mode = "rb" if binary_mode else "r"
  # Use a context manager so the underlying stream is released promptly;
  # the previous implementation leaked the FileIO handle until GC.
  with FileIO(filename, mode=mode) as f:
    return f.read()
def write_string_to_file(filename, file_content):
  """Writes a string to a given file.

  Args:
    filename: string, path to a file
    file_content: string, contents that need to be written to the file

  Raises:
    errors.OpError: If there are errors during the operation.
  """
  # Context manager guarantees the writable file is flushed and closed.
  with FileIO(filename, mode="w") as f:
    f.write(file_content)
@tf_export("gfile.Glob")
def get_matching_files(filename):
  """Returns a list of files that match the given pattern(s).

  Args:
    filename: string or iterable of strings. The glob pattern(s).

  Returns:
    A list of strings containing filenames that match the given pattern(s).

  Raises:
    errors.OpError: If there are filesystem / directory listing errors.
  """
  with errors.raise_exception_on_not_ok_status() as status:
    if isinstance(filename, six.string_types):
      return [
          # Convert the filenames to string from bytes.
          compat.as_str_any(matching_filename)
          for matching_filename in pywrap_tensorflow.GetMatchingFiles(
              compat.as_bytes(filename), status)
      ]
    else:
      # Iterable of patterns: flatten the matches of each pattern, in order.
      return [
          # Convert the filenames to string from bytes.
          compat.as_str_any(matching_filename)
          for single_filename in filename
          for matching_filename in pywrap_tensorflow.GetMatchingFiles(
              compat.as_bytes(single_filename), status)
      ]
@tf_export("gfile.MkDir")
def create_dir(dirname):
  """Creates a directory with the name 'dirname'.

  Args:
    dirname: string, name of the directory to be created

  Notes:
    The parent directories need to exist. Use recursive_create_dir instead if
    there is the possibility that the parent dirs don't exist.

  Raises:
    errors.OpError: If the operation fails.
  """
  # Single-level mkdir; see recursive_create_dir for mkdir -p semantics.
  with errors.raise_exception_on_not_ok_status() as status:
    pywrap_tensorflow.CreateDir(compat.as_bytes(dirname), status)
@tf_export("gfile.MakeDirs")
def recursive_create_dir(dirname):
  """Creates a directory and all parent/intermediate directories.

  It succeeds if dirname already exists and is writable.

  Args:
    dirname: string, name of the directory to be created

  Raises:
    errors.OpError: If the operation fails.
  """
  # Equivalent of `mkdir -p`; existing writable dirs are not an error.
  with errors.raise_exception_on_not_ok_status() as status:
    pywrap_tensorflow.RecursivelyCreateDir(compat.as_bytes(dirname), status)
@tf_export("gfile.Copy")
def copy(oldpath, newpath, overwrite=False):
  """Copies data from oldpath to newpath.

  Args:
    oldpath: string, name of the file who's contents need to be copied
    newpath: string, name of the file to which to copy to
    overwrite: boolean, if false its an error for newpath to be occupied by an
      existing file.

  Raises:
    errors.OpError: If the operation fails.
  """
  with errors.raise_exception_on_not_ok_status() as status:
    pywrap_tensorflow.CopyFile(
        compat.as_bytes(oldpath), compat.as_bytes(newpath), overwrite, status)
@tf_export("gfile.Rename")
def rename(oldname, newname, overwrite=False):
  """Rename or move a file / directory.

  Args:
    oldname: string, pathname for a file
    newname: string, pathname to which the file needs to be moved
    overwrite: boolean, if false it's an error for `newname` to be occupied by
      an existing file.

  Raises:
    errors.OpError: If the operation fails.
  """
  with errors.raise_exception_on_not_ok_status() as status:
    pywrap_tensorflow.RenameFile(
        compat.as_bytes(oldname), compat.as_bytes(newname), overwrite, status)
def atomic_write_string_to_file(filename, contents, overwrite=True):
  """Writes to `filename` atomically.

  This means that when `filename` appears in the filesystem, it will contain
  all of `contents`. With write_string_to_file, it is possible for the file
  to appear in the filesystem with `contents` only partially written.

  Accomplished by writing to a temp file and then renaming it.

  Args:
    filename: string, pathname for a file
    contents: string, contents that need to be written to the file
    overwrite: boolean, if false it's an error for `filename` to be occupied by
      an existing file.
  """
  # Random suffix avoids collisions between concurrent writers of the same
  # target path.
  temp_pathname = filename + ".tmp" + uuid.uuid4().hex
  write_string_to_file(temp_pathname, contents)
  try:
    rename(temp_pathname, filename, overwrite)
  except errors.OpError:
    # Clean up the temp file before re-raising so nothing is left behind.
    delete_file(temp_pathname)
    raise
@tf_export("gfile.DeleteRecursively")
def delete_recursively(dirname):
  """Deletes everything under dirname recursively.

  Args:
    dirname: string, a path to a directory

  Raises:
    errors.OpError: If the operation fails.
  """
  with errors.raise_exception_on_not_ok_status() as status:
    pywrap_tensorflow.DeleteRecursively(compat.as_bytes(dirname), status)
@tf_export("gfile.IsDirectory")
def is_directory(dirname):
  """Returns whether the path is a directory or not.

  Args:
    dirname: string, path to a potential directory

  Returns:
    True, if the path is a directory; False otherwise
  """
  # A scoped status is used (not the raising context manager): errors here
  # are swallowed and simply yield a False-ish result.
  status = c_api_util.ScopedTFStatus()
  return pywrap_tensorflow.IsDirectory(compat.as_bytes(dirname), status)
@tf_export("gfile.ListDirectory")
def list_directory(dirname):
  """Returns a list of entries contained within a directory.

  The list is in arbitrary order. It does not contain the special entries "."
  and "..".

  Args:
    dirname: string, path to a directory

  Returns:
    [filename1, filename2, ... filenameN] as strings

  Raises:
    errors.NotFoundError if directory doesn't exist
  """
  if not is_directory(dirname):
    raise errors.NotFoundError(None, None, "Could not find directory")
  with errors.raise_exception_on_not_ok_status() as status:
    # Convert each element to string, since the return values of the
    # vector of string should be interpreted as strings, not bytes.
    return [
        compat.as_str_any(filename)
        for filename in pywrap_tensorflow.GetChildren(
            compat.as_bytes(dirname), status)
    ]
@tf_export("gfile.Walk")
def walk(top, in_order=True):
  """Recursive directory tree generator for directories.

  Args:
    top: string, a Directory name
    in_order: bool, Traverse in order if True, post order if False.

  Errors that happen while listing directories are ignored.

  Yields:
    Each yield is a 3-tuple: the pathname of a directory, followed by lists of
    all its subdirectories and leaf files.
    (dirname, [subdirname, subdirname, ...], [filename, filename, ...])
    as strings
  """
  top = compat.as_str_any(top)
  try:
    entries = list_directory(top)
  except errors.NotFoundError:
    # A vanished / unreadable directory is silently skipped.
    return

  # Split the listing into subdirectories and leaf files.
  subdirs, files = [], []
  for entry in entries:
    target = subdirs if is_directory(os.path.join(top, entry)) else files
    target.append(entry)

  current = (top, subdirs, files)
  if in_order:
    # Pre-order: emit the directory before its children.
    yield current
  for subdir in subdirs:
    for descendant in walk(os.path.join(top, subdir), in_order):
      yield descendant
  if not in_order:
    # Post-order: emit the directory after all of its children.
    yield current
@tf_export("gfile.Stat")
def stat(filename):
  """Returns file statistics for a given path.

  Args:
    filename: string, path to a file

  Returns:
    FileStatistics struct that contains information about the path

  Raises:
    errors.OpError: If the operation fails.
  """
  # The C++ call fills the struct in place; it is then returned to Python.
  file_statistics = pywrap_tensorflow.FileStatistics()
  with errors.raise_exception_on_not_ok_status() as status:
    pywrap_tensorflow.Stat(compat.as_bytes(filename), file_statistics, status)
    return file_statistics
def filecmp(filename_a, filename_b):
  """Compare two files, returning True if they are the same, False otherwise.

  We check size first and return False quickly if the files are different
  sizes. If they are the same size, we continue to generating a crc for the
  whole file.

  You might wonder: why not use Python's filecmp.cmp() instead? The answer is
  that the builtin library is not robust to the many different filesystems
  TensorFlow runs on, and so we here perform a similar comparison with
  the more robust FileIO.

  Args:
    filename_a: string path to the first file.
    filename_b: string path to the second file.

  Returns:
    True if the files are the same, False otherwise.
  """
  # Use context managers (as file_crc32 below does) so the underlying file
  # handles are released promptly instead of leaking until GC.
  with FileIO(filename_a, "rb") as f_a:
    size_a = f_a.size()
  with FileIO(filename_b, "rb") as f_b:
    size_b = f_b.size()
  if size_a != size_b:
    return False
  # Size is the same. Do a full check.
  return file_crc32(filename_a) == file_crc32(filename_b)
def file_crc32(filename, block_size=_DEFAULT_BLOCK_SIZE):
  """Get the crc32 of the passed file.

  The crc32 of a file can be used for error checking; two files with the same
  crc32 are considered equivalent. Note that the entire file must be read
  to produce the crc32.

  Args:
    filename: string, path to a file
    block_size: Integer, process the files by reading blocks of `block_size`
      bytes. Use -1 to read the file as once.

  Returns:
    hexadecimal as string, the crc32 of the passed file.
  """
  checksum = 0
  with FileIO(filename, mode="rb") as f:
    while True:
      block = f.read(n=block_size)
      if not block:
        break
      # Fold each block into the running checksum.
      checksum = binascii.crc32(block, checksum)
  # Mask to 32 bits for a sign-independent hexadecimal representation.
  return hex(checksum & 0xFFFFFFFF)
| apache-2.0 |
vileopratama/vitech | src/addons/gamification/models/badge.py | 23 | 13043 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as DF
from openerp.tools.translate import _
from datetime import date
import logging
from openerp.exceptions import UserError
_logger = logging.getLogger(__name__)
class gamification_badge_user(osv.Model):
    """User having received a badge"""

    _name = 'gamification.badge.user'
    _description = 'Gamification user badge'
    _order = "create_date desc"
    _rec_name = "badge_name"

    _columns = {
        'user_id': fields.many2one('res.users', string="User", required=True, ondelete="cascade", select=True),
        'sender_id': fields.many2one('res.users', string="Sender", help="The user who has send the badge"),
        'badge_id': fields.many2one('gamification.badge', string='Badge', required=True, ondelete="cascade", select=True),
        'challenge_id': fields.many2one('gamification.challenge', string='Challenge originating', help="If this badge was rewarded through a challenge"),
        'comment': fields.text('Comment'),
        # Related field so records display the badge's name (see _rec_name).
        'badge_name': fields.related('badge_id', 'name', type="char", string="Badge Name"),
        'create_date': fields.datetime('Created', readonly=True),
        'create_uid': fields.many2one('res.users', string='Creator', readonly=True),
    }

    def _send_badge(self, cr, uid, ids, context=None):
        """Send a notification to a user for receiving a badge

        Does not verify constrains on badge granting.
        The users are added to the owner_ids (create badge_user if needed)
        The stats counters are incremented

        :param ids: list(int) of badge users that will receive the badge
        """
        res = True
        temp_obj = self.pool.get('mail.template')
        user_obj = self.pool.get('res.users')
        template_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'gamification', 'email_template_badge_received')[1]
        for badge_user in self.browse(cr, uid, ids, context=context):
            # Render the notification body against this badge.user record.
            template = temp_obj.get_email_template(cr, uid, template_id, badge_user.id, context=context)
            body_html = temp_obj.render_template(cr, uid, template.body_html, 'gamification.badge.user', badge_user.id, context=template._context)
            # NOTE(review): when several ids are processed, res keeps only
            # the result of the last message_post call.
            res = user_obj.message_post(
                cr, uid, badge_user.user_id.id,
                body=body_html,
                subtype='gamification.mt_badge_granted',
                partner_ids=[badge_user.user_id.partner_id.id],
                context=context)
        return res

    def create(self, cr, uid, vals, context=None):
        # check_granting raises a UserError when the current user is not
        # allowed to grant this badge, aborting the creation.
        self.pool.get('gamification.badge').check_granting(cr, uid, badge_id=vals.get('badge_id'), context=context)
        return super(gamification_badge_user, self).create(cr, uid, vals, context=context)
class gamification_badge(osv.Model):
    """Badge object that users can send and receive"""

    # Status codes returned by _can_grant_badge / consumed by check_granting.
    CAN_GRANT = 1
    NOBODY_CAN_GRANT = 2
    USER_NOT_VIP = 3
    BADGE_REQUIRED = 4
    TOO_MANY = 5

    _name = 'gamification.badge'
    _description = 'Gamification badge'
    _inherit = ['mail.thread']

    def _get_owners_info(self, cr, uid, ids, name, args, context=None):
        """Return:
            the list of unique res.users ids having received this badge
            the total number of time this badge was granted
            the total number of users this badge was granted to
        """
        result = dict((res_id, {'stat_count': 0, 'stat_count_distinct': 0, 'unique_owner_ids': []}) for res_id in ids)
        # Single aggregated query instead of one search per badge.
        cr.execute("""
            SELECT badge_id, count(user_id) as stat_count,
                count(distinct(user_id)) as stat_count_distinct,
                array_agg(distinct(user_id)) as unique_owner_ids
            FROM gamification_badge_user
            WHERE badge_id in %s
            GROUP BY badge_id
            """, (tuple(ids),))
        for (badge_id, stat_count, stat_count_distinct, unique_owner_ids) in cr.fetchall():
            result[badge_id] = {
                'stat_count': stat_count,
                'stat_count_distinct': stat_count_distinct,
                'unique_owner_ids': unique_owner_ids,
            }
        return result

    def _get_badge_user_stats(self, cr, uid, ids, name, args, context=None):
        """Return stats related to badge users"""
        result = dict.fromkeys(ids, False)
        badge_user_obj = self.pool.get('gamification.badge.user')
        # First day of the current month, used for the "this month" counters.
        first_month_day = date.today().replace(day=1).strftime(DF)
        for bid in ids:
            result[bid] = {
                'stat_my': badge_user_obj.search(cr, uid, [('badge_id', '=', bid), ('user_id', '=', uid)], context=context, count=True),
                'stat_this_month': badge_user_obj.search(cr, uid, [('badge_id', '=', bid), ('create_date', '>=', first_month_day)], context=context, count=True),
                'stat_my_this_month': badge_user_obj.search(cr, uid, [('badge_id', '=', bid), ('user_id', '=', uid), ('create_date', '>=', first_month_day)], context=context, count=True),
                'stat_my_monthly_sending': badge_user_obj.search(cr, uid, [('badge_id', '=', bid), ('create_uid', '=', uid), ('create_date', '>=', first_month_day)], context=context, count=True)
            }
        return result

    def _remaining_sending_calc(self, cr, uid, ids, name, args, context=None):
        """Computes the number of badges remaining the user can send

        0 if not allowed or no remaining
        integer if limited sending
        -1 if infinite (should not be displayed)
        """
        result = dict.fromkeys(ids, False)
        for badge in self.browse(cr, uid, ids, context=context):
            if self._can_grant_badge(cr, uid, badge.id, context) != 1:
                # if the user cannot grant this badge at all, result is 0
                result[badge.id] = 0
            elif not badge.rule_max:
                # if there is no limitation, -1 is returned which means 'infinite'
                result[badge.id] = -1
            else:
                result[badge.id] = badge.rule_max_number - badge.stat_my_monthly_sending
        return result

    _columns = {
        'name': fields.char('Badge', required=True, translate=True),
        'description': fields.text('Description', translate=True),
        'image': fields.binary("Image", attachment=True,
            help="This field holds the image used for the badge, limited to 256x256"),
        # Rules governing who may manually grant this badge.
        'rule_auth': fields.selection([
                ('everyone', 'Everyone'),
                ('users', 'A selected list of users'),
                ('having', 'People having some badges'),
                ('nobody', 'No one, assigned through challenges'),
            ],
            string="Allowance to Grant",
            help="Who can grant this badge",
            required=True),
        'rule_auth_user_ids': fields.many2many('res.users', 'rel_badge_auth_users',
            string='Authorized Users',
            help="Only these people can give this badge"),
        'rule_auth_badge_ids': fields.many2many('gamification.badge',
            'gamification_badge_rule_badge_rel', 'badge1_id', 'badge2_id',
            string='Required Badges',
            help="Only the people having these badges can give this badge"),
        'rule_max': fields.boolean('Monthly Limited Sending',
            help="Check to set a monthly limit per person of sending this badge"),
        'rule_max_number': fields.integer('Limitation Number',
            help="The maximum number of time this badge can be sent per month per person."),
        'stat_my_monthly_sending': fields.function(_get_badge_user_stats,
            type="integer",
            string='My Monthly Sending Total',
            multi='badge_users',
            help="The number of time the current user has sent this badge this month."),
        'remaining_sending': fields.function(_remaining_sending_calc, type='integer',
            string='Remaining Sending Allowed', help="If a maxium is set"),
        'challenge_ids': fields.one2many('gamification.challenge', 'reward_id',
            string="Reward of Challenges"),
        'goal_definition_ids': fields.many2many('gamification.goal.definition', 'badge_unlocked_definition_rel',
            string='Rewarded by',
            help="The users that have succeeded theses goals will receive automatically the badge."),
        'owner_ids': fields.one2many('gamification.badge.user', 'badge_id',
            string='Owners', help='The list of instances of this badge granted to users'),
        'active': fields.boolean('Active'),
        'unique_owner_ids': fields.function(_get_owners_info,
            string='Unique Owners',
            help="The list of unique users having received this badge.",
            multi='unique_users',
            type="many2many", relation="res.users"),
        'stat_count': fields.function(_get_owners_info, string='Total',
            type="integer",
            multi='stat_users',
            help="The number of time this badge has been received."),
        'stat_count_distinct': fields.function(_get_owners_info,
            type="integer",
            string='Number of users',
            multi='stat_users',
            help="The number of time this badge has been received by unique users."),
        'stat_this_month': fields.function(_get_badge_user_stats,
            type="integer",
            string='Monthly total',
            multi='badge_users',
            help="The number of time this badge has been received this month."),
        'stat_my': fields.function(_get_badge_user_stats, string='My Total',
            type="integer",
            multi='badge_users',
            help="The number of time the current user has received this badge."),
        'stat_my_this_month': fields.function(_get_badge_user_stats,
            type="integer",
            string='My Monthly Total',
            multi='badge_users',
            help="The number of time the current user has received this badge this month."),
    }

    _defaults = {
        'rule_auth': 'everyone',
        'active': True,
    }

    def check_granting(self, cr, uid, badge_id, context=None):
        """Check the user 'uid' can grant the badge 'badge_id' and raise the appropriate exception
        if not

        Do not check for SUPERUSER_ID
        """
        status_code = self._can_grant_badge(cr, uid, badge_id, context=context)
        if status_code == self.CAN_GRANT:
            return True
        elif status_code == self.NOBODY_CAN_GRANT:
            raise UserError(_('This badge can not be sent by users.'))
        elif status_code == self.USER_NOT_VIP:
            raise UserError(_('You are not in the user allowed list.'))
        elif status_code == self.BADGE_REQUIRED:
            raise UserError(_('You do not have the required badges.'))
        elif status_code == self.TOO_MANY:
            raise UserError(_('You have already sent this badge too many time this month.'))
        else:
            # Unexpected status codes are logged instead of raised.
            _logger.exception("Unknown badge status code: %d" % int(status_code))
        return False

    def _can_grant_badge(self, cr, uid, badge_id, context=None):
        """Check if a user can grant a badge to another user

        :param uid: the id of the res.users trying to send the badge
        :param badge_id: the granted badge id
        :return: integer representing the permission.
        """
        if uid == SUPERUSER_ID:
            return self.CAN_GRANT
        badge = self.browse(cr, uid, badge_id, context=context)
        if badge.rule_auth == 'nobody':
            return self.NOBODY_CAN_GRANT
        elif badge.rule_auth == 'users' and uid not in [user.id for user in badge.rule_auth_user_ids]:
            return self.USER_NOT_VIP
        elif badge.rule_auth == 'having':
            # NOTE(review): search() on gamification.badge.user returns
            # badge.user record ids, while required_badge.id is a badge id.
            # These look like different id spaces -- confirm this membership
            # test actually matches the intended "user owns badge" check.
            all_user_badges = self.pool.get('gamification.badge.user').search(cr, uid, [('user_id', '=', uid)], context=context)
            for required_badge in badge.rule_auth_badge_ids:
                if required_badge.id not in all_user_badges:
                    return self.BADGE_REQUIRED
        if badge.rule_max and badge.stat_my_monthly_sending >= badge.rule_max_number:
            return self.TOO_MANY
        # badge.rule_auth == 'everyone' -> no check
        return self.CAN_GRANT

    def check_progress(self, cr, uid, context=None):
        # Grants the hidden "discover" badge to the current user the first
        # time this runs; a missing XML-id record makes this a no-op.
        try:
            model, res_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'gamification', 'badge_hidden')
        except ValueError:
            return True
        badge_user_obj = self.pool.get('gamification.badge.user')
        if not badge_user_obj.search(cr, uid, [('user_id', '=', uid), ('badge_id', '=', res_id)], context=context):
            values = {
                'user_id': uid,
                'badge_id': res_id,
            }
            # Created as superuser to bypass the granting permission check.
            badge_user_obj.create(cr, SUPERUSER_ID, values, context=context)
        return True
| mit |
bestvibes/neo4j-social-network | mac_env/lib/python2.7/site-packages/flask/views.py | 782 | 5642 | # -*- coding: utf-8 -*-
"""
flask.views
~~~~~~~~~~~
This module provides class-based views inspired by the ones in Django.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from .globals import request
from ._compat import with_metaclass
# Lower-cased method names MethodViewType scans for when auto-filling the
# `methods` attribute of a MethodView subclass.
http_method_funcs = frozenset(['get', 'post', 'head', 'options',
                               'delete', 'put', 'trace', 'patch'])
class View(object):
    """Alternative way to use view functions.  A subclass has to implement
    :meth:`dispatch_request` which is called with the view arguments from
    the URL routing system.  If :attr:`methods` is provided the methods
    do not have to be passed to the :meth:`~flask.Flask.add_url_rule`
    method explicitly::

        class MyView(View):
            methods = ['GET']

            def dispatch_request(self, name):
                return 'Hello %s!' % name

        app.add_url_rule('/hello/<name>', view_func=MyView.as_view('myview'))

    When you want to decorate a pluggable view you will have to either do that
    when the view function is created (by wrapping the return value of
    :meth:`as_view`) or you can use the :attr:`decorators` attribute::

        class SecretView(View):
            methods = ['GET']
            decorators = [superuser_required]

            def dispatch_request(self):
                ...

    The decorators stored in the decorators list are applied one after another
    when the view function is created.  Note that you can *not* use the class
    based decorators since those would decorate the view class and not the
    generated view function!
    """

    #: A list of methods this pluggable view can handle.
    methods = None

    #: The canonical way to decorate class-based views is to decorate the
    #: return value of as_view(), since decorating the class itself would
    #: not affect the generated view function.
    #:
    #: You can place one or more decorators in this list and whenever the
    #: view function is created the result is automatically decorated.
    #:
    #: .. versionadded:: 0.8
    decorators = []

    def dispatch_request(self):
        """Subclasses have to override this method to implement the
        actual view function code.  This method is called with all
        the arguments from the URL rule.
        """
        raise NotImplementedError()

    @classmethod
    def as_view(cls, name, *class_args, **class_kwargs):
        """Converts the class into an actual view function that can be used
        with the routing system.  Internally this generates a function on the
        fly which will instantiate the :class:`View` on each request and call
        the :meth:`dispatch_request` method on it.

        The arguments passed to :meth:`as_view` are forwarded to the
        constructor of the class.
        """
        def view(*args, **kwargs):
            self = view.view_class(*class_args, **class_kwargs)
            return self.dispatch_request(*args, **kwargs)

        if cls.decorators:
            # Set the final name/module before decorating so that the
            # decorators see the proper function metadata.
            view.__name__ = name
            view.__module__ = cls.__module__
            for decorator in cls.decorators:
                view = decorator(view)

        # we attach the view class to the view function for two reasons:
        # first of all it allows us to easily figure out what class-based
        # view this thing came from, secondly it's also used for instantiating
        # the view class so you can actually replace it with something else
        # for testing purposes and debugging.
        view.view_class = cls
        view.__name__ = name
        view.__doc__ = cls.__doc__
        view.__module__ = cls.__module__
        view.methods = cls.methods
        return view
class MethodViewType(type):
    """Metaclass that derives a class's ``methods`` list from the HTTP
    handler methods (``get``, ``post``, ...) defined on it."""

    def __new__(cls, name, bases, d):
        new_class = type.__new__(cls, name, bases, d)
        if 'methods' not in d:
            detected = set(key.upper() for key in d if key in http_method_funcs)
            detected.update(new_class.methods or [])
            # Only attach a methods list when at least one handler exists;
            # base classes without handlers keep whatever they inherited.
            if detected:
                new_class.methods = sorted(detected)
        return new_class
class MethodView(with_metaclass(MethodViewType, View)):
    """A class-based view that routes each request to the method named
    after the HTTP verb, lowercased.  Implementing a :meth:`get` method
    means the view responds to ``'GET'`` requests, and the
    :meth:`dispatch_request` implementation forwards the request there
    automatically; :attr:`options` is set for you as well::

        class CounterAPI(MethodView):

            def get(self):
                return session.get('counter', 0)

            def post(self):
                session['counter'] = session.get('counter', 0) + 1
                return 'OK'

        app.add_url_rule('/counter', view_func=CounterAPI.as_view('counter'))
    """

    def dispatch_request(self, *args, **kwargs):
        handler = getattr(self, request.method.lower(), None)

        # A HEAD request without a dedicated handler falls back to GET.
        if handler is None and request.method == 'HEAD':
            handler = getattr(self, 'get', None)

        assert handler is not None, 'Unimplemented method %r' % request.method
        return handler(*args, **kwargs)
| mit |
simontorres/goodman | goodman_pipeline/images/tests/test_image_processor.py | 2 | 1143 | from __future__ import absolute_import
from astropy.io import fits
from unittest import TestCase, skip
from ccdproc import CCDData
from ...core import NightDataContainer
from ..image_processor import ImageProcessor
from ..goodman_ccd import get_args
import numpy as np
class ImageProcessorTest(TestCase):
    """Tests for goodman_pipeline.images.image_processor.ImageProcessor."""

    def setUp(self):
        # Build an ImageProcessor wired to a synthetic night-data container
        # so no real files are needed.
        arguments = ['--saturation_threshold', '1']
        args = get_args(arguments=arguments)
        data_container = NightDataContainer(path='/fake',
                                            instrument='Red',
                                            technique='Spectroscopy')
        self.image_processor = ImageProcessor(args=args,
                                              data_container=data_container)
        # Synthetic 100x100 frame with the minimal header keywords set.
        self.ccd = CCDData(data=np.ones((100, 100)),
                           meta=fits.Header(),
                           unit='adu')
        self.ccd.header.set('INSTCONF', value='Red')
        self.ccd.header.set('GAIN', value=1.48)
        self.ccd.header.set('RDNOISE', value=3.89)
        # NOTE(review): not referenced by test___call__ below -- confirm
        # whether this fixture value is still needed.
        self.half_full_well = 69257

    def test___call__(self):
        # Smoke test: simply invoke the processor on the fake container.
        self.image_processor()
| bsd-3-clause |
tonyrein/podderewski | src/service.py | 1 | 5118 | """
The methods in PodService deal primarily with groups of Feed
"""
from dto import Feed, Episode
from dao import FeedDao, EpisodeDao, init_database
import pd_util
class PodService(object):
    """Service-layer operations that deal primarily with groups of Feed
    objects: subscribing, downloading, renaming and updating them."""

    @classmethod
    def setup(cls):
        """Create the required directories and initialize the database."""
        pd_util.init_dirs()
        init_database()

    @classmethod
    def feed_list_from_names(cls, name_list):
        """Return the Feed objects matching the given names.

        Names with no matching feed are silently skipped.  Returns None
        when name_list is None.
        """
        if name_list is None:
            return None
        ret_list = []
        for s in name_list:
            feed = cls.get_feed_by_name(s)
            if feed:
                ret_list.append(feed)
        return ret_list

    @classmethod
    def get_feeds(cls):
        """Query the database and return a list of Feed (DTO) objects,
        one per stored feed."""
        return Feed.get_feeds()

    @classmethod
    def get_feed_by_name(cls, name):
        """Convenience wrapper around Feed.get_feed_by_name()."""
        return Feed.get_feed_by_name(name)

    @classmethod
    def get_feeds_where(cls, where_clause):
        """Get feeds matching arbitrary conditions supplied in
        'where_clause'.  Not implemented yet."""
        pass

    @classmethod
    def add_feed(cls, feed_url, alt_name=None, episodes_to_keep=None):
        """Add a feed from its URL, optionally overriding its name and
        the number of episodes to keep.  Returns the new Feed."""
        f = Feed.init_from_url(feed_url, alt_name)
        if episodes_to_keep:
            f.number_to_keep = episodes_to_keep
        f.save()
        return f

    @classmethod
    def update_subscribed_feeds(cls, feeds_to_update=None):
        """Update subscribed feeds.

        If feeds_to_update (a list of feed names) is None or empty, every
        subscribed feed is updated; otherwise only the named feeds.
        """
        # Bug fix: the original tested len() of a freshly-created empty
        # local list, so an explicit feeds_to_update argument was always
        # ignored and all feeds were updated regardless.
        if feeds_to_update:
            feed_list = cls.feed_list_from_names(feeds_to_update)
        else:
            feed_list = cls.get_feeds()
        for feed in feed_list:
            if feed.is_subscribed:
                feed.update()

    @classmethod
    def update_all_feeds(cls):
        """Update every subscribed feed."""
        for feed in cls.get_feeds():
            if feed.is_subscribed:
                feed.update()

    @classmethod
    def download(cls, feed_list, overwrite=False, new_only=True):
        """Download episodes.

        If overwrite is True, download even if the episode file already
        exists (default False).  If new_only is True, episodes that have
        already been downloaded are skipped even if their file is no
        longer there (default True).  If feed_list is None or empty, all
        subscribed feeds are downloaded; otherwise only the subscribed
        feeds whose names are in feed_list.
        """
        if feed_list:
            feeds_to_get = cls.feed_list_from_names(feed_list)
        else:
            feeds_to_get = cls.get_feeds()
        for feed in feeds_to_get:
            if feed.is_subscribed:
                feed.download(overwrite, new_only)

    @classmethod
    def _set_subscribe(cls, state=None, feed_list=None):
        """Set is_subscribed to 'state' on all feeds, or only on the feeds
        named in feed_list when one is given."""
        if feed_list:
            feeds_to_set = cls.feed_list_from_names(feed_list)
        else:
            feeds_to_set = cls.get_feeds()
        for feed in feeds_to_set:
            # Only touch the database when the state actually changes.
            if feed.is_subscribed != state:
                feed.is_subscribed = state
                feed.save()

    @classmethod
    def subscribe(cls, feed_list=None):
        """For all feeds, or feeds in the optional list of names, set
        their is_subscribed state to True."""
        cls._set_subscribe(True, feed_list)

    @classmethod
    def unsubscribe(cls, feed_list=None):
        """For all feeds, or feeds in the optional list of names, set
        their is_subscribed state to False."""
        cls._set_subscribe(False, feed_list)

    @classmethod
    def rename_feed(cls, feed_name, new_name):
        """Rename the feed called feed_name to new_name.  No-op when no
        feed by that name exists."""
        feed = cls.get_feed_by_name(feed_name)
        if feed:
            feed.name = new_name
            feed.save()

    @classmethod
    def set_episodes_keep_count(cls, count, feed_list=None):
        """Set number_to_keep on all feeds, or only on the named ones."""
        if feed_list:
            feeds_to_change = cls.feed_list_from_names(feed_list)
        else:
            feeds_to_change = cls.get_feeds()
        for feed in feeds_to_change:
            feed.number_to_keep = count
            feed.save()

    @classmethod
    def change_feed_descriptions(cls, new_description, feed_list=None):
        """Set the description on all feeds, or only on the named ones."""
        if feed_list:
            feeds_to_change = cls.feed_list_from_names(feed_list)
        else:
            feeds_to_change = cls.get_feeds()
        for feed in feeds_to_change:
            feed.description = new_description
            feed.save()
| mit |
rahul67/hue | desktop/core/ext-py/boto-2.38.0/boto/swf/layer1.py | 115 | 63928 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import time
import boto
from boto.connection import AWSAuthConnection
from boto.provider import Provider
from boto.exception import SWFResponseError
from boto.swf import exceptions as swf_exceptions
from boto.compat import json
#
# To get full debug output, uncomment the following line and set the
# value of Debug to be 2
#
#boto.set_stream_logger('swf')

# Module-level debug level; 0 disables verbose wire logging.
Debug = 0
class Layer1(AWSAuthConnection):
"""
Low-level interface to Simple WorkFlow Service.
"""
DefaultRegionName = 'us-east-1'
"""The default region name for Simple Workflow."""
ServiceName = 'com.amazonaws.swf.service.model.SimpleWorkflowService'
"""The name of the Service"""
# In some cases, the fault response __type value is mapped to
# an exception class more specific than SWFResponseError.
_fault_excp = {
'com.amazonaws.swf.base.model#DomainAlreadyExistsFault':
swf_exceptions.SWFDomainAlreadyExistsError,
'com.amazonaws.swf.base.model#LimitExceededFault':
swf_exceptions.SWFLimitExceededError,
'com.amazonaws.swf.base.model#OperationNotPermittedFault':
swf_exceptions.SWFOperationNotPermittedError,
'com.amazonaws.swf.base.model#TypeAlreadyExistsFault':
swf_exceptions.SWFTypeAlreadyExistsError,
'com.amazonaws.swf.base.model#WorkflowExecutionAlreadyStartedFault':
swf_exceptions.SWFWorkflowExecutionAlreadyStartedError,
}
ResponseError = SWFResponseError
    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 debug=0, session_token=None, region=None, profile_name=None):
        # When no region object is supplied, resolve one from the boto
        # config ('SWF'/'region'), falling back to DefaultRegionName.
        if not region:
            region_name = boto.config.get('SWF', 'region',
                                          self.DefaultRegionName)
            for reg in boto.swf.regions():
                if reg.name == region_name:
                    region = reg
                    break

        self.region = region
        super(Layer1, self).__init__(self.region.endpoint,
                                     aws_access_key_id, aws_secret_access_key,
                                     is_secure, port, proxy, proxy_port,
                                     debug, session_token, profile_name=profile_name)
    def _required_auth_capability(self):
        # SWF requests are signed with AWS Signature Version 4.
        return ['hmac-v4']
@classmethod
def _normalize_request_dict(cls, data):
"""
This class method recurses through request data dictionary and removes
any default values.
:type data: dict
:param data: Specifies request parameters with default values to be removed.
"""
for item in list(data.keys()):
if isinstance(data[item], dict):
cls._normalize_request_dict(data[item])
if data[item] in (None, {}):
del data[item]
def json_request(self, action, data, object_hook=None):
"""
This method wraps around make_request() to normalize and serialize the
dictionary with request parameters.
:type action: string
:param action: Specifies an SWF action.
:type data: dict
:param data: Specifies request parameters associated with the action.
"""
self._normalize_request_dict(data)
json_input = json.dumps(data)
return self.make_request(action, json_input, object_hook)
    def make_request(self, action, body='', object_hook=None):
        """Issue one SWF API call and decode the JSON response.

        :raises: ``SWFResponseError`` if response status is not 200.
        """
        # SWF routes the operation via the X-Amz-Target header rather than
        # the URL path.
        headers = {'X-Amz-Target': '%s.%s' % (self.ServiceName, action),
                   'Host': self.region.endpoint,
                   'Content-Type': 'application/json; charset=UTF-8',
                   'Content-Encoding': 'amz-1.0',
                   'Content-Length': str(len(body))}
        http_request = self.build_base_http_request('POST', '/', '/',
                                                    {}, headers, body, None)
        response = self._mexe(http_request, sender=None,
                              override_num_retries=10)
        response_body = response.read().decode('utf-8')
        boto.log.debug(response_body)
        if response.status == 200:
            # Some operations legitimately return an empty body.
            if response_body:
                return json.loads(response_body, object_hook=object_hook)
            else:
                return None
        else:
            json_body = json.loads(response_body)
            fault_name = json_body.get('__type', None)
            # Certain faults get mapped to more specific exception classes.
            excp_cls = self._fault_excp.get(fault_name, self.ResponseError)
            raise excp_cls(response.status, response.reason, body=json_body)
# Actions related to Activities
def poll_for_activity_task(self, domain, task_list, identity=None):
"""
Used by workers to get an ActivityTask from the specified
activity taskList. This initiates a long poll, where the
service holds the HTTP connection open and responds as soon as
a task becomes available. The maximum time the service holds
on to the request before responding is 60 seconds. If no task
is available within 60 seconds, the poll will return an empty
result. An empty result, in this context, means that an
ActivityTask is returned, but that the value of taskToken is
an empty string. If a task is returned, the worker should use
its type to identify and process it correctly.
:type domain: string
:param domain: The name of the domain that contains the task
lists being polled.
:type task_list: string
:param task_list: Specifies the task list to poll for activity tasks.
:type identity: string
:param identity: Identity of the worker making the request, which
is recorded in the ActivityTaskStarted event in the workflow
history. This enables diagnostic tracing when problems arise.
The form of this identity is user defined.
:raises: UnknownResourceFault, SWFOperationNotPermittedError
"""
return self.json_request('PollForActivityTask', {
'domain': domain,
'taskList': {'name': task_list},
'identity': identity,
})
def respond_activity_task_completed(self, task_token, result=None):
"""
Used by workers to tell the service that the ActivityTask
identified by the taskToken completed successfully with a
result (if provided).
:type task_token: string
:param task_token: The taskToken of the ActivityTask.
:type result: string
:param result: The result of the activity task. It is a free
form string that is implementation specific.
:raises: UnknownResourceFault, SWFOperationNotPermittedError
"""
return self.json_request('RespondActivityTaskCompleted', {
'taskToken': task_token,
'result': result,
})
def respond_activity_task_failed(self, task_token,
details=None, reason=None):
"""
Used by workers to tell the service that the ActivityTask
identified by the taskToken has failed with reason (if
specified).
:type task_token: string
:param task_token: The taskToken of the ActivityTask.
:type details: string
:param details: Optional detailed information about the failure.
:type reason: string
:param reason: Description of the error that may assist in diagnostics.
:raises: UnknownResourceFault, SWFOperationNotPermittedError
"""
return self.json_request('RespondActivityTaskFailed', {
'taskToken': task_token,
'details': details,
'reason': reason,
})
def respond_activity_task_canceled(self, task_token, details=None):
"""
Used by workers to tell the service that the ActivityTask
identified by the taskToken was successfully
canceled. Additional details can be optionally provided using
the details argument.
:type task_token: string
:param task_token: The taskToken of the ActivityTask.
:type details: string
:param details: Optional detailed information about the failure.
:raises: UnknownResourceFault, SWFOperationNotPermittedError
"""
return self.json_request('RespondActivityTaskCanceled', {
'taskToken': task_token,
'details': details,
})
def record_activity_task_heartbeat(self, task_token, details=None):
"""
Used by activity workers to report to the service that the
ActivityTask represented by the specified taskToken is still
making progress. The worker can also (optionally) specify
details of the progress, for example percent complete, using
the details parameter. This action can also be used by the
worker as a mechanism to check if cancellation is being
requested for the activity task. If a cancellation is being
attempted for the specified task, then the boolean
cancelRequested flag returned by the service is set to true.
:type task_token: string
:param task_token: The taskToken of the ActivityTask.
:type details: string
:param details: If specified, contains details about the
progress of the task.
:raises: UnknownResourceFault, SWFOperationNotPermittedError
"""
return self.json_request('RecordActivityTaskHeartbeat', {
'taskToken': task_token,
'details': details,
})
# Actions related to Deciders
def poll_for_decision_task(self, domain, task_list, identity=None,
maximum_page_size=None,
next_page_token=None,
reverse_order=None):
"""
Used by deciders to get a DecisionTask from the specified
decision taskList. A decision task may be returned for any
open workflow execution that is using the specified task
list. The task includes a paginated view of the history of the
workflow execution. The decider should use the workflow type
and the history to determine how to properly handle the task.
:type domain: string
:param domain: The name of the domain containing the task
lists to poll.
:type task_list: string
:param task_list: Specifies the task list to poll for decision tasks.
:type identity: string
:param identity: Identity of the decider making the request,
which is recorded in the DecisionTaskStarted event in the
workflow history. This enables diagnostic tracing when
problems arise. The form of this identity is user defined.
:type maximum_page_size: integer :param maximum_page_size: The
maximum number of history events returned in each page. The
default is 100, but the caller can override this value to a
page size smaller than the default. You cannot specify a page
size greater than 100.
:type next_page_token: string
:param next_page_token: If on a previous call to this method a
NextPageToken was returned, the results are being paginated.
To get the next page of results, repeat the call with the
returned token and all other arguments unchanged.
:type reverse_order: boolean
:param reverse_order: When set to true, returns the events in
reverse order. By default the results are returned in
ascending order of the eventTimestamp of the events.
:raises: UnknownResourceFault, SWFOperationNotPermittedError
"""
return self.json_request('PollForDecisionTask', {
'domain': domain,
'taskList': {'name': task_list},
'identity': identity,
'maximumPageSize': maximum_page_size,
'nextPageToken': next_page_token,
'reverseOrder': reverse_order,
})
def respond_decision_task_completed(self, task_token,
decisions=None,
execution_context=None):
"""
Used by deciders to tell the service that the DecisionTask
identified by the taskToken has successfully completed.
The decisions argument specifies the list of decisions
made while processing the task.
:type task_token: string
:param task_token: The taskToken of the ActivityTask.
:type decisions: list
:param decisions: The list of decisions (possibly empty) made by
the decider while processing this decision task. See the docs
for the Decision structure for details.
:type execution_context: string
:param execution_context: User defined context to add to
workflow execution.
:raises: UnknownResourceFault, SWFOperationNotPermittedError
"""
return self.json_request('RespondDecisionTaskCompleted', {
'taskToken': task_token,
'decisions': decisions,
'executionContext': execution_context,
})
def request_cancel_workflow_execution(self, domain, workflow_id,
run_id=None):
"""
Records a WorkflowExecutionCancelRequested event in the
currently running workflow execution identified by the given
domain, workflowId, and runId. This logically requests the
cancellation of the workflow execution as a whole. It is up to
the decider to take appropriate actions when it receives an
execution history with this event.
:type domain: string
:param domain: The name of the domain containing the workflow
execution to cancel.
:type run_id: string
:param run_id: The runId of the workflow execution to cancel.
:type workflow_id: string
:param workflow_id: The workflowId of the workflow execution
to cancel.
:raises: UnknownResourceFault, SWFOperationNotPermittedError
"""
return self.json_request('RequestCancelWorkflowExecution', {
'domain': domain,
'workflowId': workflow_id,
'runId': run_id,
})
def start_workflow_execution(self, domain, workflow_id,
workflow_name, workflow_version,
task_list=None, child_policy=None,
execution_start_to_close_timeout=None,
input=None, tag_list=None,
task_start_to_close_timeout=None):
"""
Starts an execution of the workflow type in the specified
domain using the provided workflowId and input data.
:type domain: string
:param domain: The name of the domain in which the workflow
execution is created.
:type workflow_id: string
:param workflow_id: The user defined identifier associated with
the workflow execution. You can use this to associate a
custom identifier with the workflow execution. You may
specify the same identifier if a workflow execution is
logically a restart of a previous execution. You cannot
have two open workflow executions with the same workflowId
at the same time.
:type workflow_name: string
:param workflow_name: The name of the workflow type.
:type workflow_version: string
:param workflow_version: The version of the workflow type.
:type task_list: string
:param task_list: The task list to use for the decision tasks
generated for this workflow execution. This overrides the
defaultTaskList specified when registering the workflow type.
:type child_policy: string
:param child_policy: If set, specifies the policy to use for the
child workflow executions of this workflow execution if it
is terminated, by calling the TerminateWorkflowExecution
action explicitly or due to an expired timeout. This policy
overrides the default child policy specified when registering
the workflow type using RegisterWorkflowType. The supported
child policies are:
* TERMINATE: the child executions will be terminated.
* REQUEST_CANCEL: a request to cancel will be attempted
for each child execution by recording a
WorkflowExecutionCancelRequested event in its history.
It is up to the decider to take appropriate actions
when it receives an execution history with this event.
* ABANDON: no action will be taken. The child executions
will continue to run.
:type execution_start_to_close_timeout: string
:param execution_start_to_close_timeout: The total duration for
this workflow execution. This overrides the
defaultExecutionStartToCloseTimeout specified when
registering the workflow type.
:type input: string
:param input: The input for the workflow
execution. This is a free form string which should be
meaningful to the workflow you are starting. This input is
made available to the new workflow execution in the
WorkflowExecutionStarted history event.
:type tag_list: list :param tag_list: The list of tags to
associate with the workflow execution. You can specify a
maximum of 5 tags. You can list workflow executions with a
specific tag by calling list_open_workflow_executions or
list_closed_workflow_executions and specifying a TagFilter.
:type task_start_to_close_timeout: string :param
task_start_to_close_timeout: Specifies the maximum duration of
decision tasks for this workflow execution. This parameter
overrides the defaultTaskStartToCloseTimout specified when
registering the workflow type using register_workflow_type.
:raises: UnknownResourceFault, TypeDeprecatedFault,
SWFWorkflowExecutionAlreadyStartedError, SWFLimitExceededError,
SWFOperationNotPermittedError, DefaultUndefinedFault
"""
return self.json_request('StartWorkflowExecution', {
'domain': domain,
'workflowId': workflow_id,
'workflowType': {'name': workflow_name,
'version': workflow_version},
'taskList': {'name': task_list},
'childPolicy': child_policy,
'executionStartToCloseTimeout': execution_start_to_close_timeout,
'input': input,
'tagList': tag_list,
'taskStartToCloseTimeout': task_start_to_close_timeout,
})
def signal_workflow_execution(self, domain, signal_name, workflow_id,
input=None, run_id=None):
"""
Records a WorkflowExecutionSignaled event in the workflow
execution history and creates a decision task for the workflow
execution identified by the given domain, workflowId and
runId. The event is recorded with the specified user defined
signalName and input (if provided).
:type domain: string
:param domain: The name of the domain containing the workflow
execution to signal.
:type signal_name: string
:param signal_name: The name of the signal. This name must be
meaningful to the target workflow.
:type workflow_id: string
:param workflow_id: The workflowId of the workflow execution
to signal.
:type input: string
:param input: Data to attach to the WorkflowExecutionSignaled
event in the target workflow execution's history.
:type run_id: string
:param run_id: The runId of the workflow execution to signal.
:raises: UnknownResourceFault, SWFOperationNotPermittedError
"""
return self.json_request('SignalWorkflowExecution', {
'domain': domain,
'signalName': signal_name,
'workflowId': workflow_id,
'input': input,
'runId': run_id,
})
def terminate_workflow_execution(self, domain, workflow_id,
child_policy=None, details=None,
reason=None, run_id=None):
"""
Records a WorkflowExecutionTerminated event and forces closure
of the workflow execution identified by the given domain,
runId, and workflowId. The child policy, registered with the
workflow type or specified when starting this execution, is
applied to any open child workflow executions of this workflow
execution.
:type domain: string
:param domain: The domain of the workflow execution to terminate.
:type workflow_id: string
:param workflow_id: The workflowId of the workflow execution
to terminate.
:type child_policy: string
:param child_policy: If set, specifies the policy to use for
the child workflow executions of the workflow execution being
terminated. This policy overrides the child policy specified
for the workflow execution at registration time or when
starting the execution. The supported child policies are:
* TERMINATE: the child executions will be terminated.
* REQUEST_CANCEL: a request to cancel will be attempted
for each child execution by recording a
WorkflowExecutionCancelRequested event in its
history. It is up to the decider to take appropriate
actions when it receives an execution history with this
event.
* ABANDON: no action will be taken. The child executions
will continue to run.
:type details: string
:param details: Optional details for terminating the
workflow execution.
:type reason: string
:param reason: An optional descriptive reason for terminating
the workflow execution.
:type run_id: string
:param run_id: The runId of the workflow execution to terminate.
:raises: UnknownResourceFault, SWFOperationNotPermittedError
"""
return self.json_request('TerminateWorkflowExecution', {
'domain': domain,
'workflowId': workflow_id,
'childPolicy': child_policy,
'details': details,
'reason': reason,
'runId': run_id,
})
# Actions related to Administration
## Activity Management
def register_activity_type(self, domain, name, version, task_list=None,
default_task_heartbeat_timeout=None,
default_task_schedule_to_close_timeout=None,
default_task_schedule_to_start_timeout=None,
default_task_start_to_close_timeout=None,
description=None):
"""
Registers a new activity type along with its configuration
settings in the specified domain.
:type domain: string
:param domain: The name of the domain in which this activity is
to be registered.
:type name: string
:param name: The name of the activity type within the domain.
:type version: string
:param version: The version of the activity type.
:type task_list: string
:param task_list: If set, specifies the default task list to
use for scheduling tasks of this activity type. This default
task list is used if a task list is not provided when a task
is scheduled through the schedule_activity_task Decision.
:type default_task_heartbeat_timeout: string
:param default_task_heartbeat_timeout: If set, specifies the
default maximum time before which a worker processing a task
of this type must report progress by calling
RecordActivityTaskHeartbeat. If the timeout is exceeded, the
activity task is automatically timed out. This default can be
overridden when scheduling an activity task using the
ScheduleActivityTask Decision. If the activity worker
subsequently attempts to record a heartbeat or returns a
result, the activity worker receives an UnknownResource
fault. In this case, Amazon SWF no longer considers the
activity task to be valid; the activity worker should clean up
the activity task.no docs
:type default_task_schedule_to_close_timeout: string
:param default_task_schedule_to_close_timeout: If set,
specifies the default maximum duration for a task of this
activity type. This default can be overridden when scheduling
an activity task using the ScheduleActivityTask Decision.no
docs
:type default_task_schedule_to_start_timeout: string
:param default_task_schedule_to_start_timeout: If set,
specifies the default maximum duration that a task of this
activity type can wait before being assigned to a worker. This
default can be overridden when scheduling an activity task
using the ScheduleActivityTask Decision.
:type default_task_start_to_close_timeout: string
:param default_task_start_to_close_timeout: If set, specifies
the default maximum duration that a worker can take to process
tasks of this activity type. This default can be overridden
when scheduling an activity task using the
ScheduleActivityTask Decision.
:type description: string
:param description: A textual description of the activity type.
:raises: SWFTypeAlreadyExistsError, SWFLimitExceededError,
UnknownResourceFault, SWFOperationNotPermittedError
"""
return self.json_request('RegisterActivityType', {
'domain': domain,
'name': name,
'version': version,
'defaultTaskList': {'name': task_list},
'defaultTaskHeartbeatTimeout': default_task_heartbeat_timeout,
'defaultTaskScheduleToCloseTimeout': default_task_schedule_to_close_timeout,
'defaultTaskScheduleToStartTimeout': default_task_schedule_to_start_timeout,
'defaultTaskStartToCloseTimeout': default_task_start_to_close_timeout,
'description': description,
})
def deprecate_activity_type(self, domain, activity_name, activity_version):
"""
Returns information about the specified activity type. This
includes configuration settings provided at registration time
as well as other general information about the type.
:type domain: string
:param domain: The name of the domain in which the activity
type is registered.
:type activity_name: string
:param activity_name: The name of this activity.
:type activity_version: string
:param activity_version: The version of this activity.
:raises: UnknownResourceFault, TypeDeprecatedFault,
SWFOperationNotPermittedError
"""
return self.json_request('DeprecateActivityType', {
'domain': domain,
'activityType': {'name': activity_name,
'version': activity_version}
})
## Workflow Management
def register_workflow_type(self, domain, name, version,
task_list=None,
default_child_policy=None,
default_execution_start_to_close_timeout=None,
default_task_start_to_close_timeout=None,
description=None):
"""
Registers a new workflow type and its configuration settings
in the specified domain.
:type domain: string
:param domain: The name of the domain in which to register
the workflow type.
:type name: string
:param name: The name of the workflow type.
:type version: string
:param version: The version of the workflow type.
:type task_list: list of name, version of tasks
:param task_list: If set, specifies the default task list to use
for scheduling decision tasks for executions of this workflow
type. This default is used only if a task list is not provided
when starting the execution through the StartWorkflowExecution
Action or StartChildWorkflowExecution Decision.
:type default_child_policy: string
:param default_child_policy: If set, specifies the default
policy to use for the child workflow executions when a
workflow execution of this type is terminated, by calling the
TerminateWorkflowExecution action explicitly or due to an
expired timeout. This default can be overridden when starting
a workflow execution using the StartWorkflowExecution action
or the StartChildWorkflowExecution Decision. The supported
child policies are:
* TERMINATE: the child executions will be terminated.
* REQUEST_CANCEL: a request to cancel will be attempted
for each child execution by recording a
WorkflowExecutionCancelRequested event in its
history. It is up to the decider to take appropriate
actions when it receives an execution history with this
event.
* ABANDON: no action will be taken. The child executions
will continue to run.no docs
:type default_execution_start_to_close_timeout: string
:param default_execution_start_to_close_timeout: If set,
specifies the default maximum duration for executions of this
workflow type. You can override this default when starting an
execution through the StartWorkflowExecution Action or
StartChildWorkflowExecution Decision.
:type default_task_start_to_close_timeout: string
:param default_task_start_to_close_timeout: If set, specifies
the default maximum duration of decision tasks for this
workflow type. This default can be overridden when starting a
workflow execution using the StartWorkflowExecution action or
the StartChildWorkflowExecution Decision.
:type description: string
:param description: Textual description of the workflow type.
:raises: SWFTypeAlreadyExistsError, SWFLimitExceededError,
UnknownResourceFault, SWFOperationNotPermittedError
"""
return self.json_request('RegisterWorkflowType', {
'domain': domain,
'name': name,
'version': version,
'defaultTaskList': {'name': task_list},
'defaultChildPolicy': default_child_policy,
'defaultExecutionStartToCloseTimeout': default_execution_start_to_close_timeout,
'defaultTaskStartToCloseTimeout': default_task_start_to_close_timeout,
'description': description,
})
def deprecate_workflow_type(self, domain, workflow_name, workflow_version):
"""
Deprecates the specified workflow type. After a workflow type
has been deprecated, you cannot create new executions of that
type. Executions that were started before the type was
deprecated will continue to run. A deprecated workflow type
may still be used when calling visibility actions.
:type domain: string
:param domain: The name of the domain in which the workflow
type is registered.
:type workflow_name: string
:param workflow_name: The name of the workflow type.
:type workflow_version: string
:param workflow_version: The version of the workflow type.
:raises: UnknownResourceFault, TypeDeprecatedFault,
SWFOperationNotPermittedError
"""
return self.json_request('DeprecateWorkflowType', {
'domain': domain,
'workflowType': {'name': workflow_name,
'version': workflow_version},
})
## Domain Management
def register_domain(self, name,
workflow_execution_retention_period_in_days,
description=None):
"""
Registers a new domain.
:type name: string
:param name: Name of the domain to register. The name must be unique.
:type workflow_execution_retention_period_in_days: string
:param workflow_execution_retention_period_in_days: Specifies
the duration *in days* for which the record (including the
history) of workflow executions in this domain should be kept
by the service. After the retention period, the workflow
execution will not be available in the results of visibility
calls. If a duration of NONE is specified, the records for
workflow executions in this domain are not retained at all.
:type description: string
:param description: Textual description of the domain.
:raises: SWFDomainAlreadyExistsError, SWFLimitExceededError,
SWFOperationNotPermittedError
"""
return self.json_request('RegisterDomain', {
'name': name,
'workflowExecutionRetentionPeriodInDays': workflow_execution_retention_period_in_days,
'description': description,
})
def deprecate_domain(self, name):
"""
Deprecates the specified domain. After a domain has been
deprecated it cannot be used to create new workflow executions
or register new types. However, you can still use visibility
actions on this domain. Deprecating a domain also deprecates
all activity and workflow types registered in the
domain. Executions that were started before the domain was
deprecated will continue to run.
:type name: string
:param name: The name of the domain to deprecate.
:raises: UnknownResourceFault, DomainDeprecatedFault,
SWFOperationNotPermittedError
"""
return self.json_request('DeprecateDomain', {'name': name})
# Visibility Actions
## Activity Visibility
def list_activity_types(self, domain, registration_status,
name=None,
maximum_page_size=None,
next_page_token=None, reverse_order=None):
"""
Returns information about all activities registered in the
specified domain that match the specified name and
registration status. The result includes information like
creation date, current status of the activity, etc. The
results may be split into multiple pages. To retrieve
subsequent pages, make the call again using the nextPageToken
returned by the initial call.
:type domain: string
:param domain: The name of the domain in which the activity
types have been registered.
:type registration_status: string
:param registration_status: Specifies the registration status
of the activity types to list. Valid values are:
* REGISTERED
* DEPRECATED
:type name: string
:param name: If specified, only lists the activity types that
have this name.
:type maximum_page_size: integer
:param maximum_page_size: The maximum number of results
returned in each page. The default is 100, but the caller can
override this value to a page size smaller than the
default. You cannot specify a page size greater than 100.
:type next_page_token: string
:param next_page_token: If on a previous call to this method a
NextResultToken was returned, the results have more than one
page. To get the next page of results, repeat the call with
the nextPageToken and keep all other arguments unchanged.
:type reverse_order: boolean
:param reverse_order: When set to true, returns the results in
reverse order. By default the results are returned in
ascending alphabetical order of the name of the activity
types.
:raises: SWFOperationNotPermittedError, UnknownResourceFault
"""
return self.json_request('ListActivityTypes', {
'domain': domain,
'name': name,
'registrationStatus': registration_status,
'maximumPageSize': maximum_page_size,
'nextPageToken': next_page_token,
'reverseOrder': reverse_order,
})
def describe_activity_type(self, domain, activity_name, activity_version):
"""
Returns information about the specified activity type. This
includes configuration settings provided at registration time
as well as other general information about the type.
:type domain: string
:param domain: The name of the domain in which the activity
type is registered.
:type activity_name: string
:param activity_name: The name of this activity.
:type activity_version: string
:param activity_version: The version of this activity.
:raises: UnknownResourceFault, SWFOperationNotPermittedError
"""
return self.json_request('DescribeActivityType', {
'domain': domain,
'activityType': {'name': activity_name,
'version': activity_version}
})
## Workflow Visibility
def list_workflow_types(self, domain, registration_status,
maximum_page_size=None, name=None,
next_page_token=None, reverse_order=None):
"""
Returns information about workflow types in the specified
domain. The results may be split into multiple pages that can
be retrieved by making the call repeatedly.
:type domain: string
:param domain: The name of the domain in which the workflow
types have been registered.
:type registration_status: string
:param registration_status: Specifies the registration status
of the activity types to list. Valid values are:
* REGISTERED
* DEPRECATED
:type name: string
:param name: If specified, lists the workflow type with this name.
:type maximum_page_size: integer
:param maximum_page_size: The maximum number of results
returned in each page. The default is 100, but the caller can
override this value to a page size smaller than the
default. You cannot specify a page size greater than 100.
:type next_page_token: string
:param next_page_token: If on a previous call to this method a
NextPageToken was returned, the results are being
paginated. To get the next page of results, repeat the call
with the returned token and all other arguments unchanged.
:type reverse_order: boolean
:param reverse_order: When set to true, returns the results in
reverse order. By default the results are returned in
ascending alphabetical order of the name of the workflow
types.
:raises: SWFOperationNotPermittedError, UnknownResourceFault
"""
return self.json_request('ListWorkflowTypes', {
'domain': domain,
'name': name,
'registrationStatus': registration_status,
'maximumPageSize': maximum_page_size,
'nextPageToken': next_page_token,
'reverseOrder': reverse_order,
})
def describe_workflow_type(self, domain, workflow_name, workflow_version):
"""
Returns information about the specified workflow type. This
includes configuration settings specified when the type was
registered and other information such as creation date,
current status, etc.
:type domain: string
:param domain: The name of the domain in which this workflow
type is registered.
:type workflow_name: string
:param workflow_name: The name of the workflow type.
:type workflow_version: string
:param workflow_version: The version of the workflow type.
:raises: UnknownResourceFault, SWFOperationNotPermittedError
"""
return self.json_request('DescribeWorkflowType', {
'domain': domain,
'workflowType': {'name': workflow_name,
'version': workflow_version}
})
## Workflow Execution Visibility
def describe_workflow_execution(self, domain, run_id, workflow_id):
"""
Returns information about the specified workflow execution
including its type and some statistics.
:type domain: string
:param domain: The name of the domain containing the
workflow execution.
:type run_id: string
:param run_id: A system generated unique identifier for the
workflow execution.
:type workflow_id: string
:param workflow_id: The user defined identifier associated
with the workflow execution.
:raises: UnknownResourceFault, SWFOperationNotPermittedError
"""
return self.json_request('DescribeWorkflowExecution', {
'domain': domain,
'execution': {'runId': run_id,
'workflowId': workflow_id},
})
def get_workflow_execution_history(self, domain, run_id, workflow_id,
maximum_page_size=None,
next_page_token=None,
reverse_order=None):
"""
Returns the history of the specified workflow execution. The
results may be split into multiple pages. To retrieve
subsequent pages, make the call again using the nextPageToken
returned by the initial call.
:type domain: string
:param domain: The name of the domain containing the
workflow execution.
:type run_id: string
:param run_id: A system generated unique identifier for the
workflow execution.
:type workflow_id: string
:param workflow_id: The user defined identifier associated
with the workflow execution.
:type maximum_page_size: integer
:param maximum_page_size: Specifies the maximum number of
history events returned in one page. The next page in the
result is identified by the NextPageToken returned. By default
100 history events are returned in a page but the caller can
override this value to a page size smaller than the
default. You cannot specify a page size larger than 100.
:type next_page_token: string
:param next_page_token: If a NextPageToken is returned, the
result has more than one pages. To get the next page, repeat
the call and specify the nextPageToken with all other
arguments unchanged.
:type reverse_order: boolean
:param reverse_order: When set to true, returns the events in
reverse order. By default the results are returned in
ascending order of the eventTimeStamp of the events.
:raises: UnknownResourceFault, SWFOperationNotPermittedError
"""
return self.json_request('GetWorkflowExecutionHistory', {
'domain': domain,
'execution': {'runId': run_id,
'workflowId': workflow_id},
'maximumPageSize': maximum_page_size,
'nextPageToken': next_page_token,
'reverseOrder': reverse_order,
})
def count_open_workflow_executions(self, domain, latest_date, oldest_date,
tag=None,
workflow_id=None,
workflow_name=None,
workflow_version=None):
"""
Returns the number of open workflow executions within the
given domain that meet the specified filtering criteria.
.. note:
workflow_id, workflow_name/workflow_version and tag are mutually
exclusive. You can specify at most one of these in a request.
:type domain: string
:param domain: The name of the domain containing the
workflow executions to count.
:type latest_date: timestamp
:param latest_date: Specifies the latest start or close date
and time to return.
:type oldest_date: timestamp
:param oldest_date: Specifies the oldest start or close date
and time to return.
:type workflow_name: string
:param workflow_name: Name of the workflow type to filter on.
:type workflow_version: string
:param workflow_version: Version of the workflow type to filter on.
:type tag: string
:param tag: If specified, only executions that have a tag
that matches the filter are counted.
:type workflow_id: string
:param workflow_id: If specified, only workflow executions
matching the workflow_id are counted.
:raises: UnknownResourceFault, SWFOperationNotPermittedError
"""
return self.json_request('CountOpenWorkflowExecutions', {
'domain': domain,
'startTimeFilter': {'oldestDate': oldest_date,
'latestDate': latest_date},
'typeFilter': {'name': workflow_name,
'version': workflow_version},
'executionFilter': {'workflowId': workflow_id},
'tagFilter': {'tag': tag},
})
def list_open_workflow_executions(self, domain,
oldest_date,
latest_date=None,
tag=None,
workflow_id=None,
workflow_name=None,
workflow_version=None,
maximum_page_size=None,
next_page_token=None,
reverse_order=None):
"""
Returns the list of open workflow executions within the
given domain that meet the specified filtering criteria.
.. note:
workflow_id, workflow_name/workflow_version
and tag are mutually exclusive. You can specify at most
one of these in a request.
:type domain: string
:param domain: The name of the domain containing the
workflow executions to count.
:type latest_date: timestamp
:param latest_date: Specifies the latest start or close date
and time to return.
:type oldest_date: timestamp
:param oldest_date: Specifies the oldest start or close date
and time to return.
:type tag: string
:param tag: If specified, only executions that have a tag
that matches the filter are counted.
:type workflow_id: string
:param workflow_id: If specified, only workflow executions
matching the workflow_id are counted.
:type workflow_name: string
:param workflow_name: Name of the workflow type to filter on.
:type workflow_version: string
:param workflow_version: Version of the workflow type to filter on.
:type maximum_page_size: integer
:param maximum_page_size: The maximum number of results
returned in each page. The default is 100, but the caller can
override this value to a page size smaller than the
default. You cannot specify a page size greater than 100.
:type next_page_token: string
:param next_page_token: If on a previous call to this method a
NextPageToken was returned, the results are being
paginated. To get the next page of results, repeat the call
with the returned token and all other arguments unchanged.
:type reverse_order: boolean
:param reverse_order: When set to true, returns the results in
reverse order. By default the results are returned in
descending order of the start or the close time of the
executions.
:raises: UnknownResourceFault, SWFOperationNotPermittedError
"""
return self.json_request('ListOpenWorkflowExecutions', {
'domain': domain,
'startTimeFilter': {'oldestDate': oldest_date,
'latestDate': latest_date},
'tagFilter': {'tag': tag},
'typeFilter': {'name': workflow_name,
'version': workflow_version},
'executionFilter': {'workflowId': workflow_id},
'maximumPageSize': maximum_page_size,
'nextPageToken': next_page_token,
'reverseOrder': reverse_order,
})
def count_closed_workflow_executions(self, domain,
                                     start_latest_date=None,
                                     start_oldest_date=None,
                                     close_latest_date=None,
                                     close_oldest_date=None,
                                     close_status=None,
                                     tag=None,
                                     workflow_id=None,
                                     workflow_name=None,
                                     workflow_version=None):
    """
    Returns the number of closed workflow executions within the
    given domain that meet the specified filtering criteria.

    .. note:
        close_status, workflow_id, workflow_name/workflow_version
        and tag are mutually exclusive. You can specify at most
        one of these in a request.

    .. note:
        start_latest_date/start_oldest_date and
        close_latest_date/close_oldest_date are mutually
        exclusive. You can specify at most one of these in a request.

    :type domain: string
    :param domain: The name of the domain containing the
        workflow executions to count.

    :type start_latest_date: timestamp
    :param start_latest_date: Latest workflow start time to count.

    :type start_oldest_date: timestamp
    :param start_oldest_date: Oldest workflow start time to count.

    :type close_latest_date: timestamp
    :param close_latest_date: Latest workflow close time to count.

    :type close_oldest_date: timestamp
    :param close_oldest_date: Oldest workflow close time to count.

    :type close_status: string
    :param close_status: Only count executions whose close status
        matches. Valid values are: COMPLETED, FAILED, CANCELED,
        TERMINATED, CONTINUED_AS_NEW, TIMED_OUT.

    :type tag: string
    :param tag: If specified, only executions that have a tag
        that matches the filter are counted.

    :type workflow_id: string
    :param workflow_id: If specified, only workflow executions
        matching the workflow_id are counted.

    :type workflow_name: string
    :param workflow_name: Name of the workflow type to filter on.

    :type workflow_version: string
    :param workflow_version: Version of the workflow type to filter on.

    :raises: UnknownResourceFault, SWFOperationNotPermittedError
    """
    # Assemble the SWF request body; unspecified filters are sent with
    # None values, mirroring the service call shape.
    request = {
        'domain': domain,
        'startTimeFilter': {'oldestDate': start_oldest_date,
                            'latestDate': start_latest_date},
        'closeTimeFilter': {'oldestDate': close_oldest_date,
                            'latestDate': close_latest_date},
        'closeStatusFilter': {'status': close_status},
        'tagFilter': {'tag': tag},
        'typeFilter': {'name': workflow_name,
                       'version': workflow_version},
        'executionFilter': {'workflowId': workflow_id},
    }
    return self.json_request('CountClosedWorkflowExecutions', request)
def list_closed_workflow_executions(self, domain,
                                    start_latest_date=None,
                                    start_oldest_date=None,
                                    close_latest_date=None,
                                    close_oldest_date=None,
                                    close_status=None,
                                    tag=None,
                                    workflow_id=None,
                                    workflow_name=None,
                                    workflow_version=None,
                                    maximum_page_size=None,
                                    next_page_token=None,
                                    reverse_order=None):
    """
    Returns the list of closed workflow executions within the
    given domain that meet the specified filtering criteria.

    (The previous docstring said "Returns the number of ..." — that text
    was copied from count_closed_workflow_executions; this method lists
    the executions, one page of results at a time.)

    .. note:
        close_status, workflow_id, workflow_name/workflow_version
        and tag are mutually exclusive. You can specify at most
        one of these in a request.

    .. note:
        start_latest_date/start_oldest_date and
        close_latest_date/close_oldest_date are mutually
        exclusive. You can specify at most one of these in a request.

    :type domain: string
    :param domain: The name of the domain containing the
        workflow executions to list.

    :type start_latest_date: timestamp
    :param start_latest_date: Latest workflow start time to list.

    :type start_oldest_date: timestamp
    :param start_oldest_date: Oldest workflow start time to list.

    :type close_latest_date: timestamp
    :param close_latest_date: Latest workflow close time to list.

    :type close_oldest_date: timestamp
    :param close_oldest_date: Oldest workflow close time to list.

    :type close_status: string
    :param close_status: Only list executions whose close status
        matches. Valid values are: COMPLETED, FAILED, CANCELED,
        TERMINATED, CONTINUED_AS_NEW, TIMED_OUT.

    :type tag: string
    :param tag: If specified, only executions that have a tag
        that matches the filter are listed.

    :type workflow_id: string
    :param workflow_id: If specified, only workflow executions
        matching the workflow_id are listed.

    :type workflow_name: string
    :param workflow_name: Name of the workflow type to filter on.

    :type workflow_version: string
    :param workflow_version: Version of the workflow type to filter on.

    :type maximum_page_size: integer
    :param maximum_page_size: The maximum number of results
        returned in each page. The default is 100, but the caller can
        override this value to a page size smaller than the
        default. You cannot specify a page size greater than 100.

    :type next_page_token: string
    :param next_page_token: If on a previous call to this method a
        NextPageToken was returned, the results are being
        paginated. To get the next page of results, repeat the call
        with the returned token and all other arguments unchanged.

    :type reverse_order: boolean
    :param reverse_order: When set to true, returns the results in
        reverse order. By default the results are returned in
        descending order of the start or the close time of the
        executions.

    :raises: UnknownResourceFault, SWFOperationNotPermittedError
    """
    # Assemble the SWF request body; unspecified filters are sent with
    # None values, mirroring the service call shape.
    request = {
        'domain': domain,
        'startTimeFilter': {'oldestDate': start_oldest_date,
                            'latestDate': start_latest_date},
        'closeTimeFilter': {'oldestDate': close_oldest_date,
                            'latestDate': close_latest_date},
        'executionFilter': {'workflowId': workflow_id},
        'closeStatusFilter': {'status': close_status},
        'tagFilter': {'tag': tag},
        'typeFilter': {'name': workflow_name,
                       'version': workflow_version},
        'maximumPageSize': maximum_page_size,
        'nextPageToken': next_page_token,
        'reverseOrder': reverse_order,
    }
    return self.json_request('ListClosedWorkflowExecutions', request)
## Domain Visibility
def list_domains(self, registration_status,
                 maximum_page_size=None,
                 next_page_token=None, reverse_order=None):
    """
    Returns the list of domains registered in the account. The
    results may be split into multiple pages; to retrieve subsequent
    pages, repeat the call with the nextPageToken returned by the
    previous call.

    :type registration_status: string
    :param registration_status: Specifies the registration status
        of the domains to list. Valid Values: REGISTERED, DEPRECATED.

    :type maximum_page_size: integer
    :param maximum_page_size: The maximum number of results
        returned in each page. The default is 100, but the caller can
        override this value to a page size smaller than the
        default. You cannot specify a page size greater than 100.

    :type next_page_token: string
    :param next_page_token: If on a previous call to this method a
        NextPageToken was returned, the result has more than one
        page. To get the next page of results, repeat the call with
        the returned token and all other arguments unchanged.

    :type reverse_order: boolean
    :param reverse_order: When set to true, returns the results in
        reverse order. By default the results are returned in
        ascending alphabetical order of the name of the domains.

    :raises: SWFOperationNotPermittedError
    """
    request = {'registrationStatus': registration_status,
               'maximumPageSize': maximum_page_size,
               'nextPageToken': next_page_token,
               'reverseOrder': reverse_order}
    return self.json_request('ListDomains', request)
def describe_domain(self, name):
    """
    Returns information about the specified domain, including its
    description and status.

    :type name: string
    :param name: The name of the domain to describe.

    :raises: UnknownResourceFault, SWFOperationNotPermittedError
    """
    request = {'name': name}
    return self.json_request('DescribeDomain', request)
## Task List Visibility
def count_pending_decision_tasks(self, domain, task_list):
    """
    Returns the estimated number of decision tasks in the given task
    list. The count is an approximation and is not guaranteed to be
    exact; a task list in which no decision task was ever scheduled
    yields 0.

    :type domain: string
    :param domain: The name of the domain that contains the task list.

    :type task_list: string
    :param task_list: The name of the task list.

    :raises: UnknownResourceFault, SWFOperationNotPermittedError
    """
    request = {'domain': domain,
               'taskList': {'name': task_list}}
    return self.json_request('CountPendingDecisionTasks', request)
def count_pending_activity_tasks(self, domain, task_list):
    """
    Returns the estimated number of activity tasks in the given task
    list. The count is an approximation and is not guaranteed to be
    exact; a task list in which no activity task was ever scheduled
    yields 0.

    :type domain: string
    :param domain: The name of the domain that contains the task list.

    :type task_list: string
    :param task_list: The name of the task list.

    :raises: UnknownResourceFault, SWFOperationNotPermittedError
    """
    request = {'domain': domain,
               'taskList': {'name': task_list}}
    return self.json_request('CountPendingActivityTasks', request)
| apache-2.0 |
lyarwood/sosreport | sos/plugins/squid.py | 11 | 2034 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from sos.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
class Squid(Plugin):
    """Squid caching proxy
    """
    # NOTE(review): the first docstring line presumably doubles as the
    # plugin description shown to sosreport users — confirm before editing.

    # Identifier under which sosreport registers this plugin.
    plugin_name = 'squid'
    # Collection profiles this plugin participates in.
    profiles = ('webserver', 'services', 'sysmgmt')
class RedHatSquid(Squid, RedHatPlugin):
    # Trigger files/packages: the plugin activates when either is present.
    files = ('/etc/squid/squid.conf',)
    packages = ('squid',)

    def setup(self):
        """Collect the squid config plus size-limited logs on Red Hat."""
        size_limit = self.get_option('log_size')
        self.add_copy_spec("/etc/squid/squid.conf")
        # Same three files the original requested, in the same order.
        for log_name in ("access.log", "cache.log", "squid.out"):
            self.add_copy_spec_limit("/var/log/squid/" + log_name,
                                     sizelimit=size_limit)
class DebianSquid(Squid, DebianPlugin, UbuntuPlugin):
    plugin_name = 'squid'
    # Trigger files/packages for Debian/Ubuntu (squid3 layout).
    files = ('/etc/squid3/squid.conf',)
    packages = ('squid3',)

    def setup(self):
        """Collect squid3 and squid-deb-proxy config and logs."""
        size_limit = self.get_option('log_size')
        self.add_copy_spec_limit("/etc/squid3/squid.conf",
                                 sizelimit=size_limit)
        self.add_copy_spec_limit("/var/log/squid3/*",
                                 sizelimit=size_limit)
        self.add_copy_spec(['/etc/squid-deb-proxy'])
        self.add_copy_spec_limit("/var/log/squid-deb-proxy/*",
                                 sizelimit=size_limit)
# vim: set et ts=4 sw=4 :
| gpl-2.0 |
florentx/OpenUpgrade | addons/survey/test/__init__.py | 118 | 1043 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Import the survey test module and expose it through ``checks`` so the
# test framework can discover and run it.
from . import test_survey

# Modules whose tests should be executed for this addon.
checks = [
    test_survey
]
| agpl-3.0 |
JeremiasE/KFormula | kspread/plugins/scripting/scripts/functions.py | 3 | 3372 | #!/usr/bin/env kross
"""
Python script that provides additional formula-functions
to KSpread.
The script could be used in two ways;
1. Embedded in KSpread by execution via the "Tools=>Scripts"
menu or from the "Tools=>Script Manager". The functions.py
script may not be visible if not installed. So, for testing
better use the second way;
2. Run KSpread with;
# make the script executable
chmod 755 `kde4-config --install data`/kspread/scripts/functions/functions.py
# run KSpread
kspread --scriptfile `kde4-config --install data`/kspread/scripts/functions/functions.py
(C)2006 Sebastian Sauer <mail@dipe.org>
http://kross.dipe.org
http://www.koffice.org/kspread
Dual-licensed under LGPL v2+higher and the BSD license.
"""
class Functions:
    """ The Functions class adds some KSpread formula functions on
    the fly and provides them to KSpread. """

    def __init__(self, scriptaction):
        """ Import the Kross and KSpread functionality, working both when
        embedded in KSpread and when run standalone via the kross
        interpreter, then register the demo formula functions.

        :param scriptaction: the Kross script action this script runs as.
        """
        import os, sys
        try:
            import Kross
        except ImportError:
            # BUGFIX: the original used a string exception
            # (``raise "..."``), which is invalid since Python 2.6 and
            # raises a TypeError instead of the intended message.
            raise ImportError("Failed to import the Kross module.")

        self.scriptaction = scriptaction
        #self.currentpath = self.scriptaction.currentPath()

        # True when this script runs inside KSpread itself (the KSpread
        # module is importable directly); False when run via kross.
        self.embeddedInKSpread = False
        try:
            import KSpread
            self.kspread = KSpread
            self.embeddedInKSpread = True
        except ImportError:
            try:
                import Kross
                self.kspread = Kross.module("kspread")
            except ImportError:
                # Same string-exception fix as above.
                raise ImportError(
                    "Failed to import the KSpread Kross module. "
                    "Please run this script with \"kross thisscriptfile.py\"")

        self.addTestFunctions()

    def addTestFunctions(self):
        """ Register a demo scripted formula function with KSpread. """
        # create the new formula function "SCRIPT_TEST1"
        functest1 = self.kspread.function("SCRIPT_TEST1")
        # set the minimal number of parameters
        functest1.minparam = 1
        # set the maximal number of parameters, -1 means unlimited.
        functest1.maxparam = 1
        # Set the comment displayed at "Help".
        # BUGFIX: the original adjacent string literals concatenated
        # without separating spaces ("scriptingfunctions", "andreturn");
        # trailing spaces added so the help text reads correctly.
        functest1.comment = (
            "The SCRIPT_TEST1() function demonstrates how to use scripting "
            "functions. All it does is to take a string as argument and "
            "return the same string."
        )
        # set the syntax which is displayed at "Help".
        functest1.syntax = "SCRIPT_TEST1(string)"
        # set details about the parameter the formula functions expects.
        functest1.addParameter("String", "The string that should be returned")
        # add an example displayed at "Help".
        functest1.addExample("SCRIPT_TEST1(\"Some string\")")

        # this python function will be called by the KSpread formula function
        def functest1callback(argument):
            # just return the first argument
            functest1.result = "%s" % argument[0]

        # connect the python function with the KSpread formula function
        functest1.connect("called(QVariantList)", functest1callback)
        # and finally register the function to be able to use it within KSpread
        functest1.registerFunction()
Functions( self )
| gpl-2.0 |
sathnaga/virt-test | tools/koji_pkgspec.py | 5 | 3995 | #!/usr/bin/env python
'''
This is a tool for that makes it easy to understand what a given KojiPkgSpec
syntax will expand to.
The main use case is making sure the packages specified in a KojiInstaller
will match the packages you intended to install.
'''
import sys, optparse
import common
from virttest import utils_koji, cartesian_config
class OptionParser(optparse.OptionParser):
    '''
    KojiPkgSpec App option parser.

    Defines the general options (--arch, --tag) and the cartesian-config
    option (--config) used to fetch package values.
    '''
    def __init__(self):
        optparse.OptionParser.__init__(self,
                                       usage=('Usage: %prog [options] '
                                              '[koji-pkg-spec]'))

        general = optparse.OptionGroup(self, 'GENERAL OPTIONS')
        general.add_option('-a', '--arch', dest='arch', default='x86_64',
                           help=('architecture of packages to list, together '
                                 'with "noarch". defaults to "x86_64"'))
        general.add_option('-t', '--tag', dest='tag', help='default koji tag')
        self.add_option_group(general)

        # BUGFIX: the original named this local ``cartesian_config``,
        # shadowing the imported ``cartesian_config`` module.
        config_group = optparse.OptionGroup(self, 'CARTESIAN CONFIG')
        config_group.add_option('-c', '--config', dest='config',
                                help=('use a cartesian configuration file '
                                      'for fetching package values'))
        self.add_option_group(config_group)
class App:
    '''
    KojiPkgSpec app.

    Parses the command line (or a cartesian config file), expands each
    koji package specification and prints the package name plus the RPM
    file names it resolves to.
    '''
    def __init__(self):
        self.opt_parser = OptionParser()

    def usage(self):
        # Print help to stdout and exit with status 1 to signal bad usage.
        self.opt_parser.print_help()
        sys.exit(1)

    def parse_cmdline(self):
        '''Parse options and arguments; bail out with usage when no
        package spec and no config file were given.'''
        self.options, self.args = self.opt_parser.parse_args()

        # Check for a control file if not in prebuild mode.
        if (len(self.args) < 1) and not self.options.config:
            print "Missing Package Specification!"
            self.usage()

    def get_koji_qemu_kvm_tag_pkgs(self, config_file):
        '''Return (tag, pkgs) read from a cartesian config file.

        Keeps the first value seen for 'koji_qemu_kvm_tag' and
        'koji_qemu_kvm_pkgs' across the generated dicts, stopping early
        once both have been found. Either may be None if absent.
        '''
        tag = None
        pkgs = None
        parser = cartesian_config.Parser(config_file)
        for d in parser.get_dicts():
            if tag is not None and pkgs is not None:
                break
            if d.has_key('koji_qemu_kvm_tag'):
                if tag is None:
                    tag = d.get('koji_qemu_kvm_tag')
            if d.has_key('koji_qemu_kvm_pkgs'):
                if pkgs is None:
                    pkgs = d.get('koji_qemu_kvm_pkgs')
        return (tag, pkgs)

    def check_koji_pkg_spec(self, koji_pkg_spec):
        '''Exit with status -1 when the package specification is invalid.'''
        if not koji_pkg_spec.is_valid():
            print 'ERROR:', koji_pkg_spec.describe_invalid()
            sys.exit(-1)

    def print_koji_pkg_spec_info(self, koji_pkg_spec):
        '''Print the package name and the RPM file names the spec
        resolves to for the selected architecture.'''
        info = self.koji_client.get_pkg_info(koji_pkg_spec)
        if not info:
            print 'ERROR: could not find info about "%s"' % koji_pkg_spec.to_text()
            return

        name = info.get('name', 'unknown')
        pkgs = self.koji_client.get_pkg_rpm_file_names(koji_pkg_spec,
                                                       arch=self.options.arch)
        print 'Package name: %s' % name
        print 'Package files:'
        for p in pkgs:
            print '\t* %s' % p
        print

    def main(self):
        '''Entry point: resolve the package specs from the command line
        or the config file and print their expansion.'''
        self.parse_cmdline()
        self.koji_client = utils_koji.KojiClient()
        pkgs = []

        # An explicit --tag overrides any tag found in the config file.
        if self.options.tag:
            utils_koji.set_default_koji_tag(self.options.tag)

        if self.options.config:
            tag, pkgs = self.get_koji_qemu_kvm_tag_pkgs(self.options.config)
            if tag is not None:
                utils_koji.set_default_koji_tag(tag)
            if pkgs is not None:
                pkgs = pkgs.split()
        else:
            pkgs = self.args

        if pkgs:
            for p in pkgs:
                koji_pkg_spec = utils_koji.KojiPkgSpec(p)
                self.check_koji_pkg_spec(koji_pkg_spec)
                self.print_koji_pkg_spec_info(koji_pkg_spec)
# Script entry point: build the app and run it.
if __name__ == '__main__':
    app = App()
    app.main()
| gpl-2.0 |
openshift/openshift-tools | openshift/installer/vendored/openshift-ansible-3.10.0-0.29.0/roles/lib_openshift/src/lib/service.py | 50 | 5741 | # pylint: skip-file
# flake8: noqa
# pylint: disable=too-many-instance-attributes
class ServiceConfig(object):
    ''' Collects the options for a Kubernetes/OpenShift v1 Service and
    assembles them into the resource dict exposed as ``self.data``. '''

    # pylint: disable=too-many-arguments
    def __init__(self,
                 sname,
                 namespace,
                 ports,
                 annotations=None,
                 selector=None,
                 labels=None,
                 cluster_ip=None,
                 portal_ip=None,
                 session_affinity=None,
                 service_type=None,
                 external_ips=None):
        ''' Store the supplied service options and build the dict. '''
        self.name = sname
        self.namespace = namespace
        self.ports = ports
        self.annotations = annotations
        self.selector = selector
        self.labels = labels
        self.cluster_ip = cluster_ip
        self.portal_ip = portal_ip
        self.session_affinity = session_affinity
        self.service_type = service_type
        self.external_ips = external_ips
        self.data = {}

        self.create_dict()

    def create_dict(self):
        ''' Populate self.data with the v1 Service resource built from
        the stored options. Optional fields are only emitted when set. '''
        data = self.data
        data['apiVersion'] = 'v1'
        data['kind'] = 'Service'

        metadata = {'name': self.name, 'namespace': self.namespace}
        if self.labels:
            # copy the labels rather than aliasing the caller's mapping
            metadata['labels'] = {key: value
                                  for key, value in self.labels.items()}
        if self.annotations:
            metadata['annotations'] = self.annotations
        data['metadata'] = metadata

        spec = {}
        spec['ports'] = self.ports if self.ports else []
        if self.selector:
            spec['selector'] = self.selector
        # sessionAffinity always present; defaults to the string 'None'
        spec['sessionAffinity'] = self.session_affinity or 'None'
        if self.cluster_ip:
            spec['clusterIP'] = self.cluster_ip
        if self.portal_ip:
            spec['portalIP'] = self.portal_ip
        if self.service_type:
            spec['type'] = self.service_type
        if self.external_ips:
            spec['externalIPs'] = self.external_ips
        data['spec'] = spec
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class Service(Yedit):
    ''' Class to model the oc service object.

    Thin wrapper over the inherited Yedit get/put accessors that knows
    the dotted paths of the interesting fields of a v1 Service.
    Several mutators operate on the list returned by ``self.get`` in
    place — this assumes Yedit.get returns a live reference into the
    underlying document (TODO confirm against Yedit).
    '''
    # dotted Yedit paths into the Service resource
    port_path = "spec.ports"
    portal_ip = "spec.portalIP"
    cluster_ip = "spec.clusterIP"
    selector_path = 'spec.selector'
    kind = 'Service'
    external_ips = "spec.externalIPs"

    def __init__(self, content):
        '''Service constructor'''
        super(Service, self).__init__(content=content)

    def get_ports(self):
        ''' get a list of ports; [] when none are defined '''
        return self.get(Service.port_path) or []

    def get_selector(self):
        ''' get the service selector; {} when none is defined '''
        return self.get(Service.selector_path) or {}

    def add_ports(self, inc_ports):
        ''' add a port object (or list of them) to the ports list;
        always returns True '''
        if not isinstance(inc_ports, list):
            inc_ports = [inc_ports]

        ports = self.get_ports()
        if not ports:
            self.put(Service.port_path, inc_ports)
        else:
            # extend the live list returned by get_ports in place
            ports.extend(inc_ports)

        return True

    def find_ports(self, inc_port):
        ''' find a specific port; matched by port number only '''
        for port in self.get_ports():
            if port['port'] == inc_port['port']:
                return port

        return None

    def delete_ports(self, inc_ports):
        ''' remove one or more ports from a service; returns True when
        anything was removed or the service had no ports at all '''
        if not isinstance(inc_ports, list):
            inc_ports = [inc_ports]

        ports = self.get(Service.port_path) or []

        if not ports:
            return True

        removed = False
        for inc_port in inc_ports:
            port = self.find_ports(inc_port)
            if port:
                # mutate the live list in place
                ports.remove(port)
                removed = True

        return removed

    def add_cluster_ip(self, sip):
        '''add cluster ip'''
        self.put(Service.cluster_ip, sip)

    def add_portal_ip(self, pip):
        '''add portal ip (the original docstring said "cluster ip")'''
        self.put(Service.portal_ip, pip)

    def get_external_ips(self):
        ''' get a list of external_ips; [] when none are defined '''
        return self.get(Service.external_ips) or []

    def add_external_ips(self, inc_external_ips):
        ''' add an external_ip (or list of them) to the external_ips
        list; always returns True '''
        if not isinstance(inc_external_ips, list):
            inc_external_ips = [inc_external_ips]

        external_ips = self.get_external_ips()
        if not external_ips:
            self.put(Service.external_ips, inc_external_ips)
        else:
            # extend the live list returned by get_external_ips in place
            external_ips.extend(inc_external_ips)

        return True

    def find_external_ips(self, inc_external_ip):
        ''' find a specific external IP; None when not present '''
        val = None
        try:
            idx = self.get_external_ips().index(inc_external_ip)
            val = self.get_external_ips()[idx]
        except ValueError:
            pass

        return val

    def delete_external_ips(self, inc_external_ips):
        ''' remove one or more external IPs from a service; returns True
        when anything was removed or the service had none at all '''
        if not isinstance(inc_external_ips, list):
            inc_external_ips = [inc_external_ips]

        external_ips = self.get(Service.external_ips) or []

        if not external_ips:
            return True

        removed = False
        for inc_external_ip in inc_external_ips:
            external_ip = self.find_external_ips(inc_external_ip)
            if external_ip:
                # mutate the live list in place
                external_ips.remove(external_ip)
                removed = True

        return removed
| apache-2.0 |
llllllllll/codetransformer | versioneer.py | 367 | 62474 |
# Version: 0.15
"""
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, and pypy
* [![Latest Version]
(https://pypip.in/version/versioneer/badge.svg?style=flat)
](https://pypi.python.org/pypi/versioneer/)
* [![Build Status]
(https://travis-ci.org/warner/python-versioneer.png?branch=master)
](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere to your $PATH
* add a `[versioneer]` section to your setup.cfg (see below)
* run `versioneer install` in your source tree, commit the results
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
First, decide on values for the following configuration variables:
* `VCS`: the version control system you use. Currently accepts "git".
* `style`: the style of version string to be produced. See "Styles" below for
details. Defaults to "pep440", which looks like
`TAG[+DISTANCE.gSHORTHASH[.dirty]]`.
* `versionfile_source`:
A project-relative pathname into which the generated version strings should
be written. This is usually a `_version.py` next to your project's main
`__init__.py` file, so it can be imported at runtime. If your project uses
`src/myproject/__init__.py`, this should be `src/myproject/_version.py`.
This file should be checked in to your VCS as usual: the copy created below
by `setup.py setup_versioneer` will include code that parses expanded VCS
keywords in generated tarballs. The 'build' and 'sdist' commands will
replace it with a copy that has just the calculated version string.
This must be set even if your project does not have any modules (and will
therefore never import `_version.py`), since "setup.py sdist" -based trees
still need somewhere to record the pre-calculated version strings. Anywhere
in the source tree should do. If there is a `__init__.py` next to your
`_version.py`, the `setup.py setup_versioneer` command (described below)
will append some `__version__`-setting assignments, if they aren't already
present.
* `versionfile_build`:
Like `versionfile_source`, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`,
then you will probably have `versionfile_build='myproject/_version.py'` and
`versionfile_source='src/myproject/_version.py'`.
If this is set to None, then `setup.py build` will not attempt to rewrite
any `_version.py` in the built tree. If your project does not have any
libraries (e.g. if it only builds a script), then you should use
`versionfile_build = None` and override `distutils.command.build_scripts`
to explicitly insert a copy of `versioneer.get_version()` into your
generated script.
* `tag_prefix`:
a string, like 'PROJECTNAME-', which appears at the start of all VCS tags.
If your tags look like 'myproject-1.2.0', then you should use
tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this
should be an empty string.
* `parentdir_prefix`:
an optional string, frequently the same as tag_prefix, which appears at the
start of all unpacked tarball filenames. If your tarball unpacks into
'myproject-1.2.0', this should be 'myproject-'. To disable this feature,
just omit the field from your `setup.cfg`.
This tool provides one script, named `versioneer`. That script has one mode,
"install", which writes a copy of `versioneer.py` into the current directory
and runs `versioneer.py setup` to finish the installation.
To versioneer-enable your project:
* 1: Modify your `setup.cfg`, adding a section named `[versioneer]` and
populating it with the configuration values you decided earlier (note that
the option names are not case-sensitive):
````
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix = ""
parentdir_prefix = myproject-
````
* 2: Run `versioneer install`. This will do the following:
* copy `versioneer.py` into the top of your source tree
* create `_version.py` in the right place (`versionfile_source`)
* modify your `__init__.py` (if one exists next to `_version.py`) to define
`__version__` (by calling a function from `_version.py`)
* modify your `MANIFEST.in` to include both `versioneer.py` and the
generated `_version.py` in sdist tarballs
`versioneer install` will complain about any problems it finds with your
`setup.py` or `setup.cfg`. Run it multiple times until you have fixed all
the problems.
* 3: add a `import versioneer` to your setup.py, and add the following
arguments to the setup() call:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
* 4: commit these changes to your VCS. To make sure you won't forget,
`versioneer install` will mark everything it touched for addition using
`git add`. Don't forget to add `setup.py` and `setup.cfg` too.
## Post-Installation Usage
Once established, all uses of your tree from a VCS checkout should get the
current version string. All generated tarballs should include an embedded
version string (so users who unpack them will not need a VCS tool installed).
If you distribute your project through PyPI, then the release process should
boil down to two steps:
* 1: git tag 1.0
* 2: python setup.py register sdist upload
If you distribute it through github (i.e. users use github to generate
tarballs with `git archive`), the process is:
* 1: git tag 1.0
* 2: git push; git push --tags
Versioneer will report "0+untagged.NUMCOMMITS.gHASH" until your tree has at
least one tag in its history.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See details.md in the Versioneer source tree for
descriptions.
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
### Upgrading to 0.15
Starting with this version, Versioneer is configured with a `[versioneer]`
section in your `setup.cfg` file. Earlier versions required the `setup.py` to
set attributes on the `versioneer` module immediately after import. The new
version will refuse to run (raising an exception during import) until you
have provided the necessary `setup.cfg` section.
In addition, the Versioneer package provides an executable named
`versioneer`, and the installation process is driven by running `versioneer
install`. In 0.14 and earlier, the executable was named
`versioneer-installer` and was run without an argument.
### Upgrading to 0.14
0.14 changes the format of the version string. 0.13 and earlier used
hyphen-separated strings like "0.11-2-g1076c97-dirty". 0.14 and beyond use a
plus-separated "local version" section, with dot-separated
components, like "0.11+2.g1076c97". PEP440-strict tools did not like the old
format, but should be ok with the new one.
### Upgrading from 0.11 to 0.12
Nothing special.
### Upgrading from 0.10 to 0.11
You must add a `versioneer.VCS = "git"` to your `setup.py` before re-running
`setup.py setup_versioneer`. This will enable the use of additional
version-control systems (SVN, etc) in the future.
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is hereby released into the
public domain. The `_version.py` that it creates is also in the public
domain.
"""
from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
    """Attribute bag holding the settings read from the [versioneer]
    section of setup.cfg (VCS, style, versionfile_source, tag_prefix,
    parentdir_prefix, verbose, ...)."""
def get_root():
    """Return the project root directory (the one that should contain
    setup.py, setup.cfg, and versioneer.py).

    Checks the current working directory first, then the directory of
    sys.argv[0] (to allow 'python path/to/setup.py COMMAND').  Raises
    VersioneerBadRootError when neither location looks like a root.
    """
    # we require that all commands are run from the project root, i.e. the
    # directory that contains setup.py, setup.cfg, and versioneer.py .
    root = os.path.realpath(os.path.abspath(os.getcwd()))
    setup_py = os.path.join(root, "setup.py")
    versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        # allow 'python path/to/setup.py COMMAND'
        root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
        setup_py = os.path.join(root, "setup.py")
        versioneer_py = os.path.join(root, "versioneer.py")
    if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
        # NOTE: this message previously read "unable to run the project
        # root directory", which was garbled; it now says what failed.
        err = ("Versioneer was unable to find the project root directory. "
               "Versioneer requires setup.py to be executed from "
               "its immediate directory (like 'python setup.py COMMAND'), "
               "or in a way that lets it use sys.argv[0] to find the root "
               "(like 'python path/to/setup.py COMMAND').")
        raise VersioneerBadRootError(err)
    try:
        # Certain runtime workflows (setup.py install/develop in a setuptools
        # tree) execute all dependencies in a single python process, so
        # "versioneer" may be imported multiple times, and python's shared
        # module-import table will cache the first one. So we can't use
        # os.path.dirname(__file__), as that will find whichever
        # versioneer.py was first imported, even in later projects.  Warn
        # when that mismatch is detected.
        me = os.path.realpath(os.path.abspath(__file__))
        if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]:
            print("Warning: build in %s is using versioneer.py from %s"
                  % (os.path.dirname(me), versioneer_py))
    except NameError:
        # frozen/embedded interpreters may not define __file__
        pass
    return root
def get_config_from_root(root):
    """Read setup.cfg in *root* and return a populated VersioneerConfig.

    This might raise EnvironmentError (if setup.cfg is missing), or
    configparser.NoSectionError (if it lacks a [versioneer] section), or
    configparser.NoOptionError (if it lacks "VCS=").  See the docstring at
    the top of versioneer.py for instructions on writing your setup.cfg .
    """
    setup_cfg = os.path.join(root, "setup.cfg")
    # SafeConfigParser was deprecated in Python 3.2 and removed in 3.12;
    # on Python 3 ConfigParser is the same class, and it also exists on
    # Python 2 (with slightly laxer interpolation, which the simple values
    # read here do not exercise).
    parser = configparser.ConfigParser()
    with open(setup_cfg, "r") as f:
        try:
            parser.read_file(f)  # Python 3
        except AttributeError:
            parser.readfp(f)  # Python 2 has no read_file()
    VCS = parser.get("versioneer", "VCS")  # mandatory

    def get(parser, name):
        # optional settings come back as None instead of raising
        if parser.has_option("versioneer", name):
            return parser.get("versioneer", name)
        return None
    cfg = VersioneerConfig()
    cfg.VCS = VCS
    cfg.style = get(parser, "style") or ""
    cfg.versionfile_source = get(parser, "versionfile_source")
    cfg.versionfile_build = get(parser, "versionfile_build")
    cfg.tag_prefix = get(parser, "tag_prefix")
    cfg.parentdir_prefix = get(parser, "parentdir_prefix")
    cfg.verbose = get(parser, "verbose")
    return cfg
class NotThisMethod(Exception):
    """Raised by a version-discovery strategy that does not apply to the
    current tree, telling the caller to fall through to the next one."""
# these dictionaries contain VCS-specific tools
# LONG_VERSION_PY maps VCS name -> the _version.py template for that VCS;
# HANDLERS maps VCS name -> {method name -> handler function}, and is
# populated by the @register_vcs_handler decorator below.
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Return a decorator that records the wrapped function in
    HANDLERS[vcs][method] and hands it back unchanged."""
    def decorate(f):
        # register f under the (vcs, method) slot, creating the per-VCS
        # table on first use
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
    """Run the first available executable from *commands* with *args*.

    Returns the process's stripped stdout as a string, or None when no
    candidate executable exists or the command exits non-zero.
    """
    assert isinstance(commands, list)
    process = None
    for candidate in commands:
        dispcmd = str([candidate] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen(
                [candidate] + args, cwd=cwd, stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None))
        except EnvironmentError:
            err = sys.exc_info()[1]  # py2/py3-compatible exception capture
            if err.errno == errno.ENOENT:
                # this candidate isn't installed; try the next one
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(err)
            return None
        break
    else:
        # the loop never hit 'break': no candidate could be launched
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None
    stdout = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
        return None
    return stdout
LONG_VERSION_PY['git'] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15 (https://github.com/warner/python-versioneer)
import errno
import os
import re
import subprocess
import sys
def get_keywords():
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig:
pass
def get_config():
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
pass
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
def decorate(f):
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with "
"prefix '%%s'" %% (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs-tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long"],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
def plus_or_dot(pieces):
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
# now build up version string, with post-release "local version
# identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
# get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
# exceptions:
# 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
# TAG[.post.devDISTANCE] . No -dirty
# exceptions:
# 1: no tags. 0.post.devDISTANCE
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
# TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
# .dev0 sorts backwards (a dirty tree will appear "older" than the
# corresponding clean one), but you shouldn't be releasing software with
# -dirty anyways.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
# TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
# TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
# --always'
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
# TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
# --always -long'. The distance/hash is unconditional.
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract the git-archive keyword strings from a _version.py file.

    Returns a dict with "refnames" and/or "full" entries when the
    corresponding assignments are found; missing file yields an empty dict.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        # 'with' guarantees the handle is closed even if a non-I/O error
        # occurs while scanning; the original open()/close() pair leaked
        # the handle in that case.
        with open(versionfile_abs, "r") as f:
            for line in f.readlines():
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
    except EnvironmentError:
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Derive a version dict from expanded git-archive keyword strings.

    Raises NotThisMethod when the keywords are absent or unexpanded
    (i.e. this is not a git-archive tarball).
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        # the substitution never happened: not an archive tarball
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set(r.strip() for r in refnames.strip("()").split(","))
    # git >= 1.8.3 labels tags as "tag: foo-1.0"; prefer those entries
    TAG = "tag: "
    tags = set(r[len(TAG):] for r in refs if r.startswith(TAG))
    if not tags:
        # Fall back for git < 1.8.3 (or a repo with no tags): treat any ref
        # containing a digit as a candidate version tag, which filters out
        # common branch names like "release", "stabilization", "HEAD" and
        # "master" that the old %d expansion mixed in.
        tags = set(r for r in refs if re.search(r'\d', r))
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorted order prefers e.g. "2.0" over "2.0rc1"
        if not ref.startswith(tag_prefix):
            continue
        r = ref[len(tag_prefix):]
        if verbose:
            print("picking %s" % r)
        return {"version": r,
                "full-revisionid": keywords["full"].strip(),
                "dirty": False, "error": None
                }
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Collect raw version "pieces" by running git in a checked-out tree.

    Returns a dict with keys "long" (full hex id), "short", "dirty",
    "error", and — once describe output is parsed — "closest-tag" and
    "distance".  Raises NotThisMethod when *root* is not a git checkout
    or the git commands cannot be run.
    """
    # this runs 'git' from the root of the source tree. This only gets called
    # if the git-archive 'subst' keywords were *not* expanded, and
    # _version.py hasn't already been rewritten with a short version string,
    # meaning we're inside a checked out source tree.
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        raise NotThisMethod("no .git directory")
    GITS = ["git"]
    if sys.platform == "win32":
        # run_command uses shell=False, so windows needs the wrapper names
        GITS = ["git.cmd", "git.exe"]
    # if there is a tag, this yields TAG-NUM-gHEX[-dirty]
    # if there are no tags, this yields HEX[-dirty] (no NUM)
    describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
                                      "--always", "--long"],
                               cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX; the regex anchors g on the *last* hyphen group, so
        # tags containing hyphens still parse correctly
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
    """Git-add the files versioneer touches and ensure versionfile_source
    carries the export-subst attribute (so 'git archive' expands the
    version keywords).

    Adds MANIFEST.in, the version file, the neighboring __init__.py (if
    any), versioneer.py itself, and — when newly written — .gitattributes.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        # run_command uses shell=False, so windows needs the wrapper names
        GITS = ["git.cmd", "git.exe"]
    files = [manifest_in, versionfile_source]
    if ipy:
        files.append(ipy)
    try:
        me = __file__
        if me.endswith(".pyc") or me.endswith(".pyo"):
            # register the .py source, not the compiled artifact
            me = os.path.splitext(me)[0] + ".py"
        versioneer_file = os.path.relpath(me)
    except NameError:
        # frozen/embedded interpreters may not define __file__
        versioneer_file = "versioneer.py"
    files.append(versioneer_file)
    present = False
    try:
        # 'with' closes the handle even if scanning raises; the original
        # open()/close() pair leaked it in that case
        with open(".gitattributes", "r") as f:
            for line in f.readlines():
                if line.strip().startswith(versionfile_source):
                    if "export-subst" in line.strip().split()[1:]:
                        present = True
    except EnvironmentError:
        pass
    if not present:
        with open(".gitattributes", "a+") as f:
            f.write("%s export-subst\n" % versionfile_source)
        files.append(".gitattributes")
    run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Derive the version from the name of the unpacked source directory.

    Source tarballs conventionally unpack into "<project>-<version>/";
    strip *parentdir_prefix* from the basename of *root* and return the
    remainder as the version.  Raises NotThisMethod on a prefix mismatch.
    """
    dirname = os.path.basename(root)
    if dirname.startswith(parentdir_prefix):
        return {"version": dirname[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False, "error": None}
    if verbose:
        print("guessing rootdir is '%s', but '%s' doesn't start with "
              "prefix '%s'" % (root, dirname, parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.15) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
import sys
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
    """Load the JSON version dictionary embedded in a generated
    _version.py file; raise NotThisMethod if the file is unreadable or
    does not contain one."""
    try:
        with open(filename) as handle:
            text = handle.read()
    except EnvironmentError:
        raise NotThisMethod("unable to read _version.py")
    match = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
                      text, re.M | re.S)
    if match is None:
        raise NotThisMethod("no version_json in _version.py")
    return json.loads(match.group(1))
def write_to_version_file(filename, versions):
    """Replace *filename* with a generated _version.py embedding the
    *versions* dict as JSON.  The file must already exist (it is
    unlinked first)."""
    os.unlink(filename)
    payload = json.dumps(versions, sort_keys=True,
                         indent=1, separators=(",", ": "))
    with open(filename, "w") as handle:
        handle.write(SHORT_VERSION_PY % payload)
    print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
    """Return the separator that begins a local-version segment: "." when
    the closest tag already contains a "+", else "+"."""
    return "." if "+" in pieces.get("closest-tag", "") else "+"
def render_pep440(pieces):
    """Render TAG[+DISTANCE.gHEX[.dirty]] (PEP 440, the default style).

    A tagged build that is later dirtied renders as TAG+0.gHEX.dirty; a
    tree with no tags at all renders as 0+untagged.DISTANCE.gHEX[.dirty].
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"] or pieces["dirty"]:
            # local-version separator depends on whether the tag itself
            # already carries a "+"
            rendered += plus_or_dot(pieces)
            rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
            if pieces["dirty"]:
                rendered += ".dirty"
        return rendered
    # no tags anywhere in history
    rendered = "0+untagged.%d.g%s" % (pieces["distance"],
                                      pieces["short"])
    if pieces["dirty"]:
        rendered += ".dirty"
    return rendered
def render_pep440_pre(pieces):
    """Render TAG[.post.devDISTANCE]; no -dirty marker in this style.

    A tagless tree renders as 0.post.devDISTANCE.
    """
    tag = pieces["closest-tag"]
    if not tag:
        # tagless tree
        return "0.post.dev%d" % pieces["distance"]
    if not pieces["distance"]:
        return tag
    return tag + ".post.dev%d" % pieces["distance"]
def render_pep440_post(pieces):
    """Render TAG[.postDISTANCE[.dev0]+gHEX]; ".dev0" marks a dirty tree.

    Note that .dev0 sorts backwards (a dirty tree appears "older" than the
    corresponding clean one), but you shouldn't be releasing software with
    -dirty anyway.  A tagless tree renders as 0.postDISTANCE[.dev0]+gHEX.
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
            rendered += plus_or_dot(pieces)
            rendered += "g%s" % pieces["short"]
        return rendered
    # no tags at all
    rendered = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        rendered += ".dev0"
    rendered += "+g%s" % pieces["short"]
    return rendered
def render_pep440_old(pieces):
    """Render TAG[.postDISTANCE[.dev0]]; ".dev0" marks a dirty tree.

    A tagless tree renders as 0.postDISTANCE[.dev0].
    """
    tag = pieces["closest-tag"]
    if not tag:
        # no tags anywhere in history
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        return rendered
    rendered = tag
    if pieces["distance"] or pieces["dirty"]:
        rendered += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
    return rendered
def render_git_describe(pieces):
    """Render TAG[-DISTANCE-gHEX][-dirty], mimicking
    'git describe --tags --dirty --always'.

    Exception: with no tags, render HEX[-dirty] (note: no 'g' prefix).
    """
    if pieces["closest-tag"]:
        parts = [pieces["closest-tag"]]
        if pieces["distance"]:
            parts.append("%d-g%s" % (pieces["distance"], pieces["short"]))
        rendered = "-".join(parts)
    else:
        # exception #1: no tags; bare hash without the 'g' prefix
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered
def render_git_describe_long(pieces):
    """Render TAG-DISTANCE-gHEX[-dirty], mimicking
    'git describe --tags --dirty --always --long'.

    The distance/hash part is unconditional.  Exception: with no tags,
    render HEX[-dirty] (note: no 'g' prefix).
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1: no tags; bare hash without the 'g' prefix
        rendered = pieces["short"]
    return rendered + ("-dirty" if pieces["dirty"] else "")
def render(pieces, style):
    """Render *pieces* into a version-info dict using the named *style*.

    Returns a dict with 'version', 'full-revisionid', 'dirty' and 'error'
    keys.  Raises ValueError for an unrecognized style.
    """
    if pieces["error"]:
        # VCS extraction failed; propagate the error without rendering.
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"]}
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None}
class VersioneerBadRootError(Exception):
    """Raised when the project root directory cannot be validated."""
    pass
def get_versions(verbose=False):
    """Compute version information for the current project.

    Returns a dict with 'version', 'full-revisionid', 'dirty' and 'error'
    keys, trying each extraction strategy in turn and returning the first
    one that succeeds (each strategy signals failure via NotThisMethod).
    """
    if "versioneer" in sys.modules:
        # see the discussion in cmdclass.py:get_cmdclass()
        del sys.modules["versioneer"]
    root = get_root()
    cfg = get_config_from_root(root)
    assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
    handlers = HANDLERS.get(cfg.VCS)
    assert handlers, "unrecognized VCS '%s'" % cfg.VCS
    verbose = verbose or cfg.verbose
    assert cfg.versionfile_source is not None, \
        "please set versioneer.versionfile_source"
    assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
    versionfile_abs = os.path.join(root, cfg.versionfile_source)
    # extract version from first of: _version.py, VCS command (e.g. 'git
    # describe'), parentdir. This is meant to work for developers using a
    # source checkout, for users of a tarball created by 'setup.py sdist',
    # and for users of a tarball/zipball created by 'git archive' or github's
    # download-from-tag feature or the equivalent in other VCSes.
    get_keywords_f = handlers.get("get_keywords")
    from_keywords_f = handlers.get("keywords")
    if get_keywords_f and from_keywords_f:
        try:
            # strategy 1: expanded VCS keywords (e.g. from 'git archive')
            keywords = get_keywords_f(versionfile_abs)
            ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
            if verbose:
                print("got version from expanded keyword %s" % ver)
            return ver
        except NotThisMethod:
            pass
    try:
        # strategy 2: a previously generated _version.py file
        ver = versions_from_file(versionfile_abs)
        if verbose:
            print("got version from file %s %s" % (versionfile_abs, ver))
        return ver
    except NotThisMethod:
        pass
    from_vcs_f = handlers.get("pieces_from_vcs")
    if from_vcs_f:
        try:
            # strategy 3: ask the VCS directly, then render the pieces
            pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
            ver = render(pieces, cfg.style)
            if verbose:
                print("got version from VCS %s" % ver)
            return ver
        except NotThisMethod:
            pass
    try:
        # strategy 4: parse the version out of the parent directory name
        if cfg.parentdir_prefix:
            ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
            if verbose:
                print("got version from parentdir %s" % ver)
            return ver
    except NotThisMethod:
        pass
    if verbose:
        print("unable to compute version")
    # all strategies exhausted; report a recognizable placeholder version
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None, "error": "unable to compute version"}
def get_version():
    """Convenience wrapper returning only the version string."""
    info = get_versions()
    return info["version"]
def get_cmdclass():
    """Build the cmdclass dict wiring version support into distutils and
    setuptools commands: 'version', 'build_py', 'sdist', and (when
    cx_Freeze is active) 'build_exe'."""
    if "versioneer" in sys.modules:
        del sys.modules["versioneer"]
    # this fixes the "python setup.py develop" case (also 'install' and
    # 'easy_install .'), in which subdependencies of the main project are
    # built (using setup.py bdist_egg) in the same python process. Assume
    # a main project A and a dependency B, which use different versions
    # of Versioneer. A's setup.py imports A's Versioneer, leaving it in
    # sys.modules by the time B's setup.py is executed, causing B to run
    # with the wrong versioneer. Setuptools wraps the sub-dep builds in a
    # sandbox that restores sys.modules to its pre-build state, so the
    # parent is protected against the child's "import versioneer". By
    # removing ourselves from sys.modules here, before the child build
    # happens, we protect the child from the parent's versioneer too.
    # Also see https://github.com/warner/python-versioneer/issues/52
    cmds = {}
    # we add "version" to both distutils and setuptools
    from distutils.core import Command
    class cmd_version(Command):
        """Command that just prints the computed version information."""
        description = "report generated version string"
        user_options = []
        boolean_options = []
        def initialize_options(self):
            pass
        def finalize_options(self):
            pass
        def run(self):
            vers = get_versions(verbose=True)
            print("Version: %s" % vers["version"])
            print(" full-revisionid: %s" % vers.get("full-revisionid"))
            print(" dirty: %s" % vers.get("dirty"))
            if vers["error"]:
                print(" error: %s" % vers["error"])
    cmds["version"] = cmd_version
    # we override "build_py" in both distutils and setuptools
    #
    # most invocation pathways end up running build_py:
    # distutils/build -> build_py
    # distutils/install -> distutils/build ->..
    # setuptools/bdist_wheel -> distutils/install ->..
    # setuptools/bdist_egg -> distutils/install_lib -> build_py
    # setuptools/install -> bdist_egg ->..
    # setuptools/develop -> ?
    from distutils.command.build_py import build_py as _build_py
    class cmd_build_py(_build_py):
        """build_py that rewrites the built _version.py with a static value."""
        def run(self):
            root = get_root()
            cfg = get_config_from_root(root)
            versions = get_versions()
            _build_py.run(self)
            # now locate _version.py in the new build/ directory and replace
            # it with an updated value
            if cfg.versionfile_build:
                target_versionfile = os.path.join(self.build_lib,
                                                  cfg.versionfile_build)
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
    cmds["build_py"] = cmd_build_py
    if "cx_Freeze" in sys.modules: # cx_freeze enabled?
        from cx_Freeze.dist import build_exe as _build_exe
        class cmd_build_exe(_build_exe):
            """build_exe that freezes a static _version.py into the exe,
            then restores the original template file afterwards."""
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
                _build_exe.run(self)
                # restore the template version of _version.py for the
                # source tree after the freeze completes
                os.unlink(target_versionfile)
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(LONG %
                            {"DOLLAR": "$",
                             "STYLE": cfg.style,
                             "TAG_PREFIX": cfg.tag_prefix,
                             "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                             "VERSIONFILE_SOURCE": cfg.versionfile_source,
                             })
        cmds["build_exe"] = cmd_build_exe
        del cmds["build_py"]
    # we override different "sdist" commands for both environments
    if "setuptools" in sys.modules:
        from setuptools.command.sdist import sdist as _sdist
    else:
        from distutils.command.sdist import sdist as _sdist
    class cmd_sdist(_sdist):
        """sdist that freezes the computed version into the release tree."""
        def run(self):
            versions = get_versions()
            self._versioneer_generated_versions = versions
            # unless we update this, the command will keep using the old
            # version
            self.distribution.metadata.version = versions["version"]
            return _sdist.run(self)
        def make_release_tree(self, base_dir, files):
            root = get_root()
            cfg = get_config_from_root(root)
            _sdist.make_release_tree(self, base_dir, files)
            # now locate _version.py in the new base_dir directory
            # (remembering that it may be a hardlink) and replace it with an
            # updated value
            target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
            print("UPDATING %s" % target_versionfile)
            write_to_version_file(target_versionfile,
                                  self._versioneer_generated_versions)
    cmds["sdist"] = cmd_sdist
    return cmds
# Message printed to stderr by do_setup() when setup.cfg has no usable
# [versioneer] section.
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix = ""
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
# Commented-out sample configuration appended to setup.cfg by do_setup()
# when no [versioneer] section is found.
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
# Snippet appended to the package's __init__.py by do_setup() so the
# runtime version is available as PKG.__version__.
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
    """Install versioneer into the current project.

    Writes versionfile_source from the LONG_VERSION_PY template, hooks the
    package __init__.py, ensures MANIFEST.in includes the generated files,
    and applies VCS-specific changes.  Returns 0 on success, 1 when
    setup.cfg lacks a usable [versioneer] section.
    """
    root = get_root()
    try:
        cfg = get_config_from_root(root)
    except (EnvironmentError, configparser.NoSectionError,
            configparser.NoOptionError) as e:
        if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
            # setup.cfg is missing or has no [versioneer] section: seed it
            # with a commented-out sample for the user to fill in.
            print("Adding sample versioneer config to setup.cfg",
                  file=sys.stderr)
            with open(os.path.join(root, "setup.cfg"), "a") as f:
                f.write(SAMPLE_CONFIG)
        print(CONFIG_ERROR, file=sys.stderr)
        return 1
    print(" creating %s" % cfg.versionfile_source)
    with open(cfg.versionfile_source, "w") as f:
        LONG = LONG_VERSION_PY[cfg.VCS]
        f.write(LONG % {"DOLLAR": "$",
                        "STYLE": cfg.style,
                        "TAG_PREFIX": cfg.tag_prefix,
                        "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                        "VERSIONFILE_SOURCE": cfg.versionfile_source,
                        })
    ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
                       "__init__.py")
    if os.path.exists(ipy):
        try:
            with open(ipy, "r") as f:
                old = f.read()
        except EnvironmentError:
            old = ""
        # append the __version__ hook only once, keeping re-runs idempotent
        if INIT_PY_SNIPPET not in old:
            print(" appending to %s" % ipy)
            with open(ipy, "a") as f:
                f.write(INIT_PY_SNIPPET)
        else:
            print(" %s unmodified" % ipy)
    else:
        print(" %s doesn't exist, ok" % ipy)
        ipy = None
    # Make sure both the top-level "versioneer.py" and versionfile_source
    # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
    # they'll be copied into source distributions. Pip won't be able to
    # install the package without this.
    manifest_in = os.path.join(root, "MANIFEST.in")
    simple_includes = set()
    try:
        with open(manifest_in, "r") as f:
            for line in f:
                if line.startswith("include "):
                    for include in line.split()[1:]:
                        simple_includes.add(include)
    except EnvironmentError:
        pass
    # That doesn't cover everything MANIFEST.in can do
    # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
    # it might give some false negatives. Appending redundant 'include'
    # lines is safe, though.
    if "versioneer.py" not in simple_includes:
        print(" appending 'versioneer.py' to MANIFEST.in")
        with open(manifest_in, "a") as f:
            f.write("include versioneer.py\n")
    else:
        print(" 'versioneer.py' already in MANIFEST.in")
    if cfg.versionfile_source not in simple_includes:
        print(" appending versionfile_source ('%s') to MANIFEST.in" %
              cfg.versionfile_source)
        with open(manifest_in, "a") as f:
            f.write("include %s\n" % cfg.versionfile_source)
    else:
        print(" versionfile_source already in MANIFEST.in")
    # Make VCS-specific changes. For git, this means creating/changing
    # .gitattributes to mark _version.py for export-time keyword
    # substitution.
    do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
    return 0
def scan_setup_py():
    """Inspect ./setup.py for the expected versioneer hooks and report
    problems to stdout.  Returns the number of problems found (0 if clean).
    """
    found = set()
    setters = False
    errors = 0
    # substring expected in setup.py -> marker recorded when it is seen
    markers = {"import versioneer": "import",
               "versioneer.get_cmdclass()": "cmdclass",
               "versioneer.get_version()": "get_version"}
    with open("setup.py", "r") as f:
        for line in f:
            for text, marker in markers.items():
                if text in line:
                    found.add(marker)
            # old-style attribute assignments now belong in setup.cfg
            if "versioneer.VCS" in line or "versioneer.versionfile_source" in line:
                setters = True
    if len(found) != 3:
        print("")
        print("Your setup.py appears to be missing some important items")
        print("(but I might be wrong). Please make sure it has something")
        print("roughly like the following:")
        print("")
        print(" import versioneer")
        print(" setup( version=versioneer.get_version(),")
        print(" cmdclass=versioneer.get_cmdclass(), ...)")
        print("")
        errors += 1
    if setters:
        print("You should remove lines like 'versioneer.VCS = ' and")
        print("'versioneer.versionfile_source = ' . This configuration")
        print("now lives in setup.cfg, and should be removed from setup.py")
        print("")
        errors += 1
    return errors
if __name__ == "__main__":
    # Command-line entry point; the only recognized sub-command is 'setup'.
    cmd = sys.argv[1]
    if cmd == "setup":
        errors = do_setup()
        errors += scan_setup_py()
        if errors:
            # non-zero exit code so scripts/CI notice the misconfiguration
            sys.exit(1)
| gpl-2.0 |
3dfxmadscientist/CBSS | openerp/addons/base/ir/ir_config_parameter.py | 72 | 3862 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Store database-specific configuration parameters
"""
import uuid
import datetime
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.tools import misc, config
"""
A dictionary holding some configuration parameters to be initialized when the database is created.
"""
_default_parameters = {
    # Unique identifier for this database, generated once at creation time.
    "database.uuid": lambda: str(uuid.uuid1()),
    # Creation timestamp, formatted with the server's datetime format.
    "database.create_date": lambda: datetime.datetime.now().strftime(misc.DEFAULT_SERVER_DATETIME_FORMAT),
    # Base URL of the web interface; defaults to the local XML-RPC port.
    "web.base.url": lambda: "http://localhost:%s" % config.get('xmlrpc_port'),
}
class ir_config_parameter(osv.osv):
    """Per-database storage of configuration key-value pairs."""
    _name = 'ir.config_parameter'
    _columns = {
        'key': fields.char('Key', size=256, required=True, select=1),
        'value': fields.text('Value', required=True),
    }
    # Enforced at the database level, so concurrent transactions cannot
    # create duplicate keys.
    _sql_constraints = [
        ('key_uniq', 'unique (key)', 'Key must be unique.')
    ]
    def init(self, cr, force=False):
        """
        Initializes the parameters listed in _default_parameters.
        It overrides existing parameters if force is ``True``.
        """
        for key, func in _default_parameters.iteritems():
            # force=True skips search and always performs the 'if' body (because ids=False)
            ids = not force and self.search(cr, SUPERUSER_ID, [('key','=',key)])
            if not ids:
                # defaults are computed lazily by calling the stored lambda
                self.set_param(cr, SUPERUSER_ID, key, func())
    def get_param(self, cr, uid, key, default=False, context=None):
        """Retrieve the value for a given key.
        :param string key: The key of the parameter value to retrieve.
        :param string default: default value if parameter is missing.
        :return: The value of the parameter, or ``default`` if it does not exist.
        :rtype: string
        """
        ids = self.search(cr, uid, [('key','=',key)], context=context)
        if not ids:
            return default
        # keys are unique (see _sql_constraints), so at most one record matches
        param = self.browse(cr, uid, ids[0], context=context)
        value = param.value
        return value
    def set_param(self, cr, uid, key, value, context=None):
        """Sets the value of a parameter.
        :param string key: The key of the parameter value to set.
        :param string value: The value to set.
        :return: the previous value of the parameter or False if it did
        not exist.
        :rtype: string
        """
        ids = self.search(cr, uid, [('key','=',key)], context=context)
        if ids:
            # key exists: update in place and return the value it replaced
            param = self.browse(cr, uid, ids[0], context=context)
            old = param.value
            self.write(cr, uid, ids, {'value': value}, context=context)
            return old
        else:
            self.create(cr, uid, {'key': key, 'value': value}, context=context)
            return False
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
lmazuel/azure-sdk-for-python | azure-mgmt-resource/azure/mgmt/resource/resources/v2016_09_01/models/resource_group.py | 2 | 2131 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ResourceGroup(Model):
    """Resource group information.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :ivar id: The ID of the resource group.
    :vartype id: str
    :param name: The name of the resource group.
    :type name: str
    :param properties:
    :type properties:
    ~azure.mgmt.resource.resources.v2016_09_01.models.ResourceGroupProperties
    :param location: The location of the resource group. It cannot be changed
    after the resource group has been created. It must be one of the supported
    Azure locations.
    :type location: str
    :param managed_by: The ID of the resource that manages this resource
    group.
    :type managed_by: str
    :param tags: The tags attached to the resource group.
    :type tags: dict[str, str]
    """
    # Serialization rules: 'id' is server-populated (read-only) and
    # 'location' is the only required constructor argument.
    _validation = {
        'id': {'readonly': True},
        'location': {'required': True},
    }
    # Maps Python attribute names to their wire-format keys and type strings.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'properties': {'key': 'properties', 'type': 'ResourceGroupProperties'},
        'location': {'key': 'location', 'type': 'str'},
        'managed_by': {'key': 'managedBy', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }
    def __init__(self, location, name=None, properties=None, managed_by=None, tags=None):
        super(ResourceGroup, self).__init__()
        # 'id' is read-only: start as None and let the service populate it
        self.id = None
        self.name = name
        self.properties = properties
        self.location = location
        self.managed_by = managed_by
        self.tags = tags
| mit |
updownlife/multipleK | dependencies/biopython-1.65/Bio/GA/Selection/RouletteWheel.py | 4 | 4824 | # This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""Implement Roulette Wheel selection on a population.
This implements Roulette Wheel selection in which individuals are
selected from a population randomly, with their proportion of selection
based on their relative fitness in the population.
"""
# standard modules
import random
import copy
# local modules
from .Abstract import AbstractSelection
class RouletteWheelSelection(AbstractSelection):
    """Roulette wheel selection proportional to individuals fitness.
    The implements a roulette wheel selector that selects individuals
    from the population, and performs mutation and crossover on
    the selected individuals.
    """
    def __init__(self, mutator, crossover, repairer=None):
        """Initialize the selector.
        Arguments:
        o mutator -- A Mutation object which will perform mutation
        on an individual.
        o crossover -- A Crossover object which will take two
        individuals and produce two new individuals which may
        have had crossover occur.
        o repairer -- A class which can do repair on rearranged genomes
        to eliminate infeasible individuals. If set at None, so repair
        will be done.
        """
        AbstractSelection.__init__(self, mutator, crossover, repairer)
    def select(self, population):
        """Perform selection on the population based using a Roulette model.
        Arguments:
        o population -- A population of organisms on which we will perform
        selection. The individuals are assumed to have fitness values which
        are due to their current genome.
        """
        # set up the current probabilities for selecting organisms
        # from the population
        prob_wheel = self._set_up_wheel(population)
        probs = sorted(prob_wheel)
        # now create the new population with the same size as the original
        new_population = []
        for pair_spin in range(len(population) // 2):
            # select two individuals using roulette wheel selection
            chosen_org_1 = self._spin_wheel(prob_wheel, probs)
            chosen_org_2 = self._spin_wheel(prob_wheel, probs)
            # do mutation and crossover to get the new organisms
            new_org_1, new_org_2 = self.mutate_and_crossover(chosen_org_1,
                                                             chosen_org_2)
            new_population.extend([new_org_1, new_org_2])
        return new_population
    def _spin_wheel(self, prob_wheel, probs):
        """Pick one organism from the wheel with fitness-proportional odds.
        Arguments:
        o prob_wheel -- Mapping of cumulative-probability upper bounds to
        organisms, as built by _set_up_wheel.
        o probs -- The keys of prob_wheel in ascending order.
        """
        choice_num = random.random()
        prev_prob = 0
        for cur_prob in probs:
            # BUGFIX: use a half-open interval [prev_prob, cur_prob) so a
            # draw of exactly 0.0 (which random.random() can return) still
            # selects the first slot instead of falling through every slot.
            if prev_prob <= choice_num < cur_prob:
                return prob_wheel[cur_prob]
            prev_prob = cur_prob
        # BUGFIX: floating point round-off can make the cumulative
        # probabilities sum to slightly less than 1.0; map any leftover
        # draw onto the final slot rather than failing to select anyone.
        return prob_wheel[probs[-1]]
    def _set_up_wheel(self, population):
        """Set up the roulette wheel based on the fitnesses.
        This creates a fitness proportional 'wheel' that will be used for
        selecting based on random numbers.
        Returns:
        o A dictionary where the keys are the 'high' value that an
        individual will be selected. The low value is determined by
        the previous key in a sorted list of keys. For instance, if we
        have a sorted list of keys like:
        [.1, .3, .7, 1]
        Then the individual whose key is .1 will be selected if a number
        between 0 and .1 is chosen, the individual whose key is .3 will
        be selected if the number is between .1 and .3, and so on.
        The values of the dictionary are the organism instances.
        """
        # first sum up the total fitness in the population
        total_fitness = 0
        for org in population:
            total_fitness += org.fitness
        # now create the wheel dictionary for all of the individuals
        wheel_dict = {}
        total_percentage = 0
        for org in population:
            org_percentage = float(org.fitness) / float(total_fitness)
            # the organisms chance of being picked goes from the previous
            # percentage (total_percentage) to the previous percentage
            # plus the organisms specific fitness percentage
            wheel_dict[total_percentage + org_percentage] = copy.copy(org)
            # keep a running total of where we are at in the percentages
            total_percentage += org_percentage
        return wheel_dict
| gpl-2.0 |
TransitApp/protobuf | python/google/protobuf/internal/wire_format.py | 561 | 8431 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Constants and static functions to support protocol buffer wire format."""
__author__ = 'robinson@google.com (Will Robinson)'
import struct
from google.protobuf import descriptor
from google.protobuf import message
TAG_TYPE_BITS = 3 # Number of bits used to hold type info in a proto tag.
TAG_TYPE_MASK = (1 << TAG_TYPE_BITS) - 1 # 0x7
# These numbers identify the wire type of a protocol buffer value.
# We use the least-significant TAG_TYPE_BITS bits of the varint-encoded
# tag-and-type to store one of these WIRETYPE_* constants.
# These values must match WireType enum in google/protobuf/wire_format.h.
WIRETYPE_VARINT = 0
WIRETYPE_FIXED64 = 1
WIRETYPE_LENGTH_DELIMITED = 2
WIRETYPE_START_GROUP = 3
WIRETYPE_END_GROUP = 4
WIRETYPE_FIXED32 = 5
# Highest valid wire-type value; used by PackTag() for range checking.
_WIRETYPE_MAX = 5
# Bounds for various integer types.
INT32_MAX = int((1 << 31) - 1)
INT32_MIN = int(-(1 << 31))
UINT32_MAX = (1 << 32) - 1
INT64_MAX = (1 << 63) - 1
INT64_MIN = -(1 << 63)
UINT64_MAX = (1 << 64) - 1
# "struct" format strings that will encode/decode the specified formats.
# The leading '<' selects little-endian byte order with standard sizes.
FORMAT_UINT32_LITTLE_ENDIAN = '<I'
FORMAT_UINT64_LITTLE_ENDIAN = '<Q'
FORMAT_FLOAT_LITTLE_ENDIAN = '<f'
FORMAT_DOUBLE_LITTLE_ENDIAN = '<d'
# We'll have to provide alternate implementations of AppendLittleEndian*() on
# any architectures where these checks fail.
if struct.calcsize(FORMAT_UINT32_LITTLE_ENDIAN) != 4:
    raise AssertionError('Format "I" is not a 32-bit number.')
if struct.calcsize(FORMAT_UINT64_LITTLE_ENDIAN) != 8:
    raise AssertionError('Format "Q" is not a 64-bit number.')
def PackTag(field_number, wire_type):
    """Combine a field number and wire type into one varint-ready tag.

    Args:
    field_number: Expected to be an integer in the range [1, 1 << 29)
    wire_type: One of the WIRETYPE_* constants.
    """
    if wire_type < 0 or wire_type > _WIRETYPE_MAX:
        raise message.EncodeError('Unknown wire type: %d' % wire_type)
    # field number occupies the high bits, wire type the low 3 bits
    return (field_number << TAG_TYPE_BITS) | wire_type
def UnpackTag(tag):
    """The inverse of PackTag(): split an unsigned 32-bit tag into its
    (field_number, wire_type) pair."""
    field_number = tag >> TAG_TYPE_BITS
    wire_type = tag & TAG_TYPE_MASK
    return field_number, wire_type
def ZigZagEncode(value):
    """ZigZag Transform: Encodes signed integers so that they can be
    effectively used with varint encoding. See wire_format.h for
    more details.
    """
    if value < 0:
        # negatives map to odd numbers: -1 -> 1, -2 -> 3, ...
        return (value << 1) ^ (~0)
    # non-negatives map to even numbers: 0 -> 0, 1 -> 2, ...
    return value << 1
def ZigZagDecode(value):
    """Inverse of ZigZagEncode()."""
    if value & 0x1:
        # odd values decode to negatives: 1 -> -1, 3 -> -2, ...
        return (value >> 1) ^ (~0)
    return value >> 1
# The *ByteSize() functions below return the number of bytes required to
# serialize "field number + type" information and then serialize the value.
def Int32ByteSize(field_number, int32):
    # int32 values are widened to 64 bits on the wire, so they share the
    # int64 sizing logic.
    return Int64ByteSize(field_number, int32)
def Int32ByteSizeNoTag(int32):
    # Mask to 64 bits: negatives become their 10-byte two's-complement form.
    return _VarUInt64ByteSizeNoTag(0xffffffffffffffff & int32)
def Int64ByteSize(field_number, int64):
    # Have to convert to uint before calling UInt64ByteSize().
    return UInt64ByteSize(field_number, 0xffffffffffffffff & int64)
def UInt32ByteSize(field_number, uint32):
    return UInt64ByteSize(field_number, uint32)
def UInt64ByteSize(field_number, uint64):
    return TagByteSize(field_number) + _VarUInt64ByteSizeNoTag(uint64)
def SInt32ByteSize(field_number, int32):
    # sint* fields ZigZag-encode first, so small negatives stay small.
    return UInt32ByteSize(field_number, ZigZagEncode(int32))
def SInt64ByteSize(field_number, int64):
    return UInt64ByteSize(field_number, ZigZagEncode(int64))
def Fixed32ByteSize(field_number, fixed32):
    # Fixed-width types: tag plus a constant-size payload.
    return TagByteSize(field_number) + 4
def Fixed64ByteSize(field_number, fixed64):
    return TagByteSize(field_number) + 8
def SFixed32ByteSize(field_number, sfixed32):
    return TagByteSize(field_number) + 4
def SFixed64ByteSize(field_number, sfixed64):
    return TagByteSize(field_number) + 8
def FloatByteSize(field_number, flt):
    return TagByteSize(field_number) + 4
def DoubleByteSize(field_number, double):
    return TagByteSize(field_number) + 8
def BoolByteSize(field_number, b):
    return TagByteSize(field_number) + 1
def EnumByteSize(field_number, enum):
    return UInt32ByteSize(field_number, enum)
def StringByteSize(field_number, string):
    # Length is measured in UTF-8 bytes, not characters.
    return BytesByteSize(field_number, string.encode('utf-8'))
def BytesByteSize(field_number, b):
    # tag + varint-encoded length + raw payload
    return (TagByteSize(field_number)
            + _VarUInt64ByteSizeNoTag(len(b))
            + len(b))
def GroupByteSize(field_number, message):
    return (2 * TagByteSize(field_number) # START and END group.
            + message.ByteSize())
def MessageByteSize(field_number, message):
    # tag + varint-encoded message length + the message itself
    return (TagByteSize(field_number)
            + _VarUInt64ByteSizeNoTag(message.ByteSize())
            + message.ByteSize())
def MessageSetItemByteSize(field_number, msg):
    """Return the serialized size of one MessageSet item.

    A MessageSet item is a repeated group (field 1) containing a type_id
    (field 2) and the message payload (field 3).
    """
    # Tags: START and END of the group (field 1), plus the type_id (2)
    # and message (3) tags.
    tag_size = 2 * TagByteSize(1) + TagByteSize(2) + TagByteSize(3)
    message_size = msg.ByteSize()
    return (tag_size
            + _VarUInt64ByteSizeNoTag(field_number)
            + _VarUInt64ByteSizeNoTag(message_size)
            + message_size)
def TagByteSize(field_number):
    """Returns the bytes required to serialize a tag with this field number."""
    # Just pass in type 0, since the type won't affect the tag+type size
    # (all wire types fit in the same low TAG_TYPE_BITS bits).
    return _VarUInt64ByteSizeNoTag(PackTag(field_number, 0))
# Private helper function for the *ByteSize() functions above.
def _VarUInt64ByteSizeNoTag(uint64):
"""Returns the number of bytes required to serialize a single varint
using boundary value comparisons. (unrolled loop optimization -WPierce)
uint64 must be unsigned.
"""
if uint64 <= 0x7f: return 1
if uint64 <= 0x3fff: return 2
if uint64 <= 0x1fffff: return 3
if uint64 <= 0xfffffff: return 4
if uint64 <= 0x7ffffffff: return 5
if uint64 <= 0x3ffffffffff: return 6
if uint64 <= 0x1ffffffffffff: return 7
if uint64 <= 0xffffffffffffff: return 8
if uint64 <= 0x7fffffffffffffff: return 9
if uint64 > UINT64_MAX:
raise message.EncodeError('Value out of range: %d' % uint64)
return 10
# Field types that can never appear in a packed repeated field.
NON_PACKABLE_TYPES = (
    descriptor.FieldDescriptor.TYPE_STRING,
    descriptor.FieldDescriptor.TYPE_GROUP,
    descriptor.FieldDescriptor.TYPE_MESSAGE,
    descriptor.FieldDescriptor.TYPE_BYTES
)
def IsTypePackable(field_type):
    """Return true iff packable = true is valid for fields of this type.
    Args:
    field_type: a FieldDescriptor::Type value.
    Returns:
    True iff fields of this type are packable.
    """
    return field_type not in NON_PACKABLE_TYPES
| bsd-3-clause |
intel-hpdd/intel-manager-for-lustre | tests/__init__.py | 1 | 3232 | import logging
import os
import sys
import threading
import time
# Shared test logger; the monkey patches below mirror failure output into it
# as well as the console stream.
chroma_logger = logging.getLogger("test")
chroma_logger.setLevel(logging.DEBUG)
try:
    import nose
    nose_installed = True
except ImportError:
    nose_installed = False
if nose_installed:
    # Monkey patch TextTestResult to print errors as they occur
    def monkeyPatchedAddError(self, test, err):
        # Delegate to the stock bookkeeping first, then echo the traceback
        # immediately instead of waiting for the end-of-run summary.
        super(nose.result.TextTestResult, self).addError(test, err)
        if self.showAll:
            self.stream.writeln("ERROR")
            self.stream.writeln(self._exc_info_to_string(err, test))
            chroma_logger.error(self._exc_info_to_string(err, test))
        elif self.dots:
            self.stream.write("E")
            self.stream.flush()
    def monkeyPatchedAddFailure(self, test, err):
        # Same as monkeyPatchedAddError, but for assertion failures.
        super(nose.result.TextTestResult, self).addFailure(test, err)
        if self.showAll:
            self.stream.writeln("FAIL")
            self.stream.writeln(self._exc_info_to_string(err, test))
            chroma_logger.error(self._exc_info_to_string(err, test))
        elif self.dots:
            self.stream.write("F")
            self.stream.flush()
    nose.result.TextTestResult.chroma_logger = chroma_logger
    nose.result.TextTestResult.addError = monkeyPatchedAddError
    nose.result.TextTestResult.addFailure = monkeyPatchedAddFailure
    # Monkey patch TextTestRunner to exit hard if there are hanging threads
    def monkeyPatchedRun(self, test):
        self.descriptions = 0
        # Snapshot the threads alive before the run so we can tell which
        # ones were leaked by the tests themselves.
        threads_at_beginning_of_test_run = threading.enumerate()
        chroma_logger.info("Starting tests with these threads running: '%s'" % threads_at_beginning_of_test_run)
        wrapper = self.config.plugins.prepareTest(test)
        if wrapper is not None:
            test = wrapper
        wrapped = self.config.plugins.setOutputStream(self.stream)
        if wrapped is not None:
            self.stream = wrapped
        result = self._makeResult()
        start = time.time()
        test(result)
        stop = time.time()
        result.printErrors()
        result.printSummary(start, stop)
        self.config.plugins.finalize(result)
        def get_hanging_threads():
            # Threads that appeared during the run and are still alive.
            ending_threads = threading.enumerate()
            hanging_threads = []
            for thread in ending_threads:
                if thread not in threads_at_beginning_of_test_run and thread.is_alive():
                    hanging_threads.append(thread)
            return hanging_threads
        # Give the threads some time to stop
        running_time = 0
        while running_time < 300 and get_hanging_threads():
            time.sleep(5)
            running_time += 5
        chroma_logger.info("Ending tests with these threads running: '%s'" % threading.enumerate())
        hanging_threads = get_hanging_threads()
        if hanging_threads:
            sys.stderr.write(
                "\n********************\n\nTERMINATING TEST RUN - NOT ALL THREADS STOPPED AT END OF TESTS: '%s'\n\n********************\n"
                % hanging_threads
            )
            # os._exit skips normal interpreter shutdown on purpose: a hung
            # non-daemon thread would otherwise block a clean exit.
            os._exit(1)
        return result
    nose.core.TextTestRunner.chroma_logger = chroma_logger
    nose.core.TextTestRunner.run = monkeyPatchedRun
| mit |
jimcunderwood/MissionPlanner | Lib/site-packages/numpy/lib/_datasource.py | 81 | 20609 | """A file interface for handling local and remote data files.
The goal of datasource is to abstract some of the file system operations when
dealing with data files so the researcher doesn't have to know all the
low-level details. Through datasource, a researcher can obtain and use a
file with one function call, regardless of location of the file.
DataSource is meant to augment standard python libraries, not replace them.
It should work seamlessly with standard file IO operations and the os module.
DataSource files can originate locally or remotely:
- local files : '/home/guido/src/local/data.txt'
- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt'
DataSource files can also be compressed or uncompressed. Currently only gzip
and bz2 are supported.
Example::
>>> # Create a DataSource, use os.curdir (default) for local storage.
>>> ds = datasource.DataSource()
>>>
>>> # Open a remote file.
>>> # DataSource downloads the file, stores it locally in:
>>> # './www.google.com/index.html'
>>> # opens the file and returns a file object.
>>> fp = ds.open('http://www.google.com/index.html')
>>>
>>> # Use the file as you normally would
>>> fp.read()
>>> fp.close()
"""
__docformat__ = "restructuredtext en"
import os
from shutil import rmtree, copyfile, copyfileobj
_open = open
# Using a class instead of a module-level dictionary
# to reduce the initial 'import numpy' overhead by
# deferring the import of bz2 and gzip until needed
# TODO: .zip support, .tar support?
class _FileOpeners(object):
"""
Container for different methods to open (un-)compressed files.
`_FileOpeners` contains a dictionary that holds one method for each
supported file format. Attribute lookup is implemented in such a way that
an instance of `_FileOpeners` itself can be indexed with the keys of that
dictionary. Currently uncompressed files as well as files
compressed with ``gzip`` or ``bz2`` compression are supported.
Notes
-----
`_file_openers`, an instance of `_FileOpeners`, is made available for
use in the `_datasource` module.
Examples
--------
>>> np.lib._datasource._file_openers.keys()
[None, '.bz2', '.gz']
>>> np.lib._datasource._file_openers['.gz'] is gzip.open
True
"""
def __init__(self):
self._loaded = False
self._file_openers = {None: open}
def _load(self):
if self._loaded:
return
try:
import bz2
self._file_openers[".bz2"] = bz2.BZ2File
except ImportError:
pass
try:
import gzip
self._file_openers[".gz"] = gzip.open
except ImportError:
pass
self._loaded = True
def keys(self):
"""
Return the keys of currently supported file openers.
Parameters
----------
None
Returns
-------
keys : list
The keys are None for uncompressed files and the file extension
strings (i.e. ``'.gz'``, ``'.bz2'``) for supported compression
methods.
"""
self._load()
return self._file_openers.keys()
def __getitem__(self, key):
self._load()
return self._file_openers[key]
_file_openers = _FileOpeners()
def open(path, mode='r', destpath=os.curdir):
    """
    Open `path` with `mode` and return the file object.

    If ``path`` is an URL, it will be downloaded, stored in the `DataSource`
    `destpath` directory and opened from there.

    Parameters
    ----------
    path : str
        Local file path or URL to open.
    mode : str, optional
        Mode to open `path`.  Mode 'r' for reading, 'w' for writing, 'a' to
        append.  Available modes depend on the type of object specified by
        path.  Default is 'r'.
    destpath : str, optional
        Path to the directory where the source file gets downloaded to for
        use.  If `destpath` is None, a temporary directory will be created.
        The default path is the current directory.

    Returns
    -------
    out : file object
        The opened file.

    Notes
    -----
    This is a convenience function that instantiates a `DataSource` and
    returns the file object from ``DataSource.open(path)``.

    """
    return DataSource(destpath).open(path, mode)
class DataSource (object):
    """
    DataSource(destpath='.')

    A generic data source file (file, http, ftp, ...).

    DataSources can be local files or remote files/URLs.  The files may
    also be compressed or uncompressed.  DataSource hides some of the
    low-level details of downloading the file, allowing you to simply pass
    in a valid file path (or URL) and obtain a file object.

    Parameters
    ----------
    destpath : str or None, optional
        Path to the directory where the source file gets downloaded to for
        use.  If `destpath` is None, a temporary directory will be created.
        The default path is the current directory.

    Notes
    -----
    URLs require a scheme string (``http://``) to be used, without it they
    will fail::

        >>> repos = DataSource()
        >>> repos.exists('www.google.com/index.html')
        False
        >>> repos.exists('http://www.google.com/index.html')
        True

    Temporary directories are deleted when the DataSource is deleted.

    Examples
    --------
    ::

        >>> ds = DataSource('/home/guido')
        >>> urlname = 'http://www.google.com/index.html'
        >>> gfile = ds.open('http://www.google.com/index.html')  # remote file
        >>> ds.abspath(urlname)
        '/home/guido/www.google.com/site/index.html'

        >>> ds = DataSource(None)  # use with temporary file
        >>> ds.abspath('/home/guido/foobar.txt')
        '/tmp/tmpy4pgsP/home/guido/foobar.txt'

    """

    def __init__(self, destpath=os.curdir):
        """Create a DataSource with a local path at destpath."""
        if destpath:
            self._destpath = os.path.abspath(destpath)
            self._istmpdest = False
        else:
            import tempfile  # deferring import to improve startup time
            self._destpath = tempfile.mkdtemp()
            self._istmpdest = True

    def __del__(self):
        # Remove temp directories
        if self._istmpdest:
            rmtree(self._destpath)

    def _iszip(self, filename):
        """Test if the filename is a zip file by looking at the file extension.
        """
        fname, ext = os.path.splitext(filename)
        return ext in _file_openers.keys()

    def _iswritemode(self, mode):
        """Test if the given mode will open a file for writing."""
        # Currently only used to test the bz2 files.
        _writemodes = ("w", "+")
        for c in mode:
            if c in _writemodes:
                return True
        return False

    def _splitzipext(self, filename):
        """Split zip extension from filename and return filename.

        Returns
        -------
        base, zip_ext : {tuple}
            `zip_ext` includes the leading dot (e.g. ``'.bz2'``) and is None
            when `filename` has no recognized compression extension.

        """
        if self._iszip(filename):
            return os.path.splitext(filename)
        else:
            return filename, None

    def _possible_names(self, filename):
        """Return a list containing compressed filename variations."""
        names = [filename]
        if not self._iszip(filename):
            for zipext in _file_openers.keys():
                if zipext:
                    names.append(filename+zipext)
        return names

    def _isurl(self, path):
        """Test if path is a net location.  Tests the scheme and netloc."""
        # We do this here to reduce the 'import numpy' initial import time.
        from urlparse import urlparse

        # BUG : URLs require a scheme string ('http://') to be used.
        #       www.google.com will fail.
        #       Should we prepend the scheme for those that don't have it and
        #       test that also?  Similar to the way we append .gz and test
        #       for compressed versions of files.

        scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
        return bool(scheme and netloc)

    def _cache(self, path):
        """Cache the file specified by path.

        Creates a copy of the file in the datasource cache and returns the
        path to that cached copy.

        """
        # We import these here because importing urllib2 is slow and
        # a significant fraction of numpy's total import time.
        from urllib2 import urlopen
        from urllib2 import URLError

        upath = self.abspath(path)

        # ensure directory exists
        if not os.path.exists(os.path.dirname(upath)):
            os.makedirs(os.path.dirname(upath))

        # TODO: Doesn't handle compressed files!
        if self._isurl(path):
            try:
                openedurl = urlopen(path)
                f = _open(upath, 'wb')
                try:
                    copyfileobj(openedurl, f)
                finally:
                    f.close()
            except URLError:
                raise URLError("URL not found: %s" % path)
        else:
            # Bug fix: this module does `from shutil import copyfile`; the
            # bare `shutil` name is never bound, so the previous
            # `shutil.copyfile(...)` raised NameError.
            copyfile(path, upath)
        return upath

    def _findfile(self, path):
        """Searches for ``path`` and returns full path if found.

        If path is an URL, _findfile will cache a local copy and return
        the path to the cached file.
        If path is a local file, _findfile will return a path to that local
        file.

        The search will include possible compressed versions of the file and
        return the first occurence found.

        """
        # Build list of possible local file paths
        if not self._isurl(path):
            # Valid local paths
            filelist = self._possible_names(path)
            # Paths in self._destpath
            filelist += self._possible_names(self.abspath(path))
        else:
            # Cached URLs in self._destpath
            filelist = self._possible_names(self.abspath(path))
            # Remote URLs
            filelist = filelist + self._possible_names(path)

        for name in filelist:
            if self.exists(name):
                if self._isurl(name):
                    name = self._cache(name)
                return name
        return None

    def abspath(self, path):
        """
        Return absolute path of file in the DataSource directory.

        If `path` is an URL, then `abspath` will return either the location
        the file exists locally or the location it would exist when opened
        using the `open` method.

        Parameters
        ----------
        path : str
            Can be a local file or a remote URL.

        Returns
        -------
        out : str
            Complete path, including the `DataSource` destination directory.

        Notes
        -----
        The functionality is based on `os.path.abspath`.

        """
        # We do this here to reduce the 'import numpy' initial import time.
        from urlparse import urlparse

        # TODO:  This should be more robust.  Handles case where path includes
        #        the destpath, but not other sub-paths.  Failing case:
        #        path = /home/guido/datafile.txt
        #        destpath = /home/alex/
        #        upath = self.abspath(path)
        #        upath == '/home/alex/home/guido/datafile.txt'

        # handle case where path includes self._destpath
        splitpath = path.split(self._destpath, 2)
        if len(splitpath) > 1:
            path = splitpath[1]
        scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
        netloc = self._sanitize_relative_path(netloc)
        upath = self._sanitize_relative_path(upath)
        return os.path.join(self._destpath, netloc, upath)

    def _sanitize_relative_path(self, path):
        """Return a sanitised relative path for which

        os.path.abspath(os.path.join(base, path)).startswith(base)
        """
        last = None
        path = os.path.normpath(path)
        while path != last:
            last = path
            # Note: os.path.join treats '/' as os.sep on Windows
            path = path.lstrip(os.sep).lstrip('/')
            path = path.lstrip(os.pardir).lstrip('..')
            drive, path = os.path.splitdrive(path)  # for Windows
        return path

    def exists(self, path):
        """
        Test if path exists.

        Test if `path` exists as (and in this order):

        - a local file.
        - a remote URL that has been downloaded and stored locally in the
          `DataSource` directory.
        - a remote URL that has not been downloaded, but is valid and
          accessible.

        Parameters
        ----------
        path : str
            Can be a local file or a remote URL.

        Returns
        -------
        out : bool
            True if `path` exists.

        Notes
        -----
        When `path` is an URL, `exists` will return True if it's either
        stored locally in the `DataSource` directory, or is a valid remote
        URL.  `DataSource` does not discriminate between the two, the file
        is accessible if it exists in either location.

        """
        # We import this here because importing urllib2 is slow and
        # a significant fraction of numpy's total import time.
        from urllib2 import urlopen
        from urllib2 import URLError

        # Test local path
        if os.path.exists(path):
            return True

        # Test cached url
        upath = self.abspath(path)
        if os.path.exists(upath):
            return True

        # Test remote url
        if self._isurl(path):
            try:
                netfile = urlopen(path)
                # Close the probe connection instead of just dropping the
                # reference, so the socket is released deterministically.
                netfile.close()
                return True
            except URLError:
                return False
        return False

    def open(self, path, mode='r'):
        """
        Open and return file-like object.

        If `path` is an URL, it will be downloaded, stored in the
        `DataSource` directory and opened from there.

        Parameters
        ----------
        path : str
            Local file path or URL to open.
        mode : {'r', 'w', 'a'}, optional
            Mode to open `path`.  Mode 'r' for reading, 'w' for writing,
            'a' to append.  Available modes depend on the type of object
            specified by `path`.  Default is 'r'.

        Returns
        -------
        out : file object
            File object.

        """
        # TODO: There is no support for opening a file for writing which
        #       doesn't exist yet (creating a file).  Should there be?

        # TODO: Add a ``subdir`` parameter for specifying the subdirectory
        #       used to store URLs in self._destpath.

        if self._isurl(path) and self._iswritemode(mode):
            raise ValueError("URLs are not writeable")

        # NOTE: _findfile will fail on a new file opened for writing.
        found = self._findfile(path)
        if found:
            _fname, ext = self._splitzipext(found)
            if ext == '.bz2':
                # BZ2File does not support '+' modes; strip it.  Bug fix:
                # the original compared against 'bz2' (splitext keeps the
                # leading dot, so the branch was unreachable) and discarded
                # the result of the immutable-string replace().
                mode = mode.replace("+", "")
            return _file_openers[ext](found, mode=mode)
        else:
            raise IOError("%s not found." % path)
class Repository (DataSource):
    """
    Repository(baseurl, destpath='.')

    A data repository where multiple DataSource's share a base URL/directory.

    `Repository` extends `DataSource` by prepending a base URL (or directory)
    to all the files it handles.  Use `Repository` when you will be working
    with multiple files from one base URL: initialize it with the base URL,
    then refer to each file by its filename only.

    Parameters
    ----------
    baseurl : str
        Path to the local directory or remote location that contains the
        data files.
    destpath : str or None, optional
        Path to the directory where the source file gets downloaded to for
        use.  If `destpath` is None, a temporary directory will be created.
        The default path is the current directory.

    Examples
    --------
    To analyze all files in the repository, do something like this
    (note: this is not self-contained code)::

        >>> repos = np.lib._datasource.Repository('/home/user/data/dir/')
        >>> for filename in filelist:
        ...     fp = repos.open(filename)
        ...     fp.analyze()
        ...     fp.close()

    Similarly you could use a URL for a repository::

        >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data')

    """

    def __init__(self, baseurl, destpath=os.curdir):
        """Create a Repository with a shared url or directory of baseurl."""
        DataSource.__init__(self, destpath=destpath)
        self._baseurl = baseurl

    def __del__(self):
        DataSource.__del__(self)

    def _fullpath(self, path):
        """Return complete path for path.  Prepends baseurl if necessary."""
        parts = path.split(self._baseurl, 2)
        if len(parts) == 1:
            # baseurl is not part of path yet; prepend it.
            return os.path.join(self._baseurl, path)
        # path already contains baseurl
        return path

    def _findfile(self, path):
        """Extend DataSource method to prepend baseurl to ``path``."""
        return DataSource._findfile(self, self._fullpath(path))

    def abspath(self, path):
        """
        Return absolute path of file in the Repository directory.

        `path` may, but does not have to, include the `baseurl` with which
        the `Repository` was initialized.  See `DataSource.abspath` for the
        full semantics.

        Parameters
        ----------
        path : str
            Can be a local file or a remote URL.

        Returns
        -------
        out : str
            Complete path, including the `DataSource` destination directory.

        """
        return DataSource.abspath(self, self._fullpath(path))

    def exists(self, path):
        """
        Test if path exists, prepending the Repository base URL to path.

        `path` may, but does not have to, include the `baseurl` with which
        the `Repository` was initialized.  See `DataSource.exists` for the
        exact lookup order (local file, cached URL, live URL).

        Parameters
        ----------
        path : str
            Can be a local file or a remote URL.

        Returns
        -------
        out : bool
            True if `path` exists.

        """
        return DataSource.exists(self, self._fullpath(path))

    def open(self, path, mode='r'):
        """
        Open and return file-like object, prepending the Repository base URL.

        If `path` is an URL, it will be downloaded, stored in the DataSource
        directory and opened from there.

        Parameters
        ----------
        path : str
            Local file path or URL to open.  May, but does not have to,
            include the `baseurl`.
        mode : {'r', 'w', 'a'}, optional
            Mode to open `path`; see `DataSource.open`.  Default is 'r'.

        Returns
        -------
        out : file object
            File object.

        """
        return DataSource.open(self, self._fullpath(path), mode)

    def listdir(self):
        """
        List files in the source Repository.

        Returns
        -------
        files : list of str
            List of file names (not containing a directory part).

        Notes
        -----
        Does not currently work for remote repositories.

        """
        if self._isurl(self._baseurl):
            raise NotImplementedError(
                "Directory listing of URLs, not supported yet.")
        return os.listdir(self._baseurl)
| gpl-3.0 |
denisff/python-for-android | python-build/python-libs/gdata/src/gdata/gauth.py | 135 | 41554 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
"""Provides auth related token classes and functions for Google Data APIs.
Token classes represent a user's authorization of this app to access their
data. Usually these are not created directly but by a GDClient object.
ClientLoginToken
AuthSubToken
SecureAuthSubToken
OAuthHmacToken
OAuthRsaToken
Functions which are often used in application code (as opposed to just within
the gdata-python-client library) are the following:
generate_auth_sub_url
authorize_request_token
The following are helper functions which are used to save and load auth token
objects in the App Engine datastore. These should only be used if you are using
this library within App Engine:
ae_load
ae_save
"""
import time
import random
import urllib
import atom.http_core
__author__ = 'j.s@google.com (Jeff Scudder)'
PROGRAMMATIC_AUTH_LABEL = 'GoogleLogin auth='
AUTHSUB_AUTH_LABEL = 'AuthSub token='
class Error(Exception):
  """Base exception for this auth module."""
  pass


class UnsupportedTokenType(Error):
  """Raised when token to or from blob is unable to convert the token."""
  pass
# ClientLogin functions and classes.
def generate_client_login_request_body(email, password, service, source,
    account_type='HOSTED_OR_GOOGLE', captcha_token=None,
    captcha_response=None):
  """Creates the body of the authentication request.

  See http://code.google.com/apis/accounts/AuthForInstalledApps.html#Request
  for more details.

  Args:
    email: str
    password: str
    service: str
    source: str
    account_type: str (optional) Default is 'HOSTED_OR_GOOGLE', other valid
        values are 'GOOGLE' and 'HOSTED'
    captcha_token: str (optional)
    captcha_response: str (optional)

  Returns:
    The HTTP body to send in a request for a client login token.
  """
  # POST body carrying the user's credentials.
  request_fields = {
      'Email': email,
      'Passwd': password,
      'accountType': account_type,
      'service': service,
      'source': source,
  }
  if captcha_token and captcha_response:
    # Include the captcha token and response when the user is answering a
    # captcha challenge.
    request_fields['logintoken'] = captcha_token
    request_fields['logincaptcha'] = captcha_response
  return urllib.urlencode(request_fields)


GenerateClientLoginRequestBody = generate_client_login_request_body
def get_client_login_token_string(http_body):
  """Returns the token value for a ClientLoginToken.

  Reads the token from the server's response to a Client Login request and
  creates the token value string to use in requests.

  Args:
    http_body: str The body of the server's HTTP response to a Client Login
        request

  Returns:
    The token value string for a ClientLoginToken, or None if no 'Auth='
    line was present in the response.
  """
  for line in http_body.splitlines():
    if line.startswith('Auth='):
      # Strip the leading 'Auth=' and return the remainder of the line.
      return line[len('Auth='):]
  return None


GetClientLoginTokenString = get_client_login_token_string
def get_captcha_challenge(http_body,
    captcha_base_url='http://www.google.com/accounts/'):
  """Returns the URL and token for a CAPTCHA challenge issued by the server.

  Args:
    http_body: str The body of the HTTP response from the server which
        contains the CAPTCHA challenge.
    captcha_base_url: str This function returns a full URL for viewing the
        challenge image which is built from the server's response.  This
        base_url is used as the beginning of the URL because the server
        only provides the end of the URL.  For example the server provides
        'Captcha?ctoken=Hi...N' and the URL for the image is
        'http://www.google.com/accounts/Captcha?ctoken=Hi...N'

  Returns:
    A dictionary containing the information needed to respond to the
    CAPTCHA challenge, in the form:
        {'token': string identifying the CAPTCHA image,
         'url': string containing the URL of the image}
    Returns None if there was no CAPTCHA challenge in the response.
  """
  challenge_found = False
  captcha_parameters = {}
  for line in http_body.splitlines():
    if line.startswith('Error=CaptchaRequired'):
      challenge_found = True
    elif line.startswith('CaptchaToken='):
      # Everything after the 13-character 'CaptchaToken=' prefix.
      captcha_parameters['token'] = line[13:]
    elif line.startswith('CaptchaUrl='):
      # The server only sends the tail of the URL; prepend the base.
      captcha_parameters['url'] = '%s%s' % (captcha_base_url, line[11:])
  if challenge_found:
    return captcha_parameters
  return None


GetCaptchaChallenge = get_captcha_challenge
class ClientLoginToken(object):
  """Carries a ClientLogin token and applies it to outgoing requests."""

  def __init__(self, token_string):
    self.token_string = token_string

  def modify_request(self, http_request):
    """Sets the Authorization header to the GoogleLogin token."""
    http_request.headers['Authorization'] = '%s%s' % (
        PROGRAMMATIC_AUTH_LABEL, self.token_string)

  ModifyRequest = modify_request
# AuthSub functions and classes.
def _to_uri(str_or_uri):
  """Coerce a string into an atom.http_core.Uri; pass Uri objects through."""
  if not isinstance(str_or_uri, (str, unicode)):
    return str_or_uri
  return atom.http_core.Uri.parse_uri(str_or_uri)
def generate_auth_sub_url(next, scopes, secure=False, session=True,
    request_url=atom.http_core.parse_uri(
        'https://www.google.com/accounts/AuthSubRequest'),
    domain='default', scopes_param_prefix='auth_sub_scopes'):
  """Constructs a URI for requesting a multiscope AuthSub token.

  The generated token will contain a URL parameter to pass along the
  requested scopes to the next URL.  When the Google Accounts page redirects
  the browser to the 'next' URL, it appends the single use AuthSub token
  value to the URL as a URL parameter with the key 'token', but it does not
  include the scopes that were requested.  This method therefore adds the
  scopes to the next URL before making the request, so the redirect target
  receives both the token value and the list of scopes it was requested for.

  Args:
    next: atom.http_core.Uri or string The URL user will be sent to after
        authorizing this web application to access their data.
    scopes: list containing strings or atom.http_core.Uri objects.  The URLs
        of the services to be accessed.
    secure: boolean (optional) Determines whether or not the issued token
        is a secure token.
    session: boolean (optional) Determines whether or not the issued token
        can be upgraded to a session token.
    request_url: atom.http_core.Uri or str The beginning of the request URL.
        This is normally 'http://www.google.com/accounts/AuthSubRequest' or
        '/accounts/AuthSubRequest'
    domain: The domain which the account is part of.  This is used for
        Google Apps accounts; the default value 'default' means the
        requested account is a Google Account (@gmail.com for example).
    scopes_param_prefix: str (optional) Key of the URL parameter carrying
        the requested scopes on the next URL.  Defaults to
        'auth_sub_scopes'.

  Returns:
    An atom.http_core.Uri which the user's browser should be directed to in
    order to authorize this application to access their information.
  """
  if isinstance(next, (str, unicode)):
    next = atom.http_core.Uri.parse_uri(next)
  scopes_string = ' '.join([str(scope) for scope in scopes])
  next.query[scopes_param_prefix] = scopes_string

  if isinstance(request_url, (str, unicode)):
    request_url = atom.http_core.Uri.parse_uri(request_url)
  request_url.query['next'] = str(next)
  request_url.query['scope'] = scopes_string
  request_url.query['session'] = session and '1' or '0'
  request_url.query['secure'] = secure and '1' or '0'
  request_url.query['hd'] = domain
  return request_url
def auth_sub_string_from_url(url, scopes_param_prefix='auth_sub_scopes'):
  """Finds the token string (and scopes) after the browser is redirected.

  After the Google Accounts AuthSub pages redirect the user's browser back
  to the web application (using the 'next' URL from the request) the web app
  must extract the token from the current page's URL.  The token is provided
  as a URL parameter named 'token' and, if generate_auth_sub_url was used to
  create the request, the token's valid scopes are included in a URL
  parameter whose name is specified in scopes_param_prefix.

  Args:
    url: atom.url.Url or str representing the current URL.  The token value
        and valid scopes should be included as URL parameters.
    scopes_param_prefix: str (optional) The URL parameter key which maps to
        the list of valid scopes for the token.

  Returns:
    A tuple (token_string, scopes) where scopes is a tuple of URL prefixes
    under which this token grants permission to read and write user data.
    If no scopes were included in the URL, scopes is None.  If there was no
    token parameter in the url, the result is (None, None).
  """
  if isinstance(url, (str, unicode)):
    url = atom.http_core.Uri.parse_uri(url)
  if 'token' not in url.query:
    return (None, None)
  token = url.query['token']
  # TODO: decide whether no scopes should be None or ().
  scopes = None  # Default to None for no scopes.
  if scopes_param_prefix in url.query:
    scopes = tuple(url.query[scopes_param_prefix].split(' '))
  return (token, scopes)


AuthSubStringFromUrl = auth_sub_string_from_url
def auth_sub_string_from_body(http_body):
  """Extracts the AuthSub token from an HTTP body string.

  Used to find the new session token after making a request to upgrade a
  single use AuthSub token.

  Args:
    http_body: str The response from the server which contains the AuthSub
        key.  For example, this function would find the new session token
        from the server's response to an upgrade token request.

  Returns:
    The raw token value string to use in an AuthSubToken object, or None if
    no 'Token=' line was present.
  """
  for line in http_body.splitlines():
    if line.startswith('Token='):
      # Drop the leading 'Token=' prefix (6 characters).
      return line[6:]
  return None
class AuthSubToken(object):
  """Holds an AuthSub token and applies it to outgoing requests."""

  def __init__(self, token_string, scopes=None):
    self.token_string = token_string
    self.scopes = scopes or []

  def modify_request(self, http_request):
    """Sets Authorization header, allows app to act on the user's behalf."""
    http_request.headers['Authorization'] = '%s%s' % (AUTHSUB_AUTH_LABEL,
        self.token_string)

  ModifyRequest = modify_request

  def from_url(str_or_uri):
    """Creates a new AuthSubToken using information in the URL.

    Uses auth_sub_string_from_url.

    Args:
      str_or_uri: The current page's URL (as a str or atom.http_core.Uri)
          which should contain a token query parameter since the Google
          auth server redirected the user's browser to this URL.
    """
    token, scopes = auth_sub_string_from_url(str_or_uri)
    return AuthSubToken(token, scopes)

  from_url = staticmethod(from_url)
  FromUrl = from_url

  def _upgrade_token(self, http_body):
    """Replaces the token value with a session token from the auth server.

    Uses the response of a token upgrade request to modify this token.
    Uses auth_sub_string_from_body.
    """
    self.token_string = auth_sub_string_from_body(http_body)
# Functions and classes for Secure-mode AuthSub
def build_auth_sub_data(http_request, timestamp, nonce):
  """Creates the data string which must be RSA-signed in secure requests.

  For more details see the documentation on secure AuthSub requests:
  http://code.google.com/apis/accounts/docs/AuthSub.html#signingrequests

  Args:
    http_request: The request being made to the server.  The Request's URL
        must be complete before this signature is calculated as any changes
        to the URL will invalidate the signature.
    timestamp: Integer representing the time the request is sent.  The
        timestamp should be expressed in number of seconds after January 1,
        1970 00:00:00 GMT.
    nonce: str Random 64-bit, unsigned number encoded as an ASCII string in
        decimal format.  The nonce/timestamp pair should always be unique
        to prevent replay attacks.
  """
  parts = (http_request.method, str(http_request.uri), str(timestamp), nonce)
  return '%s %s %s %s' % parts
def generate_signature(data, rsa_key):
  """Signs the data string for a secure AuthSub request.

  Returns the base64-encoded RSA signature of `data` computed with the
  supplied private key.
  """
  import base64
  try:
    from tlslite.utils import keyfactory
  except ImportError:
    # Fall back to the copy of tlslite bundled with gdata.
    from gdata.tlslite.utils import keyfactory
  key = keyfactory.parsePrivateKey(rsa_key)
  return base64.b64encode(key.hashAndSign(data))
class SecureAuthSubToken(AuthSubToken):
  """An AuthSub token that RSA-signs every request (secure mode)."""

  def __init__(self, token_string, rsa_private_key, scopes=None):
    self.token_string = token_string
    self.scopes = scopes or []
    self.rsa_private_key = rsa_private_key

  def from_url(str_or_uri, rsa_private_key):
    """Creates a new SecureAuthSubToken using information in the URL.

    Uses auth_sub_string_from_url.

    Args:
      str_or_uri: The current page's URL (as a str or atom.http_core.Uri)
          which should contain a token query parameter since the Google auth
          server redirected the user's browser to this URL.
      rsa_private_key: str the private RSA key cert used to sign all
          requests made with this token.
    """
    token, scopes = auth_sub_string_from_url(str_or_uri)
    return SecureAuthSubToken(token, rsa_private_key, scopes)

  from_url = staticmethod(from_url)
  FromUrl = from_url

  def modify_request(self, http_request):
    """Sets the Authorization header and includes a digital signature.

    Calculates a digital signature using the private RSA key, a timestamp
    (uses now at the time this method is called) and a random nonce.

    Args:
      http_request: The atom.http_core.HttpRequest which contains all of
          the information needed to send a request to the remote server.
          The URL and the method of the request must be already set and
          cannot be changed after this token signs the request, or the
          signature will not be valid.
    """
    timestamp = str(int(time.time()))
    # 15 random decimal digits used as the anti-replay nonce.
    nonce = ''.join([str(random.randint(0, 9)) for i in xrange(15)])
    data = build_auth_sub_data(http_request, timestamp, nonce)
    signature = generate_signature(data, self.rsa_private_key)
    http_request.headers['Authorization'] = (
        '%s%s sigalg="rsa-sha1" data="%s" sig="%s"' % (AUTHSUB_AUTH_LABEL,
            self.token_string, data, signature))

  ModifyRequest = modify_request
# OAuth functions and classes.
# OAuth 1.0 signature method identifiers, sent as the
# oauth_signature_method parameter (see build_oauth_base_string below).
RSA_SHA1 = 'RSA-SHA1'
HMAC_SHA1 = 'HMAC-SHA1'
def build_oauth_base_string(http_request, consumer_key, nonce, signaure_type,
                            timestamp, version, next='oob', token=None,
                            verifier=None):
  """Generates the base string to be signed in the OAuth request.

  Args:
    http_request: The request being made to the server. The Request's URL
        must be complete before this signature is calculated as any changes
        to the URL will invalidate the signature.
    consumer_key: Domain identifying the third-party web application. This is
        the domain used when registering the application with Google. It
        identifies who is making the request on behalf of the user.
    nonce: Random 64-bit, unsigned number encoded as an ASCII string in
        decimal format. The nonce/timestamp pair should always be unique to
        prevent replay attacks.
    signaure_type: either RSA_SHA1 or HMAC_SHA1. (Parameter name keeps its
        historical misspelling for backward compatibility with keyword
        callers.)
    timestamp: Integer representing the time the request is sent. The
        timestamp should be expressed in number of seconds after January 1,
        1970 00:00:00 GMT.
    version: The OAuth version used by the requesting web application. This
        value must be '1.0' or '1.0a'. If not provided, Google assumes
        version 1.0 is in use.
    next: The URL the user should be redirected to after granting access
        to a Google service(s). It can include url-encoded query parameters.
        The default value is 'oob'. (This is the oauth_callback.)
    token: The string for the OAuth request token or OAuth access token.
    verifier: str Sent as the oauth_verifier and required when upgrading a
        request token to an access token.

  Returns:
    str of the form 'METHOD&<escaped request URL>&<escaped sorted params>'
    ready to be signed with HMAC-SHA1 or RSA-SHA1.
  """
  # First we must build the canonical base string for the request.
  params = http_request.uri.query.copy()
  params['oauth_consumer_key'] = consumer_key
  params['oauth_nonce'] = nonce
  params['oauth_signature_method'] = signaure_type
  params['oauth_timestamp'] = str(timestamp)
  if next is not None:
    params['oauth_callback'] = str(next)
  if token is not None:
    params['oauth_token'] = token
  if version is not None:
    params['oauth_version'] = version
  if verifier is not None:
    params['oauth_verifier'] = verifier
  # The key-value pairs must be joined in lexicographically sorted key
  # order. safe='~' means '/' (and everything but unreserved chars) is
  # percent-escaped as well.
  pairs = ['%s=%s' % (urllib.quote(key, safe='~'),
                      urllib.quote(params[key], safe='~'))
           for key in sorted(params)]
  all_parameters = urllib.quote('&'.join(pairs), safe='~')
  normalized_host = http_request.uri.host.lower()
  normalized_scheme = (http_request.uri.scheme or 'http').lower()
  # Only include the port when it differs from the scheme's default
  # (443 for https, 80 for http).
  non_default_port = None
  if (http_request.uri.port is not None
      and ((normalized_scheme == 'https' and http_request.uri.port != 443)
           or (normalized_scheme == 'http' and http_request.uri.port != 80))):
    non_default_port = http_request.uri.port
  path = http_request.uri.path or '/'
  if not path.startswith('/'):
    path = '/%s' % path
  # Set the only safe char in url encoding to ~ since we want to escape /
  # as well.
  if non_default_port is not None:
    request_path = urllib.quote('%s://%s:%s%s' % (
        normalized_scheme, normalized_host, non_default_port, path),
        safe='~')
  else:
    request_path = urllib.quote('%s://%s%s' % (
        normalized_scheme, normalized_host, path), safe='~')
  # TODO: ensure that token escaping logic is correct, not sure if the token
  # value should be double escaped instead of single.
  return '&'.join((http_request.method.upper(), request_path,
                   all_parameters))
def generate_hmac_signature(http_request, consumer_key, consumer_secret,
                            timestamp, nonce, version, next='oob',
                            token=None, token_secret=None, verifier=None):
  """Calculates the HMAC-SHA1 signature for an OAuth request.

  The signing key is '<escaped consumer secret>&<escaped token secret>'
  (the token secret part is empty when no token secret is available), and
  the signed text is the canonical base string for the request.

  Returns:
    The base64-encoded HMAC-SHA1 digest as a str.
  """
  import hmac
  import base64
  base_string = build_oauth_base_string(
      http_request, consumer_key, nonce, HMAC_SHA1, timestamp, version,
      next, token, verifier=verifier)
  # Key is always 'consumer_secret&'; the token secret is appended after
  # the '&' only when it exists.
  secret_part = ''
  if token_secret is not None:
    secret_part = urllib.quote(token_secret, safe='~')
  hash_key = '%s&%s' % (urllib.quote(consumer_secret, safe='~'),
                        secret_part)
  try:
    import hashlib
    digest_maker = hashlib.sha1
  except ImportError:
    # Pre-2.5 Pythons ship the sha module instead of hashlib.
    import sha
    digest_maker = sha
  hashed = hmac.new(hash_key, base_string, digest_maker)
  return base64.b64encode(hashed.digest())
def generate_rsa_signature(http_request, consumer_key, rsa_key,
                           timestamp, nonce, version, next='oob',
                           token=None, token_secret=None, verifier=None):
  """Calculates the RSA-SHA1 signature for an OAuth request.

  Returns:
    The base64-encoded RSA signature of the canonical base string.
  """
  import base64
  try:
    from tlslite.utils import keyfactory
  except ImportError:
    # Fall back to the copy of tlslite bundled with the gdata package.
    from gdata.tlslite.utils import keyfactory
  base_string = build_oauth_base_string(
      http_request, consumer_key, nonce, RSA_SHA1, timestamp, version,
      next, token, verifier=verifier)
  # Sign the canonical base string using the private key.
  signer = keyfactory.parsePrivateKey(rsa_key)
  return base64.b64encode(signer.hashAndSign(base_string))
def generate_auth_header(consumer_key, timestamp, nonce, signature_type,
                         signature, version='1.0', next=None, token=None,
                         verifier=None):
  """Builds the Authorization header to be sent in the request.

  Args:
    consumer_key: Identifies the application making the request (str).
    timestamp: Request time, seconds since the epoch.
    nonce: Random string unique to this timestamp/request.
    signature_type: One of either HMAC_SHA1 or RSA_SHA1.
    signature: The HMAC or RSA signature for the request as a base64
        encoded string.
    version: The version of the OAuth protocol that this request is using.
        Default is '1.0'.
    next: The URL of the page that the user's browser should be sent to
        after they authorize the token. (Optional)
    token: str The OAuth token value to be used in the oauth_token parameter
        of the header.
    verifier: str The OAuth verifier which must be included when you are
        upgrading a request token to an access token.

  Returns:
    str of the form 'OAuth k1="v1", k2="v2", ...'.
  """
  params = {
      'oauth_consumer_key': consumer_key,
      'oauth_version': version,
      'oauth_nonce': nonce,
      'oauth_timestamp': str(timestamp),
      'oauth_signature_method': signature_type,
      'oauth_signature': signature}
  # Optional members are only added when the caller supplied them.
  if next is not None:
    params['oauth_callback'] = str(next)
  if token is not None:
    params['oauth_token'] = token
  if verifier is not None:
    params['oauth_verifier'] = verifier
  fragments = []
  for name, value in params.iteritems():
    fragments.append('%s="%s"' % (name, urllib.quote(value, safe='~')))
  return 'OAuth %s' % (', '.join(fragments))
# Google Accounts endpoints for the OAuth 1.0 token exchange.
REQUEST_TOKEN_URL = 'https://www.google.com/accounts/OAuthGetRequestToken'
ACCESS_TOKEN_URL = 'https://www.google.com/accounts/OAuthGetAccessToken'
def generate_request_for_request_token(
    consumer_key, signature_type, scopes, rsa_key=None, consumer_secret=None,
    auth_server_url=REQUEST_TOKEN_URL, next='oob', version='1.0'):
  """Creates request to be sent to auth server to get an OAuth request token.

  Args:
    consumer_key: Domain identifying the third-party web application.
    signature_type: either RSA_SHA1 or HMAC_SHA1. The rsa_key must be
        provided if the signature type is RSA but if the signature method
        is HMAC, the consumer_secret must be used.
    scopes: List of URL prefixes for the data which we want to access. For
        example, to request access to the user's Blogger and Google Calendar
        data, we would request
        ['http://www.blogger.com/feeds/',
         'https://www.google.com/calendar/feeds/',
         'http://www.google.com/calendar/feeds/']
    rsa_key: Only used if the signature method is RSA_SHA1.
    consumer_secret: Only used if the signature method is HMAC_SHA1.
    auth_server_url: The URL to which the token request should be directed.
        Defaults to 'https://www.google.com/accounts/OAuthGetRequestToken'.
    next: The URL of the page that the user's browser should be sent to
        after they authorize the token. (Optional)
    version: The OAuth version used by the requesting web application.
        Defaults to '1.0'.

  Returns:
    An atom.http_core.HttpRequest object with the URL, Authorization header
    and body filled in, or None when signature_type is neither RSA_SHA1
    nor HMAC_SHA1.
  """
  request = atom.http_core.HttpRequest(auth_server_url, 'POST')
  if scopes:
    # All requested scopes travel in a single space-separated 'scope'
    # query parameter on the auth request URL.
    request.uri.query['scope'] = ' '.join(scopes)
  timestamp = str(int(time.time()))
  nonce = ''.join(str(random.randint(0, 9)) for _ in xrange(15))
  if signature_type == HMAC_SHA1:
    signature = generate_hmac_signature(
        request, consumer_key, consumer_secret, timestamp, nonce, version,
        next=next)
  elif signature_type == RSA_SHA1:
    signature = generate_rsa_signature(
        request, consumer_key, rsa_key, timestamp, nonce, version, next=next)
  else:
    # Unknown signature method: nothing to sign with.
    return None
  request.headers['Authorization'] = generate_auth_header(
      consumer_key, timestamp, nonce, signature_type, signature, version,
      next)
  request.headers['Content-Length'] = '0'
  return request
def generate_request_for_access_token(
    request_token, auth_server_url=ACCESS_TOKEN_URL):
  """Creates a request to ask the OAuth server for an access token.

  Requires a request token which the user has authorized. See the
  documentation on OAuth with Google Data for more details:
  http://code.google.com/apis/accounts/docs/OAuth.html#AccessToken

  Args:
    request_token: An OAuthHmacToken or OAuthRsaToken which the user has
        approved using their browser.
    auth_server_url: (optional) The URL at which the OAuth access token is
        requested. Defaults to
        https://www.google.com/accounts/OAuthGetAccessToken

  Returns:
    A new HttpRequest object which can be sent to the OAuth server to
    request an OAuth Access Token.
  """
  upgrade_request = atom.http_core.HttpRequest(auth_server_url, 'POST')
  upgrade_request.headers['Content-Length'] = '0'
  # The token itself adds the signed Authorization header.
  return request_token.modify_request(upgrade_request)
def oauth_token_info_from_body(http_body):
  """Extracts an OAuth request token from the server's response.

  Returns:
    A tuple of strings containing the OAuth token and token secret. If
    neither of these are present in the body, returns (None, None).
  """
  token = None
  token_secret = None
  # The body is a form-encoded string: name=value pairs joined by '&'.
  for pair in http_body.split('&'):
    name, _, value = pair.partition('=')
    if name == 'oauth_token':
      token = urllib.unquote(value)
    elif name == 'oauth_token_secret':
      token_secret = urllib.unquote(value)
  return (token, token_secret)
def hmac_token_from_body(http_body, consumer_key, consumer_secret,
                         auth_state):
  """Builds an OAuthHmacToken from the server's token response body."""
  token_value, token_secret = oauth_token_info_from_body(http_body)
  return OAuthHmacToken(consumer_key, consumer_secret, token_value,
                        token_secret, auth_state)
def rsa_token_from_body(http_body, consumer_key, rsa_private_key,
                        auth_state):
  """Builds an OAuthRsaToken from the server's token response body."""
  token_value, token_secret = oauth_token_info_from_body(http_body)
  return OAuthRsaToken(consumer_key, rsa_private_key, token_value,
                       token_secret, auth_state)
# 'hd' value that selects a regular Google account rather than a hosted
# (Google Apps) domain.
DEFAULT_DOMAIN = 'default'
OAUTH_AUTHORIZE_URL = 'https://www.google.com/accounts/OAuthAuthorizeToken'
def generate_oauth_authorization_url(
    token, next=None, hd=DEFAULT_DOMAIN, hl=None, btmpl=None,
    auth_server=OAUTH_AUTHORIZE_URL):
  """Creates a URL for the page where the request token can be authorized.

  Args:
    token: str The request token from the OAuth server.
    next: str (optional) URL the user should be redirected to after granting
        access to a Google service(s). It can include url-encoded query
        parameters.
    hd: str (optional) Identifies a particular hosted domain account to be
        accessed (for example, 'mycollege.edu'). Uses 'default' to specify a
        regular Google account ('username@gmail.com').
    hl: str (optional) An ISO 639 country code identifying what language the
        approval page should be translated in (for example, 'hl=en' for
        English). The default is the user's selected language.
    btmpl: str (optional) Forces a mobile version of the approval page. The
        only accepted value is 'mobile'.
    auth_server: str (optional) The start of the token authorization web
        page. Defaults to
        'https://www.google.com/accounts/OAuthAuthorizeToken'

  Returns:
    An atom.http_core.Uri pointing to the token authorization page where the
    user may allow or deny this app to access their Google data.
  """
  uri = atom.http_core.Uri.parse_uri(auth_server)
  uri.query['oauth_token'] = token
  uri.query['hd'] = hd
  # The callback must be a string; the remaining optional parameters are
  # passed through unchanged when present.
  if next is not None:
    uri.query['oauth_callback'] = str(next)
  for name, value in (('hl', hl), ('btmpl', btmpl)):
    if value is not None:
      uri.query[name] = value
  return uri
def oauth_token_info_from_url(url):
  """Extracts an OAuth access token from the redirected page's URL.

  Returns:
    A tuple of strings containing the OAuth token and the OAuth verifier
    which need to be sent when upgrading a request token to an access
    token. Either element is None if missing from the URL.
  """
  if isinstance(url, (str, unicode)):
    url = atom.http_core.Uri.parse_uri(url)
  query = url.query
  token = (urllib.unquote(query['oauth_token'])
           if 'oauth_token' in query else None)
  verifier = (urllib.unquote(query['oauth_verifier'])
              if 'oauth_verifier' in query else None)
  return (token, verifier)
def authorize_request_token(request_token, url):
  """Adds information to request token to allow it to become an access token.

  Modifies the request_token object passed in by setting and unsetting the
  necessary fields to allow this token to form a valid upgrade request.

  Args:
    request_token: The OAuth request token which has been authorized by the
        user. In order for this token to be upgraded to an access token,
        certain fields must be extracted from the URL and added to the token
        so that they can be passed in an upgrade-token request.
    url: The URL of the current page which the user's browser was redirected
        to after they authorized access for the app. This function extracts
        information from the URL which is needed to upgrade the token from
        a request token to an access token.

  Returns:
    The same token object which was passed in.
  """
  new_token, new_verifier = oauth_token_info_from_url(url)
  request_token.token = new_token
  request_token.verifier = new_verifier
  request_token.auth_state = AUTHORIZED_REQUEST_TOKEN
  return request_token

AuthorizeRequestToken = authorize_request_token
def upgrade_to_access_token(request_token, server_response_body):
  """Extracts access token information from response to an upgrade request.

  Once the server has responded with the new token info for the OAuth
  access token, this method modifies the request_token to set and unset
  necessary fields to create valid OAuth authorization headers for requests.

  Args:
    request_token: An OAuth token which this function modifies to allow it
        to be used as an access token.
    server_response_body: str The server's response to an OAuthAuthorizeToken
        request. This should contain the new token and token_secret which
        are used to generate the signature and parameters of the
        Authorization header in subsequent requests to Google Data APIs.

  Returns:
    The same token object which was passed in.
  """
  new_token, new_secret = oauth_token_info_from_body(server_response_body)
  request_token.token = new_token
  request_token.token_secret = new_secret
  request_token.auth_state = ACCESS_TOKEN
  # Clear the callback and verifier: generate_auth_header only includes
  # oauth_callback/oauth_verifier when these are not None, and they must
  # not appear in requests signed with an access token.
  request_token.next = None
  request_token.verifier = None
  return request_token

UpgradeToAccessToken = upgrade_to_access_token
# Lifecycle states stored in a token's auth_state attribute.
REQUEST_TOKEN = 1
AUTHORIZED_REQUEST_TOKEN = 2
ACCESS_TOKEN = 3
class OAuthHmacToken(object):
  """OAuth token which signs requests using an HMAC-SHA1 signature."""

  SIGNATURE_METHOD = HMAC_SHA1

  def __init__(self, consumer_key, consumer_secret, token, token_secret,
               auth_state, next=None, verifier=None):
    self.consumer_key = consumer_key
    self.consumer_secret = consumer_secret
    self.token = token
    self.token_secret = token_secret
    self.auth_state = auth_state
    self.next = next
    self.verifier = verifier  # Used to convert request token to access token.

  def generate_authorization_url(
      self, google_apps_domain=DEFAULT_DOMAIN, language=None, btmpl=None,
      auth_server=OAUTH_AUTHORIZE_URL):
    """Creates the URL at which the user can authorize this app to access.

    Args:
      google_apps_domain: str (optional) If the user should be signing in
          using an account under a known Google Apps domain, provide the
          domain name ('example.com') here. If not provided, 'default'
          will be used, and the user will be prompted to select an account
          if they are signed in with a Google Account and Google Apps
          accounts.
      language: str (optional) An ISO 639 country code identifying what
          language the approval page should be translated in (for example,
          'en' for English). The default is the user's selected language.
      btmpl: str (optional) Forces a mobile version of the approval page.
          The only accepted value is 'mobile'.
      auth_server: str (optional) The start of the token authorization web
          page. Defaults to
          'https://www.google.com/accounts/OAuthAuthorizeToken'
    """
    return generate_oauth_authorization_url(
        self.token, hd=google_apps_domain, hl=language, btmpl=btmpl,
        auth_server=auth_server)

  GenerateAuthorizationUrl = generate_authorization_url

  def modify_request(self, http_request):
    """Sets the Authorization header in the HTTP request using the token.

    Calculates an HMAC signature using the information in the token to
    indicate that the request came from this application and that this
    application has permission to access a particular user's data.

    Returns:
      The same HTTP request object which was passed in.
    """
    request_time = str(int(time.time()))
    # The nonce is 15 random decimal digits.
    nonce = ''.join(str(random.randint(0, 9)) for _ in xrange(15))
    signature = generate_hmac_signature(
        http_request, self.consumer_key, self.consumer_secret, request_time,
        nonce, version='1.0', next=self.next, token=self.token,
        token_secret=self.token_secret, verifier=self.verifier)
    http_request.headers['Authorization'] = generate_auth_header(
        self.consumer_key, request_time, nonce, HMAC_SHA1, signature,
        version='1.0', next=self.next, token=self.token,
        verifier=self.verifier)
    return http_request

  ModifyRequest = modify_request
class OAuthRsaToken(OAuthHmacToken):
  """OAuth token which signs requests using an RSA-SHA1 signature."""

  SIGNATURE_METHOD = RSA_SHA1

  def __init__(self, consumer_key, rsa_private_key, token, token_secret,
               auth_state, next=None, verifier=None):
    self.consumer_key = consumer_key
    self.rsa_private_key = rsa_private_key
    self.token = token
    self.token_secret = token_secret
    self.auth_state = auth_state
    self.next = next
    self.verifier = verifier  # Used to convert request token to access token.

  def modify_request(self, http_request):
    """Sets the Authorization header in the HTTP request using the token.

    Calculates an RSA signature using the information in the token to
    indicate that the request came from this application and that this
    application has permission to access a particular user's data.

    Returns:
      The same HTTP request object which was passed in.
    """
    request_time = str(int(time.time()))
    # The nonce is 15 random decimal digits.
    nonce = ''.join(str(random.randint(0, 9)) for _ in xrange(15))
    signature = generate_rsa_signature(
        http_request, self.consumer_key, self.rsa_private_key, request_time,
        nonce, version='1.0', next=self.next, token=self.token,
        token_secret=self.token_secret, verifier=self.verifier)
    http_request.headers['Authorization'] = generate_auth_header(
        self.consumer_key, request_time, nonce, RSA_SHA1, signature,
        version='1.0', next=self.next, token=self.token,
        verifier=self.verifier)
    return http_request

  ModifyRequest = modify_request
def _join_token_parts(*args):
  """Escapes and combines all strings passed in.

  Used to convert a token object's members into a string instead of
  using pickle.

  Note: A None value will be converted to an empty string.

  Returns:
    A string in the form 1x|member1|member2|member3...
  """
  escaped = [urllib.quote_plus(part or '') for part in args]
  return '|'.join(escaped)
def _split_token_parts(blob):
  """Extracts and unescapes fields from the provided binary string.

  Reverses the packing performed by _join_token_parts. Used to extract
  the members of a token object.

  Note: An empty string from the blob will be interpreted as None.

  Args:
    blob: str A string of the form 1x|member1|member2|member3 as created
        by _join_token_parts

  Returns:
    A list of unescaped strings (None for fields that were empty).
  """
  fields = blob.split('|')
  return [urllib.unquote_plus(field) or None for field in fields]
def token_to_blob(token):
  """Serializes the token data as a string for storage in a datastore.

  Supported token classes: ClientLoginToken, AuthSubToken,
  SecureAuthSubToken, OAuthRsaToken, and OAuthHmacToken.

  Args:
    token: A token object which must be of one of the supported token
        classes.

  Raises:
    UnsupportedTokenType if the token is not one of the supported token
    classes listed above.

  Returns:
    A string representing this token. The string can be converted back into
    an equivalent token object using token_from_blob. Note that any members
    which are set to '' will be set to None when the token is deserialized
    by token_from_blob.
  """
  # Subclasses must be checked before their base classes so the most
  # specific marker wins: SecureAuthSubToken before AuthSubToken, and
  # OAuthRsaToken before OAuthHmacToken.
  if isinstance(token, ClientLoginToken):
    return _join_token_parts('1c', token.token_string)
  elif isinstance(token, SecureAuthSubToken):
    return _join_token_parts('1s', token.token_string, token.rsa_private_key,
                             *token.scopes)
  elif isinstance(token, AuthSubToken):
    return _join_token_parts('1a', token.token_string, *token.scopes)
  elif isinstance(token, OAuthRsaToken):
    return _join_token_parts(
        '1r', token.consumer_key, token.rsa_private_key, token.token,
        token.token_secret, str(token.auth_state), token.next,
        token.verifier)
  elif isinstance(token, OAuthHmacToken):
    return _join_token_parts(
        '1h', token.consumer_key, token.consumer_secret, token.token,
        token.token_secret, str(token.auth_state), token.next,
        token.verifier)
  raise UnsupportedTokenType(
      'Unable to serialize token of type %s' % type(token))

TokenToBlob = token_to_blob
def token_from_blob(blob):
  """Deserializes a token string from the datastore back into a token object.

  Supported token classes: ClientLoginToken, AuthSubToken,
  SecureAuthSubToken, OAuthRsaToken, and OAuthHmacToken.

  Args:
    blob: string created by token_to_blob.

  Raises:
    UnsupportedTokenType if the token is not one of the supported token
    classes listed above.

  Returns:
    A new token object with members set to the values serialized in the
    blob string. Note that any members which were set to '' in the original
    token will now be None.
  """
  fields = _split_token_parts(blob)
  # fields[0] is the type marker written by token_to_blob.
  marker = fields[0]
  if marker == '1c':
    return ClientLoginToken(fields[1])
  elif marker == '1a':
    return AuthSubToken(fields[1], fields[2:])
  elif marker == '1s':
    return SecureAuthSubToken(fields[1], fields[2], fields[3:])
  elif marker == '1r':
    return OAuthRsaToken(fields[1], fields[2], fields[3], fields[4],
                         int(fields[5]), fields[6], fields[7])
  elif marker == '1h':
    return OAuthHmacToken(fields[1], fields[2], fields[3], fields[4],
                          int(fields[5]), fields[6], fields[7])
  raise UnsupportedTokenType(
      'Unable to deserialize token with type marker of %s' % marker)

TokenFromBlob = token_from_blob
def dump_tokens(tokens):
  """Serializes a sequence of tokens into one comma-separated string."""
  return ','.join(token_to_blob(t) for t in tokens)
def load_tokens(blob):
  """Deserializes a comma-separated string produced by dump_tokens."""
  return [token_from_blob(part) for part in blob.split(',')]
def ae_save(token, token_key):
  """Stores a serialized token in the App Engine datastore.

  The datastore key is 'gd_auth_token' prefixed onto token_key.
  """
  import gdata.alt.app_engine
  return gdata.alt.app_engine.set_token(
      'gd_auth_token' + token_key, token_to_blob(token))

AeSave = ae_save
def ae_load(token_key):
  """Loads a token stored by ae_save; returns None when no token is found."""
  import gdata.alt.app_engine
  stored = gdata.alt.app_engine.get_token('gd_auth_token' + token_key)
  if stored is None:
    return None
  return token_from_blob(stored)

AeLoad = ae_load
def ae_delete(token_key):
  """Removes the token stored under token_key from the App Engine datastore."""
  import gdata.alt.app_engine
  gdata.alt.app_engine.delete_token('gd_auth_token' + token_key)

AeDelete = ae_delete
| apache-2.0 |
halberom/ansible-modules-core | files/fetch.py | 13 | 3495 | # this is a virtual module that is entirely implemented server side
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: fetch
short_description: Fetches a file from remote nodes
description:
- This module works like M(copy), but in reverse. It is used for fetching
files from remote machines and storing them locally in a file tree,
organized by hostname. Note that this module is written to transfer
log files that might not be present, so a missing remote file won't
be an error unless fail_on_missing is set to 'yes'.
version_added: "0.2"
options:
src:
description:
- The file on the remote system to fetch. This I(must) be a file, not a
directory. Recursive fetching may be supported in a later release.
required: true
default: null
aliases: []
dest:
description:
- A directory to save the file into. For example, if the I(dest)
directory is C(/backup) a I(src) file named C(/etc/profile) on host
C(host.example.com), would be saved into
C(/backup/host.example.com/etc/profile)
required: true
default: null
fail_on_missing:
version_added: "1.1"
description:
- Makes the task fail when the source file is missing.
required: false
choices: [ "yes", "no" ]
default: "no"
validate_checksum:
version_added: "1.4"
description:
- Verify that the source and destination checksums match after the files are fetched.
required: false
choices: [ "yes", "no" ]
default: "yes"
aliases: [ "validate_md5" ]
flat:
version_added: "1.2"
description:
- Allows you to override the default behavior of appending
hostname/path/to/file to the destination. If dest ends with '/', it
will use the basename of the source file, similar to the copy module.
Obviously this is only handy if the filenames are unique.
requirements: []
author:
- "Ansible Core Team"
- "Michael DeHaan"
notes:
- When running fetch with C(become), the M(slurp) module will also be
used to fetch the contents of the file for determining the remote
checksum. This effectively doubles the transfer size, and
depending on the file size can consume all available memory on the
remote or local hosts causing a C(MemoryError). Due to this it is
advisable to run this module without C(become) whenever possible.
'''
EXAMPLES = '''
# Store file into /tmp/fetched/host.example.com/tmp/somefile
- fetch: src=/tmp/somefile dest=/tmp/fetched
# Specifying a path directly
- fetch: src=/tmp/somefile dest=/tmp/prefix-{{ inventory_hostname }} flat=yes
# Specifying a destination path
- fetch: src=/tmp/uniquefile dest=/tmp/special/ flat=yes
# Storing in a path relative to the playbook
- fetch: src=/tmp/uniquefile dest=special/prefix-{{ inventory_hostname }} flat=yes
'''
| gpl-3.0 |
cloudControl/libcloud | docs/conf.py | 29 | 8465 | # -*- coding: utf-8 -*-
#
# Apache Libcloud documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 31 12:16:27 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import subprocess
# Detect if we are running on read the docs
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
    # Read the Docs does not run the project's Makefile, so generate the
    # API reference pages (max depth 3) with sphinx-apidoc at build time.
    cmd = 'sphinx-apidoc -d 3 -o apidocs/ ../libcloud/'
    subprocess.call(cmd, shell=True)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))  # make the libcloud package importable for autodoc
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# autodoc pulls API docs from docstrings, intersphinx cross-links to other
# projects' documentation, viewcode links rendered docs to highlighted source.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
              'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Apache Libcloud'
copyright = u'2013, The Apache Software Foundation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.14.0'
# The full version, including alpha/beta/rc tags.
release = '0.14.0-dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# '*/_*.rst' keeps underscore-prefixed partial/include files out of the build.
exclude_patterns = [
    '_build',
    '*/_*.rst'
]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# On Read the Docs use its default theme; local builds use 'nature'.
if on_rtd:
    html_theme = 'default'
    RTD_NEW_THEME = True
else:
    html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static', '_static/images/']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ApacheLibclouddoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ApacheLibcloud.tex', u'Apache Libcloud Documentation',
u'The Apache Software Foundation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'apachelibcloud', u'Apache Libcloud Documentation',
[u'The Apache Software Foundation'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ApacheLibcloud', u'Apache Libcloud Documentation',
u'The Apache Software Foundation', 'ApacheLibcloud', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# Concatenate the class docstring and __init__ docstring in autoclass output.
autoclass_content = 'both'
| apache-2.0 |
gacarrillor/QGIS | tests/src/python/test_qgszonalstatistics.py | 11 | 6679 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsZonalStatistics.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Alexander Bruy'
__date__ = '15/07/2013'
__copyright__ = 'Copyright 2013, The QGIS Project'
import qgis # NOQA
import os
import shutil
from qgis.PyQt.QtCore import QDir, QFile, QTemporaryDir
from qgis.core import QgsVectorLayer, QgsRasterLayer, QgsFeature, QgsFeatureRequest, QgsGeometry
from qgis.analysis import QgsZonalStatistics
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
# Spin up a single QGIS application instance shared by the whole test module.
start_app()
# Base directory that holds the shared QGIS unit-test fixture data.
TEST_DATA_DIR = unitTestDataPath()
class TestQgsZonalStatistics(unittest.TestCase):
    """Tests for the QgsZonalStatistics analysis class."""

    # Expected attribute values (field indices 1..11) per feature id, as
    # produced by running QgsZonalStatistics.All over the fixture layers.
    # NOTE(review): the field order follows the order in which
    # QgsZonalStatistics.All appends attributes -- confirm against the
    # QgsZonalStatistics documentation before extending this table.
    EXPECTED_STATS = {
        0: [12.0, 8.0, 0.666666666666667, 1.0, 0.47140452079103201,
            0.0, 1.0, 1.0, 0.0, 1.0, 2.0],
        1: [9.0, 5.0, 0.555555555555556, 1.0, 0.49690399499995302,
            0.0, 1.0, 1.0, 0.0, 1.0, 2.0],
        2: [6.0, 5.0, 0.833333333333333, 1.0, 0.372677996249965,
            0.0, 1.0, 1.0, 0.0, 1.0, 2.0],
    }
    # Field indices holding derived float statistics; these are compared with
    # the same 1e-5 tolerance the original hand-written asserts used.
    APPROX_FIELDS = (3, 5)

    def testStatistics(self):
        """Compute all zonal statistics on the fixture data and validate
        every output attribute of every feature.

        Replaces ~90 copy-pasted bare ``assert`` statements with a data
        table; bare asserts also became unittest assertions so the checks
        are not silently stripped under ``python -O``.
        """
        test_data_dir = unitTestDataPath() + "/zonalstatistics/"
        temp_path = QDir.tempPath() + "/"
        # Work on a scratch copy so the shared fixture files stay pristine.
        for name in QDir(test_data_dir).entryList(QDir.Files):
            QFile.remove(temp_path + name)
            QFile.copy(test_data_dir + name, temp_path + name)

        vector = QgsVectorLayer(temp_path + "polys.shp", "poly", "ogr")
        raster = QgsRasterLayer(temp_path + "edge_problem.asc", "raster", "gdal")
        zs = QgsZonalStatistics(vector, raster, "", 1, QgsZonalStatistics.All)
        zs.calculateStatistics(None)

        request = QgsFeatureRequest()
        for fid in sorted(self.EXPECTED_STATS):
            request.setFilterFid(fid)
            feat = next(vector.getFeatures(request))
            for field, expected in enumerate(self.EXPECTED_STATS[fid], start=1):
                msg = 'fid %d, field %d: expected %f, got %f' % (
                    fid, field, expected, feat[field])
                if field in self.APPROX_FIELDS:
                    self.assertAlmostEqual(feat[field], expected,
                                           delta=0.00001, msg=msg)
                else:
                    self.assertEqual(feat[field], expected, msg)

    def test_enum_conversion(self):
        """Test regression GH #43245: a combined Statistic flag passed from
        Python must survive the enum round trip into calculateStatistics()."""
        tmp = QTemporaryDir()
        origin = os.path.join(TEST_DATA_DIR, 'raster', 'band1_byte_ct_epsg4326.tif')
        dest = os.path.join(tmp.path(), 'band1_byte_ct_epsg4326.tif')
        # Copy the raster into a temp dir so any sidecar files QGIS writes
        # do not pollute the fixture tree.
        shutil.copyfile(origin, dest)
        layer = QgsRasterLayer(dest, 'rast', 'gdal')
        stats = QgsZonalStatistics.calculateStatistics(
            layer.dataProvider(),
            QgsGeometry.fromWkt(layer.extent().asWktPolygon()),
            layer.rasterUnitsPerPixelX(),
            layer.rasterUnitsPerPixelY(),
            1,
            QgsZonalStatistics.Statistic.Max | QgsZonalStatistics.Statistic.Median
        )
        self.assertEqual(sorted(list(stats.keys())), [QgsZonalStatistics.Statistic.Median, QgsZonalStatistics.Statistic.Max])
        self.assertEqual(stats[QgsZonalStatistics.Statistic.Median], 142.0)
        self.assertEqual(stats[QgsZonalStatistics.Statistic.Max], 254.0)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
tako0910/m7-kernel | tools/perf/python/twatch.py | 7370 | 1334 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
    # Monitor task (fork/exit) and comm (rename) events across every CPU and
    # thread, printing each event as it arrives.  Python 2 only (print stmt).
    cpus = perf.cpu_map()
    threads = perf.thread_map()
    # NOTE(review): perf.SAMPLE_TID appears twice in sample_type; the second
    # occurrence is redundant -- possibly SAMPLE_TIME was intended. Confirm
    # against the perf python binding before changing.
    evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
                       wakeup_events = 1, watermark = 1,
                       sample_id_all = 1,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
    evsel.open(cpus = cpus, threads = threads);
    evlist = perf.evlist(cpus, threads)
    evlist.add(evsel)
    evlist.mmap()
    # Block on poll() forever, draining at most one event per CPU per wakeup.
    while True:
        evlist.poll(timeout = -1)
        for cpu in cpus:
            event = evlist.read_on_cpu(cpu)
            if not event:
                continue
            # Trailing comma keeps the event payload on the same output line.
            print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
                                                    event.sample_pid,
                                                    event.sample_tid),
            print event
if __name__ == '__main__':
main()
| gpl-2.0 |
Dhivyap/ansible | test/units/module_utils/xenserver/test_netaddr_functions.py | 21 | 5154 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2019, Bojan Vitnik <bvitnik@mainstream.rs>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from ansible.module_utils.common.network import is_mac
# Each table below is a list of (candidate string, expected verdict) pairs
# consumed by @pytest.mark.parametrize in the test functions further down.

# MAC addresses: either '-' or ':' separators, upper or lower case, are valid.
testcase_is_valid_mac_addr = [
    ('A4-23-8D-F8-C9-E5', True),
    ('35:71:F4:11:0B:D8', True),
    ('b3-bd-20-59-0c-cf', True),
    ('32:61:ca:65:f1:f4', True),
    ('asdf', False),
    ('A4-23-8D-G8-C9-E5', False),
    ('A4-3-8D-F8-C9-E5', False),
    ('A4-23-88D-F8-C9-E5', False),
    ('A4-23-8D-F8-C9_E5', False),
    ('A4-23--8D-F8-C9-E5', False),
]

# Dotted-quad IPv4 addresses; each octet must be in range.
testcase_is_valid_ip_addr = [
    ('0.0.0.0', True),
    ('10.0.0.1', True),
    ('192.168.0.1', True),
    ('255.255.255.255', True),
    ('asdf', False),
    ('a.b.c.d', False),
    ('345.345.345.345', False),
    ('-10.0.0.1', False),
]

# IPv4 netmasks: only contiguous-ones masks are valid (255.0.248.0 is not).
testcase_is_valid_ip_netmask = [
    ('240.0.0.0', True),
    ('255.224.0.0', True),
    ('255.255.248.0', True),
    ('255.255.255.255', True),
    ('asdf', False),
    ('a.b.c.d', False),
    ('192.168.0.1', False),
    ('255.0.248.0', False),
]

# IPv4 prefix lengths: integer strings in the 0-32 range.
testcase_is_valid_ip_prefix = [
    ('0', True),
    ('16', True),
    ('24', True),
    ('32', True),
    ('asdf', False),
    ('-10', False),
    ('60', False),
    ('60s', False),
]
def _with_ids(params):
    """Build a pytest parametrize table: ``{'params': pairs, 'ids': ...}``.

    The hand-written ``ids`` lists merely repeated the first element of each
    params tuple; deriving them removes the chance of the two lists drifting
    out of sync when new cases are added.
    """
    return {"params": params, "ids": [param[0] for param in params]}

# (prefix, expected netmask) pairs; invalid prefixes convert to ''.
testcase_ip_prefix_to_netmask = _with_ids([
    ('0', '0.0.0.0'),
    ('8', '255.0.0.0'),
    ('11', '255.224.0.0'),
    ('16', '255.255.0.0'),
    ('21', '255.255.248.0'),
    ('24', '255.255.255.0'),
    ('26', '255.255.255.192'),
    ('32', '255.255.255.255'),
    ('a', ''),
    ('60', ''),
])

# (netmask, expected prefix) pairs; invalid netmasks convert to ''.
testcase_ip_netmask_to_prefix = _with_ids([
    ('0.0.0.0', '0'),
    ('255.0.0.0', '8'),
    ('255.224.0.0', '11'),
    ('255.255.0.0', '16'),
    ('255.255.248.0', '21'),
    ('255.255.255.0', '24'),
    ('255.255.255.192', '26'),
    ('255.255.255.255', '32'),
    ('a', ''),
    ('60', ''),
])
# IPv6 addresses, including compressed '::' forms; at most one '::' allowed.
testcase_is_valid_ip6_addr = [
    ('::1', True),
    ('2001:DB8:0:0:8:800:200C:417A', True),
    ('2001:DB8::8:800:200C:417A', True),
    ('FF01::101', True),
    ('asdf', False),
    ('2001:DB8:0:0:8:800:200C:417A:221', False),
    ('FF01::101::2', False),
    ('2001:db8:85a3::8a2e:370k:7334', False),
]

# IPv6 prefix lengths: integer strings in the 0-128 range.
testcase_is_valid_ip6_prefix = [
    ('0', True),
    ('56', True),
    ('78', True),
    ('128', True),
    ('asdf', False),
    ('-10', False),
    ('345', False),
    ('60s', False),
]
@pytest.mark.parametrize('mac_addr, result', testcase_is_valid_mac_addr)
def test_is_valid_mac_addr(xenserver, mac_addr, result):
    """Check that is_mac() classifies each sample MAC address as expected."""
    verdict = is_mac(mac_addr)
    assert verdict is result
@pytest.mark.parametrize('ip_addr, result', testcase_is_valid_ip_addr)
def test_is_valid_ip_addr(xenserver, ip_addr, result):
    """Check is_valid_ip_addr() verdicts for the IPv4 address samples."""
    verdict = xenserver.is_valid_ip_addr(ip_addr)
    assert verdict is result
@pytest.mark.parametrize('ip_netmask, result', testcase_is_valid_ip_netmask)
def test_is_valid_ip_netmask(xenserver, ip_netmask, result):
    """Check is_valid_ip_netmask() verdicts for the IPv4 netmask samples."""
    verdict = xenserver.is_valid_ip_netmask(ip_netmask)
    assert verdict is result
@pytest.mark.parametrize('ip_prefix, result', testcase_is_valid_ip_prefix)
def test_is_valid_ip_prefix(xenserver, ip_prefix, result):
    """Check is_valid_ip_prefix() verdicts for the IPv4 prefix samples."""
    verdict = xenserver.is_valid_ip_prefix(ip_prefix)
    assert verdict is result
@pytest.mark.parametrize('ip_prefix, ip_netmask', testcase_ip_prefix_to_netmask['params'], ids=testcase_ip_prefix_to_netmask['ids'])
def test_ip_prefix_to_netmask(xenserver, ip_prefix, ip_netmask):
    """Check prefix-to-netmask conversion against the expected table."""
    converted = xenserver.ip_prefix_to_netmask(ip_prefix)
    assert converted == ip_netmask
@pytest.mark.parametrize('ip_netmask, ip_prefix', testcase_ip_netmask_to_prefix['params'], ids=testcase_ip_netmask_to_prefix['ids'])
def test_ip_netmask_to_prefix(xenserver, ip_netmask, ip_prefix):
    """Check netmask-to-prefix conversion against the expected table."""
    converted = xenserver.ip_netmask_to_prefix(ip_netmask)
    assert converted == ip_prefix
@pytest.mark.parametrize('ip6_addr, result', testcase_is_valid_ip6_addr)
def test_is_valid_ip6_addr(xenserver, ip6_addr, result):
    """Check is_valid_ip6_addr() verdicts for the IPv6 address samples."""
    verdict = xenserver.is_valid_ip6_addr(ip6_addr)
    assert verdict is result
@pytest.mark.parametrize('ip6_prefix, result', testcase_is_valid_ip6_prefix)
def test_is_valid_ip6_prefix(xenserver, ip6_prefix, result):
    """Check is_valid_ip6_prefix() verdicts for the IPv6 prefix samples."""
    verdict = xenserver.is_valid_ip6_prefix(ip6_prefix)
    assert verdict is result
| gpl-3.0 |
nickdex/cosmos | code/artificial_intelligence/src/artificial_neural_network/ann.py | 3 | 1384 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_csv("dataset.csv")
X = dataset.iloc[:, 3:13].values
y = dataset.iloc[:, 13].values
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X_1 = LabelEncoder()
X[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])
labelencoder_X_2 = LabelEncoder()
X[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])
onehotencoder = OneHotEncoder(categorical_features=[1])
X = onehotencoder.fit_transform(X).toarray()
X = X[:, 1:]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
import keras
from keras.models import Sequential
from keras.layers import Dense
classifier = Sequential()
classifier.add(
Dense(units=6, kernel_initializer="uniform", activation="relu", input_dim=11)
)
classifier.add(Dense(units=6, kernel_initializer="uniform", activation="relu"))
classifier.add(Dense(units=1, kernel_initializer="uniform", activation="sigmoid"))
classifier.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
classifier.fit(X_train, y_train, batch_size=10, epochs=100)
y_pred = classifier.predict(X_test)
y_pred = y_pred > 0.5
| gpl-3.0 |
s-gv/rnicu-webapp | rnicu-webapp/rnicu.py | 2 | 20420 | '''
rnicu-webapp accepts sensor data securely, makes them available anonymously and helps to visualize the sensor data
Copyright (C) 2013 Sagar G V
E-mail : sagar.writeme@gmail.com
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import webapp2
import jinja2
import os
import cgi
from google.appengine.ext import db
from google.appengine.api import users
import re
import Crypto.Cipher.AES as AES
from Crypto import Random
import math
import json
# Regex of accepted sensor types; also spliced verbatim into the URL routes.
sensor_types = r'(temperature|SpO2)' # example: r'(temp|spo|hr)'
# Background plot bands per sensor type for the graph page.
plot_bands = { # [(from,to,'color','label'),..]
    'temperature' : [(-100,35,'rgba(68, 170, 213, 0.2)','hypothermia'),(35,37.8,'rgba(0, 255, 0, 0.2)','normal'),(37.8,100,'rgba(255, 0, 0, 0.2)','fever')]
}
# When True, /sensor/update expects hex-encoded AES-CBC encrypted payloads.
sensor_data_encryption = False
# SECURITY NOTE(review): the AES key and auth signature are hard-coded in
# source control; they should live in a secret store outside the repo.
key = 'rnicusecretpaswd'
signature = 'rnicuprojectauthsignature'
JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    extensions=['jinja2.ext.autoescape'])
''' See this for a visualization of the schema. https://github.com/s-gv/rnicu-webapp/wiki/Database-Schema '''
class User(db.Model):
    """Models the User table"""
    userName = db.StringProperty()      # Google account nickname, used for lookups
    isDoctor = db.BooleanProperty()     # may view patients assigned to them
    isDispenser = db.BooleanProperty()  # may register patients and map sensor tags
class Patient(db.Model):
    """Models the Patient table"""
    patientName = db.StringProperty()
    patientFatherName = db.StringProperty()
    location = db.StringProperty()
    doctorId = db.StringProperty()   # userName of the doctor caring for this patient
    notes = db.StringProperty()
    sensorId = db.StringProperty()   # comma-separated list of sensor tag ids
class SensorPatientMap(db.Model):
    ''' Maps SensorID to PatientID (one row per sensor tag, re-pointed when
    a tag is reassigned to a new patient) '''
    sensorId = db.StringProperty()
    patientId = db.IntegerProperty()
class SensorData(db.Model):
    ''' Stores Sensor data for all patients '''
    patientId = db.IntegerProperty()
    type = db.StringProperty()  # one of the types in the sensor_types regex
    ind = db.IntegerProperty() # increased if entity size gets too large and a new entity is created
    times = db.ListProperty(long)   # timestamps, parallel to vals
    vals = db.ListProperty(float)   # readings, parallel to times
class MainPage(webapp2.RequestHandler):
    ''' /
    This is the main page. If the user is logged in, the dashboard is displayed.
    Otherwise, a link for anonymous data access is presented.
    '''
    def get(self):
        user = users.get_current_user()
        if not user:
            # Anonymous visitor: render the landing page with a login link.
            self.response.headers['Content-Type'] = 'text/html'
            template_values = {
                'loginURL' : users.create_login_url(self.request.uri)
            }
            template = JINJA_ENVIRONMENT.get_template('blank.html')
            self.response.write(template.render(template_values))
            return
        self.response.headers['Content-Type'] = 'text/html'
        # Resolve the rnicu role record once; the original issued the same
        # datastore query twice (once for the dispenser flag, once for the
        # doctor flag), doubling the lookup cost for no benefit.
        isDispenser = False
        isDoctor = False
        doctorID = 0
        u = User.all().filter("userName =", user.nickname()).get()
        if u:
            isDispenser = u.isDispenser
            isDoctor = u.isDoctor
            doctorID = u.key().id()
        # Optional flash-style messages passed via query string.
        err = self.request.get('error')
        note = self.request.get('note')
        template_values = {
            'isAdminUser': users.is_current_user_admin(),
            'logoutURL': users.create_logout_url(self.request.uri),
            'username' : user.nickname(),
            'isDispenser' : isDispenser,
            'isDoctor' : isDoctor,
            'doctorID' : doctorID,
            'note': note,
            'err' : err
        }
        template = JINJA_ENVIRONMENT.get_template('index.html')
        self.response.write(template.render(template_values))
class AdminPage(webapp2.RequestHandler):
    ''' /admin
    This page allows admins to view the list of doctors,tag dispensers in the system
    '''
    def get(self):
        user = users.get_current_user()
        # Optional error message forwarded via the query string.
        err = self.request.get('error')
        if user:
            if users.is_current_user_admin():
                self.response.headers['Content-Type'] = 'text/html'
                template_values = {
                    'username': user.nickname(),
                    'rnicuUsers' : User.all(),
                    'err' : err,
                    'logoutURL': users.create_logout_url(self.request.uri),
                }
                template = JINJA_ENVIRONMENT.get_template('admin.html')
                self.response.write(template.render(template_values))
            else:
                # Logged in but not an admin: force a logout back to home.
                self.redirect(users.create_logout_url('/'))
        else:
            self.redirect(users.create_login_url(self.request.uri))
class UserCreatePage(webapp2.RequestHandler):
    ''' /admin/user/create
    This page allows admins to add a new doctor/tag dispenser to the system
    '''
    def get(self):
        # Direct GETs are meaningless for this form endpoint; go home.
        self.redirect('/')
    def post(self):
        if users.get_current_user() and users.is_current_user_admin():
            new_id = cgi.escape(self.request.get('googleid'))
            # 'type' checkboxes may carry 'doctor' and/or 'dispenser'.
            new_types = self.request.get_all('type')
            new_isDoctor = 'doctor' in new_types
            new_isDispenser = 'dispenser' in new_types
            # Google IDs are restricted to a safe character whitelist.
            if re.match(r'^[a-zA-Z0-9._\-]+$',new_id):
                new_user = User()
                new_user.userName = new_id
                new_user.isDoctor = new_isDoctor
                new_user.isDispenser = new_isDispenser
                new_user.put()
                self.redirect('/admin')
            else:
                self.redirect(cgi.escape('/admin?error=ID Invalid'))
        else:
            self.redirect(cgi.escape('/?error=Need to be admin to do that'))
class UserDeletePage(webapp2.RequestHandler):
    ''' /admin/user/(.*)/delete
    Posting to this route removes the specified doctor/tag dispenser from the system
    '''
    def post(self,google_id):
        #self.response.write(google_id)
        if users.get_current_user() and users.is_current_user_admin():
            # Deleting an unknown id is a silent no-op; we redirect anyway.
            q = User.all().filter("userName =",google_id).get()
            if q:
                q.delete()
            self.redirect('/admin')
        else:
            self.redirect(cgi.escape('/?error=Need to be admin to do that'))
class PatientCreatePage(webapp2.RequestHandler):
    ''' /patient/new
    This page presents a form the tag dispenser can fill up to add a patient to the system
    and associate sensor tags to that patient
    '''
    def get(self):
        user = users.get_current_user()
        if user:
            # Only users flagged as tag dispensers may see the form.
            u = User.all().filter("userName =",user.nickname()).get()
            if u and u.isDispenser:
                err = self.request.get('error')
                note = self.request.get('note')
                self.response.headers['Content-Type'] = 'text/html'
                template_values = {
                    'logoutURL': users.create_logout_url('/'),
                    'err':err,
                    'note':note,
                    'sensorid':self.request.get("sensorid")
                }
                template = JINJA_ENVIRONMENT.get_template('patient_create.html')
                self.response.write(template.render(template_values))
            else:
                self.redirect(cgi.escape('/?error=Need to be a tag dispenser to do that'))
        else:
            self.redirect(users.create_login_url(self.request.uri))
    def post(self):
        user = users.get_current_user()
        if user:
            u = User.all().filter("userName =",user.nickname()).get()
            if u and u.isDispenser:
                # HTML-escape every form field before validation/storage.
                name = cgi.escape(self.request.get('patientName'))
                fname = cgi.escape(self.request.get('patientFatherName'))
                loc = cgi.escape(self.request.get('location'))
                gid = cgi.escape(self.request.get('doctorID'))
                sensorid = cgi.escape(self.request.get('sensorID'))
                notes = cgi.escape(self.request.get('notes'))
                # Whitelist validation; sensorID may be a comma-separated list.
                if re.match(r'^[a-zA-Z0-9._\- ]+$',name) and re.match(r'^[a-zA-Z0-9._\- ]+$',fname) and re.match(r'^[a-zA-Z0-9._\- ]+$',loc) and re.match(r'^[a-zA-Z0-9._\-]+$',gid) and re.match(r'^[,a-zA-Z0-9._\-]+$',sensorid) and re.match(r'^[a-z A-Z0-9._\-]*$',notes):
                    # The referenced doctor must already exist.
                    doc = User.all().filter("userName =",gid).get()
                    if not doc:
                        self.redirect(cgi.escape('/patient/new?error=Doctor not found in the database'))
                        return
                    patient = Patient()
                    patient.patientName = name
                    patient.patientFatherName = fname
                    patient.location = loc
                    patient.doctorId = gid
                    patient.notes = notes
                    patient.sensorId = sensorid
                    patient.put()
                    pid = patient.key().id()
                    # Point every listed sensor tag at the new patient,
                    # creating mapping rows for tags seen for the first time.
                    for sid in sensorid.split(','):
                        sensortagmap = SensorPatientMap.all().filter("sensorId =",sid).get()
                        if not sensortagmap:
                            newtag = SensorPatientMap()
                            newtag.sensorId = sid
                            newtag.patientId = pid
                            newtag.put()
                        else:
                            sensortagmap.patientId = pid
                            sensortagmap.put()
                    self.redirect(cgi.escape('/?note=Tag mapped to patient successfully'))
                else:
                    self.redirect(cgi.escape('/patient/new?error=Invalid data entered'))
            else:
                self.redirect(cgi.escape('/?error=Need to be a tag dispenser to do that'))
        else:
            self.redirect(users.create_login_url(self.request.uri))
class SensorBulkUpdatePage(webapp2.RequestHandler):
    ''' /sensor/bulkupdate
    The gateway POSTs multiple readings to this page to upload a bunch of
    sensor readings at once.

    POST params: sensorid, sensortype, tvpairs (JSON list of [time, value]).
    The response body is a two-digit status code followed by a message.
    '''
    def post(self):
        sensorid = cgi.escape(self.request.get('sensorid'))
        sensortype = cgi.escape(self.request.get('sensortype'))
        # Resolve the patient this sensor tag is currently mapped to.
        q = SensorPatientMap.all().filter('sensorId =',sensorid).get()
        if not q:
            self.response.write('01 Sensor not attached to any known patient')
            return
        patientId = q.patientId
        # Reject unrecognized sensor types.
        if not re.match(sensor_types,sensortype):
            self.response.write('02 Sensor Type not recognized')
            return
        # Parse and normalise the list of (timestamp, value) readings.
        # (The original followed this with a second per-pair conversion loop
        # that duplicated the list comprehension below; it has been removed.)
        try:
            tvpairs = json.loads(cgi.escape(self.request.get('tvpairs')))
            try:
                tvpairs = [(long(t), float(v)) for (t, v) in tvpairs]
            except:
                self.response.write('05 Failed converting (t,v) to type (long,float)')
                return
        except:
            self.response.write('07 JSON parse failed')
            return
        # TODO: decryption if encryption is enabled
        if sensor_data_encryption:
            self.response.write('08 Encrypted bulk transfer not implemented')
            return
        # Append readings, filling the newest SensorData entity up to 1000
        # pairs before rolling over to a fresh entity with the next index.
        e = SensorData.all().filter('patientId =',int(patientId)).filter('type =',str(sensortype)).order('-ind').get()
        last_ind = -1
        while len(tvpairs) > 0:
            if e:
                if len(e.times) < 1000:
                    # Remaining capacity of the current entity.
                    m = 1000 - len(e.times)
                else:
                    # Current entity is full -- start a new one.
                    m = 1000
                    last_ind = e.ind
                    e = SensorData()
                    e.patientId = patientId
                    e.type = sensortype
                    e.ind = last_ind + 1
            else:
                # No data stored yet for this patient/sensor type.
                e = SensorData()
                m = 1000
                e.patientId = patientId
                e.type = sensortype
                e.ind = 0
            chunk = tvpairs[:m]
            tvpairs = tvpairs[m:]
            e.times += [t for (t, v) in chunk]
            e.vals += [v for (t, v) in chunk]
            e.put()
        self.response.write('00 Done! ')
class SensorUpdatePage(webapp2.RequestHandler):
    ''' /sensor/update
    The gateway POSTs to this page to upload a single sensor reading.
    POST params: sensorid, sensortype, time, val.  The response body is a
    two-digit status code followed by a message.
    '''
    def post(self):
        sensorid = cgi.escape(self.request.get('sensorid'))
        sensortype = cgi.escape(self.request.get('sensortype'))
        timestamp = cgi.escape(self.request.get('time'))
        if sensor_data_encryption:
            msg = cgi.escape(self.request.get('val'))
            try:
                # Payload layout: hex IV (2*block_size chars) followed by the
                # hex AES-CBC ciphertext of "<value><signature>".
                iv = msg[:2*AES.block_size].decode('hex')
                value_ciphertext = msg[2*AES.block_size:]
                mode = AES.MODE_CBC
                decryptor = AES.new(key, mode,iv)
                value = decryptor.decrypt(value_ciphertext.decode('hex'))
                self.response.headers['Content-Type'] = 'text/plain'
                # check if the signature is correct
                l = len(signature)
                if len(value) < l or value[-1*l:] != signature:
                    self.response.write('03 Signature incorrect')
                    return
                # Strip the trailing signature and surrounding padding.
                value = value[:-1*l].strip()
            except:
                self.response.write('04 Signature decode error')
                return
        else:
            value = cgi.escape(self.request.get('val'))
        #find patient ID given sensorid
        q = SensorPatientMap.all().filter('sensorId =',sensorid).get()
        if not q:
            self.response.write('01 Sensor not attached to any known patient')
            return
        patientId = q.patientId
        #attach data to the write sensor data table
        if re.match(sensor_types,sensortype):
            try:
                t = long(timestamp)
                v = float(value)
            except:
                self.response.write('05 Failed converting (t,v) to type (long,float)')
                return
            # Fetch the newest entity; each holds at most 1000 samples before
            # rolling over to a new entity with an incremented index.
            e = SensorData.all().filter('patientId =',int(patientId)).filter('type =',str(sensortype)).order('-ind').get()
            last_ind = -1
            if e:
                if len(e.times) < 1000:
                    # append to the time series in this entity
                    pass
                else:
                    # create new entity
                    last_ind = e.ind
                    e = SensorData()
                    e.patientId = patientId
                    e.type = sensortype
                    e.ind = last_ind + 1
            else:
                # first time !
                e = SensorData()
                e.patientId = patientId
                e.type = sensortype
                e.ind = 0
            e.times.append(t)
            e.vals.append(v)
            e.put()
        else:
            self.response.write('02 Sensor Type not recognized')
            return
        # all went well
        self.response.write('00 Done! ')
class AnonPatientPage(webapp2.RequestHandler):
    ''' /patient
    Lists patients only by patient ID and location for anonymous data access.
    '''
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        context = {
            'logoutURL': users.create_logout_url('/'),
            'patients': Patient.all(),
        }
        self.response.write(
            JINJA_ENVIRONMENT.get_template('anonpatient.html').render(context))
class PatientSensorDataPage(webapp2.RequestHandler):
    ''' /patient/([0-9]+)/<sensor_type>
    Plaintext CSV dump ("timestamp,value" per line) of one patient's data
    for a given sensor type.
    '''
    def get(self,patientID,sensor_type):
        self.response.headers['Content-Type'] = 'text/plain'
        write = self.response.write
        for timestamp, value in getDataSeriesForPatient(patientID, sensor_type):
            write('%s,%s\n' % (timestamp, value))
class PatientSensorDataGraphPage(webapp2.RequestHandler):
    ''' /patient/([0-9]+)/'+sensor_types+'/graph
    This page visualizes on a graph sensor data from a specific patient and sensor type
    '''
    def get(self,patientID,sensor_type):
        self.response.headers['Content-Type'] = 'text/html'
        # Colored background bands (e.g. hypothermia/normal/fever) if any
        # are configured for this sensor type.
        bands = plot_bands.get(sensor_type,[])
        # Strip the trailing '/graph' from the URI to obtain the CSV data URL.
        r = re.match(r'(.*)/graph/?',self.request.uri)
        template_values = {
            'yName': sensor_type.capitalize(),
            'series': getDataSeriesForPatient(patientID,sensor_type),
            'ebands' : enumerate(bands),
            'nbands' : len(bands),
            'url' : r.group(1)
        }
        template = JINJA_ENVIRONMENT.get_template('sensordataplot.html')
        self.response.write(template.render(template_values))
class DoctorPage(webapp2.RequestHandler):
    ''' /doctor/([0-9]+)/
    This page lists the patients that a doctor is caring for.
    Only the logged-in doctor whose User key id matches the URL may view it.
    '''
    def get(self,doctorID):
        user = users.get_current_user()
        if not user:
            self.redirect(users.create_login_url(self.request.uri))
            return
        # Look up the rnicu User record once and reuse it for both the role
        # check and the access-control (id) check; the original code issued
        # the identical datastore query twice.
        u = User.all().filter("userName =",user.nickname()).get()
        if not (u and u.isDoctor):
            self.redirect('/?error=Need to be a doctor to do that')
            return
        if u.key().id() != int(doctorID):
            self.redirect('/?error=Access Control Violation')
            return
        self.response.headers['Content-Type'] = 'text/html'
        template_values = {
            'logoutURL': users.create_logout_url('/'),
            'patients': Patient.all().filter('doctorId =',user.nickname())
        }
        template = JINJA_ENVIRONMENT.get_template('patienttable.html')
        self.response.write(template.render(template_values))
class PatientDataPage(webapp2.RequestHandler):
    ''' /patient/([0-9]+)/
    This page lists links to pages that'll display sensor data for different sensor types
    '''
    def get(self,patientID):
        self.response.headers['Content-Type'] = 'text/html'
        template_values = {
            'logoutURL': users.create_logout_url('/'),
            'patientId' : patientID,
            # sensor_types is r'(temperature|SpO2)': strip the parentheses
            # and split on '|' to list the individual type names.
            'sensorlist' : sensor_types[1:-1].split('|')
        }
        template = JINJA_ENVIRONMENT.get_template('patientdata.html')
        self.response.write(template.render(template_values))
def getDataSeriesForPatient(patientID,sensortype):
    # Generator of (timestamp, value) pairs for one patient and sensor type.
    # Entities are scanned newest-first ('-ind'); within each entity the
    # appended samples are walked in reverse, so pairs come newest-first.
    # NaN values (failed readings) are skipped.
    for entity in SensorData.all().filter('patientId =',int(patientID)).filter('type =',str(sensortype)).order('-ind'):
        for (t,val) in reversed(zip(entity.times,entity.vals)):
            if not math.isnan(val):
                yield (t,val)
# URL routing table: maps each route regex to its request handler class.
# sensor_types is spliced into the patient data/graph routes.
application = webapp2.WSGIApplication([
    ('/', MainPage),
    ('/admin/?',AdminPage),
    ('/admin/user/create',UserCreatePage),
    (r'/admin/user/(.*)/delete',UserDeletePage),
    (r'/patient/new/?',PatientCreatePage),
    ('/patient/create',PatientCreatePage),
    ('/sensor/update',SensorUpdatePage),
    ('/sensor/bulkupdate',SensorBulkUpdatePage),
    ('/patient/?',AnonPatientPage),
    (r'/patient/([0-9]+)/'+sensor_types+'/?$',PatientSensorDataPage),
    (r'/patient/([0-9]+)/?',PatientDataPage),
    (r'/doctor/([0-9]+)/?',DoctorPage),
    (r'/patient/([0-9]+)/'+sensor_types+'/graph/?$',PatientSensorDataGraphPage)
], debug=True)
| agpl-3.0 |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/numpy/distutils/npy_pkg_config.py | 66 | 13243 | from __future__ import division, absolute_import, print_function
import sys
import re
import os
if sys.version_info[0] < 3:
from ConfigParser import RawConfigParser, NoOptionError
else:
from configparser import RawConfigParser, NoOptionError
__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet',
'read_config', 'parse_flags']
# Matches ${varname} interpolation markers inside config-file values.
_VAR = re.compile(r'\$\{([a-zA-Z0-9_-]+)\}')
class FormatError(IOError):
    """
    Exception thrown when there is a problem parsing a configuration file.
    """
    def __init__(self, msg):
        # Also run the IOError initializer so ``args`` carries the message;
        # the original skipped it, losing the message in repr()/pickling.
        IOError.__init__(self, msg)
        self.msg = msg

    def __str__(self):
        return self.msg
class PkgNotFound(IOError):
    """Exception raised when a package can not be located."""
    def __init__(self, msg):
        # Forward to IOError so ``args`` is populated (the original left it
        # empty, which breaks repr()/pickling of the exception).
        IOError.__init__(self, msg)
        self.msg = msg

    def __str__(self):
        return self.msg
def parse_flags(line):
    """
    Parse a line from a config file containing compile flags.

    Parameters
    ----------
    line : str
        A single line containing one or more compile flags.

    Returns
    -------
    d : dict
        Dictionary of parsed flags, split into relevant categories.
        These categories are the keys of `d`:

        * 'include_dirs'
        * 'library_dirs'
        * 'libraries'
        * 'macros'
        * 'ignored'
    """
    parsed = {'include_dirs': [], 'library_dirs': [], 'libraries': [],
              'macros': [], 'ignored': []}
    # Map a flag prefix to the bucket its argument goes into.  Order
    # matters: '-L' must be tested before the lowercase '-l'.
    prefixes = (('-I', 'include_dirs'), ('-L', 'library_dirs'),
                ('-l', 'libraries'), ('-D', 'macros'))
    # Splitting ' ' + line on ' -' yields one token per flag (the leading
    # empty piece becomes a bare '-', which lands in 'ignored').
    for token in (' ' + line).split(' -'):
        token = '-' + token
        for prefix, bucket in prefixes:
            if token.startswith(prefix):
                parsed[bucket].append(token[2:].strip())
                break
        else:
            parsed['ignored'].append(token)
    return parsed
def _escape_backslash(val):
return val.replace('\\', '\\\\')
class LibraryInfo(object):
    """
    Object containing build information about a library.

    Parameters
    ----------
    name : str
        The library name.
    description : str
        Description of the library.
    version : str
        Version string.
    sections : dict
        The sections of the configuration file for the library. The keys are
        the section headers, the values the text under each header.
    vars : class instance
        A `VariableSet` instance, which contains ``(name, value)`` pairs for
        variables defined in the configuration file for the library.
    requires : sequence, optional
        The required libraries for the library to be installed.

    Notes
    -----
    All input parameters (except "sections" which is a method) are available as
    attributes of the same name.
    """
    def __init__(self, name, description, version, sections, vars, requires=None):
        self.name = name
        self.description = description
        if requires:
            self.requires = requires
        else:
            self.requires = []
        self.version = version
        self._sections = sections
        self.vars = vars

    def sections(self):
        """
        Return the section headers of the config file.

        Returns
        -------
        keys : list of str
            The list of section headers.
        """
        return list(self._sections.keys())

    def cflags(self, section="default"):
        # Interpolate ${var} references in the section's 'cflags' entry,
        # then escape backslashes (Windows paths) for safe re-use.
        val = self.vars.interpolate(self._sections[section]['cflags'])
        return _escape_backslash(val)

    def libs(self, section="default"):
        # Same treatment as cflags, for the 'libs' entry.
        val = self.vars.interpolate(self._sections[section]['libs'])
        return _escape_backslash(val)

    def __str__(self):
        m = ['Name: %s' % self.name, 'Description: %s' % self.description]
        # BUG FIX: the two branches were swapped -- the dependency list was
        # only joined when `requires` was empty, so it was never printed.
        if self.requires:
            m.append('Requires: %s' % ",".join(self.requires))
        else:
            m.append('Requires:')
        m.append('Version: %s' % self.version)
        return "\n".join(m)
class VariableSet(object):
    """
    Container object for the variables defined in a config file.

    `VariableSet` can be used as a plain dictionary, with the variable names
    as keys.

    Parameters
    ----------
    d : dict
        Dict of items in the "variables" section of the configuration file.
    """
    def __init__(self, d):
        self._raw_data = dict(d.items())
        self._re = {}
        self._re_sub = {}
        self._init_parse()

    def _init_parse(self):
        # Pre-compile one ${name} pattern per variable.
        for name, value in self._raw_data.items():
            self._init_parse_var(name, value)

    def _init_parse_var(self, name, value):
        self._re[name] = re.compile(r'\$\{%s\}' % name)
        self._re_sub[name] = value

    def interpolate(self, value):
        # Brute force: keep substituting until no '${var}' token remains or
        # a full pass leaves the string unchanged (guards against unknown or
        # self-referencing variables looping forever).
        def _substitute_all(text):
            for name, pattern in self._re.items():
                text = pattern.sub(self._re_sub[name], text)
            return text

        while _VAR.search(value):
            replaced = _substitute_all(value)
            if replaced == value:
                break
            value = replaced
        return value

    def variables(self):
        """
        Return the list of variable names.

        Returns
        -------
        names : list of str
            The names of all variables in the `VariableSet` instance.
        """
        return list(self._raw_data)

    # Emulate a dict to set/get variables values
    def __getitem__(self, name):
        return self._raw_data[name]

    def __setitem__(self, name, value):
        # Keep the pre-compiled pattern tables in sync with the raw data.
        self._raw_data[name] = value
        self._init_parse_var(name, value)
def parse_meta(config):
    """Extract and validate the mandatory [meta] section of *config*.

    Returns the section as a dict, guaranteeing a 'requires' key (defaulting
    to an empty list).  Raises FormatError when the section or one of the
    mandatory options ('name', 'description', 'version') is missing.
    """
    if not config.has_section('meta'):
        raise FormatError("No meta section found !")
    meta = dict(config.items('meta'))
    # Report the first missing mandatory option, in the canonical order.
    missing = [k for k in ('name', 'description', 'version') if k not in meta]
    if missing:
        raise FormatError("Option %s (section [meta]) is mandatory, "
                          "but not found" % missing[0])
    meta.setdefault('requires', [])
    return meta
def parse_variables(config):
    """Build a `VariableSet` from the mandatory [variables] section.

    Raises FormatError when the section is missing.
    """
    if not config.has_section('variables'):
        raise FormatError("No variables section found !")
    return VariableSet(dict(config.items("variables")))
def parse_sections(config):
    # BUG: `meta_d` and `r` are never defined anywhere, so calling this
    # function always raises NameError.  Nothing in this module calls it;
    # it looks like dead code left over from a refactor (parse_config()
    # now does the section parsing inline).  TODO(review): remove or
    # implement.
    return meta_d, r
def pkg_to_filename(pkg_name):
    """Return the config file name for package *pkg_name* (e.g. 'foo.ini')."""
    return pkg_name + ".ini"
def parse_config(filename, dirs=None):
    """Read an npy-pkg-config file and return its raw parsed content.

    Returns a 4-tuple ``(meta, vars, sections, requires)``: the [meta]
    section dict, the backslash-escaped [variables] items, a mapping of
    every remaining section name to its option dict, and a mapping of
    section names to their 'requires' option (when present).

    Raises PkgNotFound when none of the candidate files can be read.
    """
    if dirs:
        candidates = [os.path.join(d, filename) for d in dirs]
    else:
        candidates = [filename]

    config = RawConfigParser()
    found = config.read(candidates)
    if len(found) < 1:
        raise PkgNotFound("Could not find file(s) %s" % str(candidates))

    # Parse meta and variables sections
    meta = parse_meta(config)

    vars = {}
    if config.has_section('variables'):
        for name, value in config.items("variables"):
            vars[name] = _escape_backslash(value)

    # Every other section describes build info (cflags, libs, ...).
    sections = {}
    requires = {}
    for section in config.sections():
        if section in ('meta', 'variables'):
            continue
        if config.has_option(section, "requires"):
            requires[section] = config.get(section, 'requires')
        sections[section] = dict(config.items(section))

    return meta, vars, sections, requires
def _read_config_imp(filenames, dirs=None):
    # Parse a config file plus, recursively, everything it 'requires', and
    # fold the result into a single LibraryInfo.  Variables from required
    # libraries never override the top-level file's; their section options
    # are appended (space-separated) to the requiring section's options.
    def _read_config(f):
        meta, vars, sections, reqs = parse_config(f, dirs)
        # recursively add sections and variables of required libraries
        for rname, rvalue in reqs.items():
            nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue))
            # Update var dict for variables not in 'top' config file
            for k, v in nvars.items():
                if not k in vars:
                    vars[k] = v
            # Update sec dict
            for oname, ovalue in nsections[rname].items():
                if ovalue:
                    sections[rname][oname] += ' %s' % ovalue
        return meta, vars, sections, reqs
    meta, vars, sections, reqs = _read_config(filenames)
    # FIXME: document this. If pkgname is defined in the variables section, and
    # there is no pkgdir variable defined, pkgdir is automatically defined to
    # the path of pkgname. This requires the package to be imported to work
    if not 'pkgdir' in vars and "pkgname" in vars:
        pkgname = vars["pkgname"]
        if not pkgname in sys.modules:
            raise ValueError("You should import %s to get information on %s" %
                             (pkgname, meta["name"]))
        mod = sys.modules[pkgname]
        vars["pkgdir"] = _escape_backslash(os.path.dirname(mod.__file__))
    return LibraryInfo(name=meta["name"], description=meta["description"],
            version=meta["version"], sections=sections, vars=VariableSet(vars))
# Trivial cache to cache LibraryInfo instances creation. To be really
# efficient, the cache should be handled in read_config, since a same file can
# be parsed many time outside LibraryInfo creation, but I doubt this will be a
# problem in practice
# Maps package name -> LibraryInfo; populated lazily by read_config().
_CACHE = {}
def read_config(pkgname, dirs=None):
    """
    Return library info for a package from its configuration file.

    Results are memoized in the module-level ``_CACHE`` dict, so each
    package's .ini file is only parsed once per process.

    Parameters
    ----------
    pkgname : str
        Name of the package (should match the name of the .ini file, without
        the extension, e.g. foo for the file foo.ini).
    dirs : sequence, optional
        If given, should be a sequence of directories - usually including
        the NumPy base directory - where to look for npy-pkg-config files.

    Returns
    -------
    pkginfo : class instance
        The `LibraryInfo` instance containing the build information.

    Raises
    ------
    PkgNotFound
        If the package is not found.

    See Also
    --------
    misc_util.get_info, misc_util.get_pkg_info

    Examples
    --------
    >>> npymath_info = np.distutils.npy_pkg_config.read_config('npymath')
    >>> type(npymath_info)
    <class 'numpy.distutils.npy_pkg_config.LibraryInfo'>
    >>> print(npymath_info)
    Name: npymath
    Description: Portable, core math library implementing C99 standard
    Requires:
    Version: 0.1  #random
    """
    if pkgname not in _CACHE:
        _CACHE[pkgname] = _read_config_imp(pkg_to_filename(pkgname), dirs)
    return _CACHE[pkgname]
# TODO:
#   - implements version comparison (modversion + atleast)
# pkg-config simple emulator - useful for debugging, and maybe later to query
# the system
if __name__ == '__main__':
    import sys
    from optparse import OptionParser
    import glob
    parser = OptionParser()
    parser.add_option("--cflags", dest="cflags", action="store_true",
                      help="output all preprocessor and compiler flags")
    parser.add_option("--libs", dest="libs", action="store_true",
                      help="output all linker flags")
    parser.add_option("--use-section", dest="section",
                      help="use this section instead of default for options")
    parser.add_option("--version", dest="version", action="store_true",
                      help="output version")
    parser.add_option("--atleast-version", dest="min_version",
                      help="Minimal version")
    parser.add_option("--list-all", dest="list_all", action="store_true",
                      # BUG FIX: help text was copy-pasted from --atleast-version
                      help="list all packages in the current directory")
    parser.add_option("--define-variable", dest="define_variable",
                      help="Replace variable with the given value")
    (options, args) = parser.parse_args(sys.argv)
    if len(args) < 2:
        raise ValueError("Expect package name on the command line:")
    if options.list_all:
        files = glob.glob("*.ini")
        for f in files:
            # BUG FIX: read_config expects a *package name* and appends
            # ".ini" itself, so passing the file name looked up "foo.ini.ini"
            # and always raised PkgNotFound.
            info = read_config(os.path.splitext(f)[0])
            print("%s\t%s - %s" % (info.name, info.name, info.description))
    pkg_name = args[1]
    import os
    d = os.environ.get('NPY_PKG_CONFIG_PATH')
    if d:
        info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.', d])
    else:
        info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.'])
    if options.section:
        section = options.section
    else:
        section = "default"
    if options.define_variable:
        m = re.search(r'([\S]+)=([\S]+)', options.define_variable)
        if not m:
            raise ValueError("--define-variable option should be of " \
                             "the form --define-variable=foo=bar")
        else:
            name = m.group(1)
            value = m.group(2)
            info.vars[name] = value
    if options.cflags:
        print(info.cflags(section))
    if options.libs:
        print(info.libs(section))
    if options.version:
        print(info.version)
    if options.min_version:
        # NOTE(review): plain lexicographic string comparison, so e.g.
        # "0.10" < "0.9"; real version comparison is still a TODO (above).
        print(info.version >= options.min_version)
| mit |
ujjwalwahi/odoo | openerp/addons/base/tests/test_ir_values.py | 462 | 6705 | import unittest2
import openerp.tests.common as common
class test_ir_values(common.TransactionCase):
    """Exercise the legacy ``ir.values`` key/value store: default field
    values (global, conditional and per-user) and the various action
    bindings (double-click actions, sidebar wizards, reports)."""
    def test_00(self):
        """Set and retrieve defaults and action bindings on fake models."""
        # Create some default value for some (non-existing) model, for all users.
        ir_values = self.registry('ir.values')
        # use the old API
        ir_values.set(self.cr, self.uid, 'default', False, 'my_test_field',
                      ['unexisting_model'], 'global value')
        # use the new API
        ir_values.set_default(self.cr, self.uid, 'other_unexisting_model',
                              'my_other_test_field', 'conditional value', condition='foo=bar')
        # Retrieve them.
        ir_values = self.registry('ir.values')
        # d is a list of triplets (id, name, value)
        # Old API
        d = ir_values.get(self.cr, self.uid, 'default', False, ['unexisting_model'])
        assert len(d) == 1, "Only one single value should be retrieved for this model"
        assert d[0][1] == 'my_test_field', "Can't retrieve the created default value. (1)"
        assert d[0][2] == 'global value', "Can't retrieve the created default value. (2)"
        # New API, Conditional version
        d = ir_values.get_defaults(self.cr, self.uid, 'other_unexisting_model')
        assert len(d) == 0, "No value should be retrieved, the condition is not met"
        d = ir_values.get_defaults(self.cr, self.uid, 'other_unexisting_model', condition="foo=eggs")
        assert len(d) == 0, 'Condition is not met either, no defaults should be returned'
        d = ir_values.get_defaults(self.cr, self.uid, 'other_unexisting_model', condition="foo=bar")
        assert len(d) == 1, "Only one single value should be retrieved"
        assert d[0][1] == 'my_other_test_field', "Can't retrieve the created default value. (5)"
        assert d[0][2] == 'conditional value', "Can't retrieve the created default value. (6)"
        # Do it again but for a specific user.
        ir_values = self.registry('ir.values')
        ir_values.set(self.cr, self.uid, 'default', False, 'my_test_field',['unexisting_model'], 'specific value', preserve_user=True)
        # Retrieve it and check it is the one for the current user.
        ir_values = self.registry('ir.values')
        d = ir_values.get(self.cr, self.uid, 'default', False, ['unexisting_model'])
        assert len(d) == 1, "Only one default must be returned per field"
        assert d[0][1] == 'my_test_field', "Can't retrieve the created default value."
        assert d[0][2] == 'specific value', "Can't retrieve the created default value."
        # Create some action bindings for a non-existing model.
        act_id_1 = self.ref('base.act_values_form_action')
        act_id_2 = self.ref('base.act_values_form_defaults')
        act_id_3 = self.ref('base.action_res_company_form')
        act_id_4 = self.ref('base.action_res_company_tree')
        ir_values = self.registry('ir.values')
        ir_values.set(self.cr, self.uid, 'action', 'tree_but_open', 'OnDblClick Action', ['unexisting_model'], 'ir.actions.act_window,%d' % act_id_1, isobject=True)
        ir_values.set(self.cr, self.uid, 'action', 'tree_but_open', 'OnDblClick Action 2', ['unexisting_model'], 'ir.actions.act_window,%d' % act_id_2, isobject=True)
        ir_values.set(self.cr, self.uid, 'action', 'client_action_multi', 'Side Wizard', ['unexisting_model'], 'ir.actions.act_window,%d' % act_id_3, isobject=True)
        report_ids = self.registry('ir.actions.report.xml').search(self.cr, self.uid, [], {})
        reports = self.registry('ir.actions.report.xml').browse(self.cr, self.uid, report_ids, {})
        report_id = [report.id for report in reports if not report.groups_id][0] # assume at least one
        ir_values.set(self.cr, self.uid, 'action', 'client_print_multi', 'Nice Report', ['unexisting_model'], 'ir.actions.report.xml,%d' % report_id, isobject=True)
        ir_values.set(self.cr, self.uid, 'action', 'client_action_relate', 'Related Stuff', ['unexisting_model'], 'ir.actions.act_window,%d' % act_id_4, isobject=True)
        # Replace one action binding to set a new name.
        ir_values = self.registry('ir.values')
        ir_values.set(self.cr, self.uid, 'action', 'tree_but_open', 'OnDblClick Action New', ['unexisting_model'], 'ir.actions.act_window,%d' % act_id_1, isobject=True)
        # Retrieve the action bindings and check they're correct
        ir_values = self.registry('ir.values')
        actions = ir_values.get(self.cr, self.uid, 'action', 'tree_but_open', ['unexisting_model'])
        assert len(actions) == 2, "Mismatching number of bound actions"
        #first action
        assert len(actions[0]) == 3, "Malformed action definition"
        assert actions[0][1] == 'OnDblClick Action 2', 'Bound action does not match definition'
        assert isinstance(actions[0][2], dict) and actions[0][2]['id'] == act_id_2, 'Bound action does not match definition'
        #second action - this ones comes last because it was re-created with a different name
        assert len(actions[1]) == 3, "Malformed action definition"
        assert actions[1][1] == 'OnDblClick Action New', 'Re-Registering an action should replace it'
        assert isinstance(actions[1][2], dict) and actions[1][2]['id'] == act_id_1, 'Bound action does not match definition'
        actions = ir_values.get(self.cr, self.uid, 'action', 'client_action_multi', ['unexisting_model'])
        assert len(actions) == 1, "Mismatching number of bound actions"
        assert len(actions[0]) == 3, "Malformed action definition"
        assert actions[0][1] == 'Side Wizard', 'Bound action does not match definition'
        assert isinstance(actions[0][2], dict) and actions[0][2]['id'] == act_id_3, 'Bound action does not match definition'
        actions = ir_values.get(self.cr, self.uid, 'action', 'client_print_multi', ['unexisting_model'])
        assert len(actions) == 1, "Mismatching number of bound actions"
        assert len(actions[0]) == 3, "Malformed action definition"
        assert actions[0][1] == 'Nice Report', 'Bound action does not match definition'
        assert isinstance(actions[0][2], dict) and actions[0][2]['id'] == report_id, 'Bound action does not match definition'
        actions = ir_values.get(self.cr, self.uid, 'action', 'client_action_relate', ['unexisting_model'])
        assert len(actions) == 1, "Mismatching number of bound actions"
        assert len(actions[0]) == 3, "Malformed action definition"
        assert actions[0][1] == 'Related Stuff', 'Bound action does not match definition'
        assert isinstance(actions[0][2], dict) and actions[0][2]['id'] == act_id_4, 'Bound action does not match definition'
if __name__ == '__main__':
    # Allow running this test module directly (Python 2-era unittest2 runner).
    unittest2.main()
| agpl-3.0 |
zzhhui/webrtc-jingle-client | tools/badit_android.py | 7 | 9528 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Badit (BuildAndDebugIT) is a program to to simplify the process of deployment
and updating an android project
copyright: 2012, (c) tuenti.com
author: Nick Flink <nickflink@github.com>
"""
import sys
import getopt
import os
import subprocess
# import getpass
import logging
import pprint
# Bit flags selecting which pipeline stages main() runs; combined into
# taskMask below and toggled via the -b/-u/-i/-r/-d command line options.
build_bit = (1 << 0)
uninstall_bit = (1 << 1)
install_bit = (1 << 2)
run_bit = (1 << 3)
debug_bit = (1 << 4)
#global vars
# Default pipeline: build + install + run + debug (uninstall is opt-in).
taskMask = build_bit | install_bit | run_bit | debug_bit
# Maven build profile; must be one of supportedProfiles (checked in mavenBuild).
profile = "default_debug"
# adb device serial; resolved lazily via getFirstDeviceSerial() when None.
serial = None
supportedProfiles = ["video_debug", "video_final", "video_release",
                     "default_debug", "default_release", "default_final",
                     "tuenti_debug", "tuenti_release", "tuenti_final"]
# NDK release ids accepted by checkNDK() (first 3 chars of RELEASE.TXT).
supportedNDKs = ["r8e", "r8d"]
##
## The following options are provided.
## --help [-h]. What you are reading now
## --log-level [-l]. setting the log level dynamically
## --profile [-p]. Setting build profile
## --task-mask [-m]. skip over some tasks
## --build [-b] build. Build the apk
## --uninstall [-u] uninstall. Uninstall the apk
## --install [-i] install. Install the apk
## --run [-r] run. Run the apk
## --debug [-d] debug. Debug the apk
## --serial [-s] serial. Use the device with the matching serial
## you can get a list of serials with adb devices
# The '##' lines above double as the --help text (echoed by usage()).
logger = logging.getLogger(__name__)
def usage():
    """Print this script's usage text to stderr.

    The options documentation lives in the comment lines beginning with
    '##' near the top of this file; this reads the script's own source and
    echoes exactly those lines.
    """
    # BUG FIX: the handle was opened but never closed; a context manager
    # guarantees it is released even if reading fails.
    with open(__file__, "r") as fh:
        me = fh.readlines()
    sys.stderr.write("usage:\n")
    for line in me:
        if line.find("##") == 0:
            sys.stderr.write(line)
def which(program):
    """Locate *program* like the Unix `which` utility.

    Returns *program* itself when it is an explicit executable path, the
    first executable match on $PATH for a bare name, or None when nothing
    is found.
    """
    def _runnable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    directory, _ = os.path.split(program)
    if directory:
        # An explicit path was given: accept it only if it is executable.
        return program if _runnable(program) else None
    for entry in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(entry.strip('"'), program)
        if _runnable(candidate):
            return candidate
    return None
def runCmd(name, cmdList):
    """Run *cmdList* (an argv list) via subprocess, logging under *name*.

    Returns 0 on success; 1 when the executable is missing or exits
    non-zero.
    """
    command = " ".join(cmdList)
    logger.info("=> " + name + " = " + command)
    if which(cmdList[0]) is None:
        logger.info("[KO] [" + cmdList[0] + "] command doesn't exist you must install it")
        return 1
    if subprocess.call(cmdList) != 0:
        logger.error("[KO] " + name + " = " + command)
        return 1
    logger.info("[OK] " + name + " = " + command)
    return 0
def mavenBuild():
    """Run `mvn install` for the selected profile when the build bit is set.

    Exits the process when the profile is unknown or the build fails.
    """
    if not (taskMask & build_bit):
        logger.info("skipping build")
        return
    if profile not in supportedProfiles:
        logger.error("[KO] Build bad profile " + profile + "\noptions are: " + ", ".join(supportedProfiles) + "\nexiting.")
        sys.exit(1)
    if runCmd("Build", ["mvn", "install", "--activate-profiles", profile]) != 0:
        sys.exit(1)
def uninstallApk():
    """adb-uninstall the example APK from the target device (best effort).

    Resolves the device serial lazily and caches it in the module-global
    `serial`.  Uninstall failures are deliberately ignored (the app may not
    be installed yet).
    """
    global serial
    if not (taskMask & uninstall_bit):
        logger.info("skipping uninstall")
        return
    if serial is None:
        serial = getFirstDeviceSerial()
    # Failure is acceptable here (e.g. first-ever install), so the return
    # code is intentionally not checked.
    runCmd("Uninstall", ["adb", "-s", serial, "uninstall", "com.tuenti.voice.example"])
def installApk():
    """Deploy the APK via maven, retrying exactly once after an uninstall.

    A stale installation can block `android:deploy`; on first failure the
    APK is uninstalled and the deploy retried, and a second failure aborts
    the process.
    """
    if not (taskMask & install_bit):
        logger.info("skipping install")
        return
    deploy_cmd = ["mvn", "-pl", "voice-client-example", "android:deploy"]
    if runCmd("Install", deploy_cmd) == 0:
        return
    logger.info("install failed trying uninstall first")
    uninstallApk()
    if runCmd("Install", deploy_cmd) != 0:
        sys.exit(1)
def runApk():
    """Launch the deployed APK through maven (`android:run`); exit on failure."""
    if not (taskMask & run_bit):
        logger.info("skipping run")
        return
    if runCmd("Run", ["mvn", "-pl", "voice-client-example", "android:run"]) != 0:
        sys.exit(1)
# we should probably run it for every connected device rather than just the first
def getFirstDeviceSerial():
    """Return the serial of the first device reported by `adb devices`.

    Exits the process when adb writes to stderr or no device line is found.
    """
    adbDevicesCmd = ["adb", "devices"]
    proc = subprocess.Popen(adbDevicesCmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if len(err) != 0:
        logger.error(" ".join(adbDevicesCmd) + " exited with errors:\n" + err)
        sys.exit(1)
    # Device lines look like "<serial>\tdevice"; take the first one.
    for line in out.splitlines():
        if line.endswith("device"):
            return line.split("\t")[0]
    logger.error("no device found exiting.\n")
    sys.exit(1)
def debugApk():
    # Prepare a gdb invocation for the running VoiceClientService and print
    # it (debugging needs an interactive shell, so the command is only
    # logged, not executed).
    global serial
    if taskMask & debug_bit:
        # Work from the repository root (parent of this script's directory).
        os.chdir(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
        if serial == None:
            serial = getFirstDeviceSerial()
        # Read the device's build properties to learn its CPU ABI, which
        # selects the matching native-symbol directory.
        cpuInfoCmd = ["adb", "-s", serial, "shell", "cat", "/system/build.prop"]
        p = subprocess.Popen(cpuInfoCmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        if len(err) != 0:
            logger.error(" ".join(cpuInfoCmd) + " exited with errors:\n" + err)
            sys.exit(1)
        for line in out.splitlines():
            if line.startswith("ro.product.cpu.abi="):
                cpuAbi = line.split("=")[1]
        # NOTE(review): if build.prop lacks ro.product.cpu.abi, cpuAbi is
        # never bound and the line below raises NameError -- confirm all
        # targeted devices publish this property.
        androidBuildDir = os.path.join("build", "android")
        gdbApk = os.path.join(androidBuildDir, "gdb_apk")
        envSetup = os.path.join(androidBuildDir, "envsetup.sh")
        abiSymbolDir = os.path.join("android", "voice-client-core", "obj", profile, "local", cpuAbi)
        debugCmd = ["bash", "-c", "'source " + envSetup + " && " + gdbApk + " -p com.tuenti.voice.example -s VoiceClientService -l " + abiSymbolDir + "'"]
        logger.info("Debugging requires a shell. Copy paste the below to begin debugging:\n" + " ".join(debugCmd))
        #runCmd("Debug", debugCmd) this command must be run from a shell so keeping it commented out
    else:
        logger.info("skipping debug")
def checkSDK():
    """Verify ANDROID_SDK_ROOT points at an SDK with android-14 installed.

    Exits with an actionable message when the variable is unset, the SDK
    layout is broken, or the android-14 platform is missing.
    """
    try:
        sdkRoot = os.environ['ANDROID_SDK_ROOT']
    except KeyError:
        logger.error("Please set ANDROID_SDK_ROOT")
        sys.exit(1)
    platformsDir = os.path.join(sdkRoot, "platforms", "android-14")
    androidCmd = os.path.join(str(sdkRoot), "tools", "android")
    # Probe for the `android` tool to detect a corrupt/partial SDK install.
    try:
        f = open(androidCmd, 'r')
        f.close()
    except IOError:
        logger.error("ANDROID_SDK_ROOT is corrupt cannot find:\n\t" + androidCmd)
        sys.exit(1)
    if not os.path.isdir(platformsDir):
        logger.error("You're missing android-14, which is required")
        logger.error("You can install it by copy pasting the below command")
        sys.stderr.write(androidCmd + " update sdk -u --filter android-14\n")
        sys.exit(1)
def checkNDK():
    """Verify ANDROID_NDK_ROOT points at a supported NDK release.

    The release id is the first three characters of RELEASE.TXT (e.g.
    "r8e").  Exits when the variable is unset, the file is unreadable, or
    the release is not in supportedNDKs.
    """
    try:
        ndkRoot = os.environ['ANDROID_NDK_ROOT']
        releaseTxt = os.path.join(ndkRoot, "RELEASE.TXT")
        # FIX: a context manager guarantees the handle is closed even when
        # read() raises (the original leaked the handle on a read error).
        with open(releaseTxt, 'r') as releaseHandle:
            releaseContents = releaseHandle.read(3)
    except (KeyError, IOError):
        logger.error("Please set ANDROID_NDK_ROOT")
        sys.exit(1)
    if releaseContents not in supportedNDKs:
        logger.error("Please install a supported NDK version:\n\t" + " ".join(supportedNDKs))
        sys.exit(1)
def main(argv=None):
    """Parse command-line options, then run the selected pipeline stages.

    Stages (build/uninstall/install/run/debug) are selected via the bit
    flags accumulated in newTaskMask; with no stage flags given, the
    module-level default taskMask is kept.  Returns 0 on success, 1 on
    option errors (individual stages may sys.exit on failure).
    """
    global taskMask
    global profile
    global serial
    logLevel = "INFO"
    logging.basicConfig(level=logging.INFO)
    newTaskMask = 0
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hbuirdl:p:m:s:", ["help", "build", "uninstall", "install", "run", "debug", "log-level=", "profile=", "task-mask=", "serial="])
    # FIX: "except E, err" is Python-2-only syntax; "as err" is valid on
    # both Python 2.6+ and Python 3.
    except getopt.GetoptError as err:
        # print help information and exit:
        logger.error(err)  # will print something like "option -a not recognized"
        # FIX: print-as-function works on both Python 2 and 3.
        print("ERROR 2")
        usage()
        return 1
    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
            sys.exit(1)
        elif o in ("-b", "--build"):
            newTaskMask |= build_bit
        elif o in ("-u", "--uninstall"):
            newTaskMask |= uninstall_bit
        elif o in ("-i", "--install"):
            newTaskMask |= install_bit
        elif o in ("-r", "--run"):
            newTaskMask |= run_bit
        elif o in ("-d", "--debug"):
            newTaskMask |= debug_bit
        elif o in ("-l", "--log-level"):
            logLevel = a
        elif o in ("-p", "--profile"):
            profile = a
        elif o in ("-m", "--task-mask"):
            newTaskMask |= int(a)
        elif o in ("-s", "--serial"):
            serial = a
        else:
            usage()
    # Only override the default pipeline when at least one stage was chosen.
    if newTaskMask > 0:
        taskMask = newTaskMask
    if logLevel == "DEBUG":
        # NOTE(review): basicConfig() is a no-op once handlers are already
        # configured (done above), so this likely does not change the level.
        logging.basicConfig(level=logging.DEBUG)
    logger.info("Running with:")
    logger.info("\tprofile: " + profile)
    logger.info("\tlog-level: " + logLevel)
    logger.info("\ttask-mask: " + str(taskMask))
    logger.info("\tserial: " + str(serial))
    checkSDK()
    checkNDK()
    # Run the maven stages from the android/ directory, restoring the cwd after.
    savedPath = os.getcwd()
    os.chdir(os.path.join(os.path.dirname(os.path.dirname(__file__)), "android"))
    mavenBuild()
    uninstallApk()
    installApk()
    runApk()
    debugApk()
    os.chdir(savedPath)
    logger.info("Done!")
    return 0
if __name__ == "__main__":
    # NOTE(review): this assignment has no effect -- main() rebinds its own
    # local newTaskMask to 0 before using it.  Candidate for removal.
    newTaskMask = 1
    sys.exit(main())
| bsd-3-clause |
imk1/IMKTFBindingCode | makeClustergram.py | 1 | 2060 | import sys
import argparse
import pylab
import numpy as np
import matplotlib.pyplot as plt
from heatmapcluster import heatmapcluster
def parseArgument():
    """Build the command-line parser for the clustergram script and parse sys.argv.

    Returns the argparse namespace with matFileName, columnNamesFileName,
    clusterFileName, metric (default 'euclidean'), contextIndex (optional
    int) and logSignals (flag) attributes.
    """
    parser = argparse.ArgumentParser(
        description="Make a clustergram of a matrix")
    parser.add_argument("--matFileName", required=True,
                        help='File with matrix')
    parser.add_argument("--columnNamesFileName", required=True,
                        help='File with column names')
    parser.add_argument("--clusterFileName", required=True,
                        help='File where cluster information will be recorded')
    parser.add_argument("--metric", required=False, default='euclidean',
                        help='Metric for clustering')
    parser.add_argument("--contextIndex", required=False, type=int,
                        default=None,
                        help='Consider only a specific row')
    parser.add_argument("--logSignals", action='store_true', required=False,
                        help='log the signals')
    return parser.parse_args()
def getStringList(stringFileName):
    """Read a text file and return its lines with surrounding whitespace stripped.

    Parameters
    ----------
    stringFileName : str
        Path to a text file with one string per line.

    Returns
    -------
    list of str
        The stripped lines (blank lines become '' entries).
    """
    # FIX: 'with' closes the handle even when iteration raises (the
    # original opened the file and only closed it on the success path).
    with open(stringFileName) as stringFile:
        return [line.strip() for line in stringFile]
def makeClustergram(options):
    # Make a clustergram of a matrix
    # Loads the matrix, optionally log2-transforms it and/or restricts it to
    # rows with a non-zero entry in column `contextIndex`, draws a clustered
    # heatmap, and saves the row-linkage matrix to clusterFileName.
    # Deep dendrograms can exceed Python's default recursion limit.
    sys.setrecursionlimit(100000)
    # float16 halves memory; NOTE(review): it also limits precision -- confirm
    # this is acceptable for the distance computations downstream.
    mat = np.loadtxt(options.matFileName, dtype=np.float16)
    if options.logSignals:
        # log2 the signal values
        mat = np.log2(mat + 1)
    columnNames = getStringList(options.columnNamesFileName)
    if options.contextIndex is not None:
        # Considering the data in the context of a TF
        # Keep only rows where the context column is non-zero.
        rowsToConsider = np.nonzero(mat[:,options.contextIndex])
        mat = mat[rowsToConsider,:].squeeze()
    # Row labels are just 0..n-1 indices.
    rowNames = [str(i) for i in range(mat.shape[0])]
    pylab.figure()
    h = heatmapcluster(mat, rowNames, columnNames,
                       num_row_clusters=None, num_col_clusters=None,
                       label_fontsize=8,
                       xlabel_rotation=-75,
                       cmap=plt.cm.YlOrRd,
                       show_colorbar=True,
                       top_dendrogram=True,
                       metric=options.metric)
    # Persist the hierarchical-clustering linkage for later inspection.
    np.savetxt(options.clusterFileName, h.row_linkage, fmt='%.4f')
    pylab.show()
if __name__ == "__main__":
    # Script entry point: parse the CLI and draw the clustergram.
    options = parseArgument()
    makeClustergram(options)
| mit |
owlabs/incubator-airflow | airflow/hooks/dbapi_hook.py | 1 | 11082 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from builtins import str
from past.builtins import basestring
from datetime import datetime
from contextlib import closing
import sys
from typing import Optional
from sqlalchemy import create_engine
from airflow.hooks.base_hook import BaseHook
from airflow.exceptions import AirflowException
class DbApiHook(BaseHook):
    """
    Abstract base class for sql hooks.

    Subclasses must set ``conn_name_attr`` (the attribute name under which
    the Airflow connection id is stored) and typically override
    ``default_conn_name``, ``supports_autocommit`` and ``connector``.
    """
    # Override to provide the connection name.
    conn_name_attr = None  # type: Optional[str]
    # Override to have a default connection id for a particular dbHook
    default_conn_name = 'default_conn_id'
    # Override if this db supports autocommit.
    supports_autocommit = False
    # Override with the object that exposes the connect method
    connector = None
def __init__(self, *args, **kwargs):
if not self.conn_name_attr:
raise AirflowException("conn_name_attr is not defined")
elif len(args) == 1:
setattr(self, self.conn_name_attr, args[0])
elif self.conn_name_attr not in kwargs:
setattr(self, self.conn_name_attr, self.default_conn_name)
else:
setattr(self, self.conn_name_attr, kwargs[self.conn_name_attr])
    def get_conn(self):
        """Returns a connection object
        """
        # Look up the Airflow connection whose id is stored under
        # self.conn_name_attr, then delegate to the subclass-provided
        # `connector` object.
        # NOTE(review): the password is not forwarded here and the keyword
        # names (username/schema) are not standard DB-API 2.0; subclasses
        # that need different arguments presumably override this -- confirm.
        db = self.get_connection(getattr(self, self.conn_name_attr))
        return self.connector.connect(
            host=db.host,
            port=db.port,
            username=db.login,
            schema=db.schema)
    def get_uri(self):
        # Build a connection URI from the stored Airflow connection:
        # conn_type://[login:password@]host[:port]/[schema]
        # NOTE(review): login/password are not URL-quoted, so credentials
        # containing '@', ':' or '/' produce a malformed URI -- confirm
        # callers only use simple credentials.
        conn = self.get_connection(getattr(self, self.conn_name_attr))
        login = ''
        if conn.login:
            login = '{conn.login}:{conn.password}@'.format(conn=conn)
        host = conn.host
        if conn.port is not None:
            host += ':{port}'.format(port=conn.port)
        uri = '{conn.conn_type}://{login}{host}/'.format(
            conn=conn, login=login, host=host)
        if conn.schema:
            uri += conn.schema
        return uri
def get_sqlalchemy_engine(self, engine_kwargs=None):
if engine_kwargs is None:
engine_kwargs = {}
return create_engine(self.get_uri(), **engine_kwargs)
    def get_pandas_df(self, sql, parameters=None):
        """
        Executes the sql and returns a pandas dataframe

        :param sql: the sql statement to be executed (str) or a list of
            sql statements to execute
        :type sql: str or list
        :param parameters: The parameters to render the SQL query with.
        :type parameters: mapping or iterable
        """
        if sys.version_info[0] < 3:
            # Python 2 DB-API drivers generally expect byte strings.
            sql = sql.encode('utf-8')
        # Imported lazily so the hook stays usable without pandas installed,
        # as long as this method is never called.
        import pandas.io.sql as psql
        with closing(self.get_conn()) as conn:
            return psql.read_sql(sql, con=conn, params=parameters)
def get_records(self, sql, parameters=None):
"""
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if sys.version_info[0] < 3:
sql = sql.encode('utf-8')
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchall()
def get_first(self, sql, parameters=None):
"""
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
:param parameters: The parameters to render the SQL query with.
:type parameters: mapping or iterable
"""
if sys.version_info[0] < 3:
sql = sql.encode('utf-8')
with closing(self.get_conn()) as conn:
with closing(conn.cursor()) as cur:
if parameters is not None:
cur.execute(sql, parameters)
else:
cur.execute(sql)
return cur.fetchone()
    def run(self, sql, autocommit=False, parameters=None):
        """
        Runs a command or a list of commands. Pass a list of sql
        statements to the sql parameter to get them to execute
        sequentially

        :param sql: the sql statement to be executed (str) or a list of
            sql statements to execute
        :type sql: str or list
        :param autocommit: What to set the connection's autocommit setting to
            before executing the query.
        :type autocommit: bool
        :param parameters: The parameters to render the SQL query with.
        :type parameters: mapping or iterable
        """
        # Normalize: a single statement becomes a one-element list.
        if isinstance(sql, basestring):
            sql = [sql]
        with closing(self.get_conn()) as conn:
            if self.supports_autocommit:
                self.set_autocommit(conn, autocommit)
            with closing(conn.cursor()) as cur:
                # All statements share one connection (and one transaction
                # unless autocommit is on); the same `parameters` are passed
                # to every statement.
                for s in sql:
                    if sys.version_info[0] < 3:
                        # Python 2 DB-API drivers generally expect byte strings.
                        s = s.encode('utf-8')
                    if parameters is not None:
                        self.log.info("{} with parameters {}".format(s, parameters))
                        cur.execute(s, parameters)
                    else:
                        self.log.info(s)
                        cur.execute(s)
            # If autocommit was set to False for db that supports autocommit,
            # or if db does not supports autocommit, we do a manual commit.
            if not self.get_autocommit(conn):
                conn.commit()
def set_autocommit(self, conn, autocommit):
"""
Sets the autocommit flag on the connection
"""
if not self.supports_autocommit and autocommit:
self.log.warning(
"%s connection doesn't support autocommit but autocommit activated.",
getattr(self, self.conn_name_attr)
)
conn.autocommit = autocommit
def get_autocommit(self, conn):
"""
Get autocommit setting for the provided connection.
Return True if conn.autocommit is set to True.
Return False if conn.autocommit is not set or set to False or conn
does not support autocommit.
:param conn: Connection to get autocommit setting from.
:type conn: connection object.
:return: connection autocommit setting.
:rtype: bool
"""
return getattr(conn, 'autocommit', False) and self.supports_autocommit
def get_cursor(self):
"""
Returns a cursor
"""
return self.get_conn().cursor()
def insert_rows(self, table, rows, target_fields=None, commit_every=1000,
replace=False):
"""
A generic way to insert a set of tuples into a table,
a new transaction is created every commit_every rows
:param table: Name of the target table
:type table: str
:param rows: The rows to insert into the table
:type rows: iterable of tuples
:param target_fields: The names of the columns to fill in the table
:type target_fields: iterable of strings
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:type commit_every: int
:param replace: Whether to replace instead of insert
:type replace: bool
"""
if target_fields:
target_fields = ", ".join(target_fields)
target_fields = "({})".format(target_fields)
else:
target_fields = ''
i = 0
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, False)
conn.commit()
with closing(conn.cursor()) as cur:
for i, row in enumerate(rows, 1):
lst = []
for cell in row:
lst.append(self._serialize_cell(cell, conn))
values = tuple(lst)
placeholders = ["%s", ] * len(values)
if not replace:
sql = "INSERT INTO "
else:
sql = "REPLACE INTO "
sql += "{0} {1} VALUES ({2})".format(
table,
target_fields,
",".join(placeholders))
cur.execute(sql, values)
if commit_every and i % commit_every == 0:
conn.commit()
self.log.info(
"Loaded %s into %s rows so far", i, table
)
conn.commit()
self.log.info("Done loading. Loaded a total of %s rows", i)
@staticmethod
def _serialize_cell(cell, conn=None):
"""
Returns the SQL literal of the cell as a string.
:param cell: The cell to insert into the table
:type cell: object
:param conn: The database connection
:type conn: connection object
:return: The serialized cell
:rtype: str
"""
if cell is None:
return None
if isinstance(cell, datetime):
return cell.isoformat()
return str(cell)
    def bulk_dump(self, table, tmp_file):
        """
        Dumps a database table into a tab-delimited file.

        Abstract hook point: database hooks with a native bulk-export
        facility override this.

        :param table: The name of the source table
        :type table: str
        :param tmp_file: The path of the target file
        :type tmp_file: str
        :raises NotImplementedError: always, in this base implementation
        """
        raise NotImplementedError()
    def bulk_load(self, table, tmp_file):
        """
        Loads a tab-delimited file into a database table.

        Abstract hook point: database hooks with a native bulk-import
        facility override this.

        :param table: The name of the target table
        :type table: str
        :param tmp_file: The path of the file to load into the table
        :type tmp_file: str
        :raises NotImplementedError: always, in this base implementation
        """
        raise NotImplementedError()
| apache-2.0 |
hobson/totalgood | totalgood/pacs/predictor.py | 1 | 4199 | #!python manage.py shell_plus <
import pandas as pd
np = pd.np
np.norm = np.linalg.norm
import sklearn
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer, TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.cross_validation import train_test_split
from pacs.models import RawCommittees
# Reference sklearn text-classification pipeline: bag-of-words counts,
# TF-IDF weighting, then a linear model trained with SGD.
# NOTE(review): this module-level pipeline is built but not referenced in
# the visible portion of this module -- confirm it is still needed.
pipeline = Pipeline([
    ('vect', CountVectorizer()),
    ('tfidf', TfidfTransformer()),
    ('clf', SGDClassifier()),
])
def debug():
    """Drop into the ipdb interactive debugger at the call site."""
    # Imported lazily so ipdb is only required when debugging.
    import ipdb
    ipdb.set_trace()
class PACClassifier(SGDClassifier):
    """SVM-style classifier (SGD-trained) that predicts a PAC committee's
    (type, subtype) label from its name, using TF-IDF features.

    NOTE(review): the default arguments below run Django ORM queries at
    class-definition (import) time, and the resulting arrays are shared
    across calls -- confirm this is intended.
    """
    def __init__(self,
                 names=np.array(RawCommittees.objects.values_list('committee_name', flat=True)),
                 labels=RawCommittees.objects.values_list('committee_type', 'committee_subtype'),
                 alpha=1e-5,
                 penalty='l1',
                 verbosity=1,
                 ):
        """Train a classifier that predicts a committee type, subtype (label) from its name

        Args:
            names (array of str): the committee names (space-delimitted words with a few words)
            labels (array of 2-tuple of str): the committee_type and subtype, Nones/NaNs/floats are stringified
            alpha (float): learning rate (sklearn TFIDF classifier examples use 1e-5 to 1e-6)
                default: 1e-5
            penalty: 'none', 'l2', 'l1', or 'elasticnet' # regularization penalty on the feature weights
            verbosity (int or None): 0 silences, 1 prints labels, >1 prints
                extra diagnostics

        Returns:
            SGDClassifier: Trained SVM classifier instance
        """
        super(PACClassifier, self).__init__(alpha=alpha, penalty=penalty)
        # NOTE(review): when verbosity is None, self.verbosity is never set,
        # yet it is read below -- this would raise AttributeError; confirm.
        if verbosity is not None:
            self.verbosity = verbosity
        # vectorizer = CountVectorizer(min_df=1)
        # word_bag = vectorizer.fit_transform(self.names)
        # print(word_bag)
        # Fall back to a fresh ORM query when names is not already an array/list.
        self.names = (names if isinstance(names, (list, np.ndarray))
                      else RawCommittees.objects.values_list('committee_name', flat=True))
        self.pac_type_tuples = RawCommittees.objects.values_list('committee_type', 'committee_subtype')
        # Stringify each (type, subtype) tuple so it can serve as one label.
        self.labels = np.array(list(labels or self.pac_type_tuples))
        # self.labels = [', '.join(str(s) for s in pt) for pt in self.pac_type_tuples]
        self.labels = np.array([str(lbl) for lbl in self.labels])
        # Map each distinct label string to a dense integer id for sklearn.
        self.label_set = sorted(np.unique(self.labels))
        self.label_dict = dict(list(zip(self.label_set, range(len(self.label_set)))))
        self.label_ints = np.array([self.label_dict[label] for label in self.labels])
        if self.verbosity > 1:
            print(pd.Series(self.labels))
        if self.verbosity > 0:
            print(np.unique(self.labels))
        # Unigram TF-IDF features over the committee names.
        self.tfidf = TfidfVectorizer(analyzer='word', ngram_range=(1, 1), stop_words='english')
        self.tfidf_matrix = self.tfidf.fit_transform(self.names)
        if verbosity > 1:
            print(self.tfidf.get_feature_names())
        # Hold out 25% of the rows for evaluation.
        self.train_tfidf, self.test_tfidf, self.train_labels, self.test_labels = train_test_split(
            self.tfidf_matrix, self.label_ints, test_size=.25)
        # alpha: learning rate (default 1e-4, but other TFIDF classifier examples use 1e-5 to 1e-6)
        # penalty: 'none', 'l2', 'l1', or 'elasticnet' # regularization penalty on the feature weights
        # NOTE(review): fit() returns self, so svn_matrix is the estimator
        # itself, not a matrix -- the attribute name is misleading.
        self.svn_matrix = self.fit(self.train_tfidf, self.train_labels)
        if verbosity > 0:
            print(self.score(self.train_tfidf, self.train_labels))
            # Typically > 98% recall (accuracy on training set)

    def predict_pac_type(self, name):
        """Predict (and print) the integer label for a single committee name.

        NOTE(review): transform() is given a bare string; sklearn vectorizers
        expect an iterable of documents, so this likely needs ``[name]`` --
        verify against a caller.
        """
        name = str(name)
        vec = self.tfidf.transform(name)
        predicted_label = self.predict(vec)
        print(predicted_label)
        return predicted_label

    def similarity(self, name1, name2):
        """Cosine similarity between the TF-IDF vectors of two names."""
        # tfidf is already normalized, so no need to divide by the norm of each vector?
        vec1, vec2 = self.tfidf.transform(np.array([name1, name2]))
        # cosine distance between two tfidf vectors
        return vec1.dot(vec2.T)[0, 0]

    def similarity_matrix(self):
        """Pairwise similarity matrix of all training names (X @ X.T)."""
        return self.tfidf_matrix * self.tfidf_matrix.T
| mit |
nigelsmall/py2neo | test/test_node.py | 1 | 6120 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2016, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from py2neo import Node, Relationship, cast_node, remote, RemoteEntity
from test.compat import long
from test.util import GraphTestCase
from py2neo.packages.httpstream import ClientError
class DodgyClientError(ClientError):
    """ClientError variant with a non-standard 4xx status code (499).

    NOTE(review): not referenced in the visible portion of this module.
    """
    status_code = 499
class NodeTestCase(GraphTestCase):
    """Behaviour of Node instances before and after being pushed to a remote
    graph: creation, equality between bound/unbound nodes and refreshing of
    stale labels/properties."""

    def test_can_create_local_node(self):
        # A purely local node keeps its labels and properties in memory.
        a = Node("Person", name="Alice", age=33)
        assert set(a.labels()) == {"Person"}
        assert dict(a) == {"name": "Alice", "age": 33}

    def test_can_create_remote_node(self):
        a = Node("Person", name="Alice", age=33)
        self.graph.create(a)
        # Creation must preserve labels/properties and attach remote metadata.
        assert set(a.labels()) == {"Person"}
        assert dict(a) == {"name": "Alice", "age": 33}
        assert remote(a).ref.startswith("node/")
        assert repr(remote(a))

    def test_bound_node_equals_unbound_node_with_same_properties(self):
        alice_1 = Node(name="Alice")
        alice_1.__remote__ = RemoteEntity("http://localhost:7474/db/data/node/1")
        alice_2 = Node(name="Alice")
        assert set(alice_1.labels()) == set(alice_2.labels())
        assert dict(alice_1) == dict(alice_2)

    def test_bound_node_equality(self):
        alice_1 = Node(name="Alice")
        alice_1.__remote__ = RemoteEntity("http://localhost:7474/db/data/node/1")
        # Presumably clears the instance cache so the second node is a fresh
        # object rather than the cached one -- TODO confirm.
        Node.cache.clear()
        alice_2 = Node(name="Alice")
        alice_2.__remote__ = RemoteEntity(remote(alice_1).uri)
        assert alice_1 == alice_2

    def test_unbound_node_equality(self):
        alice_1 = Node("Person", name="Alice")
        alice_2 = Node("Person", name="Alice")
        assert set(alice_1.labels()) == set(alice_2.labels())
        assert dict(alice_1) == dict(alice_2)

    def test_can_merge_unsaved_changes_when_querying_node(self):
        a = Node("Person", name="Alice")
        b = Node()
        self.graph.create(a | b | Relationship(a, "KNOWS", b))
        assert dict(a) == {"name": "Alice"}
        a["age"] = 33
        assert dict(a) == {"name": "Alice", "age": 33}
        # Running a query must not discard the unsaved local change.
        _ = list(self.graph.match(a, "KNOWS"))
        assert dict(a) == {"name": "Alice", "age": 33}

    def test_pull_node_labels_if_stale(self):
        a = Node("Thing")
        self.graph.create(a)
        a.remove_label("Thing")
        # Marking labels stale forces a refresh from the server, which still
        # holds the original label.
        a._Node__stale.add("labels")
        labels = a.labels()
        assert set(labels) == {"Thing"}

    def test_pull_node_property_if_stale(self):
        a = Node(foo="bar")
        self.graph.create(a)
        a["foo"] = None
        # Same as above, but for properties.
        a._Node__stale.add("properties")
        assert a["foo"] == "bar"
class AbstractNodeTestCase(GraphTestCase):
    """Tests for nodes that have not been bound to a remote graph."""

    def test_can_create_unbound_node(self):
        alice = Node(name="Alice", age=34)
        assert isinstance(alice, Node)
        assert not remote(alice)
        assert alice["name"] == "Alice"
        assert alice["age"] == 34

    def test_node_equality(self):
        alice_1 = Node(name="Alice", age=34)
        alice_2 = Node(name="Alice", age=34)
        assert set(alice_1.labels()) == set(alice_2.labels())
        assert dict(alice_1) == dict(alice_2)

    def test_node_inequality(self):
        alice = Node(name="Alice", age=34)
        bob = Node(name="Bob", age=56)
        assert alice != bob

    def test_node_is_never_equal_to_none(self):
        alice = Node(name="Alice", age=34)
        # BUG FIX: the original asserted ``alice is not None``, which is
        # trivially true for any fresh object and never exercises
        # Node.__eq__.  Comparing with ``!=`` actually tests the equality
        # protocol against None, which is what the test name promises.
        assert alice != None  # noqa: E711 -- deliberate __eq__ check
class ConcreteNodeTestCase(GraphTestCase):
    """Tests for nodes bound to a real graph: property-type round-tripping,
    rejected value types, relative URIs and hashing."""

    def test_can_create_concrete_node(self):
        alice = cast_node({"name": "Alice", "age": 34})
        self.graph.create(alice)
        assert isinstance(alice, Node)
        assert remote(alice)
        assert alice["name"] == "Alice"
        assert alice["age"] == 34

    def test_all_property_types(self):
        # One value per storable property type; each must round-trip intact.
        data = {
            "nun": None,
            "yes": True,
            "no": False,
            "int": 42,
            "float": 3.141592653589,
            "long": long("9223372036854775807"),
            "str": "hello, world",
            "unicode": u"hello, world",
            "boolean_list": [True, False, True, True, False],
            "int_list": [1, 1, 2, 3, 5, 8, 13, 21, 35],
            "str_list": ["red", "orange", "yellow", "green", "blue", "indigo", "violet"]
        }
        foo = cast_node(data)
        self.graph.create(foo)
        for key, value in data.items():
            self.assertEqual(foo[key], value)

    def test_cannot_assign_oversized_long(self):
        # 9223372036854775808 is 2**63, one past the signed 64-bit maximum.
        foo = Node()
        self.graph.create(foo)
        with self.assertRaises(ValueError):
            foo["long"] = long("9223372036854775808")

    def test_cannot_assign_mixed_list(self):
        foo = Node()
        self.graph.create(foo)
        with self.assertRaises(TypeError):
            foo["mixed_list"] = [42, "life", "universe", "everything"]

    def test_cannot_assign_dict(self):
        foo = Node()
        self.graph.create(foo)
        with self.assertRaises(TypeError):
            foo["dict"] = {"foo": 3, "bar": 4, "baz": 5}

    def test_relative_uri_of_bound_node(self):
        a = Node()
        self.graph.create(a)
        # The relative reference must be a suffix of the absolute URI.
        relative_uri_string = remote(a).ref
        assert remote(a).uri.string.endswith(relative_uri_string)
        assert relative_uri_string.startswith("node/")

    def test_node_hashes(self):
        # Two distinct local objects bound to the same remote URI must stay
        # distinct instances yet hash equally.
        node_1 = Node("Person", name="Alice")
        self.graph.create(node_1)
        node_2 = Node("Person", name="Alice")
        node_2.__remote__ = RemoteEntity(remote(node_1).uri)
        assert node_1 is not node_2
        assert hash(node_1) == hash(node_2)
| apache-2.0 |
bell345/pseudo-interpreter | pseudo/parse.py | 1 | 12232 | #!/usr/bin/env python3
from .token import *
from .expr import *
from .code import *
def skip_eol(ctx):
    """Consume consecutive end-of-line tokens and return the raw context
    captured just before the first non-EOL token."""
    context = ctx.raw_context()
    while ctx.peek_token() == Token('eol', ''):
        ctx.token()
        context = ctx.raw_context()
    return context
def pseudo_code_element(ctx):
    """Parse one top-level element: a PROGRAM/MODULE definition when one
    starts here, otherwise a single statement."""
    with ctx.ready_context(skip_eol(ctx)):
        element = pseudo_program(ctx) or statement(ctx)
        return element.assoc(ctx)
def pseudo_program(ctx):
    """Parse a ``PROGRAM name BEGIN ... END`` or a
    ``MODULE name [PARAM name]* BEGIN ... END`` definition.

    Returns a PseudoProgram/PseudoModule node, or None when the lookahead
    token starts neither construct.
    """
    token = ctx.peek_token()
    if token == Token('keyword', 'PROGRAM'):
        ctx.token()
        with ctx.ready_context():
            ident = ctx.token()
            if ident is None or ident.type != 'identifier':
                raise ParseExpected(ctx, 'program name', ident)
        with ctx.nest():
            # BEGIN may appear on a later line, so skip EOLs first.
            with ctx.ready_context(skip_eol(ctx)):
                begin_kw = ctx.token()
                if begin_kw != Token('keyword', 'BEGIN'):
                    raise ParseExpected(ctx, 'BEGIN', begin_kw)
            statements = statement_list(ctx)
            return PseudoProgram(ident.value, statements).assoc(ctx)
    elif token == Token('keyword', 'MODULE'):
        ctx.token()
        with ctx.ready_context():
            ident = ctx.token()
            if ident is None or ident.type != 'identifier':
                raise ParseExpected(ctx, 'program name', ident)
        with ctx.nest():
            # Collect PARAM declarations until the BEGIN keyword.
            params = []
            while True:
                with ctx.ready_context(skip_eol(ctx)):
                    kw = ctx.token()
                    if kw == Token('keyword', 'PARAM'):
                        name = ctx.token()
                        if name is None or name.type != 'identifier':
                            raise ParseExpected(ctx, 'parameter name', name)
                        params.append(name.value)
                    elif kw == Token('keyword', 'BEGIN'):
                        break
                    else:
                        raise ParseExpected(ctx, 'PARAM or BEGIN', kw)
            statements = statement_list(ctx)
            return PseudoModule(ident.value, params, statements).assoc(ctx)
def statement_list(ctx, end_kw='END', consume_end=True):
    """Parse statements until a terminating keyword is reached.

    :param end_kw: keyword (or tuple of keywords) that ends the list;
        a bare END always terminates as well
    :param consume_end: whether the terminating keyword is consumed
    """
    if isinstance(end_kw, str):
        end_kw = (end_kw,)

    def check_end():
        # Returns True (ending the list) on END or on any of end_kw;
        # returns None implicitly otherwise.
        skip_eol(ctx)
        token = ctx.peek_token()
        if token == Token('keyword', 'END'):
            if consume_end:
                ctx.token()
                # treat END, END IF, END WHILE equally
                qual = ctx.peek_token()
                if qual.type == 'keyword':
                    ctx.token()
            return True
        elif token.type == 'keyword' and token.value in end_kw:
            if consume_end:
                ctx.token()
            return True

    with ctx.nest():
        statements = []
        while not check_end():
            with ctx.ready_context():
                stmt = statement(ctx)
                if not stmt:
                    raise ParseExpected(ctx, 'statement')
                statements.append(stmt)
    return statements
def statement(ctx, no_eol=False):
    """Parse a single statement, trying each statement kind in order:
    assignment, selection, iteration, jump, I/O, then bare expression.

    When *no_eol* is false the statement must be terminated by an
    end-of-line token, which is consumed here.
    """
    result = (assignment_stmt(ctx)
              or selection(ctx)
              or iteration(ctx)
              or jump(ctx)
              or io_statement(ctx)
              or expr_stmt(ctx))
    if not no_eol:
        with ctx.ready_context():
            eol = ctx.token()
            if eol != Token('eol', ''):
                raise ParseExpected(ctx, 'end of statement', eol)
    return result
def expr_stmt(ctx):
    """Parse an expression used as a statement.

    Expression and Statement results are passed through unchanged; any
    other parse result is wrapped in a LiteralExpression.
    """
    expr = expression(ctx)
    # FIX (idiom): the original had two identical branches for Expression
    # and Statement; a single isinstance check with a tuple is equivalent.
    if isinstance(expr, (Expression, Statement)):
        return expr
    return LiteralExpression(expr).assoc(ctx)
def selection(ctx):
    """Parse an IF statement, including ELSE / ELSE IF chains.

    Returns an IfStatement, or None when the lookahead is not IF.
    """
    if_kw = ctx.peek_token()
    if if_kw == Token('keyword', 'IF'):
        ctx.token()
        with ctx.ready_context():
            cond = conditional_expr(ctx)
            if not cond:
                raise ParseExpected(ctx, 'conditional')
        # THEN/DO after the condition is optional.
        then_kw = ctx.peek_token()
        if then_kw.type == 'keyword' and then_kw.value in ('THEN', 'DO'):
            ctx.token()  # consume peek
        # Leave ELSE/END unconsumed so we can dispatch on them below.
        stmt_list = statement_list(ctx, end_kw=('ELSE', 'END'), consume_end=False)
        else_list = []
        else_kw = ctx.peek_token()
        if else_kw == Token('keyword', 'ELSE'):
            ctx.token()  # consume peek
            if ctx.peek_token() == Token('keyword', 'IF'):
                # ELSE IF: recurse via statement(); if is left unconsumed
                else_list = [statement(ctx, no_eol=True)]
            else:
                else_list = statement_list(ctx)
        elif else_kw == Token('keyword', 'END'):
            ctx.token()  # consume END peek
            if ctx.peek_token() == Token('keyword', 'IF'):
                ctx.token()  # consume END IF peek
        return IfStatement(cond, stmt_list, else_list).assoc(ctx)
def iteration(ctx):
    """Parse a WHILE ... REPEAT or FOR x = a TO b ... NEXT loop.

    Returns a WhileStatement/ForStatement, or None when the lookahead
    starts neither loop form.
    """
    iter_kw = ctx.peek_token()
    if iter_kw == Token('keyword', 'WHILE'):
        ctx.token()  # consume peek
        with ctx.ready_context():
            while_cond = conditional_expr(ctx)
            if not while_cond:
                raise ParseExpected(ctx, 'conditional')
        # THEN/DO after the condition is optional.
        then_kw = ctx.peek_token()
        if then_kw.type == 'keyword' and then_kw.value in ('THEN', 'DO'):
            ctx.token()
        stmt_list = statement_list(ctx, end_kw='REPEAT')
        return WhileStatement(while_cond, stmt_list).assoc(ctx)
    elif iter_kw == Token('keyword', 'FOR'):
        ctx.token()  # consume peek
        with ctx.ready_context():
            start_expr = assignment_stmt(ctx)
            if not start_expr:
                raise ParseExpected(ctx, 'assignment')
        with ctx.ready_context():
            to_kw = ctx.token()
            if to_kw != Token('keyword', 'TO'):
                raise ParseExpected(ctx, 'TO', to_kw)
        with ctx.ready_context():
            end_expr = conditional_expr(ctx)
            if not end_expr:
                raise ParseExpected(ctx, 'expression')
        then_kw = ctx.peek_token()
        if then_kw.type == 'keyword' and then_kw.value in ('THEN', 'DO'):
            ctx.token()
        stmt_list = statement_list(ctx, end_kw='NEXT')
        return ForStatement(start_expr, end_expr, stmt_list).assoc(ctx)
def jump(ctx):
    """Parse a BREAK, CONTINUE or RETURN statement.

    Returns the corresponding statement node, or None when the lookahead
    token starts no jump.
    """
    keyword = ctx.peek_token()
    if keyword == Token('keyword', 'BREAK'):
        ctx.token()
        return BreakStatement().assoc(ctx)
    if keyword == Token('keyword', 'CONTINUE'):
        ctx.token()
        return ContinueStatement().assoc(ctx)
    if keyword == Token('keyword', 'RETURN'):
        ctx.token()
        with ctx.ready_context():
            value = expression(ctx)
            if not value:
                raise ParseExpected(ctx, 'expression')
        return ReturnStatement(value).assoc(ctx)
def io_statement(ctx):
    """Parse a RUN, INPUT, OUTPUT or PRINT statement.

    Returns a KeywordExpression, or None when the lookahead starts none
    of these statements.
    """
    io_kw = ctx.peek_token()
    if io_kw == Token('keyword', 'RUN'):
        ctx.token()
        with ctx.ready_context():
            ref = expression(ctx)
            if not isinstance(ref, VariableReference):
                raise ParseExpected(ctx, 'module reference', ref)
        return KeywordExpression(io_kw, ref).assoc(ctx)
    if io_kw == Token('keyword', 'INPUT'):
        ctx.token()
        # Optional type keyword between INPUT and the target variable;
        # defaults to an empty keyword token.
        type_ = Token('keyword', '')
        type_kw = ctx.peek_token()
        if type_kw.type == 'keyword':
            type_ = type_kw
            ctx.token()
        with ctx.ready_context():
            ref = expression(ctx)
            if not isinstance(ref, VariableReference):
                raise ParseExpected(ctx, 'variable reference', ref)
        return KeywordExpression(io_kw, type_, ref).assoc(ctx)
    elif io_kw.type == 'keyword' and io_kw.value in ('OUTPUT', 'PRINT'):
        ctx.token()
        args = argument_list(ctx)
        return KeywordExpression(io_kw, *args).assoc(ctx)
def assignment_stmt(ctx):
    """Parse ``<identifier> (=|<-|:=) <expression>``.

    Returns an AssignmentStatement, or None when the lookahead token is
    not an identifier.
    """
    lhs = ctx.peek_token()
    if lhs.type != 'identifier':
        return None
    ctx.token()
    target = VariableReference(lhs.value).assoc(ctx)
    with ctx.ready_context():
        op = ctx.token()
        if op.type != 'operator' or op.value not in ('=', '<-', ':='):
            raise ParseExpected(ctx, 'assignment operator', op)
    with ctx.ready_context():
        rhs = expression(ctx)
        if not rhs:
            raise ParseExpected(ctx, 'expression')
    return AssignmentStatement(target, rhs).assoc(ctx)
def expression(ctx):
    """Parse a full expression; alias for the lowest-precedence level
    (the conditional expression)."""
    return conditional_expr(ctx)
def conditional_expr(ctx):
    """Parse a conditional (the top of the binary-operator precedence
    chain); raise ParseExpected when nothing parses."""
    result = or_expr(ctx)
    if not result:
        raise ParseExpected(ctx, 'conditional')
    return result
def unary_expr(ctx):
    """Parse a (possibly nested) unary-operator expression; falls through
    to postfix_expr when no unary operator is present."""
    with ctx.ready_context():
        op = ctx.peek_token()
        if op.type != 'operator' or op.value not in UNARY_OPERATORS:
            return postfix_expr(ctx)
        ctx.token()
        # Recurse so stacked operators (e.g. ``--x``) parse naturally.
        with ctx.ready_context():
            arg = unary_expr(ctx)
            if not arg:
                raise ParseExpected(ctx, 'expression')
        return UnaryExpression(op, arg).assoc(ctx)
def postfix_expr(ctx):
    """Parse a primary expression optionally followed by a call suffix
    ``( args )``; a call target must be a VariableReference."""
    with ctx.ready_context():
        arg = primary_expr(ctx)
        if not arg:
            raise ParseExpected(ctx, 'expression')
    op = ctx.peek_token()
    if op == Token('symbol', '('):
        ctx.token()
        if not isinstance(arg, VariableReference):
            raise ParseExpected(ctx, 'module reference', arg)
        args = []
        # Empty argument list: immediately closing bracket.
        end_bracket = ctx.peek_token()
        if end_bracket == Token('symbol', ')'):
            ctx.token()
        else:
            args = argument_list(ctx)
            with ctx.ready_context():
                end_bracket = ctx.peek_token()
                if end_bracket != Token('symbol', ')'):
                    raise ParseExpected(ctx, ')', end_bracket)
                ctx.token()
        arg = ModuleReference(arg.name, args).assoc(ctx)
    return arg
def argument_list(ctx):
    """Parse one or more comma-separated expressions and return them as a
    list; stops at the first token that is not a comma."""
    args = []
    while True:
        with ctx.ready_context():
            arg = expression(ctx)
            if not arg:
                raise ParseExpected(ctx, 'expression')
            args.append(arg)
        op = ctx.peek_token()
        if op == Token('symbol', ','):
            ctx.token()
            continue
        else:
            return args
def primary_expr(ctx):
    """Parse a literal, a variable reference, or a parenthesised
    expression; raises ParseExpected on anything else."""
    res = ctx.token()
    #print("Got primary token: {}".format(res))
    if res.type in ('number', 'string'):
        return LiteralExpression(res).assoc(ctx)
    elif res.type == 'identifier':
        return VariableReference(res.value).assoc(ctx)
    elif res != Token('symbol', '('):
        raise ParseExpected(ctx, 'expression', res)
    # Parenthesised sub-expression: parse it and require the closing ')'.
    res = expression(ctx)
    with ctx.ready_context():
        end_bracket = ctx.token()
        if end_bracket != Token('symbol', ')'):
            raise ParseExpected(ctx, "')'", end_bracket)
    return res
def _binary_expr(ops, _next_expr):
    """Build a parser for one binary-operator precedence level.

    :param ops: operator spellings accepted at this level
    :param _next_expr: parser for the next-tighter precedence level
    :return: a parser function; note it recurses into itself for the
        right-hand side, so operators at one level group right-to-left
    """
    def _curr_expr(ctx):
        with ctx.ready_context():
            arg1 = _next_expr(ctx)
            if not arg1:
                raise ParseExpected(ctx, 'expression')
        op = ctx.peek_token()
        #print("Peeked bexpr: {}".format(op))
        if op.value not in ops or op.type not in ('identifier', 'keyword', 'operator'):
            return arg1
        ctx.token()
        with ctx.ready_context():
            arg2 = _curr_expr(ctx)
            if not arg2:
                raise ParseExpected(ctx, 'expression')
        return BinaryExpression(op, arg1, arg2).assoc(ctx)
    return _curr_expr
# Precedence chain: each level parses the next-tighter level first and only
# then looks for its own operators, mirroring conventional precedence.
multiply_expr = _binary_expr((MUL_OPERATORS + DIV_OPERATORS), unary_expr)
add_expr = _binary_expr((ADD_OPERATORS + SUB_OPERATORS), multiply_expr)
relation_expr = _binary_expr((LT_OPERATORS + GT_OPERATORS + LE_OPERATORS + GE_OPERATORS), add_expr)
eq_expr = _binary_expr((EQ_OPERATORS + NEQ_OPERATORS), relation_expr)
binary_and_expr = _binary_expr(BINARY_AND_OPERATORS, eq_expr)
binary_xor_expr = _binary_expr(BINARY_XOR_OPERATORS, binary_and_expr)
binary_or_expr = _binary_expr(BINARY_OR_OPERATORS, binary_xor_expr)
and_expr = _binary_expr(AND_OPERATORS, binary_or_expr)
# BUG FIX: this line previously ended with a stray " | mit | " fragment
# (a paste/extraction artifact matching the file-separator format seen
# elsewhere), which would raise NameError at import time.
or_expr = _binary_expr(OR_OPERATORS, and_expr)
hackerkid/zulip | corporate/migrations/0003_customerplan.py | 6 | 1658 | # Generated by Django 1.11.16 on 2018-12-22 21:05
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the CustomerPlan model for corporate billing plans.

    Auto-generated migration; the field set mirrors the model definition
    at the time of generation and must not be edited retroactively.
    """

    dependencies = [
        ("corporate", "0002_customer_default_discount"),
    ]

    operations = [
        migrations.CreateModel(
            name="CustomerPlan",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
                    ),
                ),
                ("licenses", models.IntegerField()),
                ("automanage_licenses", models.BooleanField(default=False)),
                ("charge_automatically", models.BooleanField(default=False)),
                ("price_per_license", models.IntegerField(null=True)),
                ("fixed_price", models.IntegerField(null=True)),
                ("discount", models.DecimalField(decimal_places=4, max_digits=6, null=True)),
                ("billing_cycle_anchor", models.DateTimeField()),
                ("billing_schedule", models.SmallIntegerField()),
                ("billed_through", models.DateTimeField()),
                ("next_billing_date", models.DateTimeField(db_index=True)),
                ("tier", models.SmallIntegerField()),
                ("status", models.SmallIntegerField(default=1)),
                (
                    "customer",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE, to="corporate.Customer"
                    ),
                ),
            ],
        ),
    ]
| apache-2.0 |
cdcapano/pycbc | pycbc/waveform/multiband.py | 2 | 3724 | """ Tools and functions to calculate interpolate waveforms using multi-banding
"""
import numpy
from pycbc.types import TimeSeries, zeros
def multiband_fd_waveform(bands=None, lengths=None, overlap=0, **p):
    """ Generate a fourier domain waveform using multibanding

    Speed up generation of a fourier domain waveform using multibanding. This
    allows for multi-rate sampling of the frequency space. Each band is
    smoothed and stitched together to produce the final waveform. The base
    approximant must support 'f_ref' and 'f_final'. The other parameters
    must be chosen carefully by the user.

    Parameters
    ----------
    bands: list or str
        The frequencies to split the waveform by. These should be chosen
        so that the corresponding length include all the waveform's frequencies
        within this band.
    lengths: list or str
        The corresponding length for each frequency band. This sets the
        resolution of the subband and should be chosen carefully so that it is
        sufficiently long to include all of the bands frequency content.
    overlap: float
        The frequency width to apply tapering between bands.
    params: dict
        The remaining keyword arguments passed to the base approximant
        waveform generation.

    Returns
    -------
    hp: pycbc.types.FrequencySeries
        Plus polarization
    hc: pycbc.type.FrequencySeries
        Cross polarization
    """
    from pycbc.waveform import get_fd_waveform

    # Bands/lengths may arrive as space-separated strings (e.g. from a
    # configuration file); normalize them to lists of floats.
    if isinstance(bands, str):
        bands = [float(s) for s in bands.split(' ')]

    if isinstance(lengths, str):
        lengths = [float(s) for s in lengths.split(' ')]

    p['approximant'] = p['base_approximant']
    df = p['delta_f']
    fmax = p['f_final']
    flow = p['f_lower']

    # Full set of band edges, and the frequency resolution for each band.
    bands = [flow] + bands + [fmax]
    dfs = [df] + [1.0 / l for l in lengths]

    dt = 1.0 / (2.0 * fmax)
    tlen = int(1.0 / dt / df)
    # NOTE: the unused pre-loop ``flen`` assignment was removed; flen is
    # recomputed per sub-band inside the loop below.
    wf_plus = TimeSeries(zeros(tlen, dtype=numpy.float32),
                         copy=False, delta_t=dt, epoch=-1.0/df)
    wf_cross = TimeSeries(zeros(tlen, dtype=numpy.float32),
                          copy=False, delta_t=dt, epoch=-1.0/df)

    # Iterate over the sub-bands
    for i in range(len(lengths) + 1):
        # Taper any edge that joins a neighbouring band (not the outermost
        # edges of the full frequency range).
        taper_start = taper_end = False
        if i != 0:
            taper_start = True
        if i != len(lengths):
            taper_end = True

        # Generate waveform for sub-band of full waveform
        start = bands[i]
        stop = bands[i + 1]

        p2 = p.copy()
        p2['delta_f'] = dfs[i]
        p2['f_lower'] = start
        p2['f_final'] = stop

        # Extend the band edges by half the overlap width for tapering.
        if taper_start:
            p2['f_lower'] -= overlap / 2.0
        if taper_end:
            p2['f_final'] += overlap / 2.0

        tlen = int(1.0 / dt / dfs[i])
        # BUG FIX: this was ``tlen / 2 + 1``, which is a float under
        # Python 3 and breaks ``h.resize(flen)`` below; use floor division
        # (consistent with the ``//`` arithmetic already used for tapers).
        flen = tlen // 2 + 1
        hp, hc = get_fd_waveform(**p2)

        # apply window function to smooth over transition regions
        kmin = int(p2['f_lower'] / dfs[i])
        kmax = int(p2['f_final'] / dfs[i])
        taper = numpy.hanning(int(overlap * 2 / dfs[i]))
        for wf, h in zip([wf_plus, wf_cross], [hp, hc]):
            h = h.astype(numpy.complex64)
            if taper_start:
                h[kmin:kmin + len(taper) // 2] *= taper[:len(taper)//2]
            if taper_end:
                l, r = kmax - (len(taper) - len(taper) // 2), kmax
                h[l:r] *= taper[len(taper)//2:]

            # add frequency band to total and use fft to interpolate
            h.resize(flen)
            h = h.to_timeseries()
            wf[len(wf)-len(h):] += h

    return (wf_plus.to_frequencyseries().astype(hp.dtype),
            wf_cross.to_frequencyseries().astype(hp.dtype))
| gpl-3.0 |
AlanCristhian/sugar | sugar.py | 1 | 16534 | """Python 3 functional programing experiments."""
import collections
import itertools
import inspect
import opcode
import types
__all__ = ["Let", "Expression", "Raise", "Where"]
def _make_closure_cell(value):
"""The types.FunctionType class ned a cell variable in the
closure argument. This function create such type with the value.
"""
def nested():
return value
return nested.__closure__[0]
def _change_op_code(position, op_code, instruction, old_names, new_constants,
                    custom_co_code, custom_co_names, custom_co_consts):
    """Rewrite one name-lookup opcode into a LOAD_CONST when the looked-up
    name appears in *new_constants*.

    The three ``custom_co_*`` arguments are mutated in place by the caller's
    loop, hence the list assertions below.

    NOTE: the two-byte argument arithmetic here assumes the pre-3.6
    CPython bytecode layout (opcode + 16-bit argument); it is not valid
    for 3.6+ wordcode.
    """
    # NOTE: ensure that bellow variables are list because I need to mutate them
    assert type(custom_co_code) is list
    assert type(custom_co_names) is list
    assert type(custom_co_consts) is list
    if op_code == opcode.opmap[instruction]:
        # Reassemble the 16-bit little-endian opcode argument.
        op_code_argument = custom_co_code[position + 1] + \
            (custom_co_code[position + 2] << 8)
        # Can't use the new_name variable directly because if I clean the
        # name position get an IndexError.
        name = old_names[op_code_argument]
        if name in new_constants:
            new_value = new_constants[name]
            # Reuse an identical existing constant slot when possible.
            for value_position, existent_value in enumerate(custom_co_consts):
                if new_value is existent_value:
                    break
            # Add the new_value to custom_co_consts if such value not exists.
            else:
                value_position = len(custom_co_consts)
                custom_co_consts.append(new_value)
            # A replaced free-variable lookup no longer needs its name.
            if instruction == 'LOAD_DEREF':
                custom_co_names.remove(name)
            # Patch the opcode in place: LOAD_CONST + 16-bit const index.
            custom_co_code[position] = opcode.opmap['LOAD_CONST']
            custom_co_code[position + 1] = value_position & 0xFF
            custom_co_code[position + 2] = value_position >> 8
def _inject_constants(function, new_constants):
    """Return a copy of of the `function` parameter. This copy have
    the new_constants defined in the `new_constants` map. If a key of
    `new_constants` share the same name than a global or local object,
    then replace such global or local by the value defined in the
    `new_constants` argument.

    NOTE: like _change_op_code, this assumes the pre-3.6 bytecode layout
    where argument-carrying opcodes occupy three bytes.
    """
    # Store in list because I need to mutate them.
    custom_co_code = list(function.__code__.co_code)
    custom_co_consts = list(function.__code__.co_consts)
    custom_co_freevars = list(function.__code__.co_freevars)
    custom_co_names = list(function.__code__.co_names)

    # Walk the list of instructions and change 'custom_co_code',
    # 'custom_co_consts', 'custom_co_freevars' and 'custom_co_names'.
    enumerate_custom_co_code = enumerate(custom_co_code)
    for position, op_code in enumerate_custom_co_code:
        # Replace global lookups by the values defined in *new_constants*.
        # function.__code__.co_names store names of all global variables.
        _change_op_code(position, op_code, 'LOAD_GLOBAL',
                        function.__code__.co_names, new_constants,
                        # bellow variables are mutated by the function
                        custom_co_code, [], custom_co_consts)
        # Replace local lookups by the values defined in *new_constants*.
        # function.__code__.co_freevars store names of all local variables
        _change_op_code(position, op_code, 'LOAD_DEREF',
                        function.__code__.co_freevars, new_constants,
                        # bellow variables are mutated by the function
                        custom_co_code, custom_co_freevars, custom_co_consts)
        # Skip the two argument bytes of argument-carrying opcodes.
        if op_code >= opcode.HAVE_ARGUMENT:
            next(enumerate_custom_co_code)
            next(enumerate_custom_co_code)

    # create a new 'code object' (like function.__code__)
    custom_code = types.CodeType(function.__code__.co_argcount,
                                 function.__code__.co_kwonlyargcount,
                                 function.__code__.co_nlocals,
                                 function.__code__.co_stacksize,
                                 function.__code__.co_flags,
                                 bytes(custom_co_code),
                                 tuple(custom_co_consts),
                                 tuple(custom_co_names),
                                 function.__code__.co_varnames,
                                 function.__code__.co_filename,
                                 function.__code__.co_name,
                                 function.__code__.co_firstlineno,
                                 function.__code__.co_lnotab,
                                 tuple(custom_co_freevars),
                                 function.__code__.co_cellvars)

    # Customize the argument of the function object
    _code = custom_code
    _globals = function.__globals__
    _name = function.__name__
    _argdef = function.__defaults__
    # NOTE(review): the cells here wrap the free-variable *names* (strings),
    # not the original captured values -- confirm this is intended.
    _closure = tuple(_make_closure_cell(variable) for variable in
                     custom_co_freevars)

    # Make and return the new function
    return types.FunctionType(_code, _globals, _name, _argdef, _closure)
# Templates for binary dunders where the instance is the LEFT operand:
# rendered as "self_expr <op> (other_expr)".
_LEFT_OPERATOR = [
    ('__add__', '%s+(%s)'),
    ('__and__', '%s&(%s)'),
    ('__div__', '%s/(%s)'),
    ('__eq__', '%s==(%s)'),
    ('__floordiv__', '%s//(%s)'),
    ('__ge__', '%s>=(%s)'),
    ('__gt__', '%s>(%s)'),
    ('__le__', '%s<=(%s)'),
    ('__lshift__', '%s<<(%s)'),
    ('__lt__', '%s<(%s)'),
    ('__matmul__', '%s@(%s)'),
    ('__mod__', '%s%%(%s)'),
    ('__mul__', '%s*(%s)'),
    ('__ne__', '%s!=(%s)'),
    ('__or__', '%s|(%s)'),
    ('__pow__', '%s**(%s)'),
    ('__rshift__', '%s>>(%s)'),
    ('__sub__', '%s-(%s)'),
    ('__truediv__', '%s/(%s)'),
    ('__xor__', '%s^(%s)'),
]

# Templates for reflected binary dunders (instance on the RIGHT):
# rendered as "(other_expr) <op> self_expr".
_RIGHT_OPERATOR = [
    ('__radd__', '(%s)+%s'),
    ('__rand__', '(%s)&%s'),
    ('__rdiv__', '(%s)/%s'),
    ('__rfloordiv__', '(%s)//%s'),
    ('__rlshift__', '(%s)<<%s'),
    ('__rmatmul__', '(%s)@%s'),
    ('__rmod__', '(%s)%%%s'),
    ('__rmul__', '(%s)*%s'),
    ('__ror__', '(%s)|%s'),
    ('__rpow__', '(%s)**%s'),
    ('__rrshift__', '(%s)>>%s'),
    ('__rsub__', '(%s)-%s'),
    ('__rtruediv__', '(%s)/%s'),
    ('__rxor__', '(%s)^%s'),
]

# Templates for unary dunders, rendered as "<op>(self_expr)".
_UNARY_OPERATOR = [
    ('__invert__', '~(%s)'),
    ('__neg__', '-(%s)'),
    ('__pos__', '+(%s)'),
]

# Templates for dunders that correspond to built-in function calls,
# rendered as "func(self_expr[, args][, kwargs])".
_BUILT_IN_FUNCTIONS = [
    ('__abs__', 'abs(%s%s%s)'),
    ('__round__', 'round(%s%s%s)'),
    ('__reversed__', 'reversed(%s%s%s)'),

    # FIXME: folowing methods did not work. View FailedExpressionBehaviours
    # class in the tests/test_suger.py module.
    # ('__instancecheck__', 'isinstance(%s%s%s)'),
    # ('__subclasscheck__', 'issubclass(%s%s%s)'),
    # ('__contains__', 'contains(%s%s%s)'),
    # ('__len__', 'len(%s%s%s)'),
    # ('__iter__', 'iter(%s%s%s)'),

    # TODO:
    # ('__bytes__', 'bytes(%s%s%s)'),
    # ('__format__', 'format(%s%s%s)'),
    # ('__hash__', 'hash(%s%s%s)'),
    # ('__bool__', 'bool(%s%s%s)'),
    # ('__setattr__', 'setattr(%s%s%s)'),
    # ('__delattr__', 'delattr(%s%s%s)'),
    # ('__dir__', 'dir(%s%s%s)'),
]
def _left_operator(template):
    """Build a dunder method that renders a binary expression with the
    instance's expression string on the left-hand side of *template*."""
    def operator(self, other):
        # Other Expression objects contribute their expression string;
        # plain values contribute their repr().
        rhs = other.__expr__ if hasattr(other, '__expr__') else repr(other)
        result = Expression("")
        result.__expr__ = template % (self.__expr__, rhs)
        return result
    return operator
def _right_operator(template):
    """Build a dunder method that renders a binary expression with the
    instance's expression string on the right-hand side of *template*."""
    def operator(self, other):
        # Other Expression objects contribute their expression string;
        # plain values contribute their repr().
        lhs = other.__expr__ if hasattr(other, '__expr__') else repr(other)
        result = Expression("")
        result.__expr__ = template % (lhs, self.__expr__)
        return result
    return operator
def _unary_operator(template):
    """Build a dunder method that renders a unary operator applied to the
    instance's expression string."""
    def operator(self):
        rendered = Expression("")
        rendered.__expr__ = template % self.__expr__
        return rendered
    return operator
# The __call__ method differs from the other special methods in the
# separator variable, so the separator is exposed as a default argument.
def _built_in_function(template, separator=', '):
    """Return a method that renders a built-in function call expression.

    ``template`` receives three values: the expression's own text, the
    formatted positional arguments and the formatted keyword arguments.
    With ``separator=""`` the same machinery renders a direct call on the
    expression (``__call__``).
    """
    def function(self, *args, **kwds):
        formated_kwds, formated_args = "", ""
        if args:
            # BUG FIX: repr() each argument individually.  The previous
            # repr(args)[1:][:-2] slicing truncated the last positional
            # argument whenever more than one was given.
            formated_args = separator + ', '.join(repr(arg) for arg in args)
        if kwds:
            add_equal = ('%s=%r' % (key, value) for key, value in kwds.items())
            # Keyword arguments follow positional ones after ', '; when they
            # are the only arguments they take the template's own separator
            # (previously a hard-coded ', ' produced e.g. 'f(, a=1)').
            formated_kwds = (', ' if args else separator) + ', '.join(add_equal)
        result = Expression("")
        result.__expr__ = template % (self.__expr__, formated_args,
                                      formated_kwds)
        return result
    return function
class _DefineAllOperatorsMeta(type):
    """Metaclass that injects every operator/builtin special method.

    Each injected method returns an Expression describing the operation
    instead of computing a value.
    """
    def __new__(cls, name, bases, namespace):
        for method_name, template in _LEFT_OPERATOR:
            namespace[method_name] = _left_operator(template)
        for method_name, template in _RIGHT_OPERATOR:
            namespace[method_name] = _right_operator(template)
        for method_name, template in _UNARY_OPERATOR:
            namespace[method_name] = _unary_operator(template)
        for method_name, template in _BUILT_IN_FUNCTIONS:
            namespace[method_name] = _built_in_function(template)
        # __call__ renders 'name(args)' with no separator before the args.
        namespace['__call__'] = _built_in_function(template='%s(%s%s)',
                                                   separator="")
        return super().__new__(cls, name, bases, namespace)
class Expression(metaclass=_DefineAllOperatorsMeta):
    """Symbolic value that records every operation applied to it.

    The textual form of the accumulated expression lives in ``__expr__``
    and is also what ``repr()`` returns.
    """
    def __init__(self, name, bases=()):
        # ``bases`` is accepted for signature compatibility; only the
        # textual name is stored.
        self.__expr__ = name
    def __repr__(self):
        return self.__expr__
    def __getattr__(self, attr):
        # Attribute access is itself recorded as a new expression.
        return Expression('(%s).%s' % (self.__expr__, attr))
    def __getitem__(self, attr):
        # Subscription is recorded with the key's repr.
        return Expression('(%s)[%r]' % (self.__expr__, attr))
    def __hash__(self):
        return hash(self.__expr__)
class _Body:
    """Base holder for the template fields of a generated function body."""
    def __init__(self, expression):
        # All template slots of the generated source start out empty.
        self.environ = dict.fromkeys(
            ("docstring", "arguments", "expression", "constants", "pattern"),
            "")
    def where(self, **constants):
        """Inject constants in the function; return self for chaining."""
        if not constants:
            return self
        assignments = ("    %s = %r" % item for item in constants.items())
        self.environ["constants"] = '\n'.join(assignments) + '\n'
        return self
class Do(_Body):
    """Wrap an unconditional function body (the set-literal syntax)."""
    def __init__(self, body):
        super().__init__(body)
        # Raw body: an Expression, a Raise marker, or a plain constant.
        self.body = body
class Ward(_Body):
    """Wrap a guarded function body (the dict/list-of-pairs syntax)."""
    def __init__(self, pattern):
        super().__init__(pattern)
        # Mapping of guard condition -> result (Expression/Raise/constant).
        self.pattern = pattern
class Raise:
    """Mark a body or guard result as "raise this error".

    ``error`` is kept as-is when no message is given (e.g. an exception
    class); otherwise it is called with ``message`` to build the instance.
    """
    def __init__(self, error, message=None):
        if message is None:
            self.error = error
        else:
            self.error = error(message)
def _convert_variables_to_exressions(function):
    """Replace defined locals and globals by an Expression object.

    Inspects the lambda's code object: every global name or free variable
    that also appears among the function's constants gets shadowed by an
    Expression of the same name, and the result is injected back into the
    function via _inject_constants.
    """
    # Names the function looks up globally plus names closed over.
    globals_and_locals = itertools.chain(function.__code__.co_names,
                                         function.__code__.co_freevars)
    constants = {}
    for var in globals_and_locals:
        if var in function.__code__.co_consts:
            # The first const is typically None (the implicit return
            # value), so skip it.
            constant_list = function.__code__.co_consts[1:]
            # Map each matching constant name to an Expression of itself.
            constant_dict = {name: Expression(name) for name in
                             constant_list if name == var}
            constants.update(constant_dict)
    # Closure variables also become Expression objects.
    constants.update({name: Expression(name) for name in
                      function.__code__.co_freevars})
    new_function = _inject_constants(function, constants)
    return new_function
class Where(dict):
    """Constants mapping; ``expression & where(...)`` pairs them up."""
    def __rand__(self, other):
        # ``other & self`` yields (constants, expression-so-far).
        return self, other
class Let:
    """Compile a lambda written with Expression sugar into function source.

    ``name`` is the name of the generated function; ``lambda_function`` is
    the lambda describing it (a set literal for an unconditional body, a
    dict or list of (condition, result) pairs for a guarded one).  The
    generated source ends up in ``self.source``.
    """
    def __init__(self, name, lambda_function):
        self.name = name
        # Make the `where` and `throw` helpers visible inside the lambda.
        lambda_with_built_in = _inject_constants(lambda_function,
                                                 {"where": Where,
                                                  "throw": Raise})
        self.function = _convert_variables_to_exressions(lambda_with_built_in)
        self.template = ("def {function_name}{arguments}:\n"
                         "{docstring}"
                         "{constants}"
                         "{pattern}"
                         "{expression}")
        # NOTE(review): make_expression returns None when the lambda yields
        # neither a set nor a dict -- the next line would then raise
        # AttributeError.  Confirm whether that input is ever legal.
        self.expression = self.make_expression()
        self.expression.environ["function_name"] = self.name
        self.expression.environ["arguments"] = self.make_signature()
        # Decide if the function is recursive, i.e. whether its own name is
        # among the globals/closure names the lambda references.
        self.is_recursive = False
        if self.name in lambda_function.__code__.co_names or \
           self.name in lambda_function.__code__.co_freevars:
            if isinstance(self.expression, Do):
                self.is_recursive = True
            elif isinstance(self.expression, Ward):
                # NOTE(review): `in value` relies on Expression supporting
                # the membership protocol -- verify this behaves as
                # intended.
                for value in self.expression.pattern.values():
                    if isinstance(value, Expression) and \
                       (self.name + "(") in value:
                        self.is_recursive = True
                        break
        # Make the body of the function.
        if self.is_recursive:
            # TODO: recursive function bodies are not generated yet.
            pass
        else:
            if isinstance(self.expression, Ward):
                self.expression.environ["expression"] = self.make_ward_body()
            elif isinstance(self.expression, Do):
                self.expression.environ["expression"] = self.make_do_body()
        self.source = self.template.format(**self.expression.environ)
    def make_expression(self):
        """Call the lambda with Expression placeholders as its arguments."""
        parameters = self.function.__code__.co_varnames
        arguments = (Expression(arg) for arg in parameters)
        expression = self.function(*arguments)
        if isinstance(expression, tuple):
            # The lambda used `& where(...)`: split constants from body.
            constants, expression = expression
        else:
            constants = {}
        if isinstance(expression, list):
            # A list of pairs keeps the guards in declaration order.
            expression = collections.OrderedDict(expression)
        if isinstance(expression, set):
            return Do(expression.pop()).where(**constants)
        elif isinstance(expression, dict):
            return Ward(expression).where(**constants)
    def make_do_body(self):
        """Render the single statement of an unconditional function body."""
        # BUG FIX: the original first branch tested
        # `isinstance(self.expression, Expression)` -- always False, since
        # self.expression is a Do -- and then referenced the undefined name
        # `expression`, which would have raised NameError if ever reached.
        # Inspect the wrapped body instead; rendering is unchanged because
        # Expression.__repr__ already returns __expr__.
        body = self.expression.body
        if isinstance(body, Expression):
            return "    return %s\n" % body.__expr__
        elif isinstance(body, Raise):
            return "    raise %r\n" % body.error
        else:
            return "    return %r\n" % body
    def make_ward_body(self):
        """Render an if/elif/else chain from the guard pattern."""
        if_expression = elif_expression = else_expression = ""
        pattern = self.expression.pattern.copy()
        # Turn each guard result into its statement text first.
        for key, value in pattern.items():
            if isinstance(value, Raise):
                pattern[key] = "raise %r" % value.error
            else:
                pattern[key] = "return %r" % value
        # 'otherwise' becomes the else clause (only when other guards exist).
        if 'otherwise' in pattern and len(pattern) > 1:
            value = pattern.pop('otherwise')
            else_expression = "    else:\n        %s\n" % value
        if len(pattern) == 1:
            if_expression = "    if %s:\n        %s\n" % \
                next(iter(pattern.items()))
        else:
            iter_pattern = iter(pattern.items())
            if_expression = "    if %s:\n        %s\n" % next(iter_pattern)
            elif_expression = "".join('    elif %s:\n        %s\n' %
                                      (key, value) for key, value in
                                      iter_pattern)
        return if_expression + elif_expression + else_expression
    def make_signature(self):
        """Return the generated def's argument list, parentheses included."""
        # inspect.getargspec/formatargspec were removed in Python 3.11;
        # inspect.signature renders the same "(a, b)" text for lambdas.
        return str(inspect.signature(self.function))
def all(items):
    """Render a call to the builtin ``all`` on *items* as source text.

    Deliberately shadows the builtin so that DSL code reads naturally.
    """
    call_template = "__builtins__.all(%s)"
    return call_template % items
def any(items):
    """Render a call to the builtin ``any`` on *items* as source text.

    Deliberately shadows the builtin so that DSL code reads naturally.
    """
    call_template = "__builtins__.any(%s)"
    return call_template % items
| mit |
marwoodandrew/superdesk-aap | server/aap/macros/length_feet_and_inches_to_metric_test.py | 2 | 2700 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import unittest
from .length_feet_and_inches_to_metric import feet_inches_to_metric
class FeetInchesTestCase(unittest.TestCase):
    """Exercise the imperial-to-metric conversion macro on every supported
    notation for feet and inches."""
    def test_feet_to_cm(self):
        """Each line of the fixture uses one distinct spelling; the macro
        must append the metric equivalent after the original measurement."""
        text = '''His height is 5'10" today
        His height is 5'10 today
        His height is 5' 10 today
        His height is 5 ft 10 today
        His height is 5 ft. 10 today
        His height is 5 ft 10 in today
        His height is 5' 10" today
        His height is 5' today
        His height is 5 ft today
        His height is 5 feet today
        His height is 10.2-ft today
        His height is 10.2-in today
        His height is 1,020" today
        His height is 1,020 in today
        His height is 1 inch today
        His height is 1,020 inches today
        His height is 10-12 ft today
        His height is 10-12 in today
        His height is 5-foot-10 today
        His height is 5-feet-10 today'''
        item = {'body_html': text}
        res, diff = feet_inches_to_metric(item)
        # feet-and-inches combinations are reported in metres
        self.assertEqual(diff['5\'10"'], '5\'10" (1.78 m)')
        self.assertEqual(diff['5\'10'], '5\'10 (1.78 m)')
        self.assertEqual(diff['5\' 10'], '5\' 10 (1.78 m)')
        self.assertEqual(diff['5 ft 10'], '5 ft 10 (1.78 m)')
        self.assertEqual(diff['5 ft. 10'], '5 ft. 10 (1.78 m)')
        self.assertEqual(diff['5 ft 10 in'], '5 ft 10 in (1.78 m)')
        self.assertEqual(diff['5\' 10"'], '5\' 10" (1.78 m)')
        self.assertEqual(diff['5\''], '5\' (1.52 m)')
        self.assertEqual(diff['5 ft'], '5 ft (1.52 m)')
        self.assertEqual(diff['5 feet'], '5 feet (1.52 m)')
        self.assertEqual(diff['10.2-ft'], '10.2-ft (3.11 m)')
        # small inch values come out in centimetres, large ones in metres
        self.assertEqual(diff['10.2-in'], '10.2-in (25.91 cm)')
        self.assertEqual(diff['1,020"'], '1,020" (25.91 m)')
        self.assertEqual(diff['1,020 in'], '1,020 in (25.91 m)')
        self.assertEqual(diff['1 inch'], '1 inch (2.54 cm)')
        self.assertEqual(diff['1,020 inches'], '1,020 inches (25.91 m)')
        # ranges convert both endpoints
        self.assertEqual(diff['10-12 ft'], '10-12 ft (3.05-3.66 m)')
        self.assertEqual(diff['10-12 in'], '10-12 in (25.40-30.48 cm)')
        self.assertEqual(diff['5-foot-10'], '5-foot-10 (1.78 m)')
        self.assertEqual(diff['5-feet-10'], '5-feet-10 (1.78 m)')
| agpl-3.0 |
marcore/edx-platform | openedx/core/djangoapps/course_groups/tests/test_partition_scheme.py | 11 | 16146 | """
Test the partitions and partitions service
"""
import json
from django.conf import settings
import django.test
from mock import patch
from nose.plugins.attrib import attr
from unittest import skipUnless
from courseware.masquerade import handle_ajax, setup_masquerade
from courseware.tests.test_masquerade import StaffMasqueradeTestCase
from student.tests.factories import UserFactory
from xmodule.partitions.partitions import Group, UserPartition, UserPartitionError
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, TEST_DATA_MIXED_MODULESTORE
from xmodule.modulestore.tests.factories import ToyCourseFactory
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from openedx.core.djangoapps.user_api.partition_schemes import RandomUserPartitionScheme
from ..partition_scheme import CohortPartitionScheme, get_cohorted_user_partition
from ..models import CourseUserGroupPartitionGroup
from ..views import link_cohort_to_partition_group, unlink_cohort_partition_group
from ..cohorts import add_user_to_cohort, remove_user_from_cohort, get_course_cohorts
from .helpers import CohortFactory, config_course_cohorts
@attr('shard_2')
class TestCohortPartitionScheme(ModuleStoreTestCase):
    """
    Test the logic for linking a user to a partition group based on their cohort.
    """
    MODULESTORE = TEST_DATA_MIXED_MODULESTORE
    def setUp(self):
        """
        Regenerate a course with cohort configuration, partition and groups,
        and a student for each test.
        """
        super(TestCohortPartitionScheme, self).setUp()
        self.course_key = ToyCourseFactory.create().id
        self.course = modulestore().get_course(self.course_key)
        config_course_cohorts(self.course, is_cohorted=True)
        self.groups = [Group(10, 'Group 10'), Group(20, 'Group 20')]
        self.user_partition = UserPartition(
            0,
            'Test Partition',
            'for testing purposes',
            self.groups,
            scheme=CohortPartitionScheme
        )
        self.student = UserFactory.create()
    def assert_student_in_group(self, group, partition=None):
        """
        Utility for checking that our test student comes up as assigned to the
        specified partition (or, if None, no partition at all)
        """
        # use_cached=False forces a fresh lookup so earlier assertions in the
        # same test cannot mask a stale assignment.
        self.assertEqual(
            CohortPartitionScheme.get_group_for_user(
                self.course_key,
                self.student,
                partition or self.user_partition,
                use_cached=False
            ),
            group
        )
    def test_student_cohort_assignment(self):
        """
        Test that the CohortPartitionScheme continues to return the correct
        group for a student as the student is moved in and out of different
        cohorts.
        """
        first_cohort, second_cohort = [
            CohortFactory(course_id=self.course_key) for _ in range(2)
        ]
        # place student 0 into first cohort
        add_user_to_cohort(first_cohort, self.student.username)
        # no cohort is linked to a partition group yet, so no group
        self.assert_student_in_group(None)
        # link first cohort to group 0 in the partition
        link_cohort_to_partition_group(
            first_cohort,
            self.user_partition.id,
            self.groups[0].id,
        )
        # link second cohort to group 1 in the partition
        link_cohort_to_partition_group(
            second_cohort,
            self.user_partition.id,
            self.groups[1].id,
        )
        self.assert_student_in_group(self.groups[0])
        # move student from first cohort to second cohort
        add_user_to_cohort(second_cohort, self.student.username)
        self.assert_student_in_group(self.groups[1])
        # move the student out of the cohort
        remove_user_from_cohort(second_cohort, self.student.username)
        self.assert_student_in_group(None)
    def test_cohort_partition_group_assignment(self):
        """
        Test that the CohortPartitionScheme returns the correct group for a
        student in a cohort when the cohort link is created / moved / deleted.
        """
        test_cohort = CohortFactory(course_id=self.course_key)
        # assign user to cohort (but cohort isn't linked to a partition group yet)
        add_user_to_cohort(test_cohort, self.student.username)
        # scheme should not yet find any link
        self.assert_student_in_group(None)
        # link cohort to group 0
        link_cohort_to_partition_group(
            test_cohort,
            self.user_partition.id,
            self.groups[0].id,
        )
        # now the scheme should find a link
        self.assert_student_in_group(self.groups[0])
        # link cohort to group 1 (first unlink it from group 0)
        unlink_cohort_partition_group(test_cohort)
        link_cohort_to_partition_group(
            test_cohort,
            self.user_partition.id,
            self.groups[1].id,
        )
        # scheme should pick up the link
        self.assert_student_in_group(self.groups[1])
        # unlink cohort from anywhere
        unlink_cohort_partition_group(
            test_cohort,
        )
        # scheme should now return nothing
        self.assert_student_in_group(None)
    def test_student_lazily_assigned(self):
        """
        Test that the lazy assignment of students to cohorts works
        properly when accessed via the CohortPartitionScheme.
        """
        # don't assign the student to any cohort initially
        self.assert_student_in_group(None)
        # get the default cohort, which is automatically created
        # during the `get_course_cohorts` API call if it doesn't yet exist
        cohort = get_course_cohorts(self.course)[0]
        # map that cohort to a group in our partition
        link_cohort_to_partition_group(
            cohort,
            self.user_partition.id,
            self.groups[0].id,
        )
        # The student will be lazily assigned to the default cohort
        # when CohortPartitionScheme.get_group_for_user makes its internal
        # call to cohorts.get_cohort.
        self.assert_student_in_group(self.groups[0])
    def setup_student_in_group_0(self):
        """
        Utility to set up a cohort, add our student to the cohort, and link
        the cohort to self.groups[0]
        """
        test_cohort = CohortFactory(course_id=self.course_key)
        # link cohort to group 0
        link_cohort_to_partition_group(
            test_cohort,
            self.user_partition.id,
            self.groups[0].id,
        )
        # place student into cohort
        add_user_to_cohort(test_cohort, self.student.username)
        # check link is correct
        self.assert_student_in_group(self.groups[0])
    def test_partition_changes_nondestructive(self):
        """
        If the name of a user partition is changed, or a group is added to the
        partition, links from cohorts do not break.
        If the name of a group is changed, links from cohorts do not break.
        """
        self.setup_student_in_group_0()
        # to simulate a non-destructive configuration change on the course, create
        # a new partition with the same id and scheme but with groups renamed and
        # a group added
        new_groups = [Group(10, 'New Group 10'), Group(20, 'New Group 20'), Group(30, 'New Group 30')]
        new_user_partition = UserPartition(
            0,  # same id
            'Different Partition',
            'dummy',
            new_groups,
            scheme=CohortPartitionScheme,
        )
        # the link should still work
        self.assert_student_in_group(new_groups[0], new_user_partition)
    def test_missing_group(self):
        """
        If the group is deleted (or its id is changed), there's no referential
        integrity enforced, so any references from cohorts to that group will be
        lost. A warning should be logged when links are found from cohorts to
        groups that no longer exist.
        """
        self.setup_student_in_group_0()
        # to simulate a destructive change on the course, create a new partition
        # with the same id, but different group ids.
        new_user_partition = UserPartition(
            0,  # same id
            'Another Partition',
            'dummy',
            [Group(11, 'Not Group 10'), Group(21, 'Not Group 20')],  # different ids
            scheme=CohortPartitionScheme,
        )
        # the partition will be found since it has the same id, but the group
        # ids aren't present anymore, so the scheme returns None (and logs a
        # warning)
        with patch('openedx.core.djangoapps.course_groups.partition_scheme.log') as mock_log:
            self.assert_student_in_group(None, new_user_partition)
            self.assertTrue(mock_log.warn.called)
            self.assertRegexpMatches(mock_log.warn.call_args[0][0], 'group not found')
    def test_missing_partition(self):
        """
        If the user partition is deleted (or its id is changed), there's no
        referential integrity enforced, so any references from cohorts to that
        partition's groups will be lost. A warning should be logged when links
        are found from cohorts to partitions that do not exist.
        """
        self.setup_student_in_group_0()
        # to simulate another destructive change on the course, create a new
        # partition with a different id, but using the same groups.
        new_user_partition = UserPartition(
            1,  # different id
            'Moved Partition',
            'dummy',
            [Group(10, 'Group 10'), Group(20, 'Group 20')],  # same ids
            scheme=CohortPartitionScheme,
        )
        # the partition will not be found even though the group ids match, so the
        # scheme returns None (and logs a warning).
        with patch('openedx.core.djangoapps.course_groups.partition_scheme.log') as mock_log:
            self.assert_student_in_group(None, new_user_partition)
            self.assertTrue(mock_log.warn.called)
            self.assertRegexpMatches(mock_log.warn.call_args[0][0], 'partition mismatch')
@attr('shard_2')
class TestExtension(django.test.TestCase):
    """
    Ensure that the scheme extension is correctly plugged in (via entry point
    in setup.py)
    """
    def test_get_scheme(self):
        # 'cohort' must resolve to our scheme class; unknown names must raise.
        self.assertEqual(UserPartition.get_scheme('cohort'), CohortPartitionScheme)
        with self.assertRaisesRegexp(UserPartitionError, 'Unrecognized scheme'):
            UserPartition.get_scheme('other')
@attr('shard_2')
class TestGetCohortedUserPartition(ModuleStoreTestCase):
    """
    Test that `get_cohorted_user_partition` returns the first user_partition with scheme `CohortPartitionScheme`.
    """
    MODULESTORE = TEST_DATA_MIXED_MODULESTORE
    def setUp(self):
        """
        Regenerate a course with cohort configuration, partition and groups,
        and a student for each test.
        """
        super(TestGetCohortedUserPartition, self).setUp()
        self.course_key = ToyCourseFactory.create().id
        self.course = modulestore().get_course(self.course_key)
        self.student = UserFactory.create()
        # A partition with a different scheme: must never be returned.
        self.random_user_partition = UserPartition(
            1,
            'Random Partition',
            'Should not be returned',
            [Group(0, 'Group 0'), Group(1, 'Group 1')],
            scheme=RandomUserPartitionScheme
        )
        # First cohort-scheme partition: the expected result.
        self.cohort_user_partition = UserPartition(
            0,
            'Cohort Partition 1',
            'Should be returned',
            [Group(10, 'Group 10'), Group(20, 'Group 20')],
            scheme=CohortPartitionScheme
        )
        # Second cohort-scheme partition: ignored because it comes later.
        self.second_cohort_user_partition = UserPartition(
            2,
            'Cohort Partition 2',
            'Should not be returned',
            [Group(10, 'Group 10'), Group(1, 'Group 1')],
            scheme=CohortPartitionScheme
        )
    def test_returns_first_cohort_user_partition(self):
        """
        Test get_cohorted_user_partition returns first user_partition with scheme `CohortPartitionScheme`.
        """
        self.course.user_partitions.append(self.random_user_partition)
        self.course.user_partitions.append(self.cohort_user_partition)
        self.course.user_partitions.append(self.second_cohort_user_partition)
        self.assertEqual(self.cohort_user_partition, get_cohorted_user_partition(self.course))
    def test_no_cohort_user_partitions(self):
        """
        Test get_cohorted_user_partition returns None when there are no cohorted user partitions.
        """
        self.course.user_partitions.append(self.random_user_partition)
        self.assertIsNone(get_cohorted_user_partition(self.course))
@attr('shard_2')
class TestMasqueradedGroup(StaffMasqueradeTestCase):
    """
    Check for staff being able to masquerade as belonging to a group.
    """
    def setUp(self):
        """
        Add a cohort-scheme user partition to the test course.
        """
        super(TestMasqueradedGroup, self).setUp()
        self.user_partition = UserPartition(
            0, 'Test User Partition', '',
            [Group(0, 'Group 1'), Group(1, 'Group 2')],
            scheme_id='cohort'
        )
        self.course.user_partitions.append(self.user_partition)
        self.session = {}
        modulestore().update_item(self.course, self.test_user.id)
    def _verify_masquerade_for_group(self, group):
        """
        Verify that the masquerade works for the specified group id.
        """
        # Send the request to set the masquerade
        request_json = {
            "role": "student",
            "user_partition_id": self.user_partition.id,
            "group_id": group.id if group is not None else None
        }
        request = self._create_mock_json_request(
            self.test_user,
            body=json.dumps(request_json),
            session=self.session
        )
        handle_ajax(request, unicode(self.course.id))
        # Now setup the masquerade for the test user
        setup_masquerade(request, self.test_user, True)
        scheme = self.user_partition.scheme
        # The scheme must now report the masqueraded group for the staff user.
        self.assertEqual(
            scheme.get_group_for_user(self.course.id, self.test_user, self.user_partition),
            group
        )
    def _verify_masquerade_for_all_groups(self):
        """
        Verify that the staff user can masquerade as being in all groups
        as well as no group.
        """
        self._verify_masquerade_for_group(self.user_partition.groups[0])
        self._verify_masquerade_for_group(self.user_partition.groups[1])
        self._verify_masquerade_for_group(None)
    @skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in LMS')
    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_group_masquerade(self):
        """
        Tests that a staff member can masquerade as being in a particular group.
        """
        self._verify_masquerade_for_all_groups()
    @skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in LMS')
    @patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
    def test_group_masquerade_with_cohort(self):
        """
        Tests that a staff member can masquerade as being in a particular group
        when that staff member also belongs to a cohort with a corresponding
        group.
        """
        self.course.cohort_config = {'cohorted': True}
        modulestore().update_item(self.course, self.test_user.id)  # pylint: disable=no-member
        cohort = CohortFactory.create(course_id=self.course.id, users=[self.test_user])
        CourseUserGroupPartitionGroup(
            course_user_group=cohort,
            partition_id=self.user_partition.id,
            group_id=self.user_partition.groups[0].id
        ).save()
        # When the staff user is masquerading as being in a None group
        # (within an existent UserPartition), we should treat that as
        # an explicit None, not defaulting to the user's cohort's
        # partition group.
        self._verify_masquerade_for_all_groups()
| agpl-3.0 |
pridemusvaire/yowsup | yowsup/demos/contacts/layer.py | 61 | 1230 | from yowsup.layers.interface import YowInterfaceLayer, ProtocolEntityCallback
from yowsup.layers.protocol_contacts.protocolentities import GetSyncIqProtocolEntity, ResultSyncIqProtocolEntity
from yowsup.layers.protocol_iq.protocolentities import ErrorIqProtocolEntity
import threading
import logging
logger = logging.getLogger(__name__)
class SyncLayer(YowInterfaceLayer):
    """Demo layer that requests a contact sync right after a successful
    login, prints the result and then stops the demo."""
    # Property key under which the caller supplies the contact list.
    PROP_CONTACTS = "org.openwhatsapp.yowsup.prop.syncdemo.contacts"
    def __init__(self):
        super(SyncLayer, self).__init__()
    # Callback invoked when the connection to the WhatsApp server succeeds.
    @ProtocolEntityCallback("success")
    def onSuccess(self, successProtocolEntity):
        contacts= self.getProp(self.__class__.PROP_CONTACTS, [])
        contactEntity = GetSyncIqProtocolEntity(contacts)
        self._sendIq(contactEntity, self.onGetSyncResult, self.onGetSyncError)
    def onGetSyncResult(self, resultSyncIqProtocolEntity, originalIqProtocolEntity):
        # Print the sync result, then end the demo loop.
        print(resultSyncIqProtocolEntity)
        raise KeyboardInterrupt()
    def onGetSyncError(self, errorSyncIqProtocolEntity, originalIqProtocolEntity):
        # Print the error, then end the demo loop.
        print(errorSyncIqProtocolEntity)
        raise KeyboardInterrupt()
| gpl-3.0 |
hehongliang/tensorflow | tensorflow/contrib/receptive_field/python/util/graph_compute_order_test.py | 46 | 5947 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for graph_compute_order module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import slim
from tensorflow.contrib.receptive_field import receptive_field_api as receptive_field
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
def create_test_network():
    """Convolutional neural network for test.

    Builds a small two-branch network with two additions so that the
    compute-order test exercises branching and merging paths.

    Returns:
      g: Tensorflow graph object (Graph proto).
    """
    g = ops.Graph()
    with g.as_default():
        # An input test image with unknown spatial resolution.
        x = array_ops.placeholder(
            dtypes.float32, (None, None, None, 1), name='input_image')
        # Left branch before first addition.
        l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
        # Right branch before first addition.
        l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]], name='L2_pad')
        l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
        l3 = slim.max_pool2d(l2, [3, 3], stride=2, scope='L3', padding='SAME')
        # First addition.
        l4 = nn.relu(l1 + l3, name='L4_relu')
        # Left branch after first addition.
        l5 = slim.conv2d(l4, 1, [1, 1], stride=2, scope='L5', padding='SAME')
        # Right branch after first addition.
        l6 = slim.conv2d(l4, 1, [3, 3], stride=2, scope='L6', padding='SAME')
        # Final addition.
        gen_math_ops.add(l5, l6, name='L7_add')
    return g
class GraphComputeOrderTest(test.TestCase):
    """Checks topological ordering and input/output size propagation of
    receptive_field.get_compute_order on the test network."""
    def check_topological_sort_and_sizes(self,
                                         node_info,
                                         expected_input_sizes=None,
                                         expected_output_sizes=None):
        """Helper function to check topological sorting and sizes are correct.

        The arguments expected_input_sizes and expected_output_sizes are used to
        check that the sizes are correct, if they are given.

        Args:
          node_info: Default dict keyed by node name, mapping to a named tuple with
            the following keys: {order, node, input_size, output_size}.
          expected_input_sizes: Dict mapping node names to expected input sizes
            (optional).
          expected_output_sizes: Dict mapping node names to expected output sizes
            (optional).
        """
        # Loop over nodes in sorted order, collecting those that were already seen.
        # These will be used to make sure that the graph is topologically sorted.
        # At the same time, we construct dicts from node name to input/output size,
        # which will be used to check those.
        already_seen_nodes = []
        input_sizes = {}
        output_sizes = {}
        for _, (_, node, input_size, output_size) in sorted(
            node_info.items(), key=lambda x: x[1].order):
            for inp_name in node.input:
                # Since the graph is topologically sorted, the inputs to the current
                # node must have been seen beforehand.
                self.assertIn(inp_name, already_seen_nodes)
            input_sizes[node.name] = input_size
            output_sizes[node.name] = output_size
            already_seen_nodes.append(node.name)
        # Check input sizes, if desired.
        if expected_input_sizes is not None:
            for k, v in expected_input_sizes.items():
                self.assertIn(k, input_sizes)
                self.assertEqual(input_sizes[k], v)
        # Check output sizes, if desired.
        if expected_output_sizes is not None:
            for k, v in expected_output_sizes.items():
                self.assertIn(k, output_sizes)
                self.assertEqual(output_sizes[k], v)
    def testGraphOrderIsCorrect(self):
        """Tests that the order and sizes of create_test_network() are correct."""
        graph_def = create_test_network().as_graph_def()
        # Case 1: Input node name/size are not given.
        node_info, _ = receptive_field.get_compute_order(graph_def)
        self.check_topological_sort_and_sizes(node_info)
        # Case 2: Input node name is given, but not size.
        node_info, _ = receptive_field.get_compute_order(
            graph_def, input_node_name='input_image')
        self.check_topological_sort_and_sizes(node_info)
        # Case 3: Input node name and size (224) are given.
        node_info, _ = receptive_field.get_compute_order(
            graph_def, input_node_name='input_image', input_node_size=[224, 224])
        expected_input_sizes = {
            'input_image': None,
            'L1/Conv2D': [224, 224],
            'L2_pad': [224, 224],
            'L2/Conv2D': [225, 225],
            'L3/MaxPool': [112, 112],
            'L4_relu': [56, 56],
            'L5/Conv2D': [56, 56],
            'L6/Conv2D': [56, 56],
            'L7_add': [28, 28],
        }
        expected_output_sizes = {
            'input_image': [224, 224],
            'L1/Conv2D': [56, 56],
            'L2_pad': [225, 225],
            'L2/Conv2D': [112, 112],
            'L3/MaxPool': [56, 56],
            'L4_relu': [56, 56],
            'L5/Conv2D': [28, 28],
            'L6/Conv2D': [28, 28],
            'L7_add': [28, 28],
        }
        self.check_topological_sort_and_sizes(node_info, expected_input_sizes,
                                              expected_output_sizes)
if __name__ == '__main__':
test.main()
| apache-2.0 |
LouisePaulDelvaux/openfisca-france-data | openfisca_france_data/collection_builders/erfs_collection_builder.py | 4 | 3767 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import ConfigParser
import getpass
import logging
import os
import pkg_resources
from openfisca_survey_manager.scripts.surv import add_survey_to_collection, create_data_file_by_format
from openfisca_survey_manager.survey_collections import SurveyCollection
openfisca_france_data_location = pkg_resources.get_distribution('openfisca-france-data').location
config_files_directory = os.path.join(openfisca_france_data_location)
log = logging.getLogger(__name__)
def build_erfs_survey_collection(years = None, erase = False, overwrite = False):
    """Build (or update) the ERFS survey collection for the given years.

    For each year, the matching SAS files are registered in the collection,
    the collection is dumped to JSON, and the surveys are converted to HDF.

    :param years: list of years (ints) to process; required.
    :param erase: start from a fresh collection instead of loading the
        existing one.
    :param overwrite: overwrite already-converted HDF data.
    :returns: the populated SurveyCollection.
    :raises ValueError: if ``years`` is None.
    """
    if years is None:
        log.error("A list of years to process is needed")
        # BUG FIX: previously execution continued with years = None and
        # crashed later with "TypeError: 'NoneType' object is not iterable";
        # fail fast with an explicit error instead.
        raise ValueError("A list of years to process is needed")
    if erase:
        erfs_survey_collection = SurveyCollection(
            name = "erfs", config_files_directory = config_files_directory)
    else:
        # Reuse the existing collection when possible; fall back to a new
        # one when the config has no entry for it yet.
        try:
            erfs_survey_collection = SurveyCollection.load(
                collection = 'erfs', config_files_directory = config_files_directory)
        except ConfigParser.NoOptionError:
            erfs_survey_collection = SurveyCollection(
                name = "erfs", config_files_directory = config_files_directory)
    input_data_directory = erfs_survey_collection.config.get('data', 'input_directory')
    # NOTE(review): user-specific directory layout hard-coded for 'benjello';
    # consider moving this into configuration.
    if getpass.getuser() == 'benjello':
        input_data_directory = os.path.join(os.path.dirname(input_data_directory), 'INSEE')
    else:
        input_data_directory = os.path.dirname(input_data_directory)
    for year in years:
        data_directory_path = os.path.join(
            input_data_directory,
            'ERF/ERFS_{}'.format(year)
            )
        data_file_by_format = create_data_file_by_format(data_directory_path)
        survey_name = 'erfs_{}'.format(year)
        add_survey_to_collection(
            survey_name = survey_name,
            survey_collection = erfs_survey_collection,
            sas_files = data_file_by_format['sas'],
            )
        # Persist the collection description after each year is registered.
        collections_directory = erfs_survey_collection.config.get('collections', 'collections_directory')
        collection_json_path = os.path.join(collections_directory, "erfs" + ".json")
        erfs_survey_collection.dump(json_file_path = collection_json_path)
        surveys = [survey for survey in erfs_survey_collection.surveys if survey.name.endswith(str(year))]
        erfs_survey_collection.fill_hdf(source_format = 'sas', surveys = surveys, overwrite = overwrite)
    return erfs_survey_collection
if __name__ == '__main__':
    import logging
    import sys
    import datetime
    # Time the whole build and log progress to stdout.
    start_time = datetime.datetime.now()
    logging.basicConfig(level=logging.INFO, stream=sys.stdout)
    years = [2006, 2007, 2008, 2009]
    erfs_survey_collection = build_erfs_survey_collection(years = years, erase = True,
        overwrite = False)
    log.info("The program have been executed in {}".format(datetime.datetime.now() - start_time))
| agpl-3.0 |
akaariai/django | tests/auth_tests/test_forms.py | 21 | 23635 | from __future__ import unicode_literals
import datetime
import re
from django import forms
from django.contrib.auth.forms import (
AuthenticationForm, PasswordChangeForm, PasswordResetForm,
ReadOnlyPasswordHashField, ReadOnlyPasswordHashWidget, SetPasswordForm,
UserChangeForm, UserCreationForm,
)
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core import mail
from django.core.mail import EmailMultiAlternatives
from django.forms.fields import CharField, Field
from django.test import TestCase, override_settings
from django.utils import translation
from django.utils.encoding import force_text
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from .settings import AUTH_TEMPLATES
class TestDataMixin(object):
    """Mixin creating the shared fixture users ``cls.u1`` .. ``cls.u6``."""

    @classmethod
    def setUpTestData(cls):
        # All fixture users share the same timestamps and superuser flag.
        moment = datetime.datetime(2006, 12, 17, 7, 3, 31)
        sha1_hash = 'sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161'
        user_specs = [
            ('u1', dict(password=sha1_hash, username='testclient',
                        first_name='Test', last_name='Client',
                        email='testclient@example.com', is_staff=False, is_active=True)),
            ('u2', dict(password=sha1_hash, username='inactive',
                        first_name='Inactive', last_name='User',
                        email='testclient2@example.com', is_staff=False, is_active=False)),
            ('u3', dict(password=sha1_hash, username='staff',
                        first_name='Staff', last_name='Member',
                        email='staffmember@example.com', is_staff=True, is_active=True)),
            ('u4', dict(password='', username='empty_password',
                        first_name='Empty', last_name='Password',
                        email='empty_password@example.com', is_staff=False, is_active=True)),
            ('u5', dict(password='$', username='unmanageable_password',
                        first_name='Unmanageable', last_name='Password',
                        email='unmanageable_password@example.com', is_staff=False, is_active=True)),
            ('u6', dict(password='foo$bar', username='unknown_password',
                        first_name='Unknown', last_name='Password',
                        email='unknown_password@example.com', is_staff=False, is_active=True)),
        ]
        for attr_name, spec in user_specs:
            params = dict(last_login=moment, date_joined=moment, is_superuser=False)
            params.update(spec)
            setattr(cls, attr_name, User.objects.create(**params))
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class UserCreationFormTest(TestDataMixin, TestCase):
    """Validation and success-path tests for ``UserCreationForm``."""

    def test_user_already_exists(self):
        """An already-taken username surfaces the model field's 'unique' error."""
        data = {
            'username': 'testclient',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["username"].errors,
                         [force_text(User._meta.get_field('username').error_messages['unique'])])

    def test_invalid_data(self):
        """A username rejected by the field's 'invalid' validator fails validation."""
        data = {
            'username': 'jsmith!',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        # Fetch the model's own 'invalid' validator so the expected message
        # stays in sync with the User model definition.
        validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
        self.assertEqual(form["username"].errors, [force_text(validator.message)])

    def test_password_verification(self):
        # The verification password is incorrect.
        data = {
            'username': 'jsmith',
            'password1': 'test123',
            'password2': 'test',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["password2"].errors,
                         [force_text(form.error_messages['password_mismatch'])])

    def test_both_passwords(self):
        # One (or both) passwords weren't given
        data = {'username': 'jsmith'}
        form = UserCreationForm(data)
        required_error = [force_text(Field.default_error_messages['required'])]
        self.assertFalse(form.is_valid())
        self.assertEqual(form['password1'].errors, required_error)
        self.assertEqual(form['password2'].errors, required_error)
        # Supplying only password2 leaves password1 'required' and clears
        # the password2 error.
        data['password2'] = 'test123'
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form['password1'].errors, required_error)
        self.assertEqual(form['password2'].errors, [])

    def test_success(self):
        # The success case.
        data = {
            'username': 'jsmith@example.com',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = UserCreationForm(data)
        self.assertTrue(form.is_valid())
        u = form.save()
        self.assertEqual(repr(u), '<User: jsmith@example.com>')
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class AuthenticationFormTest(TestDataMixin, TestCase):
    """Login validation tests for ``AuthenticationForm`` and subclasses."""

    def test_invalid_username(self):
        # The user submits an invalid username.
        data = {
            'username': 'jsmith_does_not_exist',
            'password': 'test123',
        }
        form = AuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(),
                         [force_text(form.error_messages['invalid_login'] % {
                             'username': User._meta.get_field('username').verbose_name
                         })])

    def test_inactive_user(self):
        # The user is inactive.
        data = {
            'username': 'inactive',
            'password': 'password',
        }
        form = AuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(),
                         [force_text(form.error_messages['inactive'])])

    def test_inactive_user_i18n(self):
        """The 'inactive' error is produced under a non-default locale too."""
        with self.settings(USE_I18N=True), translation.override('pt-br', deactivate=True):
            # The user is inactive.
            data = {
                'username': 'inactive',
                'password': 'password',
            }
            form = AuthenticationForm(None, data)
            self.assertFalse(form.is_valid())
            self.assertEqual(form.non_field_errors(),
                             [force_text(form.error_messages['inactive'])])

    def test_custom_login_allowed_policy(self):
        # The user is inactive, but our custom form policy allows them to log in.
        data = {
            'username': 'inactive',
            'password': 'password',
        }

        class AuthenticationFormWithInactiveUsersOkay(AuthenticationForm):
            # Overriding confirm_login_allowed with a no-op accepts any
            # authenticated user, active or not.
            def confirm_login_allowed(self, user):
                pass

        form = AuthenticationFormWithInactiveUsersOkay(None, data)
        self.assertTrue(form.is_valid())

        # If we want to disallow some logins according to custom logic,
        # we should raise a django.forms.ValidationError in the form.
        class PickyAuthenticationForm(AuthenticationForm):
            def confirm_login_allowed(self, user):
                if user.username == "inactive":
                    raise forms.ValidationError("This user is disallowed.")
                raise forms.ValidationError("Sorry, nobody's allowed in.")

        form = PickyAuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(), ['This user is disallowed.'])
        data = {
            'username': 'testclient',
            'password': 'password',
        }
        form = PickyAuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(), ["Sorry, nobody's allowed in."])

    def test_success(self):
        # The success case
        data = {
            'username': 'testclient',
            'password': 'password',
        }
        form = AuthenticationForm(None, data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.non_field_errors(), [])

    def test_username_field_label(self):
        """An explicit label on a custom username field is honored."""
        class CustomAuthenticationForm(AuthenticationForm):
            username = CharField(label="Name", max_length=75)

        form = CustomAuthenticationForm()
        self.assertEqual(form['username'].label, "Name")

    def test_username_field_label_not_set(self):
        """Without an explicit label, the USERNAME_FIELD verbose_name is used."""
        class CustomAuthenticationForm(AuthenticationForm):
            username = CharField()

        form = CustomAuthenticationForm()
        username_field = User._meta.get_field(User.USERNAME_FIELD)
        self.assertEqual(form.fields['username'].label, capfirst(username_field.verbose_name))

    def test_username_field_label_empty_string(self):
        """An empty-string label is preserved, not replaced by a default."""
        class CustomAuthenticationForm(AuthenticationForm):
            username = CharField(label='')

        form = CustomAuthenticationForm()
        self.assertEqual(form.fields['username'].label, "")
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class SetPasswordFormTest(TestDataMixin, TestCase):
    """Tests for ``SetPasswordForm`` (new password without old-password check)."""

    def test_password_verification(self):
        # The two new passwords do not match.
        user = User.objects.get(username='testclient')
        data = {
            'new_password1': 'abc123',
            'new_password2': 'abc',
        }
        form = SetPasswordForm(user, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["new_password2"].errors,
                         [force_text(form.error_messages['password_mismatch'])])

    def test_success(self):
        """Matching new passwords validate successfully."""
        user = User.objects.get(username='testclient')
        data = {
            'new_password1': 'abc123',
            'new_password2': 'abc123',
        }
        form = SetPasswordForm(user, data)
        self.assertTrue(form.is_valid())
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class PasswordChangeFormTest(TestDataMixin, TestCase):
    """Tests for ``PasswordChangeForm`` (old-password check plus new password)."""

    def test_incorrect_password(self):
        """A wrong old password yields the 'password_incorrect' error."""
        user = User.objects.get(username='testclient')
        data = {
            'old_password': 'test',
            'new_password1': 'abc123',
            'new_password2': 'abc123',
        }
        form = PasswordChangeForm(user, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["old_password"].errors,
                         [force_text(form.error_messages['password_incorrect'])])

    def test_password_verification(self):
        # The two new passwords do not match.
        user = User.objects.get(username='testclient')
        data = {
            'old_password': 'password',
            'new_password1': 'abc123',
            'new_password2': 'abc',
        }
        form = PasswordChangeForm(user, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["new_password2"].errors,
                         [force_text(form.error_messages['password_mismatch'])])

    def test_success(self):
        # The success case.
        user = User.objects.get(username='testclient')
        data = {
            'old_password': 'password',
            'new_password1': 'abc123',
            'new_password2': 'abc123',
        }
        form = PasswordChangeForm(user, data)
        self.assertTrue(form.is_valid())

    def test_field_order(self):
        # Regression test - check the order of fields:
        user = User.objects.get(username='testclient')
        self.assertEqual(list(PasswordChangeForm(user, {}).fields),
                         ['old_password', 'new_password1', 'new_password2'])
@override_settings(USE_TZ=False, PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'])
class UserChangeFormTest(TestDataMixin, TestCase):
    """Tests for ``UserChangeForm``, including password-display regressions."""

    def test_username_validity(self):
        """An invalid username is rejected with the field's 'invalid' message."""
        user = User.objects.get(username='testclient')
        data = {'username': 'not valid'}
        form = UserChangeForm(data, instance=user)
        self.assertFalse(form.is_valid())
        validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
        self.assertEqual(form["username"].errors, [force_text(validator.message)])

    def test_bug_14242(self):
        # A regression test, introduced by adding an optimization for the
        # UserChangeForm.
        class MyUserForm(UserChangeForm):
            def __init__(self, *args, **kwargs):
                super(MyUserForm, self).__init__(*args, **kwargs)
                self.fields['groups'].help_text = 'These groups give users different permissions'

            class Meta(UserChangeForm.Meta):
                fields = ('groups',)

        # Just check we can create it
        MyUserForm({})

    def test_unusable_password(self):
        # Renamed from the misspelled `test_unsuable_password`.
        user = User.objects.get(username='empty_password')
        user.set_unusable_password()
        user.save()
        form = UserChangeForm(instance=user)
        self.assertIn(_("No password set."), form.as_table())

    def test_bug_17944_empty_password(self):
        """An empty password string renders as 'No password set.'."""
        user = User.objects.get(username='empty_password')
        form = UserChangeForm(instance=user)
        self.assertIn(_("No password set."), form.as_table())

    def test_bug_17944_unmanageable_password(self):
        """A bare '$' password renders as an unknown-format message."""
        user = User.objects.get(username='unmanageable_password')
        form = UserChangeForm(instance=user)
        self.assertIn(_("Invalid password format or unknown hashing algorithm."),
                      form.as_table())

    def test_bug_17944_unknown_password_algorithm(self):
        """An unrecognized hasher prefix renders as an unknown-format message."""
        user = User.objects.get(username='unknown_password')
        form = UserChangeForm(instance=user)
        self.assertIn(_("Invalid password format or unknown hashing algorithm."),
                      form.as_table())

    def test_bug_19133(self):
        "The change form does not return the password value"
        # Use the form to construct the POST data
        user = User.objects.get(username='testclient')
        form_for_data = UserChangeForm(instance=user)
        post_data = form_for_data.initial
        # The password field should be readonly, so anything
        # posted here should be ignored; the form will be
        # valid, and give back the 'initial' value for the
        # password field.
        post_data['password'] = 'new password'
        form = UserChangeForm(instance=user, data=post_data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['password'], 'sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161')

    def test_bug_19349_bound_password_field(self):
        user = User.objects.get(username='testclient')
        form = UserChangeForm(data={}, instance=user)
        # When rendering the bound password field,
        # ReadOnlyPasswordHashWidget needs the initial
        # value to render correctly
        self.assertEqual(form.initial['password'], form['password'].value())
@override_settings(
    PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
    TEMPLATES=AUTH_TEMPLATES,
    USE_TZ=False,
)
class PasswordResetFormTest(TestDataMixin, TestCase):
    """Tests for ``PasswordResetForm`` validation and reset-email sending."""

    @classmethod
    def setUpClass(cls):
        super(PasswordResetFormTest, cls).setUpClass()
        # This cleanup is necessary because contrib.sites cache
        # makes tests interfere with each other, see #11505
        Site.objects.clear_cache()

    def create_dummy_user(self):
        """
        Create a user and return a tuple (user_object, username, email).
        """
        username = 'jsmith'
        email = 'jsmith@example.com'
        user = User.objects.create_user(username, email, 'test123')
        return (user, username, email)

    def test_invalid_email(self):
        """A malformed email address fails field validation."""
        data = {'email': 'not valid'}
        form = PasswordResetForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form['email'].errors, [_('Enter a valid email address.')])

    def test_nonexistent_email(self):
        """
        Test nonexistent email address. This should not fail because it would
        expose information about registered users.
        """
        data = {'email': 'foo@bar.com'}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        self.assertEqual(len(mail.outbox), 0)

    def test_cleaned_data(self):
        """A registered email validates, is cleaned, and triggers one email."""
        (user, username, email) = self.create_dummy_user()
        data = {'email': email}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        form.save(domain_override='example.com')
        self.assertEqual(form.cleaned_data['email'], email)
        self.assertEqual(len(mail.outbox), 1)

    def test_custom_email_subject(self):
        """The subject template from AUTH_TEMPLATES drives the email subject."""
        data = {'email': 'testclient@example.com'}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        # Since we're not providing a request object, we must provide a
        # domain_override to prevent the save operation from failing in the
        # potential case where contrib.sites is not installed. Refs #16412.
        form.save(domain_override='example.com')
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, 'Custom password reset on example.com')

    def test_custom_email_constructor(self):
        """A subclass may build and send its own message in ``send_mail``."""
        data = {'email': 'testclient@example.com'}

        class CustomEmailPasswordResetForm(PasswordResetForm):
            def send_mail(self, subject_template_name, email_template_name,
                          context, from_email, to_email,
                          html_email_template_name=None):
                EmailMultiAlternatives(
                    "Forgot your password?",
                    "Sorry to hear you forgot your password.",
                    None, [to_email],
                    ['site_monitor@example.com'],
                    headers={'Reply-To': 'webmaster@example.com'},
                    alternatives=[("Really sorry to hear you forgot your password.",
                                   "text/html")]).send()

        form = CustomEmailPasswordResetForm(data)
        self.assertTrue(form.is_valid())
        # Since we're not providing a request object, we must provide a
        # domain_override to prevent the save operation from failing in the
        # potential case where contrib.sites is not installed. Refs #16412.
        form.save(domain_override='example.com')
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, 'Forgot your password?')
        self.assertEqual(mail.outbox[0].bcc, ['site_monitor@example.com'])
        self.assertEqual(mail.outbox[0].content_subtype, "plain")

    def test_preserve_username_case(self):
        """
        Preserve the case of the user name (before the @ in the email address)
        when creating a user (#5605).
        """
        user = User.objects.create_user('forms_test2', 'tesT@EXAMple.com', 'test')
        self.assertEqual(user.email, 'tesT@example.com')
        user = User.objects.create_user('forms_test3', 'tesT', 'test')
        self.assertEqual(user.email, 'tesT')

    def test_inactive_user(self):
        """
        Test that inactive user cannot receive password reset email.
        """
        (user, username, email) = self.create_dummy_user()
        user.is_active = False
        user.save()
        form = PasswordResetForm({'email': email})
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(len(mail.outbox), 0)

    def test_unusable_password(self):
        """Users with an unusable password get no reset email."""
        user = User.objects.create_user('testuser', 'test@example.com', 'test')
        data = {"email": "test@example.com"}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        user.set_unusable_password()
        user.save()
        form = PasswordResetForm(data)
        # The form itself is valid, but no email is sent
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(len(mail.outbox), 0)

    def test_save_plaintext_email(self):
        """
        Test the PasswordResetForm.save() method with no html_email_template_name
        parameter passed in.
        Test to ensure original behavior is unchanged after the parameter was added.
        """
        (user, username, email) = self.create_dummy_user()
        form = PasswordResetForm({"email": email})
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0].message()
        self.assertFalse(message.is_multipart())
        self.assertEqual(message.get_content_type(), 'text/plain')
        self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
        self.assertEqual(len(mail.outbox[0].alternatives), 0)
        self.assertEqual(message.get_all('to'), [email])
        self.assertTrue(re.match(r'^http://example.com/reset/[\w+/-]', message.get_payload()))

    def test_save_html_email_template_name(self):
        """
        Test the PasswordResetForm.save() method with html_email_template_name
        parameter specified.
        Test to ensure that a multipart email is sent with both text/plain
        and text/html parts.
        """
        (user, username, email) = self.create_dummy_user()
        form = PasswordResetForm({"email": email})
        self.assertTrue(form.is_valid())
        form.save(html_email_template_name='registration/html_password_reset_email.html')
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(len(mail.outbox[0].alternatives), 1)
        message = mail.outbox[0].message()
        self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
        self.assertEqual(len(message.get_payload()), 2)
        self.assertTrue(message.is_multipart())
        self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
        self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
        self.assertEqual(message.get_all('to'), [email])
        self.assertTrue(re.match(r'^http://example.com/reset/[\w/-]+', message.get_payload(0).get_payload()))
        self.assertTrue(
            re.match(r'^<html><a href="http://example.com/reset/[\w/-]+/">Link</a></html>$',
                     message.get_payload(1).get_payload())
        )
class ReadOnlyPasswordHashTest(TestCase):
    """Tests for the read-only password hash widget and field."""

    def test_bug_19349_render_with_none_value(self):
        # Regression test for #19349: rendering with value=None must not
        # raise, and must fall back to the "no password" message.
        rendered = ReadOnlyPasswordHashWidget().render(
            name='password', value=None, attrs={})
        self.assertIn(_("No password set."), rendered)

    def test_readonly_field_has_changed(self):
        # A read-only field never reports a change, whatever the values are.
        self.assertFalse(ReadOnlyPasswordHashField().has_changed('aaa', 'bbb'))
| bsd-3-clause |
vintasoftware/tapioca-instagram | tapioca_instagram/resource_mapping.py | 1 | 4415 | # coding: utf-8
def _endpoint(resource, docs_anchor, methods):
    """Return one endpoint descriptor for the Instagram API mapping."""
    return {
        'resource': resource,
        'docs': 'https://instagram.com/developer/endpoints/' + docs_anchor,
        'methods': methods,
    }


# Tapioca resource mapping for the Instagram REST API. Every entry carries the
# URL template, a link to the official endpoint documentation and the HTTP
# methods the endpoint accepts.
RESOURCE_MAPPING = {
    # Users
    'user': _endpoint('users/{id}', 'users/#get_users', ['GET']),
    'self_feed': _endpoint('users/self/feed', 'users/#get_users_feed', ['GET']),
    'self_media_liked': _endpoint(
        'users/self/media/liked', 'users/#get_users_feed_liked', ['GET']),
    'self_requested_by': _endpoint(
        'users/self/requested-by', 'relationships/#get_incoming_requests', ['GET']),
    'user_media_recent': _endpoint(
        'users/{id}/media/recent', 'users/#get_users_media_recent', ['GET']),
    'user_search': _endpoint('users/search', 'users/#get_users_search', ['GET']),
    # Relationships
    'user_follows': _endpoint(
        'users/{id}/follows', 'relationships/#get_users_follows', ['GET']),
    'user_followed_by': _endpoint(
        'users/{id}/followed-by', 'relationships/#get_users_followed_by', ['GET']),
    'user_relationship': _endpoint(
        'users/{id}/relationship', 'relationships/#get_relationship', ['GET', 'POST']),
    # Media
    'media': _endpoint('media/{id}', 'media/#get_media', ['GET']),
    'media_shortcode': _endpoint(
        'media/shortcode/{shortcode}', 'media/#get_media_by_shortcode', ['GET']),
    'media_search': _endpoint('media/search', 'media/#get_media_search', ['GET']),
    'media_popular': _endpoint('media/popular', 'media/#get_media_popular', ['GET']),
    # Comments and likes
    'media_comments': _endpoint(
        'media/{id}/comments', 'comments/#get_media_comments', ['GET', 'POST']),
    'media_comment': _endpoint(
        'media/{id}/comments/{comment_id}', 'comments/#delete_media_comments', ['DELETE']),
    'media_likes': _endpoint(
        'media/{id}/likes', 'likes/#get_media_likes', ['GET', 'POST', 'DELETE']),
    # Tags
    'tag': _endpoint('tags/{name}', 'tags/#get_tags', ['GET']),
    'tag_media_recent': _endpoint(
        'tags/{name}/media/recent', 'tags/#get_tags_media_recent', ['GET']),
    'tag_search': _endpoint('tags/search', 'tags/#get_tags_search', ['GET']),
    # Locations and geographies
    'location': _endpoint('locations/{id}', 'locations/#get_locations', ['GET']),
    'location_media_recent': _endpoint(
        'locations/{id}/media/recent', 'locations/#get_locations_media_recent', ['GET']),
    'location_search': _endpoint(
        'locations/search', 'locations/#get_locations_search', ['GET']),
    'geography_media_recent': _endpoint(
        'geographies/{geo_id}/media/recent',
        'geographies/#get_geographies_media_recent', ['GET']),
}
| mit |
Antiun/odoo | addons/mrp_operations/mrp_operations.py | 193 | 27173 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp.osv import osv
import time
from datetime import datetime
from openerp.tools.translate import _
#----------------------------------------------------------
# Work Centers
#----------------------------------------------------------
# capacity_hour : capacity per hour. default: 1.0.
# Eg: If 5 concurrent operations at one time: capacity = 5 (because 5 employees)
# unit_per_cycle : how many units are produced for one cycle
class stock_move(osv.osv):
    """Extend stock.move with a reverse link to its child moves.

    `move_dest_id_lines` exposes, on a move, the moves that reference it as
    their destination (`move_dest_id`), so children can be browsed directly.
    """
    _inherit = 'stock.move'
    _columns = {
        'move_dest_id_lines': fields.one2many('stock.move', 'move_dest_id', 'Children Moves')
    }
class mrp_production_workcenter_line(osv.osv):
    """Work order line: scheduling (planned end date) and workflow state
    transitions (draft / startworking / pause / done / cancel), kept in sync
    with the parent manufacturing order's workflow."""

    def _get_date_end(self, cr, uid, ids, field_name, arg, context=None):
        """ Finds ending date.
        @return: Dictionary of values.
        """
        ops = self.browse(cr, uid, ids, context=context)
        # Only operations with a planned start date take part in the working
        # calendar lookup; the others simply get False below.
        date_and_hours_by_cal = [(op.date_planned, op.hour, op.workcenter_id.calendar_id.id) for op in ops if op.date_planned]
        intervals = self.pool.get('resource.calendar').interval_get_multi(cr, uid, date_and_hours_by_cal)
        res = {}
        for op in ops:
            res[op.id] = False
            if op.date_planned:
                i = intervals.get((op.date_planned, op.hour, op.workcenter_id.calendar_id.id))
                if i:
                    # End of the last working interval is the planned end.
                    res[op.id] = i[-1][1].strftime('%Y-%m-%d %H:%M:%S')
                else:
                    # No working interval found: fall back to the planned start.
                    res[op.id] = op.date_planned
        return res

    def onchange_production_id(self, cr, uid, ids, production_id, context=None):
        """Propagate product, quantity and UoM from the selected production
        order onto the work order line (form onchange helper)."""
        if not production_id:
            return {}
        production = self.pool.get('mrp.production').browse(cr, uid, production_id, context=None)
        result = {
            'product': production.product_id.id,
            'qty': production.product_qty,
            'uom': production.product_uom.id,
        }
        return {'value': result}

    # NOTE(review): _inherit/_order/_columns declared after the methods above;
    # valid for the OpenERP ORM, kept as-is.
    _inherit = 'mrp.production.workcenter.line'
    _order = "sequence, date_planned"
    _columns = {
        'state': fields.selection([('draft','Draft'),('cancel','Cancelled'),('pause','Pending'),('startworking', 'In Progress'),('done','Finished')],'Status', readonly=True, copy=False,
                                  help="* When a work order is created it is set in 'Draft' status.\n" \
                                       "* When user sets work order in start mode that time it will be set in 'In Progress' status.\n" \
                                       "* When work order is in running mode, during that time if user wants to stop or to make changes in order then can set in 'Pending' status.\n" \
                                       "* When the user cancels the work order it will be set in 'Canceled' status.\n" \
                                       "* When order is completely processed that time it is set in 'Finished' status."),
        'date_planned': fields.datetime('Scheduled Date', select=True),
        # Computed from date_planned + hour through the work center calendar.
        'date_planned_end': fields.function(_get_date_end, string='End Date', type='datetime'),
        'date_start': fields.datetime('Start Date'),
        'date_finished': fields.datetime('End Date'),
        'delay': fields.float('Working Hours',help="The elapsed time between operation start and stop in this Work Center",readonly=True),
        'production_state':fields.related('production_id','state',
                                          type='selection',
                                          selection=[('draft','Draft'),('confirmed','Waiting Goods'),('ready','Ready to Produce'),('in_production','In Production'),('cancel','Canceled'),('done','Done')],
                                          string='Production Status', readonly=True),
        'product':fields.related('production_id','product_id',type='many2one',relation='product.product',string='Product',
                                 readonly=True),
        'qty':fields.related('production_id','product_qty',type='float',string='Qty',readonly=True, store=True),
        'uom':fields.related('production_id','product_uom',type='many2one',relation='product.uom',string='Unit of Measure',readonly=True),
    }

    _defaults = {
        'state': 'draft',
        'delay': 0.0,
        'production_state': 'draft'
    }

    def modify_production_order_state(self, cr, uid, ids, action):
        """ Modifies production order state if work order state is changed.
        @param action: Action to perform.
        @return: Nothing
        """
        prod_obj_pool = self.pool.get('mrp.production')
        # NOTE(review): only the first work order in `ids` is considered here —
        # callers appear to pass a single id; confirm before batching.
        oper_obj = self.browse(cr, uid, ids)[0]
        prod_obj = oper_obj.production_id
        if action == 'start':
            # Push the production order into 'in_production' if possible.
            if prod_obj.state == 'confirmed':
                prod_obj_pool.force_production(cr, uid, [prod_obj.id])
                prod_obj_pool.signal_workflow(cr, uid, [prod_obj.id], 'button_produce')
            elif prod_obj.state == 'ready':
                prod_obj_pool.signal_workflow(cr, uid, [prod_obj.id], 'button_produce')
            elif prod_obj.state == 'in_production':
                return
            else:
                raise osv.except_osv(_('Error!'),_('Manufacturing order cannot be started in state "%s"!') % (prod_obj.state,))
        else:
            # Non-start action: if every work order of the production is done,
            # consume/produce and signal the production order as finished.
            open_count = self.search_count(cr,uid,[('production_id','=',prod_obj.id), ('state', '!=', 'done')])
            flag = not bool(open_count)
            if flag:
                for production in prod_obj_pool.browse(cr, uid, [prod_obj.id], context= None):
                    if production.move_lines or production.move_created_ids:
                        prod_obj_pool.action_produce(cr,uid, production.id, production.product_qty, 'consume_produce', context = None)
                prod_obj_pool.signal_workflow(cr, uid, [oper_obj.production_id.id], 'button_produce_done')
        return

    def write(self, cr, uid, ids, vals, context=None, update=True):
        """Standard write; when `date_planned` changes (and `update` is True),
        pull the production order's start date back to the earliest planned
        work order date."""
        result = super(mrp_production_workcenter_line, self).write(cr, uid, ids, vals, context=context)
        prod_obj = self.pool.get('mrp.production')
        if vals.get('date_planned', False) and update:
            for prod in self.browse(cr, uid, ids, context=context):
                if prod.production_id.workcenter_lines:
                    # Earliest of the new date and the first line's planned date
                    # (lines are ordered by "sequence, date_planned").
                    dstart = min(vals['date_planned'], prod.production_id.workcenter_lines[0]['date_planned'])
                    prod_obj.write(cr, uid, [prod.production_id.id], {'date_start':dstart}, context=context, mini=False)
        return result

    def action_draft(self, cr, uid, ids, context=None):
        """ Sets state to draft.
        @return: True
        """
        return self.write(cr, uid, ids, {'state': 'draft'}, context=context)

    def action_start_working(self, cr, uid, ids, context=None):
        """ Sets state to start working and writes starting date.
        @return: True
        """
        # May also push the parent production order into production.
        self.modify_production_order_state(cr, uid, ids, 'start')
        self.write(cr, uid, ids, {'state':'startworking', 'date_start': time.strftime('%Y-%m-%d %H:%M:%S')}, context=context)
        return True

    def action_done(self, cr, uid, ids, context=None):
        """ Sets state to done, writes finish date and calculates delay.
        @return: True
        """
        delay = 0.0
        date_now = time.strftime('%Y-%m-%d %H:%M:%S')
        # NOTE(review): the delay is computed from ids[0] only but written to
        # every id in `ids`; callers appear to pass a single id — confirm.
        obj_line = self.browse(cr, uid, ids[0])
        date_start = datetime.strptime(obj_line.date_start,'%Y-%m-%d %H:%M:%S')
        date_finished = datetime.strptime(date_now,'%Y-%m-%d %H:%M:%S')
        # Elapsed time expressed in hours (days * 24 + fractional seconds).
        delay += (date_finished-date_start).days * 24
        delay += (date_finished-date_start).seconds / float(60*60)
        self.write(cr, uid, ids, {'state':'done', 'date_finished': date_now,'delay':delay}, context=context)
        # Possibly close the parent production order as well.
        self.modify_production_order_state(cr,uid,ids,'done')
        return True

    def action_cancel(self, cr, uid, ids, context=None):
        """ Sets state to cancel.
        @return: True
        """
        return self.write(cr, uid, ids, {'state':'cancel'}, context=context)

    def action_pause(self, cr, uid, ids, context=None):
        """ Sets state to pause.
        @return: True
        """
        return self.write(cr, uid, ids, {'state':'pause'}, context=context)

    def action_resume(self, cr, uid, ids, context=None):
        """ Sets state to startworking.
        @return: True
        """
        return self.write(cr, uid, ids, {'state':'startworking'}, context=context)
class mrp_production(osv.osv):
    """Extend mrp.production to coordinate its workflow with work orders."""
    _inherit = 'mrp.production'
    _columns = {
        # When set, work orders of this production can be rescheduled
        # independently of the orders that depend on them.
        'allow_reorder': fields.boolean('Free Serialisation', help="Check this to be able to move independently all production orders, without moving dependent ones."),
    }
    def _production_date_end(self, cr, uid, ids, prop, unknow_none, context=None):
        """ Calculates planned end date of production order.
        @return: Dictionary of values
        """
        result = {}
        for prod in self.browse(cr, uid, ids, context=context):
            # Start from the order's own planned date, then take the latest
            # planned end among its work order lines.
            result[prod.id] = prod.date_planned
            for line in prod.workcenter_lines:
                result[prod.id] = max(line.date_planned_end, result[prod.id])
        return result
def action_production_end(self, cr, uid, ids, context=None):
""" Finishes work order if production order is done.
@return: Super method
"""
obj = self.browse(cr, uid, ids, context=context)[0]
workcenter_pool = self.pool.get('mrp.production.workcenter.line')
for workcenter_line in obj.workcenter_lines:
if workcenter_line.state == 'draft':
workcenter_line.signal_workflow('button_start_working')
workcenter_line.signal_workflow('button_done')
return super(mrp_production,self).action_production_end(cr, uid, ids, context=context)
def action_in_production(self, cr, uid, ids, context=None):
""" Changes state to In Production and writes starting date.
@return: True
"""
workcenter_pool = self.pool.get('mrp.production.workcenter.line')
for prod in self.browse(cr, uid, ids):
if prod.workcenter_lines:
workcenter_pool.signal_workflow(cr, uid, [prod.workcenter_lines[0].id], 'button_start_working')
return super(mrp_production,self).action_in_production(cr, uid, ids, context=context)
def action_cancel(self, cr, uid, ids, context=None):
""" Cancels work order if production order is canceled.
@return: Super method
"""
workcenter_pool = self.pool.get('mrp.production.workcenter.line')
obj = self.browse(cr, uid, ids,context=context)[0]
workcenter_pool.signal_workflow(cr, uid, [record.id for record in obj.workcenter_lines], 'button_cancel')
return super(mrp_production,self).action_cancel(cr,uid,ids,context=context)
def _compute_planned_workcenter(self, cr, uid, ids, context=None, mini=False):
""" Computes planned and finished dates for work order.
@return: Calculated date
"""
dt_end = datetime.now()
if context is None:
context = {}
for po in self.browse(cr, uid, ids, context=context):
dt_end = datetime.strptime(po.date_planned, '%Y-%m-%d %H:%M:%S')
if not po.date_start:
self.write(cr, uid, [po.id], {
'date_start': po.date_planned
}, context=context, update=False)
old = None
for wci in range(len(po.workcenter_lines)):
wc = po.workcenter_lines[wci]
if (old is None) or (wc.sequence>old):
dt = dt_end
if context.get('__last_update'):
del context['__last_update']
if (wc.date_planned < dt.strftime('%Y-%m-%d %H:%M:%S')) or mini:
self.pool.get('mrp.production.workcenter.line').write(cr, uid, [wc.id], {
'date_planned': dt.strftime('%Y-%m-%d %H:%M:%S')
}, context=context, update=False)
i = self.pool.get('resource.calendar').interval_get(
cr,
uid,
#passing False makes resource_resource._schedule_hours run 1000 iterations doing nothing
wc.workcenter_id.calendar_id and wc.workcenter_id.calendar_id.id or None,
dt,
wc.hour or 0.0
)
if i:
dt_end = max(dt_end, i[-1][1])
else:
dt_end = datetime.strptime(wc.date_planned_end, '%Y-%m-%d %H:%M:%S')
old = wc.sequence or 0
super(mrp_production, self).write(cr, uid, [po.id], {
'date_finished': dt_end
})
return dt_end
def _move_pass(self, cr, uid, ids, context=None):
""" Calculates start date for stock moves finding interval from resource calendar.
@return: True
"""
for po in self.browse(cr, uid, ids, context=context):
if po.allow_reorder:
continue
todo = list(po.move_lines)
dt = datetime.strptime(po.date_start,'%Y-%m-%d %H:%M:%S')
while todo:
l = todo.pop(0)
if l.state in ('done','cancel','draft'):
continue
todo += l.move_dest_id_lines
date_end = l.production_id.date_finished
if date_end and datetime.strptime(date_end, '%Y-%m-%d %H:%M:%S') > dt:
if l.production_id.state not in ('done','cancel'):
for wc in l.production_id.workcenter_lines:
i = self.pool.get('resource.calendar').interval_min_get(
cr,
uid,
wc.workcenter_id.calendar_id.id or False,
dt, wc.hour or 0.0
)
dt = i[0][0]
if l.production_id.date_start > dt.strftime('%Y-%m-%d %H:%M:%S'):
self.write(cr, uid, [l.production_id.id], {'date_start':dt.strftime('%Y-%m-%d %H:%M:%S')}, mini=True)
return True
def _move_futur(self, cr, uid, ids, context=None):
""" Calculates start date for stock moves.
@return: True
"""
for po in self.browse(cr, uid, ids, context=context):
if po.allow_reorder:
continue
for line in po.move_created_ids:
l = line
while l.move_dest_id:
l = l.move_dest_id
if l.state in ('done','cancel','draft'):
break
if l.production_id.state in ('done','cancel'):
break
if l.production_id and (l.production_id.date_start < po.date_finished):
self.write(cr, uid, [l.production_id.id], {'date_start': po.date_finished})
break
return True
def write(self, cr, uid, ids, vals, context=None, update=True, mini=True):
direction = {}
if vals.get('date_start', False):
for po in self.browse(cr, uid, ids, context=context):
direction[po.id] = cmp(po.date_start, vals.get('date_start', False))
result = super(mrp_production, self).write(cr, uid, ids, vals, context=context)
if (vals.get('workcenter_lines', False) or vals.get('date_start', False) or vals.get('date_planned', False)) and update:
self._compute_planned_workcenter(cr, uid, ids, context=context, mini=mini)
for d in direction:
if direction[d] == 1:
# the production order has been moved to the passed
self._move_pass(cr, uid, [d], context=context)
pass
elif direction[d] == -1:
self._move_futur(cr, uid, [d], context=context)
# the production order has been moved to the future
pass
return result
def action_compute(self, cr, uid, ids, properties=None, context=None):
""" Computes bills of material of a product and planned date of work order.
@param properties: List containing dictionaries of properties.
@return: No. of products.
"""
result = super(mrp_production, self).action_compute(cr, uid, ids, properties=properties, context=context)
self._compute_planned_workcenter(cr, uid, ids, context=context)
return result
class mrp_operations_operation_code(osv.osv):
    # Catalogue of operation codes: each code maps a short identifier to one
    # of the work-order transitions (start/pause/resume/cancel/done).
    _name="mrp_operations.operation.code"
    _columns={
        'name': fields.char('Operation Name', required=True),
        'code': fields.char('Code', size=16, required=True),
        # Which work-order state transition this code triggers when used.
        'start_stop': fields.selection([('start','Start'),('pause','Pause'),('resume','Resume'),('cancel','Cancelled'),('done','Done')], 'Status', required=True),
    }
class mrp_operations_operation(osv.osv):
    # An "operation" is a logged barcode/code event against a production
    # order + work center pair; creating/writing one drives the matching
    # work-order line's workflow (start/pause/resume/cancel/done).
    _name="mrp_operations.operation"

    def _order_date_search_production(self, cr, uid, ids, context=None):
        """ Finds operations for a production order.

        Store trigger: invalidates order_date for operations of the
        production order(s) in `ids` when date_planned changes.
        @return: List of ids
        """
        # NOTE(review): only ids[0] is considered — assumes single-record
        # trigger calls; confirm for batch writes.
        operation_ids = self.pool.get('mrp_operations.operation').search(cr, uid, [('production_id','=',ids[0])], context=context)
        return operation_ids

    def _get_order_date(self, cr, uid, ids, field_name, arg, context=None):
        """ Calculates planned date for an operation.
        @return: Dictionary of values
        """
        res={}
        operation_obj = self.browse(cr, uid, ids, context=context)
        for operation in operation_obj:
            res[operation.id] = operation.production_id.date_planned
        return res

    def calc_delay(self, cr, uid, vals):
        """ Calculates delay of work order.

        Builds the chronological list of (code, start date) events for the
        production/work-center pair including the new event in `vals`, then
        sums the hours between each start/resume and the following
        pause/done/cancel.
        @return: Delay (float, hours)
        """
        code_lst = []
        time_lst = []
        code_ids = self.pool.get('mrp_operations.operation.code').search(cr, uid, [('id','=',vals['code_id'])])
        code = self.pool.get('mrp_operations.operation.code').browse(cr, uid, code_ids)[0]
        oper_ids = self.search(cr,uid,[('production_id','=',vals['production_id']),('workcenter_id','=',vals['workcenter_id'])])
        oper_objs = self.browse(cr,uid,oper_ids)
        for oper in oper_objs:
            code_lst.append(oper.code_id.start_stop)
            time_lst.append(oper.date_start)
        # Append the event being recorded now so it takes part in the pairing.
        code_lst.append(code.start_stop)
        time_lst.append(vals['date_start'])
        diff = 0
        for i in range(0,len(code_lst)):
            if code_lst[i] == 'pause' or code_lst[i] == 'done' or code_lst[i] == 'cancel':
                # A stop event only counts when preceded by a running event.
                if not i: continue
                if code_lst[i-1] not in ('resume','start'):
                    continue
                a = datetime.strptime(time_lst[i-1],'%Y-%m-%d %H:%M:%S')
                b = datetime.strptime(time_lst[i],'%Y-%m-%d %H:%M:%S')
                diff += (b-a).days * 24
                diff += (b-a).seconds / float(60*60)
        return diff

    def check_operation(self, cr, uid, vals):
        """ Finds which operation is called ie. start, pause, done, cancel.

        Validates the new event against the sequence already recorded and
        raises a user-facing error for illegal transitions.
        @param vals: Dictionary of values.
        @return: True or False
        """
        code_ids=self.pool.get('mrp_operations.operation.code').search(cr,uid,[('id','=',vals['code_id'])])
        code=self.pool.get('mrp_operations.operation.code').browse(cr,uid,code_ids)[0]
        code_lst = []
        oper_ids=self.search(cr,uid,[('production_id','=',vals['production_id']),('workcenter_id','=',vals['workcenter_id'])])
        oper_objs=self.browse(cr,uid,oper_ids)
        if not oper_objs:
            # No prior event: only 'start' is legal.
            if code.start_stop!='start':
                raise osv.except_osv(_('Sorry!'),_('Operation is not started yet!'))
                # NOTE: unreachable after raise (kept as-is).
                return False
        else:
            for oper in oper_objs:
                code_lst.append(oper.code_id.start_stop)
            if code.start_stop=='start':
                if 'start' in code_lst:
                    raise osv.except_osv(_('Sorry!'),_('Operation has already started! You can either Pause/Finish/Cancel the operation.'))
                    return False
            if code.start_stop=='pause':
                # Pause is legal only right after start/resume.
                if code_lst[len(code_lst)-1]!='resume' and code_lst[len(code_lst)-1]!='start':
                    raise osv.except_osv(_('Error!'),_('In order to Pause the operation, it must be in the Start or Resume state!'))
                    return False
            if code.start_stop=='resume':
                if code_lst[len(code_lst)-1]!='pause':
                    raise osv.except_osv(_('Error!'),_('In order to Resume the operation, it must be in the Pause state!'))
                    return False
            if code.start_stop=='done':
                if code_lst[len(code_lst)-1]!='start' and code_lst[len(code_lst)-1]!='resume':
                    raise osv.except_osv(_('Sorry!'),_('In order to Finish the operation, it must be in the Start or Resume state!'))
                    return False
                if 'cancel' in code_lst:
                    raise osv.except_osv(_('Sorry!'),_('Operation is Already Cancelled!'))
                    return False
            if code.start_stop=='cancel':
                if not 'start' in code_lst :
                    raise osv.except_osv(_('Error!'),_('No operation to cancel.'))
                    return False
                if 'done' in code_lst:
                    raise osv.except_osv(_('Error!'),_('Operation is already finished!'))
                    return False
        return True

    def write(self, cr, uid, ids, vals, context=None):
        # Rewriting an operation re-validates the transition and refreshes
        # the work-order line's delay when the start date changes.
        oper_objs = self.browse(cr, uid, ids, context=context)[0]
        vals['production_id']=oper_objs.production_id.id
        vals['workcenter_id']=oper_objs.workcenter_id.id
        if 'code_id' in vals:
            self.check_operation(cr, uid, vals)
        if 'date_start' in vals:
            vals['date_start']=vals['date_start']
            vals['code_id']=oper_objs.code_id.id
            delay=self.calc_delay(cr, uid, vals)
            wc_op_id=self.pool.get('mrp.production.workcenter.line').search(cr,uid,[('workcenter_id','=',vals['workcenter_id']),('production_id','=',vals['production_id'])])
            self.pool.get('mrp.production.workcenter.line').write(cr,uid,wc_op_id,{'delay':delay})
        return super(mrp_operations_operation, self).write(cr, uid, ids, vals, context=context)

    def create(self, cr, uid, vals, context=None):
        # Recording an operation code drives the matching work-order line:
        # creates the line if missing, fires its workflow signal, and stores
        # the computed delay and start/finish dates.
        workcenter_pool = self.pool.get('mrp.production.workcenter.line')
        code_ids=self.pool.get('mrp_operations.operation.code').search(cr,uid,[('id','=',vals['code_id'])])
        code=self.pool.get('mrp_operations.operation.code').browse(cr, uid, code_ids, context=context)[0]
        wc_op_id=workcenter_pool.search(cr,uid,[('workcenter_id','=',vals['workcenter_id']),('production_id','=',vals['production_id'])])
        if code.start_stop in ('start','done','pause','cancel','resume'):
            if not wc_op_id:
                production_obj=self.pool.get('mrp.production').browse(cr, uid, vals['production_id'], context=context)
                wc_op_id.append(workcenter_pool.create(cr,uid,{'production_id':vals['production_id'],'name':production_obj.product_id.name,'workcenter_id':vals['workcenter_id']}))
            if code.start_stop=='start':
                workcenter_pool.action_start_working(cr,uid,wc_op_id)
                workcenter_pool.signal_workflow(cr, uid, [wc_op_id[0]], 'button_start_working')
            if code.start_stop=='done':
                workcenter_pool.action_done(cr,uid,wc_op_id)
                workcenter_pool.signal_workflow(cr, uid, [wc_op_id[0]], 'button_done')
                self.pool.get('mrp.production').write(cr,uid,vals['production_id'],{'date_finished':datetime.now().strftime('%Y-%m-%d %H:%M:%S')})
            if code.start_stop=='pause':
                workcenter_pool.action_pause(cr,uid,wc_op_id)
                workcenter_pool.signal_workflow(cr, uid, [wc_op_id[0]], 'button_pause')
            if code.start_stop=='resume':
                workcenter_pool.action_resume(cr,uid,wc_op_id)
                workcenter_pool.signal_workflow(cr, uid, [wc_op_id[0]], 'button_resume')
            if code.start_stop=='cancel':
                workcenter_pool.action_cancel(cr,uid,wc_op_id)
                workcenter_pool.signal_workflow(cr, uid, [wc_op_id[0]], 'button_cancel')
            # NOTE(review): validation happens AFTER the workflow signal was
            # already fired, and an invalid operation returns None instead of
            # a record id — confirm this is intentional.
            if not self.check_operation(cr, uid, vals):
                return
            delay=self.calc_delay(cr, uid, vals)
            line_vals = {}
            line_vals['delay'] = delay
            if vals.get('date_start',False):
                if code.start_stop == 'done':
                    line_vals['date_finished'] = vals['date_start']
                elif code.start_stop == 'start':
                    line_vals['date_start'] = vals['date_start']
            self.pool.get('mrp.production.workcenter.line').write(cr, uid, wc_op_id, line_vals, context=context)
        return super(mrp_operations_operation, self).create(cr, uid, vals, context=context)

    def initialize_workflow_instance(self, cr, uid, context=None):
        # Migration helper: (re)creates workflow instances for every
        # existing work-order line.
        mrp_production_workcenter_line = self.pool.get('mrp.production.workcenter.line')
        line_ids = mrp_production_workcenter_line.search(cr, uid, [], context=context)
        mrp_production_workcenter_line.create_workflow(cr, uid, line_ids)
        return True

    _columns={
        'production_id':fields.many2one('mrp.production','Production',required=True),
        'workcenter_id':fields.many2one('mrp.workcenter','Work Center',required=True),
        'code_id':fields.many2one('mrp_operations.operation.code','Code',required=True),
        'date_start': fields.datetime('Start Date'),
        'date_finished': fields.datetime('End Date'),
        # Mirrors the production order's planned date; recomputed when it changes.
        'order_date': fields.function(_get_order_date,string='Order Date',type='date',store={'mrp.production':(_order_date_search_production,['date_planned'], 10)}),
    }
    _defaults={
        'date_start': lambda *a:datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
aricchen/openHR | openerp/addons/sale/wizard/sale_make_invoice.py | 17 | 3187 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class sale_make_invoice(osv.osv_memory):
    # Wizard that creates invoices for the sale orders selected in the
    # client (context['active_ids']), optionally grouping them per customer.
    _name = "sale.make.invoice"
    _description = "Sales Make Invoice"
    _columns = {
        'grouped': fields.boolean('Group the invoices', help='Check the box to group the invoices for the same customers'),
        'invoice_date': fields.date('Invoice Date'),
    }
    _defaults = {
        'grouped': False,
        'invoice_date': fields.date.context_today,
    }

    def view_init(self, cr, uid, fields_list, context=None):
        """Refuse to open the wizard on a draft sale order."""
        if context is None:
            context = {}
        record_id = context and context.get('active_id', False)
        order = self.pool.get('sale.order').browse(cr, uid, record_id, context=context)
        if order.state == 'draft':
            raise osv.except_osv(_('Warning!'), _('You cannot create invoice when sales order is not confirmed.'))
        return False

    def make_invoices(self, cr, uid, ids, context=None):
        """Create invoices for the selected sale orders and return the
        act_window showing the newly created invoices.
        """
        order_obj = self.pool.get('sale.order')
        mod_obj = self.pool.get('ir.model.data')
        act_obj = self.pool.get('ir.actions.act_window')
        newinv = []
        if context is None:
            context = {}
        # Hoisted: the original looked this up three times with misleading
        # parentheses -- context.get(('active_ids'), []).
        active_ids = context.get('active_ids', [])
        data = self.read(cr, uid, ids)[0]
        for sale_order in order_obj.browse(cr, uid, active_ids, context=context):
            if sale_order.state != 'manual':
                raise osv.except_osv(_('Warning!'), _("You shouldn't manually invoice the following sale order %s") % (sale_order.name))
        order_obj.action_invoice_create(cr, uid, active_ids, data['grouped'], date_invoice=data['invoice_date'])
        # Collect the ids of all invoices now attached to the orders.
        for o in order_obj.browse(cr, uid, active_ids, context=context):
            for i in o.invoice_ids:
                newinv.append(i.id)
        result = mod_obj.get_object_reference(cr, uid, 'account', 'action_invoice_tree1')
        id = result and result[1] or False
        result = act_obj.read(cr, uid, [id], context=context)[0]
        # Restrict the standard invoice action to the invoices just created.
        result['domain'] = "[('id','in', [" + ','.join(map(str, newinv)) + "])]"
        return result
sale_make_invoice()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
endlessm/chromium-browser | third_party/catapult/third_party/gsutil/gslib/vendored/boto/tests/unit/emr/test_instance_group_args.py | 112 | 2056 | #!/usr/bin/env python
# Author: Charlie Schluting <charlie@schluting.com>
#
# Test to ensure initalization of InstanceGroup object emits appropriate errors
# if bidprice is not specified, but allows float, int, Decimal.
from decimal import Decimal
from tests.compat import unittest
from boto.emr.instance_group import InstanceGroup
class TestInstanceGroupArgs(unittest.TestCase):
    # Validates InstanceGroup constructor handling of the `bidprice`
    # argument across market types and numeric/string representations.

    def test_bidprice_missing_spot(self):
        """
        Test InstanceGroup init raises ValueError when market==spot and
        bidprice is not specified.
        """
        with self.assertRaisesRegexp(ValueError, 'bidprice must be specified'):
            InstanceGroup(1, 'MASTER', 'm1.small',
                          'SPOT', 'master')

    def test_bidprice_missing_ondemand(self):
        """
        Test InstanceGroup init accepts a missing bidprice arg, when market is
        ON_DEMAND.
        """
        # Success is simply "no exception raised" during construction.
        instance_group = InstanceGroup(1, 'MASTER', 'm1.small',
                                       'ON_DEMAND', 'master')

    def test_bidprice_Decimal(self):
        """
        Test InstanceGroup init works with bidprice type = Decimal.
        """
        instance_group = InstanceGroup(1, 'MASTER', 'm1.small',
                                       'SPOT', 'master', bidprice=Decimal(1.10))
        # Only the leading '1.10' is compared: Decimal(1.10) carries float
        # noise in the trailing digits.
        self.assertEquals('1.10', instance_group.bidprice[:4])

    def test_bidprice_float(self):
        """
        Test InstanceGroup init works with bidprice type = float.
        """
        instance_group = InstanceGroup(1, 'MASTER', 'm1.small',
                                       'SPOT', 'master', bidprice=1.1)
        self.assertEquals('1.1', instance_group.bidprice)

    def test_bidprice_string(self):
        """
        Test InstanceGroup init works with bidprice type = string.
        """
        instance_group = InstanceGroup(1, 'MASTER', 'm1.small',
                                       'SPOT', 'master', bidprice='1.1')
        self.assertEquals('1.1', instance_group.bidprice)
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
OffenesJena/codefor.de | download_avatars.py | 8 | 2259 | #!/usr/bin/env python
import glob
import os
import logging
import requests
import yaml
logging.basicConfig(level=logging.DEBUG)
BLACKLIST = ('blog', '_site',)
def get_github(username):
    """Return the avatar URL for a GitHub user, or False when the API
    response carries no 'avatar_url' field (e.g. unknown user)."""
    response = requests.get('https://api.github.com/users/' + username)
    return response.json().get('avatar_url', False)
avatar_source = [
('github', get_github),
# ('twitter', get_twitter)
]
AVATAR_PATH = 'img/avatars/'
def main():
    # Usernames of avatars already downloaded (basename without extension).
    existing = glob.glob(AVATAR_PATH + '*.jpg')
    existing = [x.split('/')[-1].split('.')[0] for x in existing]
    labs = [x for x in glob.glob('_labs/*.yml') if not x.startswith(BLACKLIST)]
    for lab in labs:
        logging.debug('Processing Lab %s', lab)
        with open(lab) as f:
            contents = f.read()
        # Jekyll-style files: '---\n<frontmatter>---\n<body>'. With maxsplit 2
        # a full file yields three parts (except branch); a frontmatter-only
        # file yields two (try branch).
        try:
            _, frontmatter = contents.split('---\n', 2)
        except ValueError:
            _, frontmatter, _ = contents.split('---\n', 2)
        # HACK: yaml.load without an explicit Loader is unsafe on untrusted
        # input; frontmatter here comes from the repo's own files.
        meta = yaml.load(frontmatter)
        if 'members' not in meta:
            continue
        for member in meta['members']:
            if member['name'] is None:
                continue
            logging.debug('Processing Lab Member %s', member['name'])
            # Try each avatar source until one yields an image.
            for source, get_image in avatar_source:
                logging.debug('Checking %s for %s', source, member['name'])
                key = 'username-%s' % source
                if key not in member:
                    continue
                username = member[key]
                # Already on disk: skip this member entirely.
                if username in existing:
                    break
                image_url = get_image(username)
                if not image_url:
                    continue
                image = requests.get(image_url, stream=True)
                image_path = os.path.join(AVATAR_PATH, username + '.jpg')
                logging.debug('Downloading image to %s', image_path)
                with open(image_path, 'wb') as fd:
                    for chunk in image.iter_content(1024):
                        fd.write(chunk)
                break
| mit |
hsoft/pdfmasher | ebooks/oeb/transforms/metadata.py | 1 | 3518 | # Copyright 2009, Kovid Goyal <kovid@kovidgoyal.net>
# Copyright 2013 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPL v3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/gplv3_license
import os
from datetime import datetime
from ...utils.mimetypes import guess_type
def meta_info_to_oeb_metadata(mi, m, override_input_metadata=False):
    """Copy book metadata from *mi* (a metadata-info object exposing
    is_null()/title/authors/... fields) onto *m* (an OEB metadata container
    with add/clear/filter).

    Fields that are unset on *mi* are left untouched on *m*, unless
    *override_input_metadata* is True, in which case most of them are
    cleared instead.
    """
    from ..base import OPF
    if not mi.is_null('title'):
        m.clear('title')
        m.add('title', mi.title)
    if mi.title_sort:
        # Fall back to title_sort as the title when no title survived.
        if not m.title:
            m.add('title', mi.title_sort)
        m.clear('title_sort')
        m.add('title_sort', mi.title_sort)
    if not mi.is_null('authors'):
        # Drop existing authors (role 'aut' or unset) before re-adding.
        m.filter('creator', lambda x : x.role.lower() in ['aut', ''])
        for a in mi.authors:
            attrib = {'role':'aut'}
            if mi.author_sort:
                attrib[OPF('file-as')] = mi.author_sort
            m.add('creator', a, attrib=attrib)
    if not mi.is_null('book_producer'):
        m.filter('contributor', lambda x : x.role.lower() == 'bkp')
        m.add('contributor', mi.book_producer, role='bkp')
    elif override_input_metadata:
        m.filter('contributor', lambda x : x.role.lower() == 'bkp')
    if not mi.is_null('comments'):
        m.clear('description')
        m.add('description', mi.comments)
    elif override_input_metadata:
        m.clear('description')
    if not mi.is_null('publisher'):
        m.clear('publisher')
        m.add('publisher', mi.publisher)
    elif override_input_metadata:
        m.clear('publisher')
    if not mi.is_null('series'):
        m.clear('series')
        m.add('series', mi.series)
    elif override_input_metadata:
        m.clear('series')
    identifiers = mi.get_identifiers()
    set_isbn = False
    for typ, val in identifiers.items():
        has = False
        if typ.lower() == 'isbn':
            set_isbn = True
        # Update an existing identifier of the same scheme in place;
        # otherwise add a new one.
        for x in m.identifier:
            if x.scheme.lower() == typ.lower():
                x.content = val
                has = True
        if not has:
            m.add('identifier', val, scheme=typ.upper())
    # When overriding and mi carried no ISBN, strip any stale ISBN from m.
    if override_input_metadata and not set_isbn:
        m.filter('identifier', lambda x: x.scheme.lower() == 'isbn')
    if not mi.is_null('language'):
        m.clear('language')
        m.add('language', mi.language)
    if not mi.is_null('series_index'):
        m.clear('series_index')
        m.add('series_index', mi.format_series_index())
    elif override_input_metadata:
        m.clear('series_index')
    if not mi.is_null('rating'):
        m.clear('rating')
        m.add('rating', '%.2f'%mi.rating)
    elif override_input_metadata:
        m.clear('rating')
    if not mi.is_null('tags'):
        m.clear('subject')
        for t in mi.tags:
            m.add('subject', t)
    elif override_input_metadata:
        m.clear('subject')
    if not mi.is_null('pubdate'):
        m.clear('date')
        m.add('date', mi.pubdate.isoformat())
    if not mi.is_null('timestamp'):
        m.clear('timestamp')
        m.add('timestamp', mi.timestamp.isoformat())
    if not mi.is_null('rights'):
        m.clear('rights')
        m.add('rights', mi.rights)
    if not mi.is_null('publication_type'):
        m.clear('publication_type')
        m.add('publication_type', mi.publication_type)
    # Guarantee a timestamp so downstream consumers always find one.
    if not m.timestamp:
        m.add('timestamp', datetime.now().isoformat())
| gpl-3.0 |
usc-isi-i2/WEDC | wedc/domain/core/data/loader.py | 1 | 4946 | import os
from wedc.domain.entities.post import Post
# from wedc.domain.core.data.loaders import es_loader
from wedc.domain.core.data import cleaner
mapping = None
def load_input(input):
    """Load intermediate data from *input* and shape it into program rows.

    Each returned row is [pid, sid, label, extraction]:
      pid        -- 1-based position, used inside the program
      sid        -- unique id carried over from the original data
      label      -- 0, meaning "unknown" for fresh input
      extraction -- space-separated tokens extracted from the content
    """
    imd_dataset = load_intermediate_data(input)
    dataset = []
    # enumerate(start=1) replaces the original manual `pid = i + 1`.
    for pid, data in enumerate(imd_dataset, 1):
        sid = data[0]
        extraction = generate_extraction(data[1])
        dataset.append([pid, sid, 0, extraction])
    return dataset
def load_db(start_pid=1):
    """Load labelled rows from the database.

    Each returned row is [pid, sid, label, extraction]; sid is empty for
    database-sourced rows and pids start at *start_pid*.
    """
    from wedc.infrastructure.model.labelled_data import LabelledData
    return [
        [start_pid + offset, '', int(record.label), str(record.extraction)]
        for offset, record in enumerate(LabelledData.load_data())
    ]
def generate_compressed_data(input=None):
    """Combine freshly loaded input rows (if any) with labelled DB rows,
    numbering the DB rows after the input rows."""
    dataset = load_input(input) if input else []
    dataset.extend(load_db(start_pid=len(dataset) + 1))
    return dataset
#######################################################
# Common
#######################################################
def generate_extraction(content):
try:
post = Post('', '', content)
except Exception as e:
print e
return ''
else:
return post.body
#######################################################
# Intermediate Data
#######################################################
def load_intermediate_data(path, format='jsonlines'):
    """Read intermediate [sid, content] rows from a jsonlines or csv file.

    Unknown formats yield an empty list.
    """
    rows = []
    if format == 'jsonlines':
        import jsonlines
        reader = jsonlines.open(path, mode='r')
        rows.extend([record['sid'], record['content']] for record in reader)
    elif format == 'csv':
        import csv
        with open(path, 'rb') as handle:
            rows.extend(csv.reader(handle))
    return rows
def generate_intermediate_data(dataset, output_path, format='jsonlines'):
    """Write [sid, content, ...] rows to *output_path* as jsonlines or csv."""
    if format == 'jsonlines':
        import jsonlines
        writer = jsonlines.open(output_path, mode='w')
        for row in dataset:
            writer.dump({'sid': row[0], 'content': row[1]})
    elif format == 'csv':
        import csv
        with open(output_path, 'wb') as handle:
            csv.writer(handle).writerows(dataset)
#######################################################
# Load with pid
#######################################################
def load_data(input, output, no_dups=False):
    """Load posts via es_loader, optionally de-duplicate them, and write the
    result to *output*.

    NOTE(review): the `es_loader` import at the top of this file is commented
    out, so calling this raises NameError -- confirm whether this loader path
    is still live.
    """
    data = es_loader.load(input)
    if no_dups:
        # Keep an untouched copy alongside the de-duplicated output, plus a
        # '<output>_mapping' file written by the cleaner.
        origin_backup_file = open(output+'_with_dups', 'wb')
        origin_backup_file.writelines(data)
        origin_backup_file.close()
        data = cleaner.remove_dups(data, output+'_mapping')
    target_file = open(output, 'wb')
    target_file.writelines(data)
    target_file.close()
    return data
def load_data_by_post_id(path, post_id, no_dups=False):
    """Load a single post, translating the id through the module-level
    `mapping` when de-duplicated ids are in use.

    NOTE(review): `mapping` is None at module level and `es_loader` is a
    commented-out import -- this path appears stale.
    """
    target_id = post_id
    if no_dups and mapping:
        target_id = mapping[post_id]
    return es_loader.load_post(path, target_id)
def load_data_by_post_id_set(path, post_id_set, output, no_dups=False):
    """Dump raw and processed text for a set of posts into *output* as a
    human-readable report (header banner, raw text, processed text).

    NOTE(review): depends on `es_loader` (import commented out at top) and
    the module-level `mapping`, which is None unless set elsewhere.
    """
    import re
    target_id_set = post_id_set
    if no_dups and mapping:
        target_id_set = [mapping[post_id] for post_id in post_id_set]
    raw_posts, posts = es_loader.load_post_by_id_set(path, target_id_set)
    target_file = open(output, 'wb')
    for i in range(len(raw_posts)):
        # Flatten whitespace/escapes and strip non-ASCII for the report.
        text = re.sub(r"([\t\n\r]|\\+)", " ", raw_posts[i])
        text = text.encode('ascii', 'ignore')
        target_file.write('#'*40 + '\n')
        target_file.write(' '*6 + 'Post id: ' + str(post_id_set[i]) + ' & ' + str(target_id_set[i]) + ' Label: ' +' \n')
        target_file.write('#'*40 + '\n')
        target_file.write(text + '\n')
        target_file.write('#'*40 + '\n')
        target_file.write(posts[i])
        target_file.write('#'*40 + '\n'*3)
    target_file.close()
def load_nodups2dups_mapping(path):
    """Read a tab-separated file of integer id pairs into a dict.

    Each line is '<nodups_id>\t<dups_id>'; returns {nodups_id: dups_id}.
    """
    mapping = {}
    # 'r' (text mode) instead of 'rb': the file is line-oriented text, and
    # binary mode would yield bytes that cannot be split on the str '\t'
    # under Python 3.
    with open(path, 'r') as f:
        for line in f:
            line = line.strip().split('\t')
            mapping[int(line[0])] = int(line[1])
    return mapping
| apache-2.0 |
mrquim/repository.mrquim | repo/plugin.video.netflix/resources/lib/MSLv2.py | 4 | 28905 | # pylint: skip-file
# -*- coding: utf-8 -*-
# Author: trummerjo
# Module: MSLHttpRequestHandler
# Created on: 26.01.2017
# License: MIT https://goo.gl/5bMj3H
import re
import sys
import zlib
import gzip
import json
import time
import base64
import random
import uuid
from StringIO import StringIO
from datetime import datetime
import requests
import xml.etree.ElementTree as ET
import xbmcaddon
#check if we are on Android
import subprocess
try:
sdkversion = int(subprocess.check_output(
['/system/bin/getprop', 'ro.build.version.sdk']))
except:
sdkversion = 0
if sdkversion >= 18:
from MSLMediaDrm import MSLMediaDrmCrypto as MSLHandler
else:
from MSLCrypto import MSLCrypto as MSLHandler
class MSL(object):
    # Netflix MSL (Message Security Layer) client.
    # NOTE: all attributes below are class-level and therefore shared across
    # instances (session, tokens, handshake state).
    # Is a handshake already performed and the keys loaded
    handshake_performed = False
    last_license_url = ''
    # Contexts returned by the last manifest, needed for license requests.
    last_drm_context = ''
    last_playback_context = ''
    current_message_id = 0
    session = requests.session()
    # Cryptographically strong RNG for message ids/nonces.
    rndm = random.SystemRandom()
    tokens = []
    base_url = 'https://www.netflix.com/nq/msl_v1/cadmium/'
    endpoints = {
        'manifest': base_url + 'pbo_manifests/%5E1.0.0/router',
        #'license': base_url + 'pbo_licenses/%5E1.0.0/router'
        'license': 'http://www.netflix.com/api/msl/NFCDCH-LX/cadmium/license'
    }
def __init__(self, nx_common):
    """
    The Constructor checks for already existing crypto Keys.
    If they exist it will load the existing keys
    (from msl_data.json in the addon data path); otherwise a fresh
    MSL key handshake is performed.
    """
    self.nx_common = nx_common
    # MSLHandler is either the MediaDrm- or the software-crypto backend,
    # selected at import time based on the Android SDK version.
    self.crypto = MSLHandler(nx_common)
    if self.nx_common.file_exists(self.nx_common.data_path, 'msl_data.json'):
        self.init_msl_data()
    else:
        self.crypto.fromDict(None)
        self.__perform_key_handshake()
def load_manifest(self, viewable_id, dolby, hevc, hdr, dolbyvision, vp9):
    """
    Loads the manifest for the given viewable_id and
    returns a mpd-XML-Manifest.

    :param viewable_id: The id of the viewable
    :param dolby: include Dolby Digital Plus audio profiles
    :param hevc: include HEVC video profiles
    :param hdr: include HDR10 profiles (only honoured when hevc is True)
    :param dolbyvision: include Dolby Vision profiles (only when hevc is True)
    :param vp9: include VP9 profiles (also added when hevc is False)
    :return: MPD XML Manifest or False if no success
    """
    esn = self.nx_common.get_esn()
    manifest_request_data = {
        'version': 2,
        'url': '/manifest',
        'id': 15423166626396,
        'esn': esn,
        'languages': ['de-US'],
        'uiVersion': 'shakti-vb45817f4',
        'clientVersion': '6.0011.474.011',
        'params': {
            'type': 'standard',
            'viewableId': [viewable_id],
            'flavor': 'PRE_FETCH',
            'drmType': 'widevine',
            'drmVersion': 25,
            'usePsshBox': True,
            'isBranching': False,
            'useHttpsStreams': True,
            'imageSubtitleHeight': 1080,
            'uiVersion': 'shakti-vb45817f4',
            'clientVersion': '6.0011.474.011',
            'supportsPreReleasePin': True,
            'supportsWatermark': True,
            'showAllSubDubTracks': False,
            'titleSpecificData': {},
            'videoOutputInfo': [{
                'type': 'DigitalVideoOutputDescriptor',
                'outputType': 'unknown',
                'supportedHdcpVersions': [],
                'isHdcpEngaged': False
            }],
            'preferAssistiveAudio': False,
            'isNonMember': False
        }
    }
    manifest_request_data['params']['titleSpecificData'][viewable_id] = { 'unletterboxed': False }
    # Baseline profiles: H.264 video, AAC audio and trickplay images.
    profiles = ['playready-h264mpl30-dash', 'playready-h264mpl31-dash', 'playready-h264hpl30-dash', 'playready-h264hpl31-dash', 'heaac-2-dash', 'BIF240', 'BIF320']
    # subtitles
    # inputstream.adaptive >= 2.3.8 understands WebVTT side-loaded subs;
    # older versions only handle the simplesdh format.
    addon = xbmcaddon.Addon('inputstream.adaptive')
    if addon and self.nx_common.compare_versions(map(int, addon.getAddonInfo('version').split('.')), [2, 3, 8]):
        profiles.append('webvtt-lssdh-ios8')
    else:
        profiles.append('simplesdh')
    # add hevc profiles if setting is set
    if hevc is True:
        main = 'hevc-main-'
        main10 = 'hevc-main10-'
        prk = 'dash-cenc-prk'
        cenc = 'dash-cenc'
        ctl = 'dash-cenc-tl'
        profiles.append(main10 + 'L41-' + cenc)
        profiles.append(main10 + 'L50-' + cenc)
        profiles.append(main10 + 'L51-' + cenc)
        profiles.append(main + 'L30-' + cenc)
        profiles.append(main + 'L31-' + cenc)
        profiles.append(main + 'L40-' + cenc)
        profiles.append(main + 'L41-' + cenc)
        profiles.append(main + 'L50-' + cenc)
        profiles.append(main + 'L51-' + cenc)
        profiles.append(main10 + 'L30-' + cenc)
        profiles.append(main10 + 'L31-' + cenc)
        profiles.append(main10 + 'L40-' + cenc)
        profiles.append(main10 + 'L41-' + cenc)
        profiles.append(main10 + 'L50-' + cenc)
        profiles.append(main10 + 'L51-' + cenc)
        profiles.append(main10 + 'L30-' + prk)
        profiles.append(main10 + 'L31-' + prk)
        profiles.append(main10 + 'L40-' + prk)
        profiles.append(main10 + 'L41-' + prk)
        profiles.append(main + 'L30-L31-' + ctl)
        profiles.append(main + 'L31-L40-' + ctl)
        profiles.append(main + 'L40-L41-' + ctl)
        profiles.append(main + 'L50-L51-' + ctl)
        profiles.append(main10 + 'L30-L31-' + ctl)
        profiles.append(main10 + 'L31-L40-' + ctl)
        profiles.append(main10 + 'L40-L41-' + ctl)
        profiles.append(main10 + 'L50-L51-' + ctl)
        if hdr is True:
            hdr = 'hevc-hdr-main10-'
            profiles.append(hdr + 'L30-' + cenc)
            profiles.append(hdr + 'L31-' + cenc)
            profiles.append(hdr + 'L40-' + cenc)
            profiles.append(hdr + 'L41-' + cenc)
            profiles.append(hdr + 'L50-' + cenc)
            profiles.append(hdr + 'L51-' + cenc)
            profiles.append(hdr + 'L30-' + prk)
            profiles.append(hdr + 'L31-' + prk)
            profiles.append(hdr + 'L40-' + prk)
            profiles.append(hdr + 'L41-' + prk)
            profiles.append(hdr + 'L50-' + prk)
            profiles.append(hdr + 'L51-' + prk)
        if dolbyvision is True:
            dv = 'hevc-dv-main10-'
            dv5 = 'hevc-dv5-main10-'
            profiles.append(dv + 'L30-' + cenc)
            profiles.append(dv + 'L31-' + cenc)
            profiles.append(dv + 'L40-' + cenc)
            profiles.append(dv + 'L41-' + cenc)
            profiles.append(dv + 'L50-' + cenc)
            profiles.append(dv + 'L51-' + cenc)
            profiles.append(dv5 + 'L30-' + prk)
            profiles.append(dv5 + 'L31-' + prk)
            profiles.append(dv5 + 'L40-' + prk)
            profiles.append(dv5 + 'L41-' + prk)
            profiles.append(dv5 + 'L50-' + prk)
            profiles.append(dv5 + 'L51-' + prk)
    if hevc is False or vp9 is True:
        profiles.append('vp9-profile0-L30-dash-cenc')
        profiles.append('vp9-profile0-L31-dash-cenc')
    # Check if dolby sound is enabled and add to profles
    if dolby:
        profiles.append('ddplus-2.0-dash')
        profiles.append('ddplus-5.1-dash')
    manifest_request_data["params"]["profiles"] = profiles
    # Debug output of the full request (Python 2 print statement).
    print manifest_request_data
    request_data = self.__generate_msl_request_data(manifest_request_data)
    try:
        resp = self.session.post(self.endpoints['manifest'], request_data)
    except:
        resp = None
        exc = sys.exc_info()
        msg = '[MSL][POST] Error {} {}'
        self.nx_common.log(msg=msg.format(exc[0], exc[1]))
    if resp:
        try:
            # if the json() does not fail we have an error because
            # the manifest response is a chuncked json response
            resp.json()
            self.nx_common.log(
                msg='Error getting Manifest: ' + resp.text)
            return False
        except ValueError:
            # json() failed so parse the chunked response
            #self.nx_common.log(
            #    msg='Got chunked Manifest Response: ' + resp.text)
            resp = self.__parse_chunked_msl_response(resp.text)
            #self.nx_common.log(
            #    msg='Parsed chunked Response: ' + json.dumps(resp))
            data = self.__decrypt_payload_chunks(resp['payloads'])
            return self.__tranform_to_dash(data)
    return False
def get_license(self, challenge, sid):
    """
    Requests and returns a license for the given challenge and sid.

    :param challenge: The base64 encoded challenge
    :param sid: The sid paired to the challenge
    :return: Base64 representation of the license key, or False on failure
    """
    # Changes vs. previous revision: removed the leftover debug
    # ``print resp`` statement, dropped the dead (string-literal) copy of an
    # older request payload and its unused ``esn`` lookup, renamed the local
    # ``id`` so it no longer shadows the builtin, and narrowed the bare
    # ``except:`` so KeyboardInterrupt/SystemExit are not swallowed.
    # Timestamp in 0.1 ms resolution; doubles as request id and xid seed.
    request_id = int(time.time() * 10000)
    license_request_data = {
        'method': 'license',
        'licenseType': 'STANDARD',
        'clientVersion': '4.0004.899.011',
        'uiVersion': 'akira',
        'languages': ['de-DE'],
        'playbackContextId': self.last_playback_context,
        'drmContextIds': [self.last_drm_context],
        'challenges': [{
            'dataBase64': challenge,
            'sessionId': sid
        }],
        'clientTime': int(request_id / 10000),
        'xid': request_id + 1610
    }
    request_data = self.__generate_msl_request_data(license_request_data)
    try:
        resp = self.session.post(self.endpoints['license'], request_data)
    except Exception:
        resp = None
        exc = sys.exc_info()
        self.nx_common.log(
            msg='[MSL][POST] Error {} {}'.format(exc[0], exc[1]))
    if resp:
        try:
            # If the body parses as plain JSON the license request failed:
            # a successful response is a chunked (non-JSON) MSL message.
            resp.json()
            self.nx_common.log(msg='Error getting license: ' + resp.text)
            return False
        except ValueError:
            # json() failed, so we have a chunked MSL response to decrypt.
            resp = self.__parse_chunked_msl_response(resp.text)
            data = self.__decrypt_payload_chunks(resp['payloads'])
            if data['success'] is True:
                return data['result']['licenses'][0]['data']
            else:
                self.nx_common.log(
                    msg='Error getting license: ' + json.dumps(data))
                return False
    return False
def __decrypt_payload_chunks(self, payloadchunks):
    """
    Decrypts and reassembles a list of MSL payload chunks.

    :param payloadchunks: list of JSON strings, one per payload chunk
    :return: decoded payload data (dict parsed from the reassembled JSON)
    """
    decrypted_payload = ''
    for chunk in payloadchunks:
        payloadchunk = json.JSONDecoder().decode(chunk)
        payload = payloadchunk.get('payload')
        decoded_payload = base64.standard_b64decode(payload)
        encryption_envelope = json.JSONDecoder().decode(decoded_payload)
        # Decrypt the text using the envelope's IV and ciphertext
        plaintext = self.crypto.decrypt(base64.standard_b64decode(encryption_envelope['iv']),
                                        base64.standard_b64decode(encryption_envelope.get('ciphertext')))
        # The plaintext itself is another JSON envelope around the data
        plaintext = json.JSONDecoder().decode(plaintext)
        data = plaintext.get('data')
        # Uncompress data if it was gzip-compressed, otherwise just decode it
        if plaintext.get('compressionalgo') == 'GZIP':
            decoded_data = base64.standard_b64decode(data)
            # 16 + MAX_WBITS tells zlib to expect a gzip header
            data = zlib.decompress(decoded_data, 16 + zlib.MAX_WBITS)
        else:
            data = base64.standard_b64decode(data)
        # Chunks concatenate into one JSON document
        decrypted_payload += data
    # The reassembled document is a list; element [1] carries the payload
    decrypted_payload = json.JSONDecoder().decode(decrypted_payload)[1]['payload']
    if 'json' in decrypted_payload:
        return decrypted_payload['json']['result']
    else:
        # Fallback shape: result is base64 inside a 'data' field
        decrypted_payload = base64.standard_b64decode(decrypted_payload['data'])
        return json.JSONDecoder().decode(decrypted_payload)
def __tranform_to_dash(self, manifest):
    """
    Converts a Netflix manifest dict into a DASH MPD XML string.

    Also records the license URL / playback / DRM contexts needed by the
    subsequent license request, and persists both the raw manifest and the
    generated MPD to disk for debugging.

    :param manifest: decrypted manifest dict from the manifest endpoint
    :return: the MPD document as an XML string
    """
    # NOTE(review): indentation was reconstructed; the widevine
    # ContentProtection element is emitted unconditionally (outside the
    # ``if keyid`` guard) so the later ``if pssh`` branch always has a
    # ``protection`` element to attach to — confirm against upstream.
    self.nx_common.save_file(
        data_path=self.nx_common.data_path,
        filename='manifest.json',
        content=json.dumps(manifest))
    # Remember the context the license request will need later
    self.last_license_url = manifest['links']['ldl']['href']
    self.last_playback_context = manifest['playbackContextId']
    self.last_drm_context = manifest['drmContextId']
    seconds = manifest['duration'] / 1000
    # Heuristic size of the init segment used for audio index ranges below
    init_length = seconds / 2 * 12 + 20 * 1000
    duration = "PT" + str(seconds) + ".00S"
    root = ET.Element('MPD')
    root.attrib['xmlns'] = 'urn:mpeg:dash:schema:mpd:2011'
    root.attrib['xmlns:cenc'] = 'urn:mpeg:cenc:2013'
    root.attrib['mediaPresentationDuration'] = duration
    period = ET.SubElement(root, 'Period', start='PT0S', duration=duration)
    # One Adaption Set for Video
    for video_track in manifest['video_tracks']:
        video_adaption_set = ET.SubElement(
            parent=period,
            tag='AdaptationSet',
            mimeType='video/mp4',
            contentType="video")
        # Content Protection
        keyid = None
        pssh = None
        if 'drmHeader' in video_track:
            keyid = video_track['drmHeader']['keyId']
            pssh = video_track['drmHeader']['bytes']
        if keyid:
            # Generic CENC signalling with the default KID
            protection = ET.SubElement(
                parent=video_adaption_set,
                tag='ContentProtection',
                value='cenc',
                schemeIdUri='urn:mpeg:dash:mp4protection:2011')
            protection.set('cenc:default_KID', str(uuid.UUID(bytes=base64.standard_b64decode(keyid))))
        # Widevine-specific ContentProtection (UUID is the Widevine system id)
        protection = ET.SubElement(
            parent=video_adaption_set,
            tag='ContentProtection',
            schemeIdUri='urn:uuid:EDEF8BA9-79D6-4ACE-A3C8-27DCD51D21ED')
        ET.SubElement(
            parent=protection,
            tag='widevine:license',
            robustness_level='HW_SECURE_CODECS_REQUIRED')
        if pssh:
            ET.SubElement(protection, 'cenc:pssh').text = pssh
        for stream in video_track['streams']:
            # Map Netflix content profiles onto DASH codec strings
            codec = 'h264'
            if 'hevc' in stream['content_profile']:
                codec = 'hevc'
            elif stream['content_profile'] == 'vp9-profile0-L30-dash-cenc':
                codec = 'vp9.0.30'
            elif stream['content_profile'] == 'vp9-profile0-L31-dash-cenc':
                codec = 'vp9.0.31'
            hdcp_versions = '0.0'
            #for hdcp in stream['hdcpVersions']:
            #    if hdcp != 'none':
            #        hdcp_versions = hdcp if hdcp != 'any' else '1.0'
            rep = ET.SubElement(
                parent=video_adaption_set,
                tag='Representation',
                width=str(stream['res_w']),
                height=str(stream['res_h']),
                bandwidth=str(stream['bitrate']*1024),
                frameRate='%d/%d' % (stream['framerate_value'], stream['framerate_scale']),
                hdcp=hdcp_versions,
                nflxContentProfile=str(stream['content_profile']),
                codecs=codec,
                mimeType='video/mp4')
            # BaseURL
            base_url = self.__get_base_url(stream['urls'])
            ET.SubElement(rep, 'BaseURL').text = base_url
            # Init and Segment block: prefer the explicit offset, else
            # derive the init size from the sidx box position
            if 'startByteOffset' in stream:
                initSize = stream['startByteOffset']
            else:
                sidx = stream['sidx']
                initSize = sidx['offset'] + sidx['size']
            segment_base = ET.SubElement(
                parent=rep,
                tag='SegmentBase',
                indexRange='0-' + str(initSize),
                indexRangeExact='true')
    # Multiple Adaption Set for audio
    languageMap = {}
    channelCount = {'1.0':'1', '2.0':'2', '5.1':'6', '7.1':'8'}
    for audio_track in manifest['audio_tracks']:
        impaired = 'true' if audio_track['trackType'] == 'ASSISTIVE' else 'false'
        original = 'true' if audio_track['isNative'] else 'false'
        # First track seen per language becomes the default one
        default = 'false' if audio_track['language'] in languageMap else 'true'
        languageMap[audio_track['language']] = True
        audio_adaption_set = ET.SubElement(
            parent=period,
            tag='AdaptationSet',
            lang=audio_track['language'],
            contentType='audio',
            mimeType='audio/mp4',
            impaired=impaired,
            original=original,
            default=default)
        for stream in audio_track['streams']:
            codec = 'aac'
            #self.nx_common.log(msg=stream)
            is_dplus2 = stream['content_profile'] == 'ddplus-2.0-dash'
            is_dplus5 = stream['content_profile'] == 'ddplus-5.1-dash'
            if is_dplus2 or is_dplus5:
                codec = 'ec-3'
            #self.nx_common.log(msg='codec is: ' + codec)
            rep = ET.SubElement(
                parent=audio_adaption_set,
                tag='Representation',
                codecs=codec,
                bandwidth=str(stream['bitrate']*1024),
                mimeType='audio/mp4')
            # AudioChannel Config
            ET.SubElement(
                parent=rep,
                tag='AudioChannelConfiguration',
                schemeIdUri='urn:mpeg:dash:23003:3:audio_channel_configuration:2011',
                value=channelCount[stream['channels']])
            # BaseURL
            base_url = self.__get_base_url(stream['urls'])
            ET.SubElement(rep, 'BaseURL').text = base_url
            # Index range (uses the estimated init_length from above)
            segment_base = ET.SubElement(
                parent=rep,
                tag='SegmentBase',
                indexRange='0-' + str(init_length),
                indexRangeExact='true')
    # Multiple Adaption Sets for subtiles
    for text_track in manifest.get('timedtexttracks'):
        if text_track['isNoneTrack']:
            continue
        # Only one subtitle representation per adaptationset
        # NOTE(review): ``.keys()[0]`` / ``.values()[0]`` rely on Python 2
        # list-returning dict views; would need list() under Python 3.
        downloadable = text_track['ttDownloadables']
        content_profile = downloadable.keys()[0]
        subtiles_adaption_set = ET.SubElement(
            parent=period,
            tag='AdaptationSet',
            lang=text_track.get('language'),
            codecs='wvtt' if content_profile == 'webvtt-lssdh-ios8' else 'stpp',
            contentType='text',
            mimeType='text/vtt' if content_profile == 'webvtt-lssdh-ios8' else 'application/ttml+xml')
        role = ET.SubElement(
            parent=subtiles_adaption_set,
            tag = 'Role',
            schemeIdUri = 'urn:mpeg:dash:role:2011',
            value = 'forced' if text_track.get('isForcedNarrative') else 'main')
        rep = ET.SubElement(
            parent=subtiles_adaption_set,
            tag='Representation',
            nflxProfile=content_profile)
        base_url = downloadable[content_profile]['downloadUrls'].values()[0]
        ET.SubElement(rep, 'BaseURL').text = base_url
    xml = ET.tostring(root, encoding='utf-8', method='xml')
    xml = xml.replace('\n', '').replace('\r', '')
    self.nx_common.save_file(
        data_path=self.nx_common.data_path,
        filename='manifest.mpd',
        content=xml)
    return xml
def __get_base_url(self, urls):
    """Return the 'url' value of the first entry in *urls*, or None if empty."""
    return next((entry['url'] for entry in urls), None)
def __parse_chunked_msl_response(self, message):
    """
    Splits a raw chunked MSL response into its header and payload chunks.

    :param message: raw response text (header JSON followed by payload
        chunk objects, concatenated without separators)
    :return: dict with 'header' (str) and 'payloads' (list of str)
    """
    # The header object ends at the first '}}'; everything after it is
    # a run of payload-chunk objects.
    header = message.split('}}')[0] + '}}'
    # Split the remainder on each chunk's trailing signature field; the
    # re-appended '}' restores the closing brace eaten by the split, and
    # the final (empty) element after the last chunk is dropped.
    payloads = re.split(',\"signature\":\"[0-9A-Za-z=/+]+\"}', message.split('}}')[1])
    payloads = [x + '}' for x in payloads][:-1]
    return {
        'header': header,
        'payloads': payloads
    }
def __generate_msl_request_data(self, data):
    """
    Wraps a request dict into a complete MSL message: an encrypted,
    signed header followed by one encrypted, signed payload chunk.

    :param data: request dict to serialize as the message payload
    :return: concatenated JSON of header and first payload chunk
    """
    #self.__load_msl_data()
    header_encryption_envelope = self.__encrypt(
        plaintext=self.__generate_msl_header())
    headerdata = base64.standard_b64encode(header_encryption_envelope)
    header = {
        'headerdata': headerdata,
        'signature': self.__sign(header_encryption_envelope),
        'mastertoken': self.mastertoken,
    }
    # Serialize the given data and embed it, with quotes escaped, into
    # the fixed request-envelope template expected by the endpoint.
    raw_marshalled_data = json.dumps(data)
    marshalled_data = raw_marshalled_data.replace('"', '\\"')
    serialized_data = '[{},{"headers":{},"path":"/cbp/cadmium-13"'
    serialized_data += ',"payload":{"data":"'
    serialized_data += marshalled_data
    serialized_data += '"},"query":""}]\n'
    compressed_data = self.__compress_data(serialized_data)
    # Create FIRST (and only) payload chunk; endofmsg marks it final.
    first_payload = {
        'messageid': self.current_message_id,
        'data': compressed_data,
        'compressionalgo': 'GZIP',
        'sequencenumber': 1,
        'endofmsg': True
    }
    first_payload_encryption_envelope = self.__encrypt(
        plaintext=json.dumps(first_payload))
    payload = base64.standard_b64encode(first_payload_encryption_envelope)
    first_payload_chunk = {
        'payload': payload,
        'signature': self.__sign(first_payload_encryption_envelope),
    }
    # MSL messages are the header JSON directly followed by chunk JSON.
    request_data = json.dumps(header) + json.dumps(first_payload_chunk)
    return request_data
def __compress_data(self, data):
    """Gzip-compress *data* and return the result base64 encoded."""
    buffer_ = StringIO()
    gz_file = gzip.GzipFile(fileobj=buffer_, mode="w")
    try:
        gz_file.write(data)
    finally:
        gz_file.close()
    return base64.standard_b64encode(buffer_.getvalue())
def __generate_msl_header(
        self,
        is_handshake=False,
        is_key_request=False,
        compressionalgo='GZIP',
        encrypt=True):
    """
    Function that generates a MSL header dict

    :param is_handshake: mark the message as a handshake message
    :param is_key_request: include key request data instead of user auth
    :param compressionalgo: compression algo to advertise ('' for none)
    :param encrypt: NOTE(review): currently unused in this body — confirm
        whether callers rely on it having any effect
    :return: the JSON String of the header
    """
    self.current_message_id = self.rndm.randint(0, pow(2, 52))
    esn = self.nx_common.get_esn()
    # Add compression algo if not empty
    compression_algos = [compressionalgo] if compressionalgo != '' else []
    header_data = {
        'sender': esn,
        'handshake': is_handshake,
        'nonreplayable': False,
        'capabilities': {
            'languages': ['en-US'],
            'compressionalgos': compression_algos
        },
        'recipient': 'Netflix',
        'renewable': True,
        'messageid': self.current_message_id,
        'timestamp': 1467733923
    }
    # If this is a keyrequest act different than other requests:
    # attach key request data instead of user authentication data.
    if is_key_request:
        header_data['keyrequestdata'] = self.crypto.get_key_request()
    else:
        if 'usertoken' in self.tokens:
            # A user token already authenticates us; nothing to add.
            pass
        else:
            account = self.nx_common.get_credentials()
            # Auth via email and password
            header_data['userauthdata'] = {
                'scheme': 'EMAIL_PASSWORD',
                'authdata': {
                    'email': account['email'],
                    'password': account['password']
                }
            }
    return json.dumps(header_data)
def __encrypt(self, plaintext):
    """Encrypt *plaintext* and return the encryption envelope as a JSON string."""
    return json.dumps(self.crypto.encrypt(plaintext, self.nx_common.get_esn(), self.sequence_number))
def __sign(self, text):
    """
    Calculates the HMAC signature for the given
    text with the current sign key and SHA256

    :param text: text to sign
    :return: Base64 encoded signature
    """
    return base64.standard_b64encode(self.crypto.sign(text))
def perform_key_handshake(self):
    # Public wrapper around the private key-handshake implementation.
    self.__perform_key_handshake()
def __perform_key_handshake(self):
    """
    Performs the MSL key exchange with the server: sends an unencrypted
    handshake message carrying key request data, then stores the returned
    master token and session keys.

    :return: False on failure (missing ESN or server error), None otherwise
    """
    esn = self.nx_common.get_esn()
    self.nx_common.log(msg='perform_key_handshake: esn:' + esn)
    if not esn:
        return False
    # Handshake header: key request, no compression, not encrypted.
    header = self.__generate_msl_header(
        is_key_request=True,
        is_handshake=True,
        compressionalgo='',
        encrypt=False)
    request = {
        'entityauthdata': {
            'scheme': 'NONE',
            'authdata': {
                'identity': esn
            }
        },
        'headerdata': base64.standard_b64encode(header),
        'signature': '',
    }
    #self.nx_common.log(msg='Key Handshake Request:')
    #self.nx_common.log(msg=json.dumps(request))
    try:
        resp = self.session.post(
            url=self.endpoints['manifest'],
            data=json.dumps(request, sort_keys=True))
    except:
        resp = None
        exc = sys.exc_info()
        self.nx_common.log(
            msg='[MSL][POST] Error {} {}'.format(exc[0], exc[1]))
    if resp and resp.status_code == 200:
        resp = resp.json()
        if 'errordata' in resp:
            self.nx_common.log(msg='Key Exchange failed')
            self.nx_common.log(
                msg=base64.standard_b64decode(resp['errordata']))
            return False
        # Extract the master token and key response from the header data,
        # then persist everything for later sessions.
        base_head = base64.standard_b64decode(resp['headerdata'])
        headerdata = json.JSONDecoder().decode(base_head)
        self.__set_master_token(headerdata['keyresponsedata']['mastertoken'])
        self.crypto.parse_key_response(headerdata)
        self.__save_msl_data()
    else:
        self.nx_common.log(msg='Key Exchange failed')
        self.nx_common.log(msg=resp.text)
def init_msl_data(self):
    # Reuse previously persisted MSL tokens instead of a fresh handshake.
    self.nx_common.log(msg='MSL Data exists. Use old Tokens.')
    self.__load_msl_data()
    self.handshake_performed = True
def __load_msl_data(self):
    """
    Loads the persisted MSL tokens/keys from msl_data.json and installs
    the master token; triggers a fresh key handshake when the stored
    data is unusable or the token is (nearly) expired.
    """
    raw_msl_data = self.nx_common.load_file(
        data_path=self.nx_common.data_path,
        filename='msl_data.json')
    msl_data = json.JSONDecoder().decode(raw_msl_data)
    # Check expire date of the token
    raw_token = msl_data['tokens']['mastertoken']['tokendata']
    base_token = base64.standard_b64decode(raw_token)
    master_token = json.JSONDecoder().decode(base_token)
    exp = int(master_token['expiration'])
    valid_until = datetime.utcfromtimestamp(exp)
    # NOTE(review): valid_until is UTC but datetime.now() is local time,
    # so the computed difference is off by the local UTC offset — confirm
    # whether this is intended.
    present = datetime.now()
    difference = valid_until - present
    self.nx_common.log(msg='Expiration time: Key:' + str(valid_until) + ', Now:' + str(present) + ', Diff:' + str(difference.total_seconds()))
    # Convert to hours; renew if the token expires in less than 10 hours
    # or the stored crypto data could not be restored.
    difference = difference.total_seconds() / 60 / 60
    if self.crypto.fromDict(msl_data) or difference < 10:
        self.__perform_key_handshake()
        return
    self.__set_master_token(msl_data['tokens']['mastertoken'])
def save_msl_data(self):
    # Public wrapper around the private save implementation.
    self.__save_msl_data()
def __save_msl_data(self):
    """
    Saves the keys and tokens in json file

    :return:
    """
    data = {
        'tokens': {
            'mastertoken': self.mastertoken
        }
    }
    # Merge in the crypto state (keys) alongside the tokens.
    data.update(self.crypto.toDict())
    serialized_data = json.JSONEncoder().encode(data)
    self.nx_common.save_file(
        data_path=self.nx_common.data_path,
        filename='msl_data.json',
        content=serialized_data)
def __set_master_token(self, master_token):
    """
    Installs *master_token* and extracts its sequence number, which is
    used when building encryption envelopes.

    :param master_token: master token dict with base64 'tokendata'
    """
    self.mastertoken = master_token
    raw_token = master_token['tokendata']
    base_token = base64.standard_b64decode(raw_token)
    decoded_token = json.JSONDecoder().decode(base_token)
    self.sequence_number = decoded_token.get('sequencenumber')
| gpl-2.0 |
donnerluetjen/ardupilot | mk/PX4/Tools/genmsg/src/genmsg/srvs.py | 216 | 3017 | # Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
ROS Service Description Language Spec
Implements http://ros.org/wiki/srv
"""
import os
import sys
from . names import is_legal_resource_name, is_legal_resource_base_name, package_resource_name, resource_name
class SrvSpec(object):
    """In-memory description of a ROS .srv service type.

    Holds the request and response message specs plus naming metadata.
    When *package* or *short_name* are not supplied they are derived from
    *full_name* via package_resource_name().
    """

    def __init__(self, request, response, text, full_name = '', short_name = '', package = ''):
        fallback_package, fallback_short = package_resource_name(full_name)
        self.request = request
        self.response = response
        self.text = text
        self.full_name = full_name
        self.short_name = short_name or fallback_short
        self.package = package or fallback_package

    def __eq__(self, other):
        if not other or not isinstance(other, SrvSpec):
            return False
        mine = (self.request, self.response, self.text,
                self.full_name, self.short_name, self.package)
        theirs = (other.request, other.response, other.text,
                  other.full_name, other.short_name, other.package)
        return mine == theirs

    def __ne__(self, other):
        # __eq__ already returns False for None / foreign types, so
        # negating it reproduces the "not comparable means unequal" rule.
        return not self.__eq__(other)

    def __repr__(self):
        return "SrvSpec[%s, %s]"%(repr(self.request), repr(self.response))
| gpl-3.0 |
PennartLoettring/Poettrix | rootfs/usr/lib/python3.4/test/test_json/test_encode_basestring_ascii.py | 101 | 2146 | from collections import OrderedDict
from test.test_json import PyTest, CTest
# (raw input string, expected ASCII-escaped JSON string literal) pairs for
# encode_basestring_ascii; includes control chars, astral-plane surrogate
# pairs, and deliberate duplicates.
CASES = [
    ('/\\"\ucafe\ubabe\uab98\ufcde\ubcda\uef4a\x08\x0c\n\r\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?', '"/\\\\\\"\\ucafe\\ubabe\\uab98\\ufcde\\ubcda\\uef4a\\b\\f\\n\\r\\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?"'),
    ('\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'),
    ('controls', '"controls"'),
    ('\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'),
    ('{"object with 1 member":["array with 1 element"]}', '"{\\"object with 1 member\\":[\\"array with 1 element\\"]}"'),
    (' s p a c e d ', '" s p a c e d "'),
    ('\U0001d120', '"\\ud834\\udd20"'),
    ('\u03b1\u03a9', '"\\u03b1\\u03a9"'),
    ('\u03b1\u03a9', '"\\u03b1\\u03a9"'),
    ('\u03b1\u03a9', '"\\u03b1\\u03a9"'),
    ('\u03b1\u03a9', '"\\u03b1\\u03a9"'),
    ("`1~!@#$%^&*()_+-={':[,]}|;.</>?", '"`1~!@#$%^&*()_+-={\':[,]}|;.</>?"'),
    ('\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'),
    ('\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'),
]
class TestEncodeBasestringAscii:
def test_encode_basestring_ascii(self):
fname = self.json.encoder.encode_basestring_ascii.__name__
for input_string, expect in CASES:
result = self.json.encoder.encode_basestring_ascii(input_string)
self.assertEqual(result, expect,
'{0!r} != {1!r} for {2}({3!r})'.format(
result, expect, fname, input_string))
def test_ordered_dict(self):
# See issue 6105
items = [('one', 1), ('two', 2), ('three', 3), ('four', 4), ('five', 5)]
s = self.dumps(OrderedDict(items))
self.assertEqual(s, '{"one": 1, "two": 2, "three": 3, "four": 4, "five": 5}')
def test_sorted_dict(self):
items = [('one', 1), ('two', 2), ('three', 3), ('four', 4), ('five', 5)]
s = self.dumps(dict(items), sort_keys=True)
self.assertEqual(s, '{"five": 5, "four": 4, "one": 1, "three": 3, "two": 2}')
# Bind the shared checks to the pure-Python and C encoder implementations.
class TestPyEncodeBasestringAscii(TestEncodeBasestringAscii, PyTest): pass
class TestCEncodeBasestringAscii(TestEncodeBasestringAscii, CTest): pass
| gpl-2.0 |
misterhat/youtube-dl | youtube_dl/extractor/exfm.py | 15 | 2060 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
class ExfmIE(InfoExtractor):
    """Extractor for ex.fm song pages; delegates Soundcloud-hosted songs
    to the Soundcloud extractor."""
    IE_NAME = 'exfm'
    IE_DESC = 'ex.fm'
    _VALID_URL = r'https?://(?:www\.)?ex\.fm/song/(?P<id>[^/]+)'
    # Pattern of Soundcloud stream URLs that should be re-dispatched.
    _SOUNDCLOUD_URL = r'http://(?:www\.)?api\.soundcloud\.com/tracks/([^/]+)/stream'
    _TESTS = [
        {
            'url': 'http://ex.fm/song/eh359',
            'md5': 'e45513df5631e6d760970b14cc0c11e7',
            'info_dict': {
                'id': '44216187',
                'ext': 'mp3',
                'title': 'Test House "Love Is Not Enough" (Extended Mix) DeadJournalist Exclusive',
                'uploader': 'deadjournalist',
                'upload_date': '20120424',
                'description': 'Test House \"Love Is Not Enough\" (Extended Mix) DeadJournalist Exclusive',
            },
            'note': 'Soundcloud song',
            'skip': 'The site is down too often',
        },
        {
            'url': 'http://ex.fm/song/wddt8',
            'md5': '966bd70741ac5b8570d8e45bfaed3643',
            'info_dict': {
                'id': 'wddt8',
                'ext': 'mp3',
                'title': 'Safe and Sound',
                'uploader': 'Capital Cities',
            },
            'skip': 'The site is down too often',
        },
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        song_id = mobj.group('id')
        # ex.fm exposes song metadata via its JSON API.
        info_url = 'http://ex.fm/api/v3/song/%s' % song_id
        info = self._download_json(info_url, song_id)['song']
        song_url = info['url']
        # Songs hosted on Soundcloud are handed off to that extractor
        # (dropping the '/stream' suffix to get the canonical track URL).
        if re.match(self._SOUNDCLOUD_URL, song_url) is not None:
            self.to_screen('Soundcloud song detected')
            return self.url_result(song_url.replace('/stream', ''), 'Soundcloud')
        return {
            'id': song_id,
            'url': song_url,
            'ext': 'mp3',
            'title': info['title'],
            'thumbnail': info['image']['large'],
            'uploader': info['artist'],
            'view_count': info['loved_count'],
        }
| unlicense |
SickGear/SickGear | lib/apprise/plugins/NotifyGrowl/gntp/config.py | 2 | 2162 | # Copyright: 2013 Paul Traylor
# These sources are released under the terms of the MIT license: see LICENSE
"""
The gntp.config module is provided as an extended GrowlNotifier object that takes
advantage of the ConfigParser module to allow us to setup some default values
(such as hostname, password, and port) in a more global way to be shared among
programs using gntp
"""
import logging
import os
from . import notifier
from . import shim
__all__ = [
'mini',
'GrowlNotifier'
]
logger = logging.getLogger(__name__)
class GrowlNotifier(notifier.GrowlNotifier):
    """
    ConfigParser enhanced GrowlNotifier object

    For right now, we are only interested in letting users override certain
    values from ~/.gntp
    ::

        [gntp]
        hostname = ?
        password = ?
        port = ?
    """
    def __init__(self, *args, **kwargs):
        # Caller-supplied kwargs become the parser defaults, so values in
        # ~/.gntp (when present) take precedence over them.
        config = shim.RawConfigParser({
            'hostname': kwargs.get('hostname', 'localhost'),
            'password': kwargs.get('password'),
            'port': kwargs.get('port', 23053),
        })
        config.read([os.path.expanduser('~/.gntp')])

        # If the file does not exist, then there will be no gntp section defined
        # and the config.get() lines below will get confused. Since we are not
        # saving the config, it should be safe to just add it here so the
        # code below doesn't complain
        if not config.has_section('gntp'):
            logger.info('Error reading ~/.gntp config file')
            config.add_section('gntp')

        kwargs['password'] = config.get('gntp', 'password')
        kwargs['hostname'] = config.get('gntp', 'hostname')
        kwargs['port'] = config.getint('gntp', 'port')

        super(GrowlNotifier, self).__init__(*args, **kwargs)
def mini(description, **kwargs):
    """Single notification function

    Simple notification function in one line. Has only one required parameter
    and attempts to use reasonable defaults for everything else

    :param string description: Notification message
    """
    # Route through the config-aware GrowlNotifier so ~/.gntp settings apply.
    kwargs['notifierFactory'] = GrowlNotifier
    notifier.mini(description, **kwargs)
if __name__ == '__main__':
    # If we're running this module directly we're likely running it as a test
    # so extra debugging is useful
    logging.basicConfig(level=logging.INFO)
    mini('Testing mini notification')
| gpl-3.0 |
hlin117/statsmodels | statsmodels/tools/tests/test_tools.py | 26 | 18818 | """
Test functions for models.tools
"""
from statsmodels.compat.python import lrange, range
import numpy as np
from numpy.random import standard_normal
from numpy.testing import (assert_equal, assert_array_equal,
assert_almost_equal, assert_string_equal, TestCase)
from nose.tools import (assert_true, assert_false, assert_raises)
from statsmodels.datasets import longley
from statsmodels.tools import tools
from statsmodels.tools.tools import pinv_extended
from statsmodels.compat.numpy import np_matrix_rank
class TestTools(TestCase):
    """Unit tests for statsmodels.tools.tools helpers
    (add_constant, recipr, rank, pinv_extended, fullrank)."""

    def test_add_constant_list(self):
        x = lrange(1,5)
        x = tools.add_constant(x)
        y = np.asarray([[1,1,1,1],[1,2,3,4.]]).T
        assert_equal(x, y)

    def test_add_constant_1d(self):
        x = np.arange(1,5)
        x = tools.add_constant(x)
        y = np.asarray([[1,1,1,1],[1,2,3,4.]]).T
        assert_equal(x, y)

    def test_add_constant_has_constant1d(self):
        # 'skip' leaves an already-constant column untouched; 'raise'
        # errors; 'add' appends a constant regardless.
        x = np.ones(5)
        x = tools.add_constant(x, has_constant='skip')
        assert_equal(x, np.ones(5))

        assert_raises(ValueError, tools.add_constant, x, has_constant='raise')

        assert_equal(tools.add_constant(x, has_constant='add'),
                     np.ones((5, 2)))

    def test_add_constant_has_constant2d(self):
        x = np.asarray([[1,1,1,1],[1,2,3,4.]]).T
        y = tools.add_constant(x, has_constant='skip')
        assert_equal(x, y)

        assert_raises(ValueError, tools.add_constant, x, has_constant='raise')

        assert_equal(tools.add_constant(x, has_constant='add'),
                     np.column_stack((np.ones(4), x)))

    def test_recipr(self):
        # recipr: elementwise reciprocal with non-positive entries mapped to 0
        X = np.array([[2,1],[-1,0]])
        Y = tools.recipr(X)
        assert_almost_equal(Y, np.array([[0.5,1],[0,0]]))

    def test_recipr0(self):
        # recipr0: elementwise reciprocal with only zeros mapped to 0
        X = np.array([[2,1],[-4,0]])
        Y = tools.recipr0(X)
        assert_almost_equal(Y, np.array([[0.5,1],[-0.25,0]]))

    def test_rank(self):
        # tools.rank is deprecated, hence the warning suppression
        import warnings
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            X = standard_normal((40,10))
            self.assertEquals(tools.rank(X), np_matrix_rank(X))

            X[:,0] = X[:,1] + X[:,2]
            self.assertEquals(tools.rank(X), np_matrix_rank(X))

    def test_extendedpinv(self):
        X = standard_normal((40, 10))
        np_inv = np.linalg.pinv(X)
        np_sing_vals = np.linalg.svd(X, 0, 0)
        sm_inv, sing_vals = pinv_extended(X)
        assert_almost_equal(np_inv, sm_inv)
        assert_almost_equal(np_sing_vals, sing_vals)

    def test_extendedpinv_singular(self):
        X = standard_normal((40, 10))
        X[:, 5] = X[:, 1] + X[:, 3]
        np_inv = np.linalg.pinv(X)
        np_sing_vals = np.linalg.svd(X, 0, 0)
        sm_inv, sing_vals = pinv_extended(X)
        assert_almost_equal(np_inv, sm_inv)
        assert_almost_equal(np_sing_vals, sing_vals)

    def test_fullrank(self):
        import warnings
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            X = standard_normal((40,10))
            X[:,0] = X[:,1] + X[:,2]

            # One dependent column: fullrank drops it
            Y = tools.fullrank(X)
            self.assertEquals(Y.shape, (40,9))
            self.assertEquals(tools.rank(Y), 9)

            # Two dependent columns: two are dropped
            X[:,5] = X[:,3] + X[:,4]
            Y = tools.fullrank(X)
            self.assertEquals(Y.shape, (40,8))
            warnings.simplefilter("ignore")
            self.assertEquals(tools.rank(Y), 8)
def test_estimable():
    """Checks for tools.isestimable over full-rank, rank-deficient,
    array-like and wrongly-shaped contrast/design combinations."""
    rng = np.random.RandomState(20120713)
    N, P = (40, 10)
    X = rng.normal(size=(N, P))
    C = rng.normal(size=(1, P))
    isestimable = tools.isestimable
    # Full-rank design: every contrast is estimable
    assert_true(isestimable(C, X))
    assert_true(isestimable(np.eye(P), X))
    for row in np.eye(P):
        assert_true(isestimable(row, X))
    # Rank-1 design (all-ones): only the sum contrast is estimable
    X = np.ones((40, 2))
    assert_true(isestimable([1, 1], X))
    assert_false(isestimable([1, 0], X))
    assert_false(isestimable([0, 1], X))
    assert_false(isestimable(np.eye(2), X))
    # Duplicated columns: only contrasts symmetric across the halves work
    halfX = rng.normal(size=(N, 5))
    X = np.hstack([halfX, halfX])
    assert_false(isestimable(np.hstack([np.eye(5), np.zeros((5, 5))]), X))
    assert_false(isestimable(np.hstack([np.zeros((5, 5)), np.eye(5)]), X))
    assert_true(isestimable(np.hstack([np.eye(5), np.eye(5)]), X))
    # Test array-like for design
    XL = X.tolist()
    assert_true(isestimable(np.hstack([np.eye(5), np.eye(5)]), XL))
    # Test ValueError for incorrect number of columns
    X = rng.normal(size=(N, 5))
    for n in range(1, 4):
        assert_raises(ValueError, isestimable, np.ones((n,)), X)
    assert_raises(ValueError, isestimable, np.eye(4), X)
class TestCategoricalNumerical(object):
    """Tests tools.categorical with a numeric grouping variable across
    plain arrays, recarrays and structured arrays."""
    #TODO: use assert_raises to check that bad inputs are taken care of
    def __init__(self):
        #import string
        stringabc = 'abcdefghijklmnopqrstuvwxy'
        self.des = np.random.randn(25,2)
        # Grouping variable: values 1..5, five observations each
        self.instr = np.floor(np.arange(10,60, step=2)/10)
        # Expected dummy matrix: block-diagonal indicator columns
        x=np.zeros((25,5))
        x[:5,0]=1
        x[5:10,1]=1
        x[10:15,2]=1
        x[15:20,3]=1
        x[20:25,4]=1
        self.dummy = x
        # Same data as a structured array (and recarray view), including
        # a string-valued version of the grouping variable
        structdes = np.zeros((25,1),dtype=[('var1', 'f4'),('var2', 'f4'),
                                           ('instrument','f4'),('str_instr','a10')])
        structdes['var1'] = self.des[:,0][:,None]
        structdes['var2'] = self.des[:,1][:,None]
        structdes['instrument'] = self.instr[:,None]
        string_var = [stringabc[0:5], stringabc[5:10],
                      stringabc[10:15], stringabc[15:20],
                      stringabc[20:25]]
        string_var *= 5
        self.string_var = np.array(sorted(string_var))
        structdes['str_instr'] = self.string_var[:,None]
        self.structdes = structdes
        self.recdes = structdes.view(np.recarray)

    def test_array2d(self):
        des = np.column_stack((self.des, self.instr, self.des))
        des = tools.categorical(des, col=2)
        assert_array_equal(des[:,-5:], self.dummy)
        assert_equal(des.shape[1],10)

    def test_array1d(self):
        des = tools.categorical(self.instr)
        assert_array_equal(des[:,-5:], self.dummy)
        assert_equal(des.shape[1],6)

    def test_array2d_drop(self):
        # drop=True removes the original grouping column
        des = np.column_stack((self.des, self.instr, self.des))
        des = tools.categorical(des, col=2, drop=True)
        assert_array_equal(des[:,-5:], self.dummy)
        assert_equal(des.shape[1],9)

    def test_array1d_drop(self):
        des = tools.categorical(self.instr, drop=True)
        assert_array_equal(des, self.dummy)
        assert_equal(des.shape[1],5)

    def test_recarray2d(self):
        des = tools.categorical(self.recdes, col='instrument')
        # better way to do this?
        test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
        assert_array_equal(test_des, self.dummy)
        assert_equal(len(des.dtype.names), 9)

    def test_recarray2dint(self):
        des = tools.categorical(self.recdes, col=2)
        test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
        assert_array_equal(test_des, self.dummy)
        assert_equal(len(des.dtype.names), 9)

    def test_recarray1d(self):
        instr = self.structdes['instrument'].view(np.recarray)
        dum = tools.categorical(instr)
        test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
        assert_array_equal(test_dum, self.dummy)
        assert_equal(len(dum.dtype.names), 6)

    def test_recarray1d_drop(self):
        instr = self.structdes['instrument'].view(np.recarray)
        dum = tools.categorical(instr, drop=True)
        test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
        assert_array_equal(test_dum, self.dummy)
        assert_equal(len(dum.dtype.names), 5)

    def test_recarray2d_drop(self):
        des = tools.categorical(self.recdes, col='instrument', drop=True)
        test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
        assert_array_equal(test_des, self.dummy)
        assert_equal(len(des.dtype.names), 8)

    def test_structarray2d(self):
        des = tools.categorical(self.structdes, col='instrument')
        test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
        assert_array_equal(test_des, self.dummy)
        assert_equal(len(des.dtype.names), 9)

    def test_structarray2dint(self):
        des = tools.categorical(self.structdes, col=2)
        test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
        assert_array_equal(test_des, self.dummy)
        assert_equal(len(des.dtype.names), 9)

    def test_structarray1d(self):
        instr = self.structdes['instrument'].view(dtype=[('var1', 'f4')])
        dum = tools.categorical(instr)
        test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
        assert_array_equal(test_dum, self.dummy)
        assert_equal(len(dum.dtype.names), 6)

    def test_structarray2d_drop(self):
        des = tools.categorical(self.structdes, col='instrument', drop=True)
        test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
        assert_array_equal(test_des, self.dummy)
        assert_equal(len(des.dtype.names), 8)

    def test_structarray1d_drop(self):
        instr = self.structdes['instrument'].view(dtype=[('var1', 'f4')])
        dum = tools.categorical(instr, drop=True)
        test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
        assert_array_equal(test_dum, self.dummy)
        assert_equal(len(dum.dtype.names), 5)

    # def test_arraylike2d(self):
    #     des = tools.categorical(self.structdes.tolist(), col=2)
    #     test_des = des[:,-5:]
    #     assert_array_equal(test_des, self.dummy)
    #     assert_equal(des.shape[1], 9)

    # def test_arraylike1d(self):
    #     instr = self.structdes['instrument'].tolist()
    #     dum = tools.categorical(instr)
    #     test_dum = dum[:,-5:]
    #     assert_array_equal(test_dum, self.dummy)
    #     assert_equal(dum.shape[1], 6)

    # def test_arraylike2d_drop(self):
    #     des = tools.categorical(self.structdes.tolist(), col=2, drop=True)
    #     test_des = des[:,-5:]
    #     assert_array_equal(test__des, self.dummy)
    #     assert_equal(des.shape[1], 8)

    # def test_arraylike1d_drop(self):
    #     instr = self.structdes['instrument'].tolist()
    #     dum = tools.categorical(instr, drop=True)
    #     assert_array_equal(dum, self.dummy)
    #     assert_equal(dum.shape[1], 5)
class TestCategoricalString(TestCategoricalNumerical):
    """Repeat the categorical() tests with a string category column.

    Inherits the numeric-column tests and overrides the ones whose column
    lookup differs for string data.  NOTE(review): the fixtures used here
    (self.string_var, self.recdes, self.structdes, self.dummy) are built in
    a setup defined outside this chunk -- verify against the base class.
    """
    # comment out until we have type coercion
    # def test_array2d(self):
    #     des = np.column_stack((self.des, self.instr, self.des))
    #     des = tools.categorical(des, col=2)
    #     assert_array_equal(des[:,-5:], self.dummy)
    #     assert_equal(des.shape[1],10)

    # def test_array1d(self):
    #     des = tools.categorical(self.instr)
    #     assert_array_equal(des[:,-5:], self.dummy)
    #     assert_equal(des.shape[1],6)

    # def test_array2d_drop(self):
    #     des = np.column_stack((self.des, self.instr, self.des))
    #     des = tools.categorical(des, col=2, drop=True)
    #     assert_array_equal(des[:,-5:], self.dummy)
    #     assert_equal(des.shape[1],9)

    def test_array1d_drop(self):
        # drop=True on a plain string array: only the dummies are returned.
        des = tools.categorical(self.string_var, drop=True)
        assert_array_equal(des, self.dummy)
        assert_equal(des.shape[1],5)

    def test_recarray2d(self):
        des = tools.categorical(self.recdes, col='str_instr')
        # better way to do this?
        test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
        assert_array_equal(test_des, self.dummy)
        assert_equal(len(des.dtype.names), 9)

    def test_recarray2dint(self):
        # same as above but the string column is addressed by index
        des = tools.categorical(self.recdes, col=3)
        test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
        assert_array_equal(test_des, self.dummy)
        assert_equal(len(des.dtype.names), 9)

    def test_recarray1d(self):
        instr = self.structdes['str_instr'].view(np.recarray)
        dum = tools.categorical(instr)
        test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
        assert_array_equal(test_dum, self.dummy)
        assert_equal(len(dum.dtype.names), 6)

    def test_recarray1d_drop(self):
        instr = self.structdes['str_instr'].view(np.recarray)
        dum = tools.categorical(instr, drop=True)
        test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
        assert_array_equal(test_dum, self.dummy)
        assert_equal(len(dum.dtype.names), 5)

    def test_recarray2d_drop(self):
        des = tools.categorical(self.recdes, col='str_instr', drop=True)
        test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
        assert_array_equal(test_des, self.dummy)
        assert_equal(len(des.dtype.names), 8)

    def test_structarray2d(self):
        des = tools.categorical(self.structdes, col='str_instr')
        test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
        assert_array_equal(test_des, self.dummy)
        assert_equal(len(des.dtype.names), 9)

    def test_structarray2dint(self):
        des = tools.categorical(self.structdes, col=3)
        test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
        assert_array_equal(test_des, self.dummy)
        assert_equal(len(des.dtype.names), 9)

    def test_structarray1d(self):
        # view as a single named (10-char string) column
        instr = self.structdes['str_instr'].view(dtype=[('var1', 'a10')])
        dum = tools.categorical(instr)
        test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names[-5:]]))
        assert_array_equal(test_dum, self.dummy)
        assert_equal(len(dum.dtype.names), 6)

    def test_structarray2d_drop(self):
        des = tools.categorical(self.structdes, col='str_instr', drop=True)
        test_des = np.column_stack(([des[_] for _ in des.dtype.names[-5:]]))
        assert_array_equal(test_des, self.dummy)
        assert_equal(len(des.dtype.names), 8)

    def test_structarray1d_drop(self):
        instr = self.structdes['str_instr'].view(dtype=[('var1', 'a10')])
        dum = tools.categorical(instr, drop=True)
        test_dum = np.column_stack(([dum[_] for _ in dum.dtype.names]))
        assert_array_equal(test_dum, self.dummy)
        assert_equal(len(dum.dtype.names), 5)

    # The array-like variants are disabled for string data (no type
    # coercion yet); keep the overrides so the inherited tests don't run.
    def test_arraylike2d(self):
        pass

    def test_arraylike1d(self):
        pass

    def test_arraylike2d_drop(self):
        pass

    def test_arraylike1d_drop(self):
        pass
def test_rec_issue302():
    """Regression test for #302: categorical() on a one-column recarray."""
    arr = np.rec.fromrecords([[10], [11]], names='group')
    expected = np.rec.array(
        [(10, 1.0, 0.0), (11, 0.0, 1.0)],
        dtype=[('group', int), ('group_10', float), ('group_11', float)])
    assert_array_equal(tools.categorical(arr), expected)
def test_issue302():
    """Regression test for #302: `col` given as a one-element list."""
    arr = np.rec.fromrecords([[10, 12], [11, 13]], names=['group', 'whatever'])
    expected = np.rec.array(
        [(10, 12, 1.0, 0.0), (11, 13, 0.0, 1.0)],
        dtype=[('group', int), ('whatever', int), ('group_10', float),
               ('group_11', float)])
    assert_array_equal(tools.categorical(arr, col=['group']), expected)
def test_pandas_const_series():
    """add_constant on a Series appends a zero-variance 'const' column."""
    gnp = longley.load_pandas().exog['GNP']
    result = tools.add_constant(gnp, prepend=False)
    assert_string_equal('const', result.columns[1])
    assert_equal(result.var(0)[1], 0)
def test_pandas_const_series_prepend():
    """prepend=True puts the zero-variance 'const' column first."""
    gnp = longley.load_pandas().exog['GNP']
    result = tools.add_constant(gnp, prepend=True)
    assert_string_equal('const', result.columns[0])
    assert_equal(result.var(0)[0], 0)
def test_pandas_const_df():
    """add_constant on a DataFrame appends 'const' as the last column."""
    frame = tools.add_constant(longley.load_pandas().exog, prepend=False)
    assert_string_equal('const', frame.columns[-1])
    assert_equal(frame.var(0)[-1], 0)
def test_pandas_const_df_prepend():
    """prepend=True inserts 'const' first (regression test for #1025)."""
    frame = longley.load_pandas().exog
    # Rescale one column before adding the constant, per gh-1025.
    frame['UNEMP'] /= frame['UNEMP'].std()
    result = tools.add_constant(frame, prepend=True)
    assert_string_equal('const', result.columns[0])
    assert_equal(result.var(0)[0], 0)
def test_chain_dot():
    """Pin the result of chaining three matrix products."""
    A = np.arange(1, 13).reshape(3, 4)
    B = np.arange(3, 15).reshape(4, 3)
    C = np.arange(5, 8).reshape(3, 1)
    expected = np.array([[1820], [4300], [6780]])
    assert_equal(tools.chain_dot(A, B, C), expected)
class TestNanDot(object):
    """Tests for tools.nan_dot on 2x2 matrices containing NaNs.

    The expected values encode the convention that a NaN multiplied by an
    exact zero contributes 0 to the product (see test_13/test_23/test_32),
    unlike plain np.dot which would propagate NaN.
    """
    @classmethod
    def setupClass(cls):
        # nose-style class setup: build the shared fixture matrices once.
        nan = np.nan
        cls.mx_1 = np.array([[nan, 1.], [2., 3.]])
        cls.mx_2 = np.array([[nan, nan], [2., 3.]])
        cls.mx_3 = np.array([[0., 0.], [0., 0.]])
        cls.mx_4 = np.array([[1., 0.], [1., 0.]])
        cls.mx_5 = np.array([[0., 1.], [0., 1.]])
        cls.mx_6 = np.array([[1., 2.], [3., 4.]])

    def test_11(self):
        # NaN in one operand propagates wherever it is multiplied by non-zero.
        test_res = tools.nan_dot(self.mx_1, self.mx_1)
        expected_res = np.array([[ np.nan, np.nan], [ np.nan, 11.]])
        assert_array_equal(test_res, expected_res)

    def test_12(self):
        nan = np.nan
        test_res = tools.nan_dot(self.mx_1, self.mx_2)
        expected_res = np.array([[ nan, nan], [ nan, nan]])
        assert_array_equal(test_res, expected_res)

    def test_13(self):
        # NaN times an all-zero matrix yields zeros, not NaNs.
        nan = np.nan
        test_res = tools.nan_dot(self.mx_1, self.mx_3)
        expected_res = np.array([[ 0., 0.], [ 0., 0.]])
        assert_array_equal(test_res, expected_res)

    def test_14(self):
        nan = np.nan
        test_res = tools.nan_dot(self.mx_1, self.mx_4)
        expected_res = np.array([[ nan, 0.], [ 5., 0.]])
        assert_array_equal(test_res, expected_res)

    def test_41(self):
        nan = np.nan
        test_res = tools.nan_dot(self.mx_4, self.mx_1)
        expected_res = np.array([[ nan, 1.], [ nan, 1.]])
        assert_array_equal(test_res, expected_res)

    def test_23(self):
        nan = np.nan
        test_res = tools.nan_dot(self.mx_2, self.mx_3)
        expected_res = np.array([[ 0., 0.], [ 0., 0.]])
        assert_array_equal(test_res, expected_res)

    def test_32(self):
        nan = np.nan
        test_res = tools.nan_dot(self.mx_3, self.mx_2)
        expected_res = np.array([[ 0., 0.], [ 0., 0.]])
        assert_array_equal(test_res, expected_res)

    def test_24(self):
        nan = np.nan
        test_res = tools.nan_dot(self.mx_2, self.mx_4)
        expected_res = np.array([[ nan, 0.], [ 5., 0.]])
        assert_array_equal(test_res, expected_res)

    def test_25(self):
        nan = np.nan
        test_res = tools.nan_dot(self.mx_2, self.mx_5)
        expected_res = np.array([[ 0., nan], [ 0., 5.]])
        assert_array_equal(test_res, expected_res)

    def test_66(self):
        # NaN-free inputs must reproduce the ordinary matrix product.
        nan = np.nan
        test_res = tools.nan_dot(self.mx_6, self.mx_6)
        expected_res = np.array([[ 7., 10.], [ 15., 22.]])
        assert_array_equal(test_res, expected_res)
| bsd-3-clause |
paweljasinski/ironpython3 | Src/StdLib/Lib/stat.py | 106 | 4400 | """Constants/functions for interpreting results of os.stat() and os.lstat().
Suggested usage: from stat import *
"""
# Indices for stat struct members in the tuple returned by os.stat()
ST_MODE = 0    # protection / file-type bits
ST_INO = 1     # inode number
ST_DEV = 2     # device
ST_NLINK = 3   # number of hard links
ST_UID = 4     # user id of owner
ST_GID = 5     # group id of owner
ST_SIZE = 6    # size in bytes
ST_ATIME = 7   # time of last access
ST_MTIME = 8   # time of last modification
ST_CTIME = 9   # metadata-change time (creation time on some platforms)
# Extract bits from the mode
def S_IMODE(mode):
    """Extract from *mode* the permission bits settable via os.chmod():
    rwx for user/group/other plus set-uid, set-gid and sticky.
    """
    return mode & 0o7777
def S_IFMT(mode):
    """Extract the file-type bits from *mode*."""
    return mode & 0o170000

# Constants used as S_IFMT() for various file types
# (not all are implemented on all systems)
S_IFDIR = 0o040000   # directory
S_IFCHR = 0o020000   # character device
S_IFBLK = 0o060000   # block device
S_IFREG = 0o100000   # regular file
S_IFIFO = 0o010000   # fifo (named pipe)
S_IFLNK = 0o120000   # symbolic link
S_IFSOCK = 0o140000  # socket file

# Predicates testing the file type encoded in a mode value.
def S_ISDIR(mode):
    """True if *mode* describes a directory."""
    return S_IFMT(mode) == S_IFDIR

def S_ISCHR(mode):
    """True if *mode* describes a character special device file."""
    return S_IFMT(mode) == S_IFCHR

def S_ISBLK(mode):
    """True if *mode* describes a block special device file."""
    return S_IFMT(mode) == S_IFBLK

def S_ISREG(mode):
    """True if *mode* describes a regular file."""
    return S_IFMT(mode) == S_IFREG

def S_ISFIFO(mode):
    """True if *mode* describes a FIFO (named pipe)."""
    return S_IFMT(mode) == S_IFIFO

def S_ISLNK(mode):
    """True if *mode* describes a symbolic link."""
    return S_IFMT(mode) == S_IFLNK

def S_ISSOCK(mode):
    """True if *mode* describes a socket."""
    return S_IFMT(mode) == S_IFSOCK
# Names for permission bits
S_ISUID = 0o4000 # set UID bit
S_ISGID = 0o2000 # set GID bit
S_ENFMT = S_ISGID # file locking enforcement
S_ISVTX = 0o1000 # sticky bit
S_IREAD = 0o0400 # Unix V7 synonym for S_IRUSR
S_IWRITE = 0o0200 # Unix V7 synonym for S_IWUSR
S_IEXEC = 0o0100 # Unix V7 synonym for S_IXUSR
S_IRWXU = 0o0700 # mask for owner permissions
S_IRUSR = 0o0400 # read by owner
S_IWUSR = 0o0200 # write by owner
S_IXUSR = 0o0100 # execute by owner
S_IRWXG = 0o0070 # mask for group permissions
S_IRGRP = 0o0040 # read by group
S_IWGRP = 0o0020 # write by group
S_IXGRP = 0o0010 # execute by group
S_IRWXO = 0o0007 # mask for others (not in group) permissions
S_IROTH = 0o0004 # read by others
S_IWOTH = 0o0002 # write by others
S_IXOTH = 0o0001 # execute by others
# Names for file flags
UF_NODUMP = 0x00000001 # do not dump file
UF_IMMUTABLE = 0x00000002 # file may not be changed
UF_APPEND = 0x00000004 # file may only be appended to
UF_OPAQUE = 0x00000008 # directory is opaque when viewed through a union stack
UF_NOUNLINK = 0x00000010 # file may not be renamed or deleted
UF_COMPRESSED = 0x00000020 # OS X: file is hfs-compressed
UF_HIDDEN = 0x00008000 # OS X: file should not be displayed
SF_ARCHIVED = 0x00010000 # file may be archived
SF_IMMUTABLE = 0x00020000 # file may not be changed
SF_APPEND = 0x00040000 # file may only be appended to
SF_NOUNLINK = 0x00100000 # file may not be renamed or deleted
SF_SNAPSHOT = 0x00200000 # file is a snapshot file
# One entry per column of the ls-style mode string.  Within a column the
# (mask, char) pairs are tried in order and the first full match wins;
# if none matches, filemode() emits "-" for that column.
_filemode_table = (
    # column 0: file type
    ((S_IFLNK, "l"),
     (S_IFREG, "-"),
     (S_IFBLK, "b"),
     (S_IFDIR, "d"),
     (S_IFCHR, "c"),
     (S_IFIFO, "p")),

    # columns 1-3: owner permissions ("s"/"S" encode the set-uid bit)
    ((S_IRUSR, "r"),),
    ((S_IWUSR, "w"),),
    ((S_IXUSR|S_ISUID, "s"),
     (S_ISUID, "S"),
     (S_IXUSR, "x")),

    # columns 4-6: group permissions ("s"/"S" encode the set-gid bit)
    ((S_IRGRP, "r"),),
    ((S_IWGRP, "w"),),
    ((S_IXGRP|S_ISGID, "s"),
     (S_ISGID, "S"),
     (S_IXGRP, "x")),

    # columns 7-9: other permissions ("t"/"T" encode the sticky bit)
    ((S_IROTH, "r"),),
    ((S_IWOTH, "w"),),
    ((S_IXOTH|S_ISVTX, "t"),
     (S_ISVTX, "T"),
     (S_IXOTH, "x"))
)
def filemode(mode):
    """Convert a file's mode to a string of the form '-rwxrwxrwx'."""
    def pick(options):
        # Return the char of the first (mask, char) pair whose mask is
        # fully set in `mode`, or "-" if no pair matches.
        for bit, char in options:
            if mode & bit == bit:
                return char
        return "-"
    return "".join(pick(column) for column in _filemode_table)
# If available, use C implementation
try:
from _stat import *
except ImportError:
pass
| apache-2.0 |
sigmavirus24/requests-oauthlib | requests_oauthlib/oauth2_auth.py | 45 | 1505 | from __future__ import unicode_literals
from oauthlib.oauth2 import WebApplicationClient, InsecureTransportError
from oauthlib.oauth2 import is_secure_transport
class OAuth2(object):
    """Adds proof of authorization (OAuth2 token) to the request."""

    def __init__(self, client_id=None, client=None, token=None):
        """Construct a new OAuth 2 authorization object.

        :param client_id: Client id obtained during registration
        :param client: :class:`oauthlib.oauth2.Client` to be used. Default is
                       WebApplicationClient which is useful for any
                       hosted application but not mobile or desktop.
        :param token: Token dictionary, must include access_token
                      and token_type.
        """
        self._client = client or WebApplicationClient(client_id, token=token)
        # Mirror every token field onto the client object.
        if token:
            for name, value in token.items():
                setattr(self._client, name, value)

    def __call__(self, r):
        """Append an OAuth 2 token to the request.

        Note that currently HTTPS is required for all requests. There may be
        a token type that allows for plain HTTP in the future and then this
        should be updated to allow plain HTTP on a white list basis.
        """
        if not is_secure_transport(r.url):
            raise InsecureTransportError()
        url, headers, body = self._client.add_token(
            r.url, http_method=r.method, body=r.body, headers=r.headers)
        r.url, r.headers, r.body = url, headers, body
        return r
| isc |
jfortier/guessit | guessit/transfo/guess_episodes_rexps.py | 10 | 4405 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2013 Nicolas Wack <wackou@gmail.com>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function, unicode_literals
from guessit.plugins.transformers import Transformer
from guessit.matcher import GuessFinder
from guessit.patterns import sep
from guessit.containers import PropertiesContainer, WeakValidator, NoValidator
from guessit.patterns.numeral import numeral, digital_numeral, parse_numeral
from re import split as re_split
class GuessEpisodesRexps(Transformer):
    """Transformer that guesses season/episode numbers via regular
    expressions registered on a PropertiesContainer."""
    def __init__(self):
        Transformer.__init__(self, 20)
        self.container = PropertiesContainer(enhance=False, canonical_from_pattern=False)

        def episode_parser(value):
            # Split on letters ("e01e02" -> ['01', '02']), then expand any
            # dash range ("01-03" -> 1, 2, 3) into explicit episode numbers.
            values = re_split('[a-zA-Z]', value)
            values = [x for x in values if x]
            ret = []
            for letters_elt in values:
                dashed_values = letters_elt.split('-')
                dashed_values = [x for x in dashed_values if x]
                if len(dashed_values) > 1:
                    # NOTE(review): with more than two dash-separated parts
                    # this re-expands [0]..[1] repeatedly -- confirm intended.
                    for _ in range(0, len(dashed_values) - 1):
                        start_dash_ep = parse_numeral(dashed_values[0])
                        end_dash_ep = parse_numeral(dashed_values[1])
                        for dash_ep in range(start_dash_ep, end_dash_ep + 1):
                            ret.append(dash_ep)
                else:
                    ret.append(parse_numeral(letters_elt))
            if len(ret) > 1:
                return {None: ret[0], 'episodeList': ret}  # TODO: Should support seasonList also
            elif len(ret) > 0:
                return ret[0]
            else:
                return None

        # Pattern registrations, highest confidence first.
        self.container.register_property(None, r'((?:season|saison)' + sep + '?(?P<season>' + numeral + '))', confidence=1.0, formatter=parse_numeral)
        self.container.register_property(None, r'(s(?P<season>' + digital_numeral + ')[^0-9]?' + sep + '?(?P<episodeNumber>(?:e' + digital_numeral + '(?:' + sep + '?[e-]' + digital_numeral + ')*)))[^0-9]', confidence=1.0, formatter={None: parse_numeral, 'episodeNumber': episode_parser}, validator=NoValidator())
        self.container.register_property(None, r'[^0-9]((?P<season>' + digital_numeral + ')[^0-9 .-]?-?(?P<episodeNumber>(?:x' + digital_numeral + '(?:' + sep + '?[x-]' + digital_numeral + ')*)))[^0-9]', confidence=1.0, formatter={None: parse_numeral, 'episodeNumber': episode_parser})
        self.container.register_property(None, r'(s(?P<season>' + digital_numeral + '))[^0-9]', confidence=0.6, formatter=parse_numeral, validator=NoValidator())
        self.container.register_property(None, r'((?P<episodeNumber>' + digital_numeral + ')v[23])', confidence=0.6, formatter=parse_numeral)
        self.container.register_property(None, r'((?:ep)' + sep + r'(?P<episodeNumber>' + numeral + '))[^0-9]', confidence=0.7, formatter=parse_numeral)
        self.container.register_property(None, r'(e(?P<episodeNumber>' + digital_numeral + '))', confidence=0.6, formatter=parse_numeral)
        self.container.register_canonical_properties('other', 'FiNAL', 'Complete', validator=WeakValidator())

    def supported_properties(self):
        # Properties this transformer can contribute to a guess.
        return ['episodeNumber', 'season']

    def guess_episodes_rexps(self, string, node=None, options=None):
        # Run all registered patterns against `string` and wrap the matches.
        found = self.container.find_properties(string, node)
        return self.container.as_guess(found, string)

    def should_process(self, mtree, options=None):
        # Only relevant when the file was already typed as an episode.
        return mtree.guess.get('type', '').startswith('episode')

    def process(self, mtree, options=None):
        GuessFinder(self.guess_episodes_rexps, None, self.log, options).process_nodes(mtree.unidentified_leaves())
| lgpl-3.0 |
Mimino666/tc-marathoner | setup.py | 1 | 2438 | import os
def fullsplit(path, result=None):
    '''Split a pathname into components (the opposite of os.path.join)
    in a platform-neutral way.
    '''
    parts = [] if result is None else result
    # Walk up the path, prepending each trailing component, until either
    # the head is empty (relative path) or stops changing (absolute root).
    while True:
        head, tail = os.path.split(path)
        if head == '':
            return [tail] + parts
        if head == path:
            return parts
        parts = [tail] + parts
        path = head
# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
packages, package_data = [], {}
root_dir = os.path.dirname(__file__)
if root_dir != '':
    os.chdir(root_dir)

for dirpath, dirnames, filenames in os.walk('marathoner'):
    # Ignore PEP 3147 cache dirs and those whose names start with '.'
    dirnames[:] = [d for d in dirnames if not d.startswith('.') and d != '__pycache__']
    parts = fullsplit(dirpath)
    package_name = '.'.join(parts)
    if '__init__.py' in filenames:
        packages.append(package_name)
    elif filenames:
        # Non-package directory: attach its files as package data of the
        # nearest ancestor that is a package.
        relative_path = []
        while '.'.join(parts) not in packages:
            relative_path.append(parts.pop())
        relative_path.reverse()
        path = os.path.join(*relative_path)
        package_files = package_data.setdefault('.'.join(parts), [])
        package_files.extend([os.path.join(path, f) for f in filenames])

scripts = ['bin/marathoner']
if os.name == 'nt':
    # Windows also needs the .bat wrapper script.
    scripts.append('bin/marathoner.bat')

# Single-source the version from the package itself.
version = __import__('marathoner').__version__

# Prefer setuptools, fall back to plain distutils.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

with open('README.md') as f:
    readme = f.read()
with open('LICENSE') as f:
    license = f.read()

setup(
    name='marathoner',
    version=version,
    description='Testing tool for TopCoder marathon matches.',
    long_description=readme,
    author='Michal Mimino Danilak',
    author_email='michal.danilak@gmail.com',
    url='https://github.com/Mimino666/tc-marathoner',
    packages=packages,
    package_data=package_data,
    scripts=scripts,
    install_requires=[
        'six',
    ],
    license=license,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Operating System :: OS Independent',
        'Intended Audience :: Developers',
        'Environment :: Console',
        'Topic :: Scientific/Engineering',
    ],
)
| mit |
EliotBerriot/django | django/contrib/gis/utils/layermapping.py | 335 | 27300 | # LayerMapping -- A Django Model/OGR Layer Mapping Utility
"""
The LayerMapping class provides a way to map the contents of OGR
vector files (e.g. SHP files) to Geographic-enabled Django models.
For more information, please consult the GeoDjango documentation:
https://docs.djangoproject.com/en/dev/ref/contrib/gis/layermapping/
"""
import sys
from decimal import Decimal, InvalidOperation as DecimalInvalidOperation
from django.contrib.gis.db.models import GeometryField
from django.contrib.gis.gdal import (
CoordTransform, DataSource, GDALException, OGRGeometry, OGRGeomType,
SpatialReference,
)
from django.contrib.gis.gdal.field import (
OFTDate, OFTDateTime, OFTInteger, OFTReal, OFTString, OFTTime,
)
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import connections, models, router, transaction
from django.utils import six
from django.utils.encoding import force_text
# LayerMapping exceptions.
class LayerMapError(Exception):
    """Base exception for all LayerMapping errors."""
    pass

class InvalidString(LayerMapError):
    """Raised when an OGR string value exceeds the model field's max_length."""
    pass

class InvalidDecimal(LayerMapError):
    """Raised when an OGR real value cannot be stored in a DecimalField."""
    pass

class InvalidInteger(LayerMapError):
    """Raised when an OGR value cannot be converted to an integer."""
    pass

class MissingForeignKey(LayerMapError):
    """Raised when no related model instance matches a ForeignKey mapping."""
    pass
class LayerMapping(object):
    "A class that maps OGR Layers to GeoDjango Models."

    # Acceptable 'base' types for a multi-geometry type, keyed by the
    # OGR geometry type number of the corresponding single geometry.
    MULTI_TYPES = {1: OGRGeomType('MultiPoint'),
                   2: OGRGeomType('MultiLineString'),
                   3: OGRGeomType('MultiPolygon'),
                   OGRGeomType('Point25D').num: OGRGeomType('MultiPoint25D'),
                   OGRGeomType('LineString25D').num: OGRGeomType('MultiLineString25D'),
                   OGRGeomType('Polygon25D').num: OGRGeomType('MultiPolygon25D'),
                   }

    # Acceptable Django field types and corresponding acceptable OGR
    # counterparts.
    FIELD_TYPES = {
        models.AutoField: OFTInteger,
        models.IntegerField: (OFTInteger, OFTReal, OFTString),
        models.FloatField: (OFTInteger, OFTReal),
        models.DateField: OFTDate,
        models.DateTimeField: OFTDateTime,
        models.EmailField: OFTString,
        models.TimeField: OFTTime,
        models.DecimalField: (OFTInteger, OFTReal),
        models.CharField: OFTString,
        models.SlugField: OFTString,
        models.TextField: OFTString,
        models.URLField: OFTString,
        models.BigIntegerField: (OFTInteger, OFTReal, OFTString),
        models.SmallIntegerField: (OFTInteger, OFTReal, OFTString),
        models.PositiveSmallIntegerField: (OFTInteger, OFTReal, OFTString),
    }

    def __init__(self, model, data, mapping, layer=0,
                 source_srs=None, encoding='utf-8',
                 transaction_mode='commit_on_success',
                 transform=True, unique=None, using=None):
        """
        A LayerMapping object is initialized using the given Model (not an instance),
        a DataSource (or string path to an OGR-supported data file), and a mapping
        dictionary. See the module level docstring for more details and keyword
        argument usage.
        """
        # Getting the DataSource and the associated Layer.
        if isinstance(data, six.string_types):
            self.ds = DataSource(data, encoding=encoding)
        else:
            self.ds = data
        self.layer = self.ds[layer]

        self.using = using if using is not None else router.db_for_write(model)
        self.spatial_backend = connections[self.using].ops

        # Setting the mapping & model attributes.
        self.mapping = mapping
        self.model = model

        # Checking the layer -- initialization of the object will fail if
        # things don't check out before hand.
        self.check_layer()

        # Getting the geometry column associated with the model (an
        # exception will be raised if there is no geometry column).
        if connections[self.using].features.supports_transform:
            self.geo_field = self.geometry_field()
        else:
            # Backend cannot transform geometries, so disable transforms.
            transform = False

        # Checking the source spatial reference system, and getting
        # the coordinate transformation object (unless the `transform`
        # keyword is set to False)
        if transform:
            self.source_srs = self.check_srs(source_srs)
            self.transform = self.coord_transform()
        else:
            self.transform = transform

        # Setting the encoding for OFTString fields, if specified.
        if encoding:
            # Making sure the encoding exists, if not a LookupError
            # exception will be thrown.
            from codecs import lookup
            lookup(encoding)
            self.encoding = encoding
        else:
            self.encoding = None

        if unique:
            self.check_unique(unique)
            transaction_mode = 'autocommit'  # Has to be set to autocommit.
            self.unique = unique
        else:
            self.unique = None

        # Setting the transaction decorator with the function in the
        # transaction modes dictionary.
        self.transaction_mode = transaction_mode
        if transaction_mode == 'autocommit':
            self.transaction_decorator = None
        elif transaction_mode == 'commit_on_success':
            self.transaction_decorator = transaction.atomic
        else:
            raise LayerMapError('Unrecognized transaction mode: %s' % transaction_mode)
# #### Checking routines used during initialization ####
def check_fid_range(self, fid_range):
    """Validate the `fid_range` keyword: a tuple/list is converted to a
    slice, a slice passes through, falsy values yield None, anything
    else raises TypeError."""
    if not fid_range:
        return None
    if isinstance(fid_range, slice):
        return fid_range
    if isinstance(fid_range, (tuple, list)):
        return slice(*fid_range)
    raise TypeError
def check_layer(self):
    """
    This checks the Layer metadata, and ensures that it is compatible
    with the mapping information and model. Unlike previous revisions,
    there is no need to increment through each feature in the Layer.
    """
    # The geometry field of the model is set here.
    # TODO: Support more than one geometry field / model. However, this
    # depends on the GDAL Driver in use.
    # `geom_field` stays False until a GeometryField mapping is found.
    self.geom_field = False
    self.fields = {}

    # Getting lists of the field names and the field types available in
    # the OGR Layer.
    ogr_fields = self.layer.fields
    ogr_field_types = self.layer.field_types

    # Function for determining if the OGR mapping field is in the Layer.
    def check_ogr_fld(ogr_map_fld):
        try:
            idx = ogr_fields.index(ogr_map_fld)
        except ValueError:
            raise LayerMapError('Given mapping OGR field "%s" not found in OGR Layer.' % ogr_map_fld)
        return idx

    # No need to increment through each feature in the model, simply check
    # the Layer metadata against what was given in the mapping dictionary.
    for field_name, ogr_name in self.mapping.items():
        # Ensuring that a corresponding field exists in the model
        # for the given field name in the mapping.
        try:
            model_field = self.model._meta.get_field(field_name)
        except FieldDoesNotExist:
            raise LayerMapError('Given mapping field "%s" not in given Model fields.' % field_name)

        # Getting the string name for the Django field class (e.g., 'PointField').
        fld_name = model_field.__class__.__name__

        if isinstance(model_field, GeometryField):
            if self.geom_field:
                raise LayerMapError('LayerMapping does not support more than one GeometryField per model.')

            # Getting the coordinate dimension of the geometry field.
            coord_dim = model_field.dim

            try:
                if coord_dim == 3:
                    gtype = OGRGeomType(ogr_name + '25D')
                else:
                    gtype = OGRGeomType(ogr_name)
            except GDALException:
                raise LayerMapError('Invalid mapping for GeometryField "%s".' % field_name)

            # Making sure that the OGR Layer's Geometry is compatible.
            ltype = self.layer.geom_type
            if not (ltype.name.startswith(gtype.name) or self.make_multi(ltype, model_field)):
                raise LayerMapError('Invalid mapping geometry; model has %s%s, '
                                    'layer geometry type is %s.' %
                                    (fld_name, '(dim=3)' if coord_dim == 3 else '', ltype))

            # Setting the `geom_field` attribute w/the name of the model field
            # that is a Geometry. Also setting the coordinate dimension
            # attribute.
            self.geom_field = field_name
            self.coord_dim = coord_dim
            fields_val = model_field
        elif isinstance(model_field, models.ForeignKey):
            if isinstance(ogr_name, dict):
                # Is every given related model mapping field in the Layer?
                rel_model = model_field.remote_field.model
                for rel_name, ogr_field in ogr_name.items():
                    idx = check_ogr_fld(ogr_field)
                    try:
                        rel_model._meta.get_field(rel_name)
                    except FieldDoesNotExist:
                        raise LayerMapError('ForeignKey mapping field "%s" not in %s fields.' %
                                            (rel_name, rel_model.__class__.__name__))
                fields_val = rel_model
            else:
                raise TypeError('ForeignKey mapping must be of dictionary type.')
        else:
            # Is the model field type supported by LayerMapping?
            if model_field.__class__ not in self.FIELD_TYPES:
                raise LayerMapError('Django field type "%s" has no OGR mapping (yet).' % fld_name)

            # Is the OGR field in the Layer?
            idx = check_ogr_fld(ogr_name)
            ogr_field = ogr_field_types[idx]

            # Can the OGR field type be mapped to the Django field type?
            if not issubclass(ogr_field, self.FIELD_TYPES[model_field.__class__]):
                raise LayerMapError('OGR field "%s" (of type %s) cannot be mapped to Django %s.' %
                                    (ogr_field, ogr_field.__name__, fld_name))
            fields_val = model_field

        self.fields[field_name] = fields_val

def check_srs(self, source_srs):
    "Checks the compatibility of the given spatial reference object."
    # Accept a SpatialReference, a spatial_ref_sys model instance, an
    # SRID integer / WKT string, or fall back to the layer's own SRS.
    if isinstance(source_srs, SpatialReference):
        sr = source_srs
    elif isinstance(source_srs, self.spatial_backend.spatial_ref_sys()):
        sr = source_srs.srs
    elif isinstance(source_srs, (int, six.string_types)):
        sr = SpatialReference(source_srs)
    else:
        # Otherwise just pulling the SpatialReference from the layer
        sr = self.layer.srs
    if not sr:
        raise LayerMapError('No source reference system defined.')
    else:
        return sr

def check_unique(self, unique):
    "Checks the `unique` keyword parameter -- may be a sequence or string."
    # NOTE(review): raises a bare ValueError (no message) when a unique
    # field is missing from the mapping.
    if isinstance(unique, (list, tuple)):
        # List of fields to determine uniqueness with
        for attr in unique:
            if attr not in self.mapping:
                raise ValueError
    elif isinstance(unique, six.string_types):
        # Only a single field passed in.
        if unique not in self.mapping:
            raise ValueError
    else:
        raise TypeError('Unique keyword argument must be set with a tuple, list, or string.')
# Keyword argument retrieval routines ####
def feature_kwargs(self, feat):
    """
    Given an OGR Feature, this will return a dictionary of keyword arguments
    for constructing the mapped model.
    """
    # The keyword arguments for model construction.
    kwargs = {}

    # Incrementing through each model field and OGR field in the
    # dictionary mapping.
    for field_name, ogr_name in self.mapping.items():
        # `self.fields` was populated by check_layer(): it holds either the
        # model field or, for ForeignKeys, the related model class.
        model_field = self.fields[field_name]

        if isinstance(model_field, GeometryField):
            # Verify OGR geometry.
            try:
                val = self.verify_geom(feat.geom, model_field)
            except GDALException:
                raise LayerMapError('Could not retrieve geometry from feature.')
        elif isinstance(model_field, models.base.ModelBase):
            # The related _model_, not a field was passed in -- indicating
            # another mapping for the related Model.
            val = self.verify_fk(feat, model_field, ogr_name)
        else:
            # Otherwise, verify OGR Field type.
            val = self.verify_ogr_field(feat[ogr_name], model_field)

        # Setting the keyword arguments for the field name with the
        # value obtained above.
        kwargs[field_name] = val

    return kwargs
def unique_kwargs(self, kwargs):
    """Return the subset of `kwargs` (from `feature_kwargs`) that is used
    for uniqueness lookups; `self.unique` may be a single field name or a
    sequence of names."""
    if isinstance(self.unique, six.string_types):
        return {self.unique: kwargs[self.unique]}
    return {name: kwargs[name] for name in self.unique}
# #### Verification routines used in constructing model keyword arguments. ####
def verify_ogr_field(self, ogr_field, model_field):
"""
Verifies if the OGR Field contents are acceptable to the Django
model field. If they are, the verified value is returned,
otherwise the proper exception is raised.
"""
if (isinstance(ogr_field, OFTString) and
isinstance(model_field, (models.CharField, models.TextField))):
if self.encoding:
# The encoding for OGR data sources may be specified here
# (e.g., 'cp437' for Census Bureau boundary files).
val = force_text(ogr_field.value, self.encoding)
else:
val = ogr_field.value
if model_field.max_length and len(val) > model_field.max_length:
raise InvalidString('%s model field maximum string length is %s, given %s characters.' %
(model_field.name, model_field.max_length, len(val)))
elif isinstance(ogr_field, OFTReal) and isinstance(model_field, models.DecimalField):
try:
# Creating an instance of the Decimal value to use.
d = Decimal(str(ogr_field.value))
except DecimalInvalidOperation:
raise InvalidDecimal('Could not construct decimal from: %s' % ogr_field.value)
# Getting the decimal value as a tuple.
dtup = d.as_tuple()
digits = dtup[1]
d_idx = dtup[2] # index where the decimal is
# Maximum amount of precision, or digits to the left of the decimal.
max_prec = model_field.max_digits - model_field.decimal_places
# Getting the digits to the left of the decimal place for the
# given decimal.
if d_idx < 0:
n_prec = len(digits[:d_idx])
else:
n_prec = len(digits) + d_idx
# If we have more than the maximum digits allowed, then throw an
# InvalidDecimal exception.
if n_prec > max_prec:
raise InvalidDecimal(
'A DecimalField with max_digits %d, decimal_places %d must '
'round to an absolute value less than 10^%d.' %
(model_field.max_digits, model_field.decimal_places, max_prec)
)
val = d
elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(model_field, models.IntegerField):
# Attempt to convert any OFTReal and OFTString value to an OFTInteger.
try:
val = int(ogr_field.value)
except ValueError:
raise InvalidInteger('Could not construct integer from: %s' % ogr_field.value)
else:
val = ogr_field.value
return val
def verify_fk(self, feat, rel_model, rel_mapping):
    """
    Given an OGR Feature, the related model and its dictionary mapping,
    retrieve and return the related model instance referenced by the
    ForeignKey mapping.
    """
    # TODO: It is expensive to retrieve a model for every record --
    # explore if an efficient mechanism exists for caching related
    # ForeignKey models.

    # Build the lookup keyword arguments, validating every OGR value
    # against the corresponding field on the related model.
    fk_kwargs = {
        field_name: self.verify_ogr_field(feat[ogr_name], rel_model._meta.get_field(field_name))
        for field_name, ogr_name in rel_mapping.items()
    }

    # Fetch the related model instance; a missing row is a mapping error.
    try:
        return rel_model.objects.using(self.using).get(**fk_kwargs)
    except ObjectDoesNotExist:
        raise MissingForeignKey(
            'No ForeignKey %s model found with keyword arguments: %s' %
            (rel_model.__name__, fk_kwargs)
        )
def verify_geom(self, geom, model_field):
    """
    Verifies the geometry -- will construct and return a GeometryCollection
    if necessary (for example if the model field is MultiPolygonField while
    the mapped shapefile only contains Polygons).
    """
    # Force the geometry to the mapper's coordinate dimension (e.g.,
    # collapse a 3D geometry down to 2D) when they disagree.
    if geom.coord_dim != self.coord_dim:
        geom.coord_dim = self.coord_dim

    verified = geom
    if self.make_multi(geom.geom_type, model_field):
        # The model field expects a multi-geometry: wrap the single
        # geometry inside its corresponding multi-geometry container.
        verified = OGRGeometry(self.MULTI_TYPES[geom.geom_type.num])
        verified.add(geom)

    # Apply the coordinate transformation, but only if the class
    # variable `transform` is set w/a CoordTransform object.
    if self.transform:
        verified.transform(self.transform)

    # Returning the WKT of the geometry.
    return verified.wkt
# #### Other model methods ####
def coord_transform(self):
    "Returns the coordinate transformation object."
    SpatialRefSys = self.spatial_backend.spatial_ref_sys()
    try:
        # Look up the target spatial reference system for the geometry
        # field, then build a transformation from the source SRS to it.
        target = SpatialRefSys.objects.using(self.using).get(srid=self.geo_field.srid)
        return CoordTransform(self.source_srs, target.srs)
    except Exception as msg:
        # Re-raise as a LayerMapError while preserving the original traceback.
        new_msg = 'Could not translate between the data source and model geometry: %s' % msg
        six.reraise(LayerMapError, LayerMapError(new_msg), sys.exc_info()[2])
def geometry_field(self):
    "Returns the GeometryField instance associated with the geographic column."
    # `get_field()` on the model's options resolves the correct field
    # instance even in the presence of model inheritance.
    return self.model._meta.get_field(self.geom_field)
def make_multi(self, geom_type, model_field):
    """
    Given the OGRGeomType for a geometry and its associated GeometryField,
    determine whether the geometry should be turned into a GeometryCollection.
    """
    # Promote only when the geometry type has a multi counterpart and the
    # model field is exactly that counterpart (e.g., Polygon data mapped
    # onto a MultiPolygonField).
    if geom_type.num not in self.MULTI_TYPES:
        return False
    return model_field.__class__.__name__ == 'Multi%s' % geom_type.django
def save(self, verbose=False, fid_range=False, step=False,
         progress=False, silent=False, stream=sys.stdout, strict=False):
    """
    Saves the contents from the OGR DataSource Layer into the database
    according to the mapping dictionary given at initialization.

    Keyword Parameters:
     verbose:
       If set, information will be printed subsequent to each model save
       executed on the database.

     fid_range:
       May be set with a slice or tuple of (begin, end) feature ID's to map
       from the data source. In other words, this keyword enables the user
       to selectively import a subset range of features in the geographic
       data source.

     step:
       If set with an integer, transactions will occur at every step
       interval. For example, if step=1000, a commit would occur after
       the 1,000th feature, the 2,000th feature etc.

     progress:
       When this keyword is set, status information will be printed giving
       the number of features processed and successfully saved. By default,
       progress information will be printed every 1000 features processed,
       however, this default may be overridden by setting this keyword with an
       integer for the desired interval.

     stream:
       Status information will be written to this file handle. Defaults to
       using `sys.stdout`, but any object with a `write` method is supported.

     silent:
       By default, non-fatal error notifications are printed to stdout, but
       this keyword may be set to disable these notifications.

     strict:
       Execution of the model mapping will cease upon the first error
       encountered. The default behavior is to attempt to continue.
    """
    # Getting the default Feature ID range.
    default_range = self.check_fid_range(fid_range)

    # Setting the progress interval, if requested.  `progress is True`
    # (or any non-int truthy value) selects the default of 1000 features.
    if progress:
        if progress is True or not isinstance(progress, int):
            progress_interval = 1000
        else:
            progress_interval = progress

    # Worker that maps and saves one slice of features.  `num_feat` and
    # `num_saved` come in as running totals so incremental (stepped)
    # saving can thread the counts through successive calls.
    def _save(feat_range=default_range, num_feat=0, num_saved=0):
        if feat_range:
            layer_iter = self.layer[feat_range]
        else:
            layer_iter = self.layer

        for feat in layer_iter:
            num_feat += 1
            # Getting the keyword arguments
            try:
                kwargs = self.feature_kwargs(feat)
            except LayerMapError as msg:
                # Something borked the validation
                if strict:
                    raise
                elif not silent:
                    stream.write('Ignoring Feature ID %s because: %s\n' % (feat.fid, msg))
            else:
                # Constructing the model using the keyword args
                is_update = False
                if self.unique:
                    # If we want unique models on a particular field, handle the
                    # geometry appropriately.
                    try:
                        # Getting the keyword arguments and retrieving
                        # the unique model.
                        u_kwargs = self.unique_kwargs(kwargs)
                        m = self.model.objects.using(self.using).get(**u_kwargs)
                        is_update = True

                        # Getting the geometry (in OGR form), creating
                        # one from the kwargs WKT, adding in additional
                        # geometries, and update the attribute with the
                        # just-updated geometry WKT.
                        geom = getattr(m, self.geom_field).ogr
                        new = OGRGeometry(kwargs[self.geom_field])
                        for g in new:
                            geom.add(g)
                        setattr(m, self.geom_field, geom.wkt)
                    except ObjectDoesNotExist:
                        # No unique model exists yet, create.
                        m = self.model(**kwargs)
                else:
                    m = self.model(**kwargs)

                try:
                    # Attempting to save.
                    m.save(using=self.using)
                    num_saved += 1
                    if verbose:
                        stream.write('%s: %s\n' % ('Updated' if is_update else 'Saved', m))
                except Exception as msg:
                    if strict:
                        # Bailing out if the `strict` keyword is set.
                        if not silent:
                            stream.write(
                                'Failed to save the feature (id: %s) into the '
                                'model with the keyword arguments:\n' % feat.fid
                            )
                            stream.write('%s\n' % kwargs)
                        raise
                    elif not silent:
                        stream.write('Failed to save %s:\n %s\nContinuing\n' % (kwargs, msg))

            # Printing progress information, if requested.
            if progress and num_feat % progress_interval == 0:
                stream.write('Processed %d features, saved %d ...\n' % (num_feat, num_saved))

        # Only used for status output purposes -- incremental saving uses the
        # values returned here.
        return num_saved, num_feat

    # Wrap the worker in the configured transaction decorator (if any) so
    # each _save() call commits/rolls back as a unit.
    if self.transaction_decorator is not None:
        _save = self.transaction_decorator(_save)

    nfeat = self.layer.num_feat
    if step and isinstance(step, int) and step < nfeat:
        # Incremental saving is requested at the given interval (step)
        if default_range:
            raise LayerMapError('The `step` keyword may not be used in conjunction with the `fid_range` keyword.')
        beg, num_feat, num_saved = (0, 0, 0)
        indices = range(step, nfeat, step)
        n_i = len(indices)

        for i, end in enumerate(indices):
            # Constructing the slice to use for this step; the last slice is
            # special (e.g, [100:] instead of [90:100]).
            if i + 1 == n_i:
                step_slice = slice(beg, None)
            else:
                step_slice = slice(beg, end)
            try:
                num_feat, num_saved = _save(step_slice, num_feat, num_saved)
                beg = end
            except:  # Deliberately catch everything
                stream.write('%s\nFailed to save slice: %s\n' % ('=-' * 20, step_slice))
                raise
    else:
        # Otherwise, just calling the previously defined _save() function.
        _save()
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.